| code | apis | extract_api |
|---|---|---|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_triangles():
    m = ((50-10*(5**(1/2)))**(1/2))/10
    n = ((50+10*(5**(1/2)))**(1/2))/10
    # print(m, n)
    viewpoints = [[m, 0, n], [-m, 0, n], [m, 0, -n], [-m, 0, -n],
                  [0, n, m], [0, n, -m], [0, -n, m], [0, -n, -m],
                  [n, m, 0], [n, -m, 0], [-n, m, 0], [-n, -m, 0]]
    viewpoints = np.asarray(viewpoints)
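    # Added sanity check (not in the original): m**2 + n**2 equals
    # (50 - 10*sqrt(5))/100 + (50 + 10*sqrt(5))/100 = 1, so all 12
    # icosahedron vertices lie on the unit sphere.
    assert np.allclose(np.linalg.norm(viewpoints, axis=1), 1.0)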
    indices = []
    triangle_indices = set()
    for i in range(len(viewpoints)):
        for j in range(i+1, len(viewpoints)):
            print(np.linalg.norm(viewpoints[i]-viewpoints[j]))
            if round(np.linalg.norm(viewpoints[i]-viewpoints[j]), 1) == 1.1:
                # print(i, j, np.linalg.norm(viewpoints[i]-viewpoints[j]))
                indices.append([i, j])
    print(len(indices))  # 30 edges
    for i in range(len(indices)):
        for j in range(i+1, len(indices)):
            set1 = set(indices[i])
            set2 = set(indices[j])
            if set1 & set2:  # the two edges share a vertex
                ssd = set1 ^ set2  # "symmetric set difference"
                # sorted() because set iteration order is not guaranteed,
                # while edges are stored as [i, j] with i < j
                if sorted(ssd) in indices:
                    print(set1 | set2 | ssd)  # the union, i.e. the triangle's three vertices
                    triangle_indices.add(tuple(sorted(set1 | set2 | ssd)))
    triangles = []  # 20 faces in total
    for t_i in triangle_indices:
        # NOTE: appends to the module-level total_points list defined below
        total_points.append(viewpoints[t_i[0]])
        total_points.append(viewpoints[t_i[1]])
        total_points.append(viewpoints[t_i[2]])
        # print(viewpoints[t_i[0]], viewpoints[t_i[1]], viewpoints[t_i[2]])
        triangles.append(viewpoints[np.array(t_i)])
    return triangles
def sample_points(data, accum=2):
    global total_data, total_points
    new_data = []
    for triangle in data:
        # triangle holds the coordinates of the triangle's three vertices;
        # compute the midpoint of each of the three edges
        center_point1 = np.array((triangle[0]+triangle[1])/2)
        center_point2 = np.array((triangle[0]+triangle[2])/2)
        center_point3 = np.array((triangle[1]+triangle[2])/2)
        # project the midpoints back onto the unit sphere
        center_point1 = center_point1 / np.linalg.norm(center_point1)
        center_point2 = center_point2 / np.linalg.norm(center_point2)
        center_point3 = center_point3 / np.linalg.norm(center_point3)
        total_points.append(center_point1)
        total_points.append(center_point2)
        total_points.append(center_point3)
        # total_points += list(triangle)
        new_data.append([triangle[0], center_point1, center_point2])
        new_data.append([triangle[1], center_point1, center_point3])
        new_data.append([triangle[2], center_point2, center_point3])
        new_data.append([center_point1, center_point2, center_point3])
    print(new_data)
    total_data += new_data
    if accum == 0:
        return
    else:
        sample_points(new_data, accum-1)
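# (Added note) Each pass quarters every triangle, and the recursion runs with
# accum = 2, 1, 0, i.e. three subdivisions: the 20 icosahedron faces become
# 20 * 4**3 = 1280 triangles. Shared edge midpoints are appended once per
# adjacent face, so total_points contains duplicates.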
total_data = []
total_points = []
triangles = get_triangles()  # the 20 icosahedron faces
sample_points(triangles)
fig = plt.figure()
ax = Axes3D(fig=fig)
print(len(total_points))
total_points = np.asarray(total_points)
color = ['r'] * len(total_points)
ax.scatter(total_points[:, 0], total_points[:, 1], total_points[:, 2], color=color, s=1)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show() | [
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.asarray",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linalg.norm"
] | [((2917, 2929), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2927, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2950), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', ([], {'fig': 'fig'}), '(fig=fig)\n', (2941, 2950), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((3003, 3027), 'numpy.asarray', 'np.asarray', (['total_points'], {}), '(total_points)\n', (3013, 3027), True, 'import numpy as np\n'), ((3223, 3233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3231, 3233), True, 'import matplotlib.pyplot as plt\n'), ((427, 449), 'numpy.asarray', 'np.asarray', (['viewpoints'], {}), '(viewpoints)\n', (437, 449), True, 'import numpy as np\n'), ((1857, 1898), 'numpy.array', 'np.array', (['((triangle[0] + triangle[1]) / 2)'], {}), '((triangle[0] + triangle[1]) / 2)\n', (1865, 1898), True, 'import numpy as np\n'), ((1919, 1960), 'numpy.array', 'np.array', (['((triangle[0] + triangle[2]) / 2)'], {}), '((triangle[0] + triangle[2]) / 2)\n', (1927, 1960), True, 'import numpy as np\n'), ((1981, 2022), 'numpy.array', 'np.array', (['((triangle[1] + triangle[2]) / 2)'], {}), '((triangle[1] + triangle[2]) / 2)\n', (1989, 2022), True, 'import numpy as np\n'), ((2058, 2087), 'numpy.linalg.norm', 'np.linalg.norm', (['center_point1'], {}), '(center_point1)\n', (2072, 2087), True, 'import numpy as np\n'), ((2128, 2157), 'numpy.linalg.norm', 'np.linalg.norm', (['center_point2'], {}), '(center_point2)\n', (2142, 2157), True, 'import numpy as np\n'), ((2198, 2227), 'numpy.linalg.norm', 'np.linalg.norm', (['center_point3'], {}), '(center_point3)\n', (2212, 2227), True, 'import numpy as np\n'), ((599, 644), 'numpy.linalg.norm', 'np.linalg.norm', (['(viewpoints[i] - viewpoints[j])'], {}), '(viewpoints[i] - viewpoints[j])\n', (613, 644), True, 'import numpy as np\n'), ((1626, 1639), 'numpy.array', 'np.array', (['t_i'], {}), '(t_i)\n', (1634, 1639), True, 'import numpy as np\n'), ((665, 710), 'numpy.linalg.norm', 'np.linalg.norm', (['(viewpoints[i] - viewpoints[j])'], {}), '(viewpoints[i] - viewpoints[j])\n', (679, 710), True, 'import numpy as np\n')] |
from typing import Sequence, Any
import pandas as pd
import numpy as np
from skimage.exposure import equalize_hist
from skimage.metrics import structural_similarity as ssim
from tqdm import tqdm
import ngram_heatmap
from plot_data import getSarcasmDf
from utilitys import widgets
def bigramSimilarity(condList: Sequence[str], sarcasmDf=None):
    if sarcasmDf is None:
        sarcasmDf = getSarcasmDf(context=True, returnBert=False)
    scores = pd.DataFrame(columns=condList, index=condList)
    imgs = {}
    allToks = np.unique(ngram_heatmap.tokenize(sarcasmDf['text'], 1))
    tokToIdxMapping = pd.Series(np.arange(len(allToks)), allToks)
    for cond in tqdm(condList, 'Getting bigram images'):
        df = sarcasmDf.query(cond)
        imgs[cond] = ngram_heatmap.makeBigramImage(
            df, 1, allToks=allToks, tokToIdxMapping=tokToIdxMapping)
    numConds = len(condList)
    ssimProgbar = tqdm(desc='Calculating ssim', total=int(((numConds+1)*numConds)/2))
    for ii, cond in enumerate(condList):
        for jj in range(ii+1):
            cmpCond = condList[jj]
            score = ssim(imgs[cond], imgs[cmpCond])
            score = np.round(score, 3)
            scores.at[cond, cmpCond] = score
            scores.at[cmpCond, cond] = score
            ssimProgbar.update()
    scores.to_csv('./data/bigram_sim_scores.csv')
    return scores
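# (Added note) The double loop above covers the lower triangle including the
# diagonal -- numConds*(numConds+1)/2 SSIM evaluations, matching the progress
# bar total -- and mirrors each score so the returned frame is symmetric.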
sar_ctxScores = bigramSimilarity(['context & sarcasm', 'context & (~sarcasm)',
'(~context) & sarcasm', '(~context) & (~sarcasm)'])
bp = 1 | [
"pandas.DataFrame",
"tqdm.tqdm",
"plot_data.getSarcasmDf",
"ngram_heatmap.tokenize",
"skimage.metrics.structural_similarity",
"ngram_heatmap.makeBigramImage",
"numpy.round"
] | [((442, 488), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'condList', 'index': 'condList'}), '(columns=condList, index=condList)\n', (454, 488), True, 'import pandas as pd\n'), ((647, 686), 'tqdm.tqdm', 'tqdm', (['condList', '"""Getting bigram images"""'], {}), "(condList, 'Getting bigram images')\n", (651, 686), False, 'from tqdm import tqdm\n'), ((386, 430), 'plot_data.getSarcasmDf', 'getSarcasmDf', ([], {'context': '(True)', 'returnBert': '(False)'}), '(context=True, returnBert=False)\n', (398, 430), False, 'from plot_data import getSarcasmDf\n'), ((523, 567), 'ngram_heatmap.tokenize', 'ngram_heatmap.tokenize', (["sarcasmDf['text']", '(1)'], {}), "(sarcasmDf['text'], 1)\n", (545, 567), False, 'import ngram_heatmap\n'), ((736, 827), 'ngram_heatmap.makeBigramImage', 'ngram_heatmap.makeBigramImage', (['df', '(1)'], {'allToks': 'allToks', 'tokToIdxMapping': 'tokToIdxMapping'}), '(df, 1, allToks=allToks, tokToIdxMapping=\n tokToIdxMapping)\n', (765, 827), False, 'import ngram_heatmap\n'), ((1050, 1081), 'skimage.metrics.structural_similarity', 'ssim', (['imgs[cond]', 'imgs[cmpCond]'], {}), '(imgs[cond], imgs[cmpCond])\n', (1054, 1081), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((1096, 1114), 'numpy.round', 'np.round', (['score', '(3)'], {}), '(score, 3)\n', (1104, 1114), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_allclose
import torch as th
from gluonnlp.torch import attention_cell as ac
def check_attention_cell_basic(attention_cell, q_channel, k_channel, v_channel, use_mask,
multi_head=False, num_heads=None, layout='NTK'):
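    # (Added note) The layout letters presumably mean N: batch, T: sequence
    # length, K: attention heads; the trailing dimension is always the
    # per-head channel size.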
    for query_length, mem_length in [(10, 5), (1, 5), (5, 1)]:
        for batch_size in [1, 3]:
            if use_mask:
                mask_nd = th.rand(batch_size, query_length, mem_length) > 0.3
            else:
                mask_nd = None
            if layout == 'NTK':
                query_nd = th.randn(batch_size, query_length, num_heads, q_channel)
                key_nd = th.randn(batch_size, mem_length, num_heads, k_channel)
                value_nd = th.randn(batch_size, mem_length, num_heads, v_channel)
            elif layout == 'NKT':
                query_nd = th.randn(batch_size, num_heads, query_length, q_channel)
                key_nd = th.randn(batch_size, num_heads, mem_length, k_channel)
                value_nd = th.randn(batch_size, num_heads, mem_length, v_channel)
            elif layout == 'TNK':
                query_nd = th.randn(query_length, batch_size, num_heads, q_channel)
                key_nd = th.randn(mem_length, batch_size, num_heads, k_channel)
                value_nd = th.randn(mem_length, batch_size, num_heads, v_channel)
            read_value, (scores, att_weights) = attention_cell(query_nd, key_nd, value_nd, mask_nd)
            att_weights_npy = att_weights.numpy()
            read_value_npy = read_value.numpy()
            value_npy = value_nd.numpy()
            if not multi_head:
                if use_mask:
                    assert_allclose(att_weights_npy.sum(axis=-1),
                                    th.sum(mask_nd, dim=-1).numpy() > 0, 1E-5, 1E-5)
                else:
                    assert_allclose(att_weights_npy.sum(axis=-1), np.ones(att_weights.shape[:-1]),
                                    1E-5, 1E-5)
                # Check the read value is correct
                for i in range(batch_size):
                    assert_allclose(read_value_npy[i], att_weights_npy[i].dot(value_npy[i]), 1E-5,
                                    1E-5)
                if use_mask:
                    # masked positions must carry exactly zero attention weight
                    assert_allclose(th.norm((~mask_nd) * att_weights).item(), 0)
            else:
                read_value_npy = read_value_npy.reshape((batch_size, query_length, num_heads, -1))
                if use_mask:
                    mask_npy = mask_nd.numpy()
                for j in range(num_heads):
                    if use_mask:
                        assert_allclose(att_weights_npy[:, j, :, :].sum(axis=-1),
                                        mask_npy.sum(axis=-1) > 0, 1E-5, 1E-5)
                    else:
                        assert_allclose(att_weights_npy[:, j, :, :].sum(axis=-1),
                                        np.ones((batch_size, query_length)), 1E-5, 1E-5)
                    if use_mask:
                        assert_allclose((1 - mask_npy) * att_weights_npy[:, j, :, :], 0)
def test_multihead_attention():
    for query_units, key_units, value_units, num_heads in [(4, 4, 8, 2), (3, 3, 9, 3),
                                                           (6, 6, 5, 1)]:
        for use_mask in [True, False]:
            for scaled in [True, False]:
                for normalized in [True, False]:
                    for layout in ['NKT', 'NTK', 'TNK']:
                        cell = ac.MultiHeadAttentionCell(query_units=query_units,
                                                         num_heads=num_heads, scaled=scaled,
                                                         normalized=normalized, layout=layout)
                        check_attention_cell_basic(cell, q_channel=query_units // num_heads,
                                                   k_channel=key_units // num_heads,
                                                   v_channel=value_units // num_heads,
                                                   use_mask=use_mask, multi_head=True,
                                                   num_heads=num_heads, layout=layout)
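
# (Added) Minimal direct runner; the original is presumably collected by pytest:
if __name__ == '__main__':
    test_multihead_attention()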
| [
"torch.norm",
"numpy.ones",
"torch.randn",
"gluonnlp.torch.attention_cell.MultiHeadAttentionCell",
"torch.rand",
"numpy.testing.assert_allclose",
"torch.sum"
] | [((608, 664), 'torch.randn', 'th.randn', (['batch_size', 'query_length', 'num_heads', 'q_channel'], {}), '(batch_size, query_length, num_heads, q_channel)\n', (616, 664), True, 'import torch as th\n'), ((690, 744), 'torch.randn', 'th.randn', (['batch_size', 'mem_length', 'num_heads', 'k_channel'], {}), '(batch_size, mem_length, num_heads, k_channel)\n', (698, 744), True, 'import torch as th\n'), ((772, 826), 'torch.randn', 'th.randn', (['batch_size', 'mem_length', 'num_heads', 'v_channel'], {}), '(batch_size, mem_length, num_heads, v_channel)\n', (780, 826), True, 'import torch as th\n'), ((448, 493), 'torch.rand', 'th.rand', (['batch_size', 'query_length', 'mem_length'], {}), '(batch_size, query_length, mem_length)\n', (455, 493), True, 'import torch as th\n'), ((888, 944), 'torch.randn', 'th.randn', (['batch_size', 'num_heads', 'query_length', 'q_channel'], {}), '(batch_size, num_heads, query_length, q_channel)\n', (896, 944), True, 'import torch as th\n'), ((970, 1024), 'torch.randn', 'th.randn', (['batch_size', 'num_heads', 'mem_length', 'k_channel'], {}), '(batch_size, num_heads, mem_length, k_channel)\n', (978, 1024), True, 'import torch as th\n'), ((1052, 1106), 'torch.randn', 'th.randn', (['batch_size', 'num_heads', 'mem_length', 'v_channel'], {}), '(batch_size, num_heads, mem_length, v_channel)\n', (1060, 1106), True, 'import torch as th\n'), ((1168, 1224), 'torch.randn', 'th.randn', (['query_length', 'batch_size', 'num_heads', 'q_channel'], {}), '(query_length, batch_size, num_heads, q_channel)\n', (1176, 1224), True, 'import torch as th\n'), ((1250, 1304), 'torch.randn', 'th.randn', (['mem_length', 'batch_size', 'num_heads', 'k_channel'], {}), '(mem_length, batch_size, num_heads, k_channel)\n', (1258, 1304), True, 'import torch as th\n'), ((1332, 1386), 'torch.randn', 'th.randn', (['mem_length', 'batch_size', 'num_heads', 'v_channel'], {}), '(mem_length, batch_size, num_heads, v_channel)\n', (1340, 1386), True, 'import torch as th\n'), ((1925, 1956), 'numpy.ones', 'np.ones', (['att_weights.shape[:-1]'], {}), '(att_weights.shape[:-1])\n', (1932, 1956), True, 'import numpy as np\n'), ((3042, 3106), 'numpy.testing.assert_allclose', 'assert_allclose', (['((1 - mask_npy) * att_weights_npy[:, j, :, :])', '(0)'], {}), '((1 - mask_npy) * att_weights_npy[:, j, :, :], 0)\n', (3057, 3106), False, 'from numpy.testing import assert_allclose\n'), ((3519, 3647), 'gluonnlp.torch.attention_cell.MultiHeadAttentionCell', 'ac.MultiHeadAttentionCell', ([], {'query_units': 'query_units', 'num_heads': 'num_heads', 'scaled': 'scaled', 'normalized': 'normalized', 'layout': 'layout'}), '(query_units=query_units, num_heads=num_heads,\n scaled=scaled, normalized=normalized, layout=layout)\n', (3544, 3647), True, 'from gluonnlp.torch import attention_cell as ac\n'), ((2936, 2971), 'numpy.ones', 'np.ones', (['(batch_size, query_length)'], {}), '((batch_size, query_length))\n', (2943, 2971), True, 'import numpy as np\n'), ((2306, 2342), 'torch.norm', 'th.norm', (['((1 - mask_nd) * att_weights)'], {}), '((1 - mask_nd) * att_weights)\n', (2313, 2342), True, 'import torch as th\n'), ((1788, 1811), 'torch.sum', 'th.sum', (['mask_nd'], {'dim': '(-1)'}), '(mask_nd, dim=-1)\n', (1794, 1811), True, 'import torch as th\n')] |
from likurai.model import BayesianNeuralNetwork
from likurai.layer import BayesianDense, Likelihood
from sklearn.datasets import load_iris
from sklearn.metrics import classification_report, log_loss
from theano import shared
from sklearn import ensemble
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils import shuffle
import numpy as np
from likurai import floatX
if __name__ == '__main__':
    # Define some constants for the model architecture
    HIDDEN_SIZE = 6
    # Load the dataset
    iris = load_iris()
    X, y = iris['data'], iris['target']
    X, y = shuffle(X, y)
    y_one_hot = OneHotEncoder(sparse=False, dtype=floatX).fit_transform(y.reshape(-1, 1))
    print(y.shape)
    # We'll arbitrarily leave out the last 100 data points as our test set
    TEST_SIZE = 100
    X_train, y_train, y_train_one_hot = X[:-TEST_SIZE, :], y[:-TEST_SIZE], y_one_hot[:-TEST_SIZE]
    X_test, y_test, y_test_one_hot = X[-TEST_SIZE:, :], y[-TEST_SIZE:], y_one_hot[-TEST_SIZE:]
    # Get some information about the dataset that we'll need for the model
    n_features = X.shape[1]
    n_samples = X.shape[0]
    n_classes = y_one_hot.shape[1]
    # Initialize a model object
    bnn = BayesianNeuralNetwork()
    bnn.x.set_value(X_train.astype(floatX))
    bnn.y = shared(y_train_one_hot)
    # Create our first layer (Input -> Hidden1). We specify the priors for the weights/bias in the kwargs
    with bnn.model:
        input_layer = BayesianDense('input', input_size=n_features, neurons=HIDDEN_SIZE, activation='relu')(bnn.x)
        # Create a hidden layer. We can also specify the shapes for the weights/bias in the kwargs
        hidden_layer_1 = BayesianDense('hidden1', input_size=HIDDEN_SIZE, neurons=HIDDEN_SIZE, activation='relu')(input_layer)
        # Create our output layer
        output_layer = BayesianDense('output', input_size=HIDDEN_SIZE, neurons=n_classes, activation='softmax')(hidden_layer_1)
        likelihood_layer = Likelihood('Multinomial', 'p')(output_layer, **{'observed': bnn.y, 'n': 1})
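        # (Added note) With n=1 the Multinomial likelihood over the one-hot
        # targets reduces to a categorical distribution whose class
        # probabilities 'p' come from the softmax output layer.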
    # The model itself follows the scikit-learn interface for training/predicting
    bnn.fit(X_train, y_train_one_hot, epochs=1000, method='nuts', **{'tune': 2000, 'njobs': 1, 'chains': 1})
    bnn.save_model('classification_example.pickle')
    # bnn.fit(X_train, y_train_one_hot, epochs=100000, method='advi', batch_size=32)
    # Generate predictions
    pred = bnn.predict(X_test, n_samples=1000)
    print(pred.shape)
    # However, for simplicity's sake, we can also tell the model to just give us point-estimate predictions
    point_pred = bnn.predict(X_test, n_samples=1000, point_estimate=True)
    print(point_pred)
    print(point_pred.shape)
    # Let's just make a simple baseline using a scikit-learn model. Eventually I'll use a comparable NN
    params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2, 'learning_rate': 0.01}
    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)
    clf_pred = clf.predict(X_test)
    print(np.unique(y_test))
    print('BNN:\n{}'.format(classification_report(y_test, point_pred.argmax(axis=0))))
    print('Baseline:\n{}'.format(classification_report(y_test, clf_pred)))
    print("Log-Loss BNN: {}".format(log_loss(y_test, point_pred.argmax(axis=0))))
    print("Log-Loss BNN (probabilistic): {}".format(log_loss(y_test, point_pred.T)))
    print("Log-Loss Baseline: {}".format(log_loss(y_test, clf_pred)))
| [
"sklearn.datasets.load_iris",
"likurai.layer.BayesianDense",
"likurai.model.BayesianNeuralNetwork",
"likurai.layer.Likelihood",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.metrics.log_loss",
"sklearn.metrics.classification_report",
"sklearn.ensemble.GradientBoostingClassifier",
"theano.shared",
... | [((521, 532), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (530, 532), False, 'from sklearn.datasets import load_iris\n'), ((584, 597), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {}), '(X, y)\n', (591, 597), False, 'from sklearn.utils import shuffle\n'), ((1205, 1228), 'likurai.model.BayesianNeuralNetwork', 'BayesianNeuralNetwork', ([], {}), '()\n', (1226, 1228), False, 'from likurai.model import BayesianNeuralNetwork\n'), ((1285, 1308), 'theano.shared', 'shared', (['y_train_one_hot'], {}), '(y_train_one_hot)\n', (1291, 1308), False, 'from theano import shared\n'), ((2956, 3001), 'sklearn.ensemble.GradientBoostingClassifier', 'ensemble.GradientBoostingClassifier', ([], {}), '(**params)\n', (2991, 3001), False, 'from sklearn import ensemble\n'), ((3078, 3095), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (3087, 3095), True, 'import numpy as np\n'), ((614, 655), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'sparse': '(False)', 'dtype': 'floatX'}), '(sparse=False, dtype=floatX)\n', (627, 655), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1458, 1547), 'likurai.layer.BayesianDense', 'BayesianDense', (['"""input"""'], {'input_size': 'n_features', 'neurons': 'HIDDEN_SIZE', 'activation': '"""relu"""'}), "('input', input_size=n_features, neurons=HIDDEN_SIZE,\n activation='relu')\n", (1471, 1547), False, 'from likurai.layer import BayesianDense, Likelihood\n'), ((1678, 1770), 'likurai.layer.BayesianDense', 'BayesianDense', (['"""hidden1"""'], {'input_size': 'HIDDEN_SIZE', 'neurons': 'HIDDEN_SIZE', 'activation': '"""relu"""'}), "('hidden1', input_size=HIDDEN_SIZE, neurons=HIDDEN_SIZE,\n activation='relu')\n", (1691, 1770), False, 'from likurai.layer import BayesianDense, Likelihood\n'), ((1869, 1961), 'likurai.layer.BayesianDense', 'BayesianDense', (['"""output"""'], {'input_size': 'HIDDEN_SIZE', 'neurons': 'n_classes', 'activation': '"""softmax"""'}), "('output', input_size=HIDDEN_SIZE, neurons=n_classes,\n activation='softmax')\n", (1882, 1961), False, 'from likurai.layer import BayesianDense, Likelihood\n'), ((2002, 2032), 'likurai.layer.Likelihood', 'Likelihood', (['"""Multinomial"""', '"""p"""'], {}), "('Multinomial', 'p')\n", (2012, 2032), False, 'from likurai.layer import BayesianDense, Likelihood\n'), ((3218, 3257), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'clf_pred'], {}), '(y_test, clf_pred)\n', (3239, 3257), False, 'from sklearn.metrics import classification_report, log_loss\n'), ((3395, 3425), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'point_pred.T'], {}), '(y_test, point_pred.T)\n', (3403, 3425), False, 'from sklearn.metrics import classification_report, log_loss\n'), ((3469, 3495), 'sklearn.metrics.log_loss', 'log_loss', (['y_test', 'clf_pred'], {}), '(y_test, clf_pred)\n', (3477, 3495), False, 'from sklearn.metrics import classification_report, log_loss\n')] |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import sys
i = sys.argv[1]
uf = sys.argv[2]
size = sys.argv[3]
page = sys.argv[4]
latency_path = "./log/latency_" + str(i) + "_" + str(uf) + "k_" + str(size) + "_" + str(page) + "_0.log"
prepare_path = "./log/" + str(i) + "_" + str(uf) + "k_" + str(size) + "_" + str(page) + "_overhead.log"
latency_file = open(latency_path)
prepare_file = open(prepare_path)
latencys = []
prepares = []
overheads = []
for eachline in latency_file.readlines():
    latencys.append(float(eachline))
for el in prepare_file.readlines():
    el = el.replace(' ', '')
    prepare, overhead, total = el.split("\t")
    prepares.append(float(prepare))
    overheads.append(float(overhead))
mean1 = (np.sum(latencys) - np.sum(prepares) / 1000.0) / (len(latencys) - len(prepares))
mean2 = np.mean(prepares)
mean3 = np.mean(overheads)
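# (Added note) mean1 subtracts the summed prepare times from the summed
# latencies and averages over the count difference; the /1000.0 looks like a
# unit conversion (e.g. microseconds to milliseconds) -- an assumption, since
# the log units are not documented here.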
str2 = "%d\t%d\t%d" % (mean1, mean2, mean3)
print(str2)
| [
"numpy.mean",
"numpy.sum"
] | [((894, 911), 'numpy.mean', 'np.mean', (['prepares'], {}), '(prepares)\n', (901, 911), True, 'import numpy as np\n'), ((921, 939), 'numpy.mean', 'np.mean', (['overheads'], {}), '(overheads)\n', (928, 939), True, 'import numpy as np\n'), ((805, 821), 'numpy.sum', 'np.sum', (['latencys'], {}), '(latencys)\n', (811, 821), True, 'import numpy as np\n'), ((824, 840), 'numpy.sum', 'np.sum', (['prepares'], {}), '(prepares)\n', (830, 840), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from numpy import interp
from nicos.core import Attach, Moveable, Override, Param, listof
from nicos.core.errors import ConfigurationError
from nicos.devices.abstract import TransformedMoveable
class InterpolatedMotor(TransformedMoveable):
    """
    This class implements a logical motor driving a real one
    according to a provided interpolation table.
    """

    parameters = {
        'target_positions': Param('List of positions for this motor',
                                   type=listof(float)),
        'raw_positions': Param('List of matching positions for the attached '
                                'motor',
                                type=listof(float)),
    }

    attached_devices = {
        'raw_motor': Attach('Motor to drive when moving this logical motor',
                             Moveable),
    }

    parameter_overrides = {
        'mapping': Override(mandatory=False),
    }

    valuetype = float
    relax_mapping = True

    def doInit(self, mode):
        if len(self.target_positions) != len(self.raw_positions):
            raise ConfigurationError('Length of target and raw '
                                     'positions must match')

    def _readRaw(self, maxage=0):
        return self._attached_raw_motor.read(maxage)

    def doStatus(self, maxage=0):
        return self._attached_raw_motor.doStatus(maxage)

    def doIsAllowed(self, target):
        low = self.target_positions[0]
        high = self.target_positions[-1]
        if not (low <= target <= high):
            return False, 'Out of limits (%f, %f)' % (low, high)
        return self._attached_raw_motor.isAllowed(
            self._mapTargetValue(target))

    def _startRaw(self, target):
        self._attached_raw_motor.start(target)

    def _mapReadValue(self, value):
        return interp([value], self._raw_positions, self._target_positions)[0]

    def _mapTargetValue(self, target):
        return interp([target], self._target_positions, self._raw_positions)[0]
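
# (Added) A minimal illustration of the numpy.interp mapping used above,
# independent of NICOS; both position tables must be monotonically increasing
# for interp to behave as intended:
#     >>> interp([2.5], [1.0, 2.0, 3.0], [10.0, 20.0, 30.0])[0]
#     25.0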
| [
"nicos.core.Attach",
"nicos.core.listof",
"nicos.core.Override",
"numpy.interp",
"nicos.core.errors.ConfigurationError"
] | [((1793, 1858), 'nicos.core.Attach', 'Attach', (['"""Motor to drive when moving this logical motor"""', 'Moveable'], {}), "('Motor to drive when moving this logical motor', Moveable)\n", (1799, 1858), False, 'from nicos.core import Attach, Moveable, Override, Param, listof\n'), ((1942, 1967), 'nicos.core.Override', 'Override', ([], {'mandatory': '(False)'}), '(mandatory=False)\n', (1950, 1967), False, 'from nicos.core import Attach, Moveable, Override, Param, listof\n'), ((2136, 2203), 'nicos.core.errors.ConfigurationError', 'ConfigurationError', (['"""Length of target and raw positions must match"""'], {}), "('Length of target and raw positions must match')\n", (2154, 2203), False, 'from nicos.core.errors import ConfigurationError\n'), ((2871, 2931), 'numpy.interp', 'interp', (['[value]', 'self._raw_positions', 'self._target_positions'], {}), '([value], self._raw_positions, self._target_positions)\n', (2877, 2931), False, 'from numpy import interp\n'), ((2990, 3051), 'numpy.interp', 'interp', (['[target]', 'self._target_positions', 'self._raw_positions'], {}), '([target], self._target_positions, self._raw_positions)\n', (2996, 3051), False, 'from numpy import interp\n'), ((1554, 1567), 'nicos.core.listof', 'listof', (['float'], {}), '(float)\n', (1560, 1567), False, 'from nicos.core import Attach, Moveable, Override, Param, listof\n'), ((1724, 1737), 'nicos.core.listof', 'listof', (['float'], {}), '(float)\n', (1730, 1737), False, 'from nicos.core import Attach, Moveable, Override, Param, listof\n')] |
import json
import numpy as np
def create_minutely(time, hours):
    minutely = [{"key": time + 60*x, "value": np.random.normal(25, 1) + (x/60) % 300} for x in range(60*hours)]
    return minutely

def create_hourly(time):
    hourly = [{"key": time + 3600*x, "value": np.random.normal(20, 1) + x % 5} for x in range(24)]
    return hourly
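# (Added note) The "key" fields are Unix epoch seconds; 1436920200 is
# 2015-07-15 00:30 UTC, and the -1770 offset used below backdates the series
# by 29.5 minutes.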
##Dump to text file
hour = create_minutely(1436920200 - 1770, 1)
hour_json = json.dumps(hour,separators=(',', ': '), sort_keys=True, indent=4)
print (hour[1]['value'])
text_file = open("hour.txt", 'w')
text_file.write(hour_json)
text_file.flush()
text_file.close()
min_day = create_minutely(1436920200 - 1770, 24)
min_day_json = json.dumps(min_day,separators=(',', ': '), sort_keys=True, indent=4)
text_file = open("min_day.txt", 'w')
text_file.write(min_day_json)
text_file.flush()
text_file.close()
day = create_hourly(1436920200)
day_json = json.dumps(day,separators=(',', ': '), sort_keys=True, indent=4)
text_file = open("day.txt", 'w')
text_file.write(day_json)
text_file.flush()
text_file.close()
##Test importing
text_file = open("hour.txt", 'r')
hour2 = json.loads(text_file.read())
if hour2 == hour:
    print ("Worked!")
print (hour2[1]['value'])
| [
"numpy.random.normal",
"json.dumps"
] | [((403, 469), 'json.dumps', 'json.dumps', (['hour'], {'separators': "(',', ': ')", 'sort_keys': '(True)', 'indent': '(4)'}), "(hour, separators=(',', ': '), sort_keys=True, indent=4)\n", (413, 469), False, 'import json\n'), ((657, 726), 'json.dumps', 'json.dumps', (['min_day'], {'separators': "(',', ': ')", 'sort_keys': '(True)', 'indent': '(4)'}), "(min_day, separators=(',', ': '), sort_keys=True, indent=4)\n", (667, 726), False, 'import json\n'), ((875, 940), 'json.dumps', 'json.dumps', (['day'], {'separators': "(',', ': ')", 'sort_keys': '(True)', 'indent': '(4)'}), "(day, separators=(',', ': '), sort_keys=True, indent=4)\n", (885, 940), False, 'import json\n'), ((109, 132), 'numpy.random.normal', 'np.random.normal', (['(25)', '(1)'], {}), '(25, 1)\n', (125, 132), True, 'import numpy as np\n'), ((259, 282), 'numpy.random.normal', 'np.random.normal', (['(20)', '(1)'], {}), '(20, 1)\n', (275, 282), True, 'import numpy as np\n')] |
"""
# ============================================================================= #
| Project: [MuJoCo Practices]
| Title: Python + mujoco-py
| Author: <NAME>
| Email: [Moses ] <EMAIL>
| Creation Date: Monday, September 7th, 2020
# ============================================================================= #
# ============================================================================= #
| (0A) [DESCRIPTION]
|
| - Python Script for running multiple models with its corresponding controllers.
| - This will be useful for educational purpose.
|
# ============================================================================= #
# ============================================================================= #
| (0B) [KEYWORDS DEFINITION]
| : type the following "keywords" for cases as...
| - [BACKUP] [NAME]: Back-up code in case it's needed for the near future
| - [TIP]: The reason why the following code was written.
| - [TODO]: The part where modification is needed in the near future
# ============================================================================= #
# ============================================================================= #
| (0C) [PYTHON NAMING CONVENTION]
| Our project will follow the python naming convention, [REF]: https://stackoverflow.com/a/8423697/13437196
| ---------------------------------------------------------------------------------------------------------
| module_name, package_name, ClassName, method_name, ExceptionName, function_name,
| GLOBAL_CONSTANT_NAME, global_var_name, instance_var_name, function_parameter_name, local_var_name.
# ============================================================================= #
# ============================================================================= #
| (0D) [DOCOPT PARSE]
| From now on, the written comments are specifically for "docopt" function.
| [REF] http://docopt.org/
# ============================================================================= #
Usage:
run.py [options]
run.py -h | --help
Arguments:
Options:
-h --help Showing the usage and options
--version Show version
    -s --saveData        Saving the necessary data from MuJoCo simulation as a txt file in the current directory
[default: False]
-r --recordVideo Record simulation video as a .mp4 file in the current directory
[default: False]
--runTime=TIME The total time of the simulation
[default: 5.0]
--startTime=TIME The start time of the movement, or controller
[default: 1.0]
--modelName=NAME Setting the xml model file name which will be used for the simulation.
The starting number of the xml model file indicates the type of simulation, hence the --modelName
[default: 1_mass_PD.xml]
--camPos=STRING Setting the Camera Position of the simulation.
default is None
--quiet Print less text
[default: False]
--verbose Print more text
[default: False]
Examples, try:
python3 run.py --help
python3 run.py --version
python3 run.py --modelName="mass_PD.xml" --findCamPos
"""
# ============================================================================= #
# (0A) [IMPORT MODULES]
# Importing necessary modules + declaring basic configurations for running the whole mujoco simulator.
# [Built-in modules]
import sys
import os
import re
import argparse
import datetime
import shutil
# [3rd party modules]
import numpy as np
import cv2
# [3rd party modules] - mujoco-py
try:
    import mujoco_py as mjPy
except ImportError as e:
    raise ImportError( "{}. (HINT: you need to install mujoco_py, \
                        and also perform the setup instructions here: \
                        https://github.com/openai/mujoco-py/.)".format( e ) )
from docopt import docopt
# [3rd party modules] - pyPlot for graphs
import matplotlib.pyplot as plt
# import nevergrad as ng # [BACKUP] Needed for Optimization
# [Local modules]
from modules.constants import Constants
from modules.controllers import ( PID_Controller )
from modules.utils import ( args_cleanup, my_print, my_mkdir, my_rmdir )
from modules.simulation import Simulation
# from modules.output_funcs import (dist_from_tip2target, tip_velocity )
# from modules.input_ctrls import ( ImpedanceController, Excitator, ForwardKinematics, PositionController )
# from modules.utils import ( add_whip_model, my_print, my_mkdir, args_cleanup,
# my_rmdir, str2float, camel2snake, snake2camel )
# ============================================================================= #
# ============================================================================= #
# (0B) [SYSTEM SETTINGS]
# [Printing Format]
np.set_printoptions( linewidth = Constants.PRINT_LW ,
suppress = True ,
precision = Constants.PREC ) # Setting the numpy print options, useful for printing out data with consistent pattern.
args = docopt( __doc__, version = Constants.VERSION ) # Parsing the Argument
args = args_cleanup( args, '--' ) # Cleaning up the dictionary, discard prefix string '--' for the variables
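# (Added note) docopt returns option keys like '--saveData'; args_cleanup
# presumably strips the '--' prefix so options can be read as args['saveData'].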
if sys.version_info[ : 3 ] < ( 3, 0, 0 ):      # Simple version check of the python version. python3+ is recommended for this file.
    my_print( NOTIFICATION = " PYTHON3+ is recommended for this script " )
# If video needs to be recorded or data should be saved, then append 'saveDir' element to args dictionary
args[ 'saveDir' ] = my_mkdir( ) if args[ 'recordVideo' ] or args[ 'saveData' ] else None
assert not ( args[ 'quiet' ] and args[ 'verbose' ] ) # If quiet and verbose are true at the same time, assert!
my_print( saveDir = args[ 'saveDir' ] )
# ============================================================================= #
# ============================================================================= #
def main( ):
    # ============================================================================= #
    model_name = args[ 'modelName' ]  # Calling Model
    my_print( modelName = model_name )
    mySim = Simulation( model_name = model_name,
                        arg_parse  = args )
    sim_type = model_name[ 0 ]  # The first character of the model name is the index of the simulation type.
    if "1" == sim_type:  # 1: Simple Mass Simulation
        controller_object = PID_Controller( mySim.mjModel, mySim.mjData,
                                             Kp = 0, Kd = 0, Ki = 0, ref_type = 0 )
    mySim.attach_controller( controller_object )
    mySim.run( )
    if args[ 'saveDir' ] is not None:
        mySim.save_simulation_data( args[ 'saveDir' ] )
        shutil.copyfile( Constants.MODEL_DIR + model_name,
                         args[ 'saveDir' ] + model_name )
    mySim.reset( )
# ============================================================================= #
if __name__ == "__main__":
try:
main( )
except KeyboardInterrupt:
print( "Ctrl-C was inputted. Halting the program. ", end = ' ' )
if args[ 'saveDir' ] is not None:
my_rmdir( args[ 'saveDir' ] )
except ( FileNotFoundError, IndexError, ValueError ) as e:
print( e, end = ' ' )
if args[ 'saveDir' ] is not None:
my_rmdir( args[ 'saveDir' ] )
| [
"modules.simulation.Simulation",
"numpy.set_printoptions",
"modules.utils.my_mkdir",
"docopt.docopt",
"modules.utils.my_print",
"modules.utils.my_rmdir",
"modules.utils.args_cleanup",
"shutil.copyfile",
"modules.controllers.PID_Controller"
] | [((5319, 5414), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': 'Constants.PRINT_LW', 'suppress': '(True)', 'precision': 'Constants.PREC'}), '(linewidth=Constants.PRINT_LW, suppress=True, precision=\n Constants.PREC)\n', (5338, 5414), True, 'import numpy as np\n'), ((5606, 5648), 'docopt.docopt', 'docopt', (['__doc__'], {'version': 'Constants.VERSION'}), '(__doc__, version=Constants.VERSION)\n', (5612, 5648), False, 'from docopt import docopt\n'), ((5709, 5733), 'modules.utils.args_cleanup', 'args_cleanup', (['args', '"""--"""'], {}), "(args, '--')\n", (5721, 5733), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((6433, 6466), 'modules.utils.my_print', 'my_print', ([], {'saveDir': "args['saveDir']"}), "(saveDir=args['saveDir'])\n", (6441, 6466), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((6027, 6093), 'modules.utils.my_print', 'my_print', ([], {'NOTIFICATION': '""" PYTHON3+ is recommended for this script """'}), "(NOTIFICATION=' PYTHON3+ is recommended for this script ')\n", (6035, 6093), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((6225, 6235), 'modules.utils.my_mkdir', 'my_mkdir', ([], {}), '()\n', (6233, 6235), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((6841, 6871), 'modules.utils.my_print', 'my_print', ([], {'modelName': 'model_name'}), '(modelName=model_name)\n', (6849, 6871), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((6889, 6938), 'modules.simulation.Simulation', 'Simulation', ([], {'model_name': 'model_name', 'arg_parse': 'args'}), '(model_name=model_name, arg_parse=args)\n', (6899, 6938), False, 'from modules.simulation import Simulation\n'), ((7268, 7341), 'modules.controllers.PID_Controller', 'PID_Controller', (['mySim.mjModel', 'mySim.mjData'], {'Kp': '(0)', 'Kd': '(0)', 'Ki': '(0)', 'ref_type': '(0)'}), '(mySim.mjModel, mySim.mjData, Kp=0, Kd=0, Ki=0, ref_type=0)\n', (7282, 7341), False, 'from modules.controllers import PID_Controller\n'), ((7566, 7645), 'shutil.copyfile', 'shutil.copyfile', (['(Constants.MODEL_DIR + model_name)', "(args['saveDir'] + model_name)"], {}), "(Constants.MODEL_DIR + model_name, args['saveDir'] + model_name)\n", (7581, 7645), False, 'import shutil\n'), ((7997, 8022), 'modules.utils.my_rmdir', 'my_rmdir', (["args['saveDir']"], {}), "(args['saveDir'])\n", (8005, 8022), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n'), ((8176, 8201), 'modules.utils.my_rmdir', 'my_rmdir', (["args['saveDir']"], {}), "(args['saveDir'])\n", (8184, 8201), False, 'from modules.utils import args_cleanup, my_print, my_mkdir, my_rmdir\n')] |
"""
source: https://github.com/llSourcell/Q-learning-for-trading.git
"""
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import itertools
import model
class TradingEnv(gym.Env):
    """
    A 3-stock (AAPL, IBM, MSFT) trading environment.

    State: [# of stock owned, current stock prices, cash in hand]
      - array of length n_stock * 2 + 1, plus one price-history estimate per
        stock appended in get_obs
      - price is discretized (to integer) to reduce state space
      - use close price for each stock
      - cash in hand is evaluated at each step based on action performed

    Action: sell (0), hold (1), and buy (2)
      - when selling, sell all the shares
      - when buying, buy as many as cash in hand allows
      - if buying multiple stock, equally distribute cash in hand and then utilize the balance
    """

    def __init__(self, train_data, init_invest=2000, price_history_length=10):
        # data
        self.stock_price_history = train_data
        self.n_stock, self.n_step = self.stock_price_history.shape
        # instance attributes
        self.init_invest = init_invest
        self.cur_step = None
        self.stock_owned = None
        self.stock_price = None
        self.cash_in_hand = None
        # action space
        self.action_space = spaces.Discrete(3**self.n_stock)
        # observation space: give estimates in order to sample and build scaler
        stock_max_price = self.stock_price_history.max(axis=1)
        stock_range = [[0, init_invest * 2 // mx] for mx in stock_max_price]
        price_range = [[0, mx] for mx in stock_max_price]
        estimates_range = [[-1.0, 1.0] for i in range(self.n_stock)]
        cash_in_hand_range = [[0, init_invest * 2]]
        self.observation_space = spaces.MultiDiscrete(stock_range + price_range + cash_in_hand_range + estimates_range)
        # seed and start
        self.hist_length = price_history_length
        self.history = np.zeros((1, self.hist_length, self.n_stock))
        self.model_hist_est = self._load_est()
        self._seed()
        self.reset()

    def _seed(self, seed=None):
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def reset(self):
        self.cur_step = self.hist_length - 1
        self.stock_owned = [0] * self.n_stock
        self.stock_price = self.stock_price_history[:, self.cur_step]
        self.history[0, :self.cur_step] = self.stock_price_history.T[:self.cur_step, :]
        self.history[0, self.cur_step] = self.stock_price
        self.cash_in_hand = self.init_invest
        return self.get_obs()
    def step(self, action):
        assert self.action_space.contains(action)
        prev_val = self._get_val()
        self.cur_step += 1
        self.stock_price = self.stock_price_history[:, self.cur_step]  # update price
        self.history = np.roll(self.history, shift=-1, axis=1)
        self.history[0, -1] = self.stock_price  # fill the time slot freed by the roll
        self._trade(action)
        cur_val = self._get_val()
        reward = cur_val - prev_val
        done = self.cur_step == self.n_step - 1
        info = {'cur_val': cur_val}
        return self.get_obs(), reward, done, info

    def get_obs(self):
        obs = []
        obs.extend(self.stock_owned)
        obs.extend(list(self.stock_price))
        obs.append(self.cash_in_hand)
        history_estimate = self._get_estimation()
        obs.extend(history_estimate)
        return obs

    def _get_val(self):
        return np.sum(self.stock_owned * self.stock_price) + self.cash_in_hand

    def _get_estimation(self):
        X_set = np.split(self.history, self.n_stock, axis=2)
        res = []
        for i in range(len(X_set)):
            res.append(self.model_hist_est[i].predict(X_set[i]))
        return res
    def _load_est(self):
        direct = "est_models/"
        f_names = ["aapl", "ibm", "msft"]
        ext = ".weights"
        models = []
        for i in range(self.n_stock):
            tmp_model = model.lstm(self.history.shape)
            tmp_model.load_weights(direct + f_names[i] + ext)
            models.append(tmp_model)
        return models
    def _trade(self, action):
        # all combos of sell(0), hold(1), or buy(2) for each stock
        action_combo = list(map(list, itertools.product([0, 1, 2], repeat=self.n_stock)))
        action_vec = action_combo[action]
        # print(action_vec)  # debug leftover
        # input()            # debug leftover; would block every step if left in
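        # (Added note) `action` indexes the cartesian product in lexicographic
        # order, i.e. a base-3 encoding: with 3 stocks, action 14 decodes to
        # [1, 1, 2] (hold, hold, buy) since 14 == 1*9 + 1*3 + 2.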
        # one pass to get sell/buy index
        sell_index = []
        buy_index = []
        for i, a in enumerate(action_vec):
            if a == 0:
                sell_index.append(i)
            elif a == 2:
                buy_index.append(i)
        # two passes: sell first, then buy; might be naive in real-world settings
        if sell_index:
            for i in sell_index:
                self.cash_in_hand += self.stock_price[i] * self.stock_owned[i]
                self.stock_owned[i] = 0
        if buy_index:
            can_buy = True
            while can_buy:
                for i in buy_index:
                    if self.cash_in_hand > self.stock_price[i]:
                        self.stock_owned[i] += 1  # buy one share
                        self.cash_in_hand -= self.stock_price[i]
                    else:
                        can_buy = False
"numpy.sum",
"numpy.roll",
"numpy.zeros",
"gym.spaces.Discrete",
"gym.spaces.MultiDiscrete",
"numpy.split",
"model.lstm",
"itertools.product",
"gym.utils.seeding.np_random"
] | [((1190, 1224), 'gym.spaces.Discrete', 'spaces.Discrete', (['(3 ** self.n_stock)'], {}), '(3 ** self.n_stock)\n', (1205, 1224), False, 'from gym import spaces\n'), ((1628, 1718), 'gym.spaces.MultiDiscrete', 'spaces.MultiDiscrete', (['(stock_range + price_range + cash_in_hand_range + estimates_range)'], {}), '(stock_range + price_range + cash_in_hand_range +\n estimates_range)\n', (1648, 1718), False, 'from gym import spaces\n'), ((1800, 1845), 'numpy.zeros', 'np.zeros', (['(1, self.hist_length, self.n_stock)'], {}), '((1, self.hist_length, self.n_stock))\n', (1808, 1845), True, 'import numpy as np\n'), ((1988, 2011), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (2005, 2011), False, 'from gym.utils import seeding\n'), ((2644, 2683), 'numpy.roll', 'np.roll', (['self.history'], {'shift': '(-1)', 'axis': '(1)'}), '(self.history, shift=-1, axis=1)\n', (2651, 2683), True, 'import numpy as np\n'), ((3341, 3385), 'numpy.split', 'np.split', (['self.history', 'self.n_stock'], {'axis': '(2)'}), '(self.history, self.n_stock, axis=2)\n', (3349, 3385), True, 'import numpy as np\n'), ((3234, 3277), 'numpy.sum', 'np.sum', (['(self.stock_owned * self.stock_price)'], {}), '(self.stock_owned * self.stock_price)\n', (3240, 3277), True, 'import numpy as np\n'), ((3691, 3721), 'model.lstm', 'model.lstm', (['self.history.shape'], {}), '(self.history.shape)\n', (3701, 3721), False, 'import model\n'), ((3945, 3994), 'itertools.product', 'itertools.product', (['[0, 1, 2]'], {'repeat': 'self.n_stock'}), '([0, 1, 2], repeat=self.n_stock)\n', (3962, 3994), False, 'import itertools\n')] |
#coding: utf-8
"""
TODO: unfinished
"""
import os
import sys
import json
import shutil
import copy
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from torch.autograd import Variable
from tensorboardX import SummaryWriter
from abc import ABCMeta, abstractmethod
import socket
from datetime import datetime
from tqdm import tqdm
import logging
from .log import logger, formatter
import clks
import clks.optim
import clks.optim.lr_scheduler
import clks.func.tensor as T
class ModeKeys:
    def __init__(self):
        self.__state__ = {}

    def __getattr__(self, name):
        if name not in self.__state__:
            # self.__state__[name] = len(self.__state__)
            self.__state__[name] = name
        return self.__state__[name]
ModeKeys = ModeKeys()
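# (Added note) The class above is immediately replaced by a singleton instance:
# attribute access on ModeKeys registers and returns the attribute's own name,
# e.g. ModeKeys.TRAIN == 'TRAIN' without declaring it anywhere.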
class Scaffold:
    def __init__(self):
        super().__init__()

    def build(self, args, config, save_path, scaffold_name):
        self.args = args
        self.config = config
        self.save_path = save_path
        self.scaffold_name = scaffold_name
        self.scaffold_path = os.path.join(self.save_path, self.scaffold_name)
        if not os.path.exists(self.scaffold_path):
            # build new workspace
            print("built scaffold at {}".format(self.scaffold_path))
            os.makedirs(self.scaffold_path)
        # init logger
        self.logger = logger
        self.logger.setLevel(logging.INFO)
        if len(self.logger.handlers) > 0:
            self.logger.handlers = self.logger.handlers[:1]
        # formatter = logging.Formatter(
        #     "%(asctime)s - %(levelname)s - %(message)s",
        #     datefmt='%Y-%m-%d %H:%M:%S')
        # sh = logging.StreamHandler()
        # sh.setLevel(logging.INFO)
        # sh.setFormatter(formatter)
        # self.logger.addHandler(sh)
        log_file = os.path.join(
            self.scaffold_path, "{}.log".format(self.scaffold_name))
        fh = logging.FileHandler(log_file)
        fh.setLevel(logging.INFO)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        for hdl in self.logger.root.handlers:
            hdl.setLevel(logging.ERROR)
            hdl.setFormatter(formatter)
        return self
    def train(self, train_spec, eval_spec=None):
        train_loader = train_spec["train_loader"]
        self.model = train_spec["model"]
        self.optimizer = train_spec["optimizer"]
        if "lr_scheduler" in train_spec:
            self.lr_scheduler = train_spec["lr_scheduler"]
        do_eval = eval_spec is not None
        if do_eval:
            eval_loader = eval_spec["eval_loader"]
            eval_callback = eval_spec["callback"]
        total_numel = sum(p.numel() for p in self.model.parameters())
        optim_numel = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        self.logger.info("{} parameters in total; {} ({:.2f}%) parameters to optimize.".format(
            total_numel, optim_numel, optim_numel/total_numel*100))
        self.ckpt = self.init_ckpt()
        if self.args["load_ckpt"]:
            self.logger.info("load checkpoint for resumption.")
            # load ckpt file if restored
            if self.args["target"] is not None:
                ckpt_file = os.path.join(self.scaffold_path,
                    "{}.{}.ckpt".format(
                        self.scaffold_name, self.args["target"]))
            else:
                ckpt_file = os.path.join(self.scaffold_path,
                    "{}.ckpt".format(self.scaffold_name))
            if not os.path.exists(ckpt_file):
                msg = ckpt_file + " doesn't exist."
                self.logger.error(msg)
                raise Exception(msg)
            self.unpack_ckpt(self.load_by_torch(ckpt_file))
        elif train_spec["do_init"]:
            # init model weights
            self.logger.info("init weights for training the first time.")
            if "init_model" in train_spec:
                self.model.load_state_dict(train_spec["init_model"])
            elif hasattr(self.model, "init_weights"):
                self.model.init_weights()
        self.logger.info("start training model {} early stopping.".format(
            "with" if train_spec["max_patience"] > 0 else "without"))
        self.early_stop = False
        # outer loop
        for epo in range(self.ckpt["curr_epoch"], train_spec["max_epoch"]):
            # inner loop
            if (hasattr(self, "lr_scheduler") and
                    isinstance(self.lr_scheduler, clks.optim.lr_scheduler.LRScheduler)):
                self.lr_scheduler.step_epoch(self.ckpt["curr_epoch"])
            self.model.train()
            self.model.before_epoch(self.ckpt["curr_epoch"])
            num_batches = len(train_loader)
            # pbar = tqdm(enumerate(train_loader), total=num_batches)
            for it, batch_data in enumerate(train_loader):
                # schedule lr
                if (hasattr(self, "lr_scheduler") and
                        isinstance(self.lr_scheduler, clks.optim.lr_scheduler.LRScheduler)):
                    self.lr_scheduler.step_update(self.ckpt["global_count"])
                self.model.train()
                self.optimizer.zero_grad()
                self.model.before_update(self.ckpt["global_count"])
                loss, disp_vals = self.model.compute_loss(batch_data)
                loss.backward()
                if train_spec["clip_grad_norm"]:
                    grad_norm = torch.nn.utils.clip_grad_norm_(
                        self.model.trainable_parameters(),
                        train_spec["grad_clip_value"])
                else:
                    grad_norm = T.total_norm([
                        p.grad.data for p in self.model.trainable_parameters()
                        if p.grad is not None])
                self.optimizer.step()
                self.ckpt["global_count"] += 1
                train_loss = loss.data.item()
                # periodic display
                if (train_spec["disp_freq"] >= 1
                        and self.ckpt["global_count"] % train_spec["disp_freq"] == 0
                        or train_spec["disp_freq"] > 0
                        and self.ckpt["global_count"] % max(
                            int(train_spec["disp_freq"] * num_batches), 1) == 0):
                    msg = "[{}][Train] E {} B {}({:.0f}%) U {}; C {:.4f} N {}; ".format(
                        self.scaffold_name, epo, it, it * 100. / num_batches,
                        self.ckpt["global_count"], train_loss,
                        "na" if grad_norm is None else "{:.4f}".format(grad_norm))
                    for k, v in disp_vals.items():
                        msg += "{} {:.4f} ".format(k, v.item() if isinstance(v, torch.Tensor) else v)
                    self.logger.info(msg)
                if np.isnan(train_loss):
                    self.logger.info("NaN occurred! Stop training.")
                    self.early_stop = True
                    break
                # periodic evaluation
                if do_eval and not train_spec["do_epo_eval"] and (
                        train_spec["eval_freq"] >= 1
                        and self.ckpt["global_count"] % train_spec["eval_freq"] == 0
                        or train_spec["eval_freq"] > 0
                        and self.ckpt["global_count"] % max(
                            int(train_spec["eval_freq"] * num_batches), 1) == 0):
                    with torch.no_grad():
                        curr_eval_score = eval_callback(self, eval_loader)
                    msg = "[{}][Eval] E {} B {} SC {:.4f}".format(
                        self.scaffold_name, epo, it, curr_eval_score)
                    if self.ckpt["best_eval_score"] is not None:
                        msg += " Best SC {:.4f}".format(self.ckpt["best_eval_score"])
                    self.logger.info(msg)
                    # check whether better
                    if (self.ckpt["best_eval_score"] is None
                            or not np.isfinite(self.ckpt["best_eval_score"])
                            or curr_eval_score > self.ckpt["best_eval_score"]
                            and np.isfinite(curr_eval_score)):
                        self.ckpt["best_eval_score"] = curr_eval_score
                        self.ckpt["best_eval_model"] = copy.deepcopy(self.model.state_dict())
                        self.logger.info("saving best eval model.")
                        self.save_by_torch(self.ckpt["best_eval_model"], os.path.join(
                            self.scaffold_path, "{}.best.pth".format(self.scaffold_name)))
                        if train_spec["max_patience"] > 0:
                            self.ckpt["curr_patience"] = 0
                    else:
                        if train_spec["max_patience"] > 0:
                            self.ckpt["curr_patience"] += 1
                    # check early stopping
                    if (train_spec["max_patience"] > 0
                            and self.ckpt["curr_patience"] >= train_spec["max_patience"]):
                        self.early_stop = True
                        break
                # periodic checkpoint (for resumption)
                if (train_spec["save_freq"] > 0
                        and self.ckpt["global_count"] % train_spec["save_freq"] == 0):
                    self.logger.info("auto save new checkpoint.")
                    self.save_by_torch(self.pack_ckpt(), os.path.join(self.scaffold_path,
                        '{}.ckpt'.format(self.scaffold_name)))
            # epoch eval
            if do_eval and train_spec["do_epo_eval"] and (
                    train_spec["eval_freq"] >= 1
                    and (self.ckpt["curr_epoch"] + 1) % train_spec["eval_freq"] == 0
                    or train_spec["eval_freq"] > 0
                    and (self.ckpt["curr_epoch"] + 1) % max(
                        int(train_spec["eval_freq"] * train_spec["max_epoch"]), 1) == 0):
                with torch.no_grad():
                    curr_eval_score = eval_callback(self, eval_loader)
                msg = "[{}][Eval] E {} B {} SC {:.4f}".format(self.scaffold_name, epo, it, curr_eval_score)
                if self.ckpt["best_eval_score"] is not None:
                    msg += " Best SC {:.4f}".format(self.ckpt["best_eval_score"])
                self.logger.info(msg)
                if (hasattr(self, "lr_scheduler")
                        and isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)):
                    self.lr_scheduler.step(curr_eval_score)
                # check whether better
                if (self.ckpt["best_eval_score"] is None or
                        curr_eval_score > self.ckpt["best_eval_score"]):
                    self.ckpt["best_eval_score"] = curr_eval_score
                    self.ckpt["best_eval_model"] = copy.deepcopy(self.model.state_dict())
                    self.logger.info("saving best eval model.")
                    self.save_by_torch(self.ckpt["best_eval_model"], os.path.join(
                        self.scaffold_path, "{}.best.pth".format(self.scaffold_name)))
                    if train_spec["max_patience"] > 0:
                        self.ckpt["curr_patience"] = 0
                else:
                    if train_spec["max_patience"] > 0:
                        self.ckpt["curr_patience"] += 1
                # check early stopping
                if (train_spec["max_patience"] > 0
                        and self.ckpt["curr_patience"] >= train_spec["max_patience"]):
                    self.early_stop = True
            if self.early_stop:
                # an incomplete epoch doesn't need to be saved, otherwise there'll be a bug
                break
            self.ckpt["curr_epoch"] = epo + 1
            if (hasattr(self, "lr_scheduler")
                    and isinstance(self.lr_scheduler, torch.optim.lr_scheduler._LRScheduler)):
                self.lr_scheduler.step()
            # save epoch checkpoint (for historic performance analysis)
            if train_spec["save_epo_ckpts"]:
                self.logger.info("save checkpoint of epoch {}.".format(epo))
                self.save_by_torch(self.pack_ckpt(),
                    os.path.join(self.scaffold_path, '{}.epo-{}.ckpt'.format(
                        self.scaffold_name, epo)))
        self.logger.info("save final model")
        # not the best, in case of eval_freq <= 0
        if train_spec["save_final_model"]:
            self.save_by_torch(self.model.state_dict(), os.path.join(
                self.scaffold_path, "{}.final.pth".format(self.scaffold_name)))
            self.logger.info("final model saved.")
        return self.model
    def evaluate(self, eval_spec):
        self.model = eval_spec["model"]
        eval_loader = eval_spec["eval_loader"]
        eval_callback = eval_spec["callback"]
        # restore model image
        if self.args["load_ckpt"]:
            if self.args["target"] is not None:
                file_name = os.path.join(self.scaffold_path, "{}.{}.ckpt".format(
                    self.scaffold_name, self.args["target"]))
            else:
                file_name = os.path.join(self.scaffold_path, "{}.ckpt".format(
                    self.scaffold_name))
            self.logger.info("loading model weights from {}.".format(file_name))
            self.model.load_state_dict(self.load_by_torch(file_name)["best_eval_model"])
        else:
            file_name = os.path.join(self.scaffold_path, "{}.{}.pth".format(
                self.scaffold_name, self.args["target"]))
            self.logger.info("loading model weights from {}.".format(file_name))
            self.model.load_state_dict(self.load_by_torch(file_name))
        self.logger.info("start evaluation.")
        with torch.no_grad():
            eval_callback(self, eval_loader)
        self.logger.info("done.")
    def init_ckpt(self):
        ckpt = {
            "curr_epoch": 0,
            "global_count": 0,
            "best_eval_score": None,
            "best_eval_model": None,
            "curr_patience": 0
        }
        return ckpt

    def pack_ckpt(self):
        ckpt = copy.deepcopy(self.ckpt)
        # collect images
        # NOTE: train_stream/eval_stream are never assigned anywhere in this
        # file (see the "unfinished" TODO in the module docstring)
        ckpt["model"] = self.model.state_dict()
        ckpt["optimizer"] = self.optimizer.state_dict()
        ckpt["train_stream"] = self.train_stream.state_dict()
        ckpt["eval_stream"] = self.eval_stream.state_dict()
        return ckpt

    def unpack_ckpt(self, ckpt):
        self.model.load_state_dict(ckpt["model"])
        self.optimizer.load_state_dict(ckpt["optimizer"])
        self.train_stream.load_state_dict(ckpt["train_stream"])
        self.eval_stream.load_state_dict(ckpt["eval_stream"])
        # restore self.ckpt
        for k in self.ckpt.keys():
            self.ckpt[k] = ckpt[k]

    def save_by_torch(self, obj, file_name):
        # keep the previous file as *.backup so an interrupted save cannot
        # destroy the last good checkpoint
        if os.path.exists(file_name):
            backup_file = file_name + ".backup"
            if os.path.exists(backup_file):
                os.remove(backup_file)
            os.rename(file_name, backup_file)
        torch.save(obj, file_name)

    def load_by_torch(self, file_name):
        assert os.path.exists(file_name)
        return torch.load(file_name)
if __name__ == '__main__':
    scf = Scaffold()
    assert hasattr(scf, "build")
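    # (Added) A sketch of the train_spec dict Scaffold.train() expects; the key
    # names are taken from the lookups in that method, values are placeholders:
    # train_spec = {"train_loader": ..., "model": ..., "optimizer": ...,
    #               "do_init": True, "max_epoch": 10, "max_patience": 5,
    #               "clip_grad_norm": False, "disp_freq": 0.1, "eval_freq": 1,
    #               "do_epo_eval": True, "save_freq": 0, "save_epo_ckpts": False,
    #               "save_final_model": True}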
| [
"copy.deepcopy",
"os.remove",
"logging.FileHandler",
"os.makedirs",
"torch.load",
"os.rename",
"os.path.exists",
"numpy.isfinite",
"numpy.isnan",
"torch.save",
"torch.no_grad",
"os.path.join"
] | [((1086, 1134), 'os.path.join', 'os.path.join', (['self.save_path', 'self.scaffold_name'], {}), '(self.save_path, self.scaffold_name)\n', (1098, 1134), False, 'import os\n'), ((1940, 1969), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1959, 1969), False, 'import logging\n'), ((14355, 14379), 'copy.deepcopy', 'copy.deepcopy', (['self.ckpt'], {}), '(self.ckpt)\n', (14368, 14379), False, 'import copy\n'), ((15074, 15099), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (15088, 15099), False, 'import os\n'), ((15284, 15310), 'torch.save', 'torch.save', (['obj', 'file_name'], {}), '(obj, file_name)\n', (15294, 15310), False, 'import torch\n'), ((15367, 15392), 'os.path.exists', 'os.path.exists', (['file_name'], {}), '(file_name)\n', (15381, 15392), False, 'import os\n'), ((15408, 15429), 'torch.load', 'torch.load', (['file_name'], {}), '(file_name)\n', (15418, 15429), False, 'import torch\n'), ((1151, 1185), 'os.path.exists', 'os.path.exists', (['self.scaffold_path'], {}), '(self.scaffold_path)\n', (1165, 1185), False, 'import os\n'), ((1303, 1334), 'os.makedirs', 'os.makedirs', (['self.scaffold_path'], {}), '(self.scaffold_path)\n', (1314, 1334), False, 'import os\n'), ((13974, 13989), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13987, 13989), False, 'import torch\n'), ((15162, 15189), 'os.path.exists', 'os.path.exists', (['backup_file'], {}), '(backup_file)\n', (15176, 15189), False, 'import os\n'), ((15242, 15275), 'os.rename', 'os.rename', (['file_name', 'backup_file'], {}), '(file_name, backup_file)\n', (15251, 15275), False, 'import os\n'), ((3565, 3590), 'os.path.exists', 'os.path.exists', (['ckpt_file'], {}), '(ckpt_file)\n', (3579, 3590), False, 'import os\n'), ((6886, 6906), 'numpy.isnan', 'np.isnan', (['train_loss'], {}), '(train_loss)\n', (6894, 6906), True, 'import numpy as np\n'), ((15207, 15229), 'os.remove', 'os.remove', (['backup_file'], {}), '(backup_file)\n', (15216, 15229), False, 'import os\n'), ((10138, 10153), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10151, 10153), False, 'import torch\n'), ((7527, 7542), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7540, 7542), False, 'import torch\n'), ((8104, 8145), 'numpy.isfinite', 'np.isfinite', (["self.ckpt['best_eval_score']"], {}), "(self.ckpt['best_eval_score'])\n", (8115, 8145), True, 'import numpy as np\n'), ((8261, 8289), 'numpy.isfinite', 'np.isfinite', (['curr_eval_score'], {}), '(curr_eval_score)\n', (8272, 8289), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import re
import sys
with open(sys.argv[1], 'rb') as handle:
dict_out = pickle.load(handle)
U = np.array(dict_out['10 metre U wind component']['values'])
V = np.array(dict_out['10 metre V wind component']['values'])
W = np.sqrt(U**2+V**2)
dict_out['Wind Speed']={'values':W,'units':dict_out['10 metre V wind component']['units']}
# Get the wind reference height (the leading number in the variable name)
s = '10 metre U wind component'
m = re.findall(r'\d+', s)[0]
dict_out['Wind reference height']={'values':int(m),'units':'m'}  # the height is in metres, not wind-speed units
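# Illustrative check: re.findall(r'\d+', s) returns every digit run in order,
# so [0] picks the leading height:
#   re.findall(r'\d+', '10 metre U wind component')  ->  ['10']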
with open(sys.argv[2],"wb") as handle:
pickle.dump(dict_out, handle, protocol=pickle.HIGHEST_PROTOCOL)
| [
"pickle.dump",
"pickle.load",
"numpy.array",
"re.findall",
"numpy.sqrt"
] | [((139, 196), 'numpy.array', 'np.array', (["dict_out['10 metre U wind component']['values']"], {}), "(dict_out['10 metre U wind component']['values'])\n", (147, 196), True, 'import numpy as np\n'), ((201, 258), 'numpy.array', 'np.array', (["dict_out['10 metre V wind component']['values']"], {}), "(dict_out['10 metre V wind component']['values'])\n", (209, 258), True, 'import numpy as np\n'), ((264, 288), 'numpy.sqrt', 'np.sqrt', (['(U ** 2 + V ** 2)'], {}), '(U ** 2 + V ** 2)\n', (271, 288), True, 'import numpy as np\n'), ((110, 129), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (121, 129), False, 'import pickle\n'), ((433, 454), 're.findall', 're.findall', (['"""\\\\d+"""', 's'], {}), "('\\\\d+', s)\n", (443, 454), False, 'import re\n'), ((608, 671), 'pickle.dump', 'pickle.dump', (['dict_out', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(dict_out, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (619, 671), False, 'import pickle\n')] |
import numpy as np
from contextlib import contextmanager
import os
import sys
def unique_rows(a):
order = np.lexsort(a.T)
a = a[order]
diff = np.diff(a, axis=0)
ui = np.ones(len(a), 'bool')
ui[1:] = (diff != 0).any(axis=1)
return a[ui]
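# Illustrative usage: lexsort groups duplicate rows together, diff flags where
# consecutive rows change, and ui keeps the first row of each group:
#   unique_rows(np.array([[1, 2], [1, 2], [3, 4]]))  ->  array([[1, 2], [3, 4]])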
def merge_dicts(*dict_args):
'''
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
'''
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
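# Illustrative usage: later dicts win on key collisions:
#   merge_dicts({'a': 1}, {'a': 2, 'b': 3})  ->  {'a': 2, 'b': 3}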
@contextmanager
def stdout_redirected(to=os.devnull):
'''
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
fd = sys.stdout.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.flush() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
#sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout)
# class HideOutput(object):
# '''
# A context manager that block stdout for its scope, usage:
# with HideOutput():
# os.system('ls -l')
# '''
# def __init__(self, *args, **kw):
# sys.stdout.flush()
# self._origstdout = sys.stdout
# self._oldstdout_fno = os.dup(sys.stdout.fileno())
# self._devnull = os.open(os.devnull, os.O_WRONLY)
# def __enter__(self):
# self._newstdout = os.dup(1)
# os.dup2(self._devnull, 1)
# os.close(self._devnull)
# sys.stdout = os.fdopen(self._newstdout, 'w')
# def __exit__(self, exc_type, exc_val, exc_tb):
# sys.stdout = self._origstdout
# sys.stdout.flush()
# os.dup2(self._oldstdout_fno, 1) | [
"numpy.lexsort",
"os.dup",
"sys.stdout.fileno",
"numpy.diff",
"sys.stdout.flush"
] | [((112, 127), 'numpy.lexsort', 'np.lexsort', (['a.T'], {}), '(a.T)\n', (122, 127), True, 'import numpy as np\n'), ((156, 174), 'numpy.diff', 'np.diff', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (163, 174), True, 'import numpy as np\n'), ((772, 791), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (789, 791), False, 'import sys\n'), ((988, 1006), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1004, 1006), False, 'import sys\n'), ((1169, 1179), 'os.dup', 'os.dup', (['fd'], {}), '(fd)\n', (1175, 1179), False, 'import os\n')] |
#Import the libraries
import pandas as pd
import numpy as np
import requests
import matplotlib.pyplot as plt
import yfinance as yf
import datetime
import math
from datetime import timedelta
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import plotting
import cvxpy as cp
# Import libraries to fetch historical EUR/USD prices
from forex_python.converter import get_rate
from joblib import Parallel, delayed
DATE_FORMAT = '%Y-%m-%d'
# Database maintenance functions
# Connects to the pre-existing CSV price database
def connectAndLoadDb(exchange):
"""Connects to and loads the data for an exchange.
Parameters
----------
exchange : str
The name of the exchange stored at
"Price Databases\database_"+str(exchange)+".csv"
Returns
-------
DataFrame
database with dates & assets prices
in the native currency in each column
"""
print("Connecting database:"+str(exchange))
filename="Price Databases\database_"+str(exchange)+".csv"
database = pd.read_csv(filename,index_col=False)
print("Database connected!")
return database
#Gets the latest date of data in the db
def getLastEntryDate(database):
"""Gets the most recent entry date from
the prices database
Parameters
----------
database : DataFrame
The database of prices with a date column or index 'Date'
Returns
-------
str
The most recent entry date in '%Y-%m-%d' format
"""
lastDateEntry = database.iloc[-1]['Date']
lastDateEntry = datetime.datetime.strptime(lastDateEntry, DATE_FORMAT)
return lastDateEntry
#Writes the updated pandas dataframe to the CSV
def writeDbToExcelFile(database,exchange):
"""Saves the database as a csv to the directory:
'Price Databases\database_'+str(exchange)+'.csv'
----------
database : DataFrame
The database of prices with a date column or index 'Date'
exchange : str
The name of the index to use in the filename
"""
filename='Price Databases\database_'+str(exchange)+'.csv'
print('Writing database to filename: '+ filename)
database.index=database['Date']
database.drop(['Date'],axis=1,inplace=True)
database.to_csv(filename)
print('Database updated with new entries!!')
#Formats the date from number for printing
def prettyPrintDate(date):
"""Formats a date string to '%Y-%m-%d' format,
used to consistantly print the same date format
----------
date : str
The date we want to format
"""
return date.strftime(DATE_FORMAT)
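# Illustrative usage:
#   prettyPrintDate(datetime.datetime(2021, 3, 19))  ->  '2021-03-19'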
#Data Fetching functions
#get ticker list from our tsv files
def getTickers(exchange):
"""Pulls in the list of stock tickers for an exchange
stored at 'Company lists/companylist_'+str(exchange)+'.tsv'
Parameters
----------
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
Returns
-------
l_tickers : list
list of stock tickers listed on the exchange
"""
#We have the lists saved as TSV ie delimited with tabs rather than commas
df_info=pd.read_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
l_tickers=df_info.Symbol.tolist()
return l_tickers
# Updates data for a given exchange or creates a db from a given ticker list
def fetchData(database,exchange,start_date, refetchAll = False):
"""adds adj closing price data from a given exchange
from date using Yfinance.
Parameters
----------
database : DataFrame
The data base of prices to be appended.
Empty DataFrame if starting a new prices database.
start_date : str
When refetchAll=True this denotes the start date 'YYYY-MM-DD'
to pull data from up to yesterday
default is '2006-01-01'.
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
refetchAll : Boolean
False: updates price data from the latest entry up to yesterday
True: refetches all price data from '2006-01-01' to yesterday
Returns
-------
database : DataFrame
The database of with latest prices added.
"""
if refetchAll == True:
lastEntryDate = datetime.datetime.strptime(start_date, DATE_FORMAT) #Start date here
else:
lastEntryDate = getLastEntryDate(database)
ydaysDate = datetime.datetime.today() - timedelta(days = 1)
# checks is the data base already up to date
if lastEntryDate >= ydaysDate:
print('Data already loaded up to Yesterday')
return database
else:
print("Last entry in Db is of :" + prettyPrintDate(lastEntryDate))
print("----------------------------------------------")
dateToFetch = lastEntryDate + timedelta(days=1)
dateStr = prettyPrintDate(dateToFetch)
        print('Fetching stock closing prices of '+str(exchange)+' from: ' + dateStr)
l_tickers=getTickers(exchange)
#Pulling adj closing price data from yfinance
mergedData = yf.download(l_tickers,dateToFetch)['Adj Close']
#Making date the index col
mergedData['Date']=mergedData.index
#append our new data onto the existing databae
database = database.append(mergedData, ignore_index=True)
print("----------------------------------------------")
print("Data fill completed! 👍👍")
return database
# one line function to create or update a db for a given exchange
def update_db(exchange, start_date='2006-01-01',refetchAll = False):
"""One line funcion that pulls adj closing price data for
a given exchange into a DataFrame and saves as a csv to:
'Price Databases\database_'+str(exchange)+'.csv'.
Parameters
----------
exchange : str
The name of the exchange stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
start_date : str
When refetchAll=True this denotes the start date 'YYYY-MM-DD'
to pull data from up to yesterday
default is '2006-01-01'.
refetchAll : Boolean
False: updates price data from the latest entry up to yesterday
True: refetches all price data from '2006-01-01' to yesterday
Returns
-------
database : DataFrame
The database of with latest prices added for the exchange.
"""
if refetchAll == True:
#For a fresh run
database = pd.DataFrame()
database = fetchData(database, exchange, start_date, refetchAll)
else:
# Load in & Update an existing database
database = connectAndLoadDb(exchange)
database = fetchData(database,exchange, start_date)
    # Drop the last entry prior to saving, as it probably is not a full day's data
database.drop(database.tail(1).index, inplace = True)
# Write the data to CSV
writeDbToExcelFile(database,exchange)
return
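# Illustrative usage ('NASDAQ' stands in for whichever exchange lists exist
# under 'Company lists/'):
#   update_db('NASDAQ')                   # append new days to the existing database
#   update_db('NASDAQ', refetchAll=True)  # rebuild the database from 2006-01-01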
# For a given exchange, removes any tickers which have all NULLs in the database
def cleanCompanyList(exchange):
"""After database is created run this to check for any empty
columns and remove the ticket from the company list.
After this is ran re run update_db with Refetchall = True.
Parameters
----------
exchange : str
The name of the database stored at
'Company lists/companylist_'+str(exchange)+'.tsv'
"""
#Load db
df=connectAndLoadDb(exchange)
#create list of NULL columns
l_drop=df.columns[df.isna().all()].tolist()
#read in company list TSV
df_info=pd.read_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
df_info.drop(columns=['Unnamed: 0'],inplace=True)
df_info.index=df_info.Symbol
#drop listed rows
df_info.drop(index=l_drop, inplace=True)
df_info.reset_index(drop=True, inplace=True)
df_info.to_csv('Company lists/companylist_'+str(exchange)+'.tsv',sep='\t')
print(str(len(l_drop))+' Rows dropped from '+str(exchange))
return
def net_gains(principal,expected_returns,years,people=1):
"""Calculates the net gain after Irish Capital Gains Tax of a given principal for a given expected_returns over a given period of years"""
cgt_tax_exemption=1270*people #tax free threashold all gains after this are taxed at the cgt_ta_rate
cgt_tax_rate=0.33 #cgt_tax_rate as of 19/3/21
total_p=principal
year=0
while year < years:
year+=1
gross_returns=total_p*expected_returns
if gross_returns >cgt_tax_exemption:
taxable_returns=gross_returns-cgt_tax_exemption
net_returns=cgt_tax_exemption+(taxable_returns*(1-cgt_tax_rate))
else:
net_returns=gross_returns
total_p= total_p + net_returns
return total_p
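# Illustrative worked example: EUR 10,000 at 7% for one year, one person;
# the EUR 700 gross gain is below the EUR 1,270 exemption, so no CGT is due:
#   net_gains(10000, 0.07, 1)  ->  10700.0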
def gen_curr_csv(start_date='2006-01-01'):
"""
    Generates a dataframe of EUR exchange rates from 1st Jan. 2006 up to yesterday,
    and saves it to "Price Databases\curr_rates.csv".
    start_date : str
        The start date 'YYYY-MM-DD' to pull rates from, up to yesterday;
        default is '2006-01-01'.
"""
input_currencies = ['USD','JPY','GBP']
start_date = datetime.datetime.strptime(start_date, DATE_FORMAT)
print("Fetching Currecy rates from : "+prettyPrintDate(start_date))
print("For Eur from : "+str(input_currencies))
# May take up to 50 minutes to generate full set of rates
end_date = (datetime.datetime.today() - timedelta(1))
#end_date = datetime.datetime(2008,2,2).date() # For testing
print("Generating date list")
# Generate list of dates
dates = []
for i in range((end_date - start_date).days + 1):
dates.append((start_date + timedelta(i)))
# Add dates to dataframe
rates_df = pd.DataFrame()
rates_df['Date'] = dates
    #parallelising the date loop just over halves the runtime on the 15 years of data
for curr in input_currencies:
print("Fetching exchange data for: "+str(curr))
rates_df[curr]=Parallel(n_jobs=-1)(delayed(get_rate)(curr,'EUR', date) for date in dates)
print("Currecy rates updated")
# Saved into the folder with the rest of our pricing data
print("Writing database to filename: Price Databases\curr_rates.csv")
rates_df.to_csv("Price Databases\curr_rates.csv")
print("Database updated with new entries!!")
return
def load_curr_csv(stocks_df,input_curr):
"""
Loads FX rates data, and converts historical stock prices to EUR using the rate at the time
"""
rates_df = pd.read_csv("Price Databases\curr_rates.csv")
rates_df=rates_df.set_index(pd.DatetimeIndex(rates_df['Date'].values))
rates_df.drop(columns=['Date'],axis=1, inplace=True)
    if input_curr not in list(rates_df.columns):
        raise ValueError('Currency not supported')
rates_df = rates_df.merge(stocks_df,left_index=True, right_index=True).drop(columns=stocks_df.columns)
# Multiply each row of stocks dataframe by its' corresponding exchange rate
result = pd.DataFrame(np.expand_dims(np.array(rates_df[input_curr]), axis=-1) * np.array(stocks_df),columns=stocks_df.columns,index=stocks_df.index)
return result
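# Shape note: np.expand_dims turns the (n,) rate series into an (n, 1) column,
# so NumPy broadcasting scales every stock column by that day's FX rate,
# e.g. rates (n, 1) * prices (n, n_stocks) -> (n, n_stocks) in EUR.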
def priceDB_validation(database):
"""Takes the prices database checkes for negative stock prices,
if there are it attempts to repull the data,
if it cannot it drops those columns.
Parameters
----------
database : DataFrame
The dataframe of stock prices to be checked.
Returns
-------
database : DataFrame
The database of negative prices ammended
or offending stocks removed.
"""
#check for negative prices (should not have any)
neg_cols=database.columns[(database < 0).any()]
print('---------------------------------------------------------------------')
print('Negative prices are seen in the following assets: '+str(len(neg_cols)))
if len(neg_cols) >0:
print(neg_cols.tolist())
#Drop the offending columns
print('The following columns have been dropped: ')
print(neg_cols.tolist())
database.drop(columns=neg_cols.tolist(), inplace=True)
        #I can't get this part working, so I'm just dropping the columns that have issues for now
#Try to fix by rerunning the data
#df_retry=yf.download(neg_cols.tolist(),'2006-1-1')['Adj Close']
#print('Are there negatives in the repulled data : '+str((df_retry< 0).any()))
#if (df_retry< 0).any() ==True:
# print('Issue not solved by repulling data so the following columns have been dropped:')
# print(neg_cols.tolist())
# database.drop(columns=neg_cols.tolist(), inplace=True)
#else:
# print('Issue has been solved by repulling data, the following columns have been updated with repulled data:')
# print(neg_cols.tolist())
# database[neg_cols]=yf.download(neg_cols.tolist(),'2006-1-1')['Adj Close']
return database
#generates historic performance data
def portfolio_generate_test(database,startdate,enddate,p_max=400, min_returns=0.01, s_asset=0, asset_len=50, obj_method='SHARPE', target_percent=0.1, silent=True):
"""Takes the prices database checkes for negative stock prices,
if there are it attempts to repull the data,
if it cannot it drops those columns.
Parameters
----------
database : DataFrame
The dataframe of stock prices.
Returns
-------
database : DataFrame
The database of negative prices ammended
or offending stocks removed.
"""
if silent == False:
print('Running for :'+str(startdate)+' to '+str(enddate))
# Subset for date range
df_input=database[startdate:enddate]
if silent == False:
print ("Initial number of stocks: "+str(len(df_input.columns)))
#Check for stocks which are too expensive for us to buy & drop those
p_now=database.iloc[-1,:]
df_unaffordable=p_now[p_now>p_max] #we can set max price here maybe as an optional
l_unaffordable=df_unaffordable.index.tolist()
df_input.drop(columns=l_unaffordable, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Our max price is : €"+str(p_max))
print ("Number of stocks to drop due being unnaffordble: "+str(len(l_unaffordable)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
    # drop any columns with half or more NAs, as the optimizers don't handle these
half_length=int(len(df_input)*0.50)
l_drop=df_input.columns[df_input.iloc[:half_length,:].isna().all()].tolist()
df_input.drop(columns=l_drop, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to NAs: "+str(len(l_drop)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
    # drop any columns that are all NA over their last 3 rows, as these have been delisted
l_drop=df_input.columns[df_input.iloc[-3:,:].isna().all()].tolist()
df_input.drop(columns=l_drop, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to being delisted: "+str(len(l_drop)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
#see which stocks have negative returns or low returns in the period & drop those
df_pct=(df_input.iloc[-1,:].fillna(0) / df_input.iloc[0,:])
df_pct=df_pct[df_pct<= (min_returns + 1)] #we can set minimum returns here maybe as an optional
l_pct=df_pct.index.tolist()
df_input.drop(columns=l_pct, inplace=True)
if silent == False:
print ("-----------------------------------------------------")
print ("Number of stocks due to Negative returns: "+str(len(l_pct)))
print ("Number of stocks remaining: "+str(len(df_input.columns)))
print ("Number of days data: "+str(len(df_input)))
print ("As default we will only keep the top 50 performing stocks when creating our portfolio(this can be varied using s_asset & asset_len)")
    #We will only keep the asset_len best-performing assets, starting from rank s_asset
e_asset=s_asset + asset_len
df=df_input
mu = expected_returns.mean_historical_return(df)
top_stocks = mu.sort_values(ascending=False).index[s_asset:e_asset]
df = df[top_stocks]
    #Calculate expected annualised returns & annual sample covariance matrix of the daily asset prices
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)
    # Optimise the portfolio for the chosen objective (maximal Sharpe ratio by default)
ef= EfficientFrontier(mu, S) #Create the Efficient Frontier Object
    #A variety of objectives can be selected via the obj_method input
if obj_method == "SHARPE":
objective_summary=obj_method #description of the objective we used for our output df
weights = ef.max_sharpe()
elif obj_method == "MIN_VOL":
objective_summary=obj_method #description of the objective we used for our output df
weights = ef.min_volatility()
elif obj_method == "RISK":
objective_summary=obj_method+"_"+str(target_percent) #description of the objective we used for our output df
weights = ef.efficient_risk(target_percent)
elif obj_method == "RETURN":
objective_summary=obj_method+"_"+str(target_percent) #description of the objective we used for our output df
weights = ef.efficient_return(target_percent)
    else:
        raise ValueError("obj_method must be one of SHARPE, MIN_VOL, RISK, RETURN")
    cl_weights= ef.clean_weights()
#print(cl_weights)
if silent == False:
print("-------------------------------------------------------------")
print("Our Benchmark portfolio the S&P 500 has: Volatility 18.1% & Annual Return: 10.6%")
ef.portfolio_performance(verbose=True)
expected_portfolio_returns=ef.portfolio_performance()[0]
volatility=ef.portfolio_performance()[1]
r_sharpe=ef.portfolio_performance()[2]
    #calculate the actual performance date range (the year following enddate)
actual_startdate = pd.to_datetime(enddate) + pd.DateOffset(days=2)
actual_enddate = pd.to_datetime(actual_startdate) + pd.DateOffset(years=1)
#create df of price changes in the folowing year
df_actual=database[actual_startdate:actual_enddate]
    #some days have NaNs in them; use the next valid value to fill the gap
df_actual=df_actual.fillna(method='bfill')
#then fill any tail nans with 0 as we assume delisted
df_actual=df_actual.fillna(0)
    #select only the stocks we used for our portfolio generator
df_actual=df_actual[top_stocks]
    #normalise each stock's prices by its starting price (cumulative growth factor)
    df_actual=df_actual.apply(lambda x: x.div(x.iloc[0]))
    #reformat the weights so we can apply them to df_actual
df_weights=pd.DataFrame(cl_weights.values())
df_weights=df_weights.transpose()
df_weights.columns=df_actual.columns
#our total weighted returns by day
df_weighted_actual=df_actual.mul(df_weights.iloc[-1,:])
df_weighted_actual=df_weighted_actual.sum(axis=1)
    #now we calculate some stats; more can be added here
max_returns=df_weighted_actual.max()-1
mean_returns=df_weighted_actual.mean()-1
min_returns=df_weighted_actual.min()-1
actual_returns=df_weighted_actual[-1]-1
if silent == False:
#Create dataframe for graph
df_graph=pd.DataFrame(df_weighted_actual, columns=['Actual_Returns'])
df_graph['Actual_Returns']=df_graph['Actual_Returns']-1
df_graph['Expected_Returns']=expected_portfolio_returns
df_graph.plot(figsize=(10,5))
plt.show()
print("-------------------------------------------------------------")
print("Our portfolio performed at : " + str(f'{actual_returns*100:.{1}f}')+"%")
print("Max : " + str(f'{max_returns*100:.{1}f}')+"%, "
+"Min : " + str(f'{min_returns*100:.{1}f}')+"%, "
+"Mean : " + str(f'{mean_returns*100:.{1}f}')+"%")
return [pd.to_datetime(startdate), pd.to_datetime(enddate), expected_portfolio_returns, volatility, r_sharpe, max_returns, min_returns, actual_returns,mean_returns, objective_summary] | [
"pypfopt.risk_models.sample_cov",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"datetime.datetime.today",
"yfinance.download",
"pandas.read_csv",
"pypfopt.efficient_frontier.EfficientFrontier",
"pandas.DatetimeIndex",
"datetime.datetime.strptime",
"datetime.timedelta",
"pandas.to_datetime",
"... | [((1116, 1154), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(False)'}), '(filename, index_col=False)\n', (1127, 1154), True, 'import pandas as pd\n'), ((1634, 1688), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['lastDateEntry', 'DATE_FORMAT'], {}), '(lastDateEntry, DATE_FORMAT)\n', (1660, 1688), False, 'import datetime\n'), ((9323, 9374), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start_date', 'DATE_FORMAT'], {}), '(start_date, DATE_FORMAT)\n', (9349, 9374), False, 'import datetime\n'), ((9916, 9930), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9928, 9930), True, 'import pandas as pd\n'), ((10733, 10779), 'pandas.read_csv', 'pd.read_csv', (['"""Price Databases\\\\curr_rates.csv"""'], {}), "('Price Databases\\\\curr_rates.csv')\n", (10744, 10779), True, 'import pandas as pd\n'), ((16542, 16585), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {}), '(df)\n', (16581, 16585), False, 'from pypfopt import expected_returns\n'), ((16787, 16830), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df'], {}), '(df)\n', (16826, 16830), False, 'from pypfopt import expected_returns\n'), ((16839, 16865), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df'], {}), '(df)\n', (16861, 16865), False, 'from pypfopt import risk_models\n'), ((16915, 16939), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['mu', 'S'], {}), '(mu, S)\n', (16932, 16939), False, 'from pypfopt.efficient_frontier import EfficientFrontier\n'), ((4379, 4430), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['start_date', 'DATE_FORMAT'], {}), '(start_date, DATE_FORMAT)\n', (4405, 4430), False, 'import datetime\n'), ((4526, 4551), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4549, 4551), False, 'import datetime\n'), ((4554, 4571), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4563, 4571), False, 'from datetime import timedelta\n'), ((6589, 6603), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6601, 6603), True, 'import pandas as pd\n'), ((9577, 9602), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (9600, 9602), False, 'import datetime\n'), ((9605, 9617), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (9614, 9617), False, 'from datetime import timedelta\n'), ((10816, 10857), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["rates_df['Date'].values"], {}), "(rates_df['Date'].values)\n", (10832, 10857), True, 'import pandas as pd\n'), ((18412, 18435), 'pandas.to_datetime', 'pd.to_datetime', (['enddate'], {}), '(enddate)\n', (18426, 18435), True, 'import pandas as pd\n'), ((18438, 18459), 'pandas.DateOffset', 'pd.DateOffset', ([], {'days': '(2)'}), '(days=2)\n', (18451, 18459), True, 'import pandas as pd\n'), ((18481, 18513), 'pandas.to_datetime', 'pd.to_datetime', (['actual_startdate'], {}), '(actual_startdate)\n', (18495, 18513), True, 'import pandas as pd\n'), ((18516, 18538), 'pandas.DateOffset', 'pd.DateOffset', ([], {'years': '(1)'}), '(years=1)\n', (18529, 18538), True, 'import pandas as pd\n'), ((19733, 19793), 'pandas.DataFrame', 'pd.DataFrame', (['df_weighted_actual'], {'columns': "['Actual_Returns']"}), "(df_weighted_actual, columns=['Actual_Returns'])\n", (19745, 19793), True, 'import pandas as pd\n'), ((19968, 19978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19976, 19978), True, 'import 
matplotlib.pyplot as plt\n'), ((20349, 20374), 'pandas.to_datetime', 'pd.to_datetime', (['startdate'], {}), '(startdate)\n', (20363, 20374), True, 'import pandas as pd\n'), ((20376, 20399), 'pandas.to_datetime', 'pd.to_datetime', (['enddate'], {}), '(enddate)\n', (20390, 20399), True, 'import pandas as pd\n'), ((4928, 4945), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4937, 4945), False, 'from datetime import timedelta\n'), ((5203, 5238), 'yfinance.download', 'yf.download', (['l_tickers', 'dateToFetch'], {}), '(l_tickers, dateToFetch)\n', (5214, 5238), True, 'import yfinance as yf\n'), ((10196, 10215), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (10204, 10215), False, 'from joblib import Parallel, delayed\n'), ((11278, 11297), 'numpy.array', 'np.array', (['stocks_df'], {}), '(stocks_df)\n', (11286, 11297), True, 'import numpy as np\n'), ((9852, 9864), 'datetime.timedelta', 'timedelta', (['i'], {}), '(i)\n', (9861, 9864), False, 'from datetime import timedelta\n'), ((11235, 11265), 'numpy.array', 'np.array', (['rates_df[input_curr]'], {}), '(rates_df[input_curr])\n', (11243, 11265), True, 'import numpy as np\n'), ((10216, 10233), 'joblib.delayed', 'delayed', (['get_rate'], {}), '(get_rate)\n', (10223, 10233), False, 'from joblib import Parallel, delayed\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 29 04:04:53 2021
@author: shahin75
"""
# thermal state imports
import numpy as np
import random
from tenpy.networks.mps import MPS
from tenpy.networks.mpo import MPO
from tenpy.networks.site import SpinHalfSite
import scipy.linalg as la
class thermal_state(object):
"""
Represents thermal states (in the forms of Matrix Product Density Operator
    and random-holographic Matrix Product State) and is used for finite-temperature
simulations.
"""
def __init__(self, tensor, N):
"""
Parameters
--------------
N: int
Number of sites in the main network chain (= L * l_uc, where L is number of
repetitions of the unit cell and l_uc is the length of unit-cell).
tensor: numpy.ndarray
Bulk rank-4 tensors of the main chain.
tensor index ordering: physical-out, bond-out, physical-in, bond-in
(with "in/out" referring to the right canonical form ordering)
"""
self.N = N
self.tensor = tensor
# tensor dimensions (consistent with rank-4 structure)
self.d = tensor[:,0,0,0].size # physical leg dimension (assumes rank-4 structures)
self.chi = tensor[0,:,0,0].size # bond leg dimension (assumes rank-4 structures)
def network_from_cells(self, network_type,
L, chi_MPO=None,
params=None, bdry_vecs=None,
method=None, T=None):
"""
Returns network of finite thermal-holographic Matrix Product State (random-holoMPS), finite
holo-MPS, finite holographic Matrix Product Operator (holoMPO), or MPO of a given model.
--------------
Inputs:
--the input assumes the list of unitary tensors at each unit-cell or rank-4 numpy.ndarray--
network_type: str
One of "random_state", "circuit_MPS", "circuit_MPO", or "MPO" options.
L: int
Length (number) of repetitions of unit cell in the main network chain.
chi_MPO: int
Bond leg dimension for MPO-based structures.
params: numpy.ndarray
Optimized parameters for unitary structure and probability weights.
bdry_vecs: list
List of left (first element) and right (second element) boundary vectors.
(must be set to [None,None] by default, which gives left and right boundary vectors = |0>
for MPO-based structures. For holoMPS-based structures, the default [None,None]
would give left boundary = |0> while the right boundary is traced over).
method: str
One of "thermal_state_class" or "tenpy" options. (if set to "tenpy", the returned structure
would be one of physics-TenPy networks). This option is currently only available for
"random_state", "circuit_MPS", and "MPO" options.
T: float
Temperature (for thermal-holoMPS option).
Note:
-For random_state, circuit_MPS and circuit_MPO options, the original circuit with
parameters must be inserted as args. In this case, the returned list of bulk tensors
includes rank-3 numpy.ndarray for random_state/circuit_MPS and rank-4 numpy.ndarray for
circuit_MPO.
-For holoMPS-based structures, the index ordering is: site, physical_out, bond-in, bond-out
while for holoMPO-based structures, the index ordering is: physical-out, bond-out,
physical-in, bond-in (with "in/out" referring to right canonical form ordering).
-For MPO structures constructed by "thermal_state_class method", the unit cell tensor of MPO
network must be inserted as arg (e.g. Hamiltonian unit cell). In this case, the bulk tensors
would be rank-4 numpy.ndarray (consistent with final structure of MPO). For "tenpy"-method-based
structures, the list of bulk tensors must be inserted (see TeNPy docs for more detail).
-Tracing over right boundary for holoMPS-based structures is appropriate for
holographic simulations.
-Set bdry_vecs to None by default for "tenpy" method. Set method to None for holoMPO-based
structures.
"""
# for circuit-based structures:
# both circuit and params must be included
if network_type == 'random_state' or network_type == 'circuit_MPS' or network_type == 'circuit_MPO':
l_uc = len(self) # length of unit-cell
N = l_uc * L # number of sites
unitary_list = L * self # list of unitaries
# if network_type is set to random-holoMPS:
if network_type == 'random_state':
# defining tensor dimensions
tensor = np.swapaxes(unitary_list[0][:,:,0,:],1,2) # change to MPS-based structure
d = tensor[:,0,0].size # physical leg dimension (for random state)
chi = tensor[0,:,0].size # bond leg dimension (for random state)
if T == 0:
tensor_list1 = [np.swapaxes(unitary[:,:,0,:],1,2) for unitary in unitary_list]
else:
# list of variational probability weights and random selections at each site
probs_list = L * thermal_state.prob_list(self,params,T)
random_list = [random.choice(p) for p in probs_list]
index_list = [probs_list[j].index(random_list[j]) for j in range(N)]
tensor_list1 = [np.swapaxes(unitary[:,:,j,:],1,2) for unitary,j in zip(unitary_list,
index_list)]
# if network_type is set to holoMPS:
elif network_type == 'circuit_MPS':
# defining tensor dimensions
tensor = np.swapaxes(unitary_list[0][:,:,0,:],1,2) # change to MPS-based structure
d = tensor[:,0,0].size # physical leg dimension (for random state)
chi = tensor[0,:,0].size # bond leg dimension (for random state)
# bulk tensors of holoMPS structure
tensor_list1 = [np.swapaxes(unitary[:,:,0,:],1,2) for unitary in unitary_list]
# if network_type is set to circuit_MPO
# this option assumes original, circuit-based MPO structures (e.g. holoMPO)
elif network_type == 'circuit_MPO':
# defining tensor dimensions (consistent with rank-4 structures)
# index ordering consistent with holographic-based MPO structures
d = unitary_list[0][:,0,0,0].size # physical leg dimension (for MPO)
chi = unitary_list[0][0,:,0,0].size # bond leg dimension (for MPO)
tensor_list1 = unitary_list
# testing boundary conditions
if network_type == 'random_state' or network_type == 'circuit_MPS':
# specific to holoMPS-based structures
if method == 'tenpy':
# based on previous circuit file
tensor_list1[0] = tensor_list1[0][:,0:1,:]
tensor_list1[-1] = tensor_list1[-1][:,:,0:1]
site = SpinHalfSite(None)
M = MPS.from_Bflat([site]*N, tensor_list1, bc='finite', dtype=complex, form=None)
MPS.canonical_form_finite(M,renormalize=True,cutoff=0.0)
elif method == 'thermal_state_class':
bdry = []
# if boundary vectors are not specified for holoMPS-based structures:
# checking left boundary vector
# if left boundary vector not specified, set to (1,0,0,0...)
if np.array(bdry_vecs[0] == None).all():
bdry += [np.zeros(chi)]
bdry[0][0] = 1
else:
if bdry_vecs[0].size != chi:
raise ValueError('left boundary vector different size than bulk tensors')
bdry += [bdry_vecs[0]]
# checking right boundary vector (special to holoMPS-based structures)
if np.array(bdry_vecs[1] == None).all():
bdry += [None]
else:
if bdry_vecs[1].size != chi:
raise ValueError('right boundary vector different size than bulk tensors')
bdry += [bdry_vecs[1]]
# if both boundary vectors are specified
for j in range(2):
if np.array(bdry_vecs[j] != None).all() and bdry_vecs[j].size == chi:
bdry.append(bdry_vecs[j])
M = [[bdry[0]],tensor_list1,[bdry[1]]] # final state structure
else:
raise ValueError('only one of "thermal_state_class" or "tenpy" options')
elif network_type == 'circuit_MPO': # specific to holoMPO-based structures
bdry = []
for j in range(2):
# if both boundary vectors are specified
if np.array(bdry_vecs[j] != None).all() and bdry_vecs[j].size == chi:
bdry.append(bdry_vecs[j])
# if boundary vectors not specified, set to (1,0,0,0...)
elif np.array(bdry_vecs[j] == None).all():
bdry += [np.zeros(chi)]
bdry[j][0] = 1
else:
if bdry_vecs[j].size != chi:
raise ValueError('boundary vectors different size than bulk tensors')
bdry += [bdry_vecs[j]]
M = [[bdry[0]],tensor_list1,[bdry[1]]] # final state structure
# if network_type is set to MPO:
# this option assumes genuine MPO_based structures (e.g. Hamiltonian MPO)
elif network_type == 'MPO':
if method == 'tenpy': # tenpy-based MPO
site = SpinHalfSite(None)
M = MPO.from_grids([site]*L, self, bc = 'finite', IdL=0, IdR=-1)
elif method == 'thermal_state_class':
# only bulk tensors of the main chain must be included (w/out params)
tensor_list1 = [self]*L
# testing boundary conditions
bdry = []
for j in range(2):
# if both boundary vectors are specified
if np.array(bdry_vecs[j] != None).all() and bdry_vecs[j].size == chi_MPO:
bdry.append(bdry_vecs[j])
# if boundary vectors not specified, set to (1,0,0,0...)
elif np.array(bdry_vecs[j] == None).all():
bdry += [np.zeros(chi_MPO)]
bdry[j][0] = 1
else:
if bdry_vecs[j].size != chi_MPO:
raise ValueError('boundary vectors different size than bulk tensors')
bdry += [bdry_vecs[j]]
M = [[bdry[0]],tensor_list1,[bdry[1]]] # final state structure
else:
raise ValueError('only one of "thermal_state_class" or "tenpy" options')
else:
raise ValueError('only one of "random_state", "circuit_MPS", "circuit_MPO", "MPO" options')
return M
def prob_list(self, params, T):
"""
        Returns list of variational probability weights (based on the Boltzmann distribution)
for thermal-holographic matrix product state or thermal density matrix
product operator for each unit-cell.
--------------
Inputs:
--the input assumes the list of unitary tensors at each unit-cell--
params: numpy.ndarray
Optimized parameters for unitary structure and probability weights.
T: float
Temperature.
"""
# tensor dimensions (consistent with rank-4 structure)
# physical leg dimension list (for all circuits in unit-cell)
d_list = [unitary[:,0,0,0].size for unitary in self]
# bond leg dimension (for all circuits in unit-cell)
chi_list = [unitary[0,:,0,0].size for unitary in self]
l_uc = len(self) # length of unit-cell
d_tot = sum(d_list) # total physical leg dimension * l_uc
prob_params = params[:d_tot]
        if T != 0: # checking temperature
            exc_list= [np.exp(-k/T) for k in prob_params] # list of Boltzmann weights
            # grouping Boltzmann weights for each site
            # (chunk lengths follow d_list; offsets are the cumulative starts of each chunk)
            offsets = np.cumsum([0] + d_list[:-1])
            group_list = [exc_list[j:j+d] for d,j in zip(d_list,offsets)]
            z_list = [sum(j) for j in group_list] # list of partition functions
            norm_list = []
            for j in range(len(group_list)): # normalizing weights
                for k in group_list[j]:
                    norm_list.append(k/z_list[j])
            # final probs for unit-cell
            prob_list = [norm_list[j:j+d] for d,j in zip(d_list,offsets)]
else:
prob_list = [(np.zeros(d)).tolist() for d in d_list]
# setting probability of |0> to 1 at T = 0
for j in range(len(prob_list)):
prob_list[j][0] += 1
return prob_list
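    # Illustrative check: for a single-qubit unit cell (d = 2) with
    # prob_params = (0, E), prob_list reduces to the two-level Boltzmann
    # distribution [1/(1+exp(-E/T)), exp(-E/T)/(1+exp(-E/T))].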
def density_matrix(self, params,
T, L,
bdry_vecs=None):
"""
Returns thermal Matrix Product Density Operator (MPDO).
--------------
Inputs:
--the input assumes the list of unitary tensors at each unit-cell--
params: numpy.ndarray
Optimized parameters for unitary structure and probability weights.
L: int
Length (number) of repetitions of unit cell in the main network chain.
T: float
            Temperature. (The probability weights of each physical state are
            computed internally via thermal_state.prob_list.)
bdry_vecs: list
List of left (first element) and right (second element) boundary vectors.
Note:
-If bdry_vecs is set to None, the infinite version of MPDO (iMPDO) would be returned.
        In this case, the steady-state of the density matrix is computed from the MPDO
transfer-matrix structure.
-If bdry_vecs is set to [None,None], the left and right boundary = id would be returned,
where id is the identity element with appropriate dimension consistent with the bond
dimension chi.
"""
# tensor dimensions (consistent with rank-4 structure)
# index ordering consistent with holographic-based MPO structures
d = self[0][:,0,0,0].size # physical leg dimension
chi = self[0][0,:,0,0].size # bond leg dimension
l_uc = len(self) # length of unit-cell
N = l_uc * L # number of sites
# construction of state
if bdry_vecs == None:
# constructing state for iMPDO
state = thermal_state.network_from_cells(self,'circuit_MPO',
L,None,
params,[None,None],
None,T)
else: # constructing state for MPDO
state = thermal_state.network_from_cells(self,'circuit_MPO',
L,None,
params,bdry_vecs,
None,T)
# constructing the probability weights chain
probs_list = L * thermal_state.prob_list(self,params,T)
#p_matrix_chain = [np.diag(p) for p in probs_list]
# contractions of density matrix:
contractions = []
for j in range(N):
W1 = np.einsum('abcd,c,eick->abdeik',state[1][j],probs_list[j],state[1][j].conj())
#W1 = np.einsum('abcd,ck->abdk',state[1][j],p_matrix_chain[j])
# reordering and reshaping indices
W2 = np.einsum('abdeik->abiedk',W1)
#W2 = np.einsum('abdk,cjki->abdcji',W1,state[1][j].conj())
#W3 = np.einsum('abdcji->abjcdi',W2)
contractions.append(W2)
if bdry_vecs != None:
# left boundary contraction
bvecl = np.eye(chi)
bvecr = np.eye(chi)
method = 'MPDO' # structure label
new_contractions = contractions
else:
# finding steady-state of transfer matrix of iMPDO
            # finding the transfer matrix:
if l_uc == 1: # if only one unitary within each unit-cell
transfer_mat = np.einsum('abad->bd',np.reshape(contractions[0],[d,chi**2,d,chi**2]))
new_contractions = contractions
else: # for l_uc > 1
W_list = [np.reshape(contractions[j],[d,chi**2,d,chi**2]) for j in range(l_uc)]
W_list1 = [W_list[0]]
for j in range(1,len(W_list)):
tensor = np.reshape(np.einsum('acdijk->aijckd',
np.einsum('abcd,ijkb->acdijk',W_list1[0],W_list[j])),
[d**2,chi**2,d**2,chi**2])
W_list1 = []
W_list1.append(tensor)
                transfer_mat = np.einsum('abad->bd',W_list1[0]) # transfer matrix
new_contractions = contractions[:-1]
eig_vals,eig_vecs = la.eig(transfer_mat)
idx = np.where(np.abs(1-abs(eig_vals))<1e-5)[0][0] # index of steady-state
steady_den = np.reshape(eig_vecs[:,idx],[chi,chi]) # steady-state density matrix
bvecl = steady_den/np.trace(steady_den) # normalization of steady-state
#v = eig_vecs[:,idx] # steady-state
method = 'iMPDO' # structure label
bvecr = np.eye(chi)
density_matrix = [[bvecl],new_contractions,[bvecr],method] ## last element = structure label
return density_matrix
def network_site_contraction(self, state_type, chi_MPO=None, MPO=None):
"""
        Returns a list of contractions of the network at each site for <MPS|MPS>
        (transfer-matrix-like structures), <MPS|MPO|MPS>, or <MPO|DMPO> networks.
MPS: holographic-matrix-product-state-based structures.
MPO: matrix product operator.
MPDO: matrix product density operator.
--------------
Inputs:
--the input assumes thermal_state_class-based holoMPS and MPDO structures--
state_type: str
One of "random_state", "circuit_MPS", or "density_matrix" options.
chi_MPO: int
Bond leg dimension for MPO-based structure.
MPO: thermal_state_class-based MPO structure.
Set to None for pure wave function simulations for MPS states.
Note:
-If MPO is not inserted for holoMPS states, the function computes transfer matrices
        for the state wave function at each site.
-Length of MPO structure might be less than length of state.
-The output would be returned as a list of contraction matrices computed for each
unit cell at each site.
"""
contraction_list = []
# for holoMPS and random holoMPS-based structures:
if state_type == 'random_state' or state_type == 'circuit_MPS':
# tensor dimensions (consistent with rank-3 structures)
# index ordering consistent with holoMPS-based structures
tensor = self[1][0]
d = tensor[:,0,0].size # physical leg dimension
chi = tensor[0,:,0].size # bond leg dimension
N = len(self[1]) # number of sites (L * l_uc) in the main network chain (for holoMPS state).
# contracted (transfer) matrices for the wave function:
# (w/out MPO inserted)
if MPO == None:
# site contractions for state and its dual
for j in range(N):
# contraction state/dual state
tensor1 = np.tensordot(self[1][j].conj(),self[1][j],axes=[0,0])
# reshaping into matrix
# contraction (transfer) matrix at each site
tensor2 = np.reshape(np.swapaxes(np.swapaxes(np.swapaxes(tensor1,0,3),0,1),2,3),
[chi**2,chi**2])
contraction_list.append(tensor2)
# contracted matrices w/ MPO inserted
else:
N_MPO = len(MPO[1]) # number of sites for squeezed MPO structure.
# site contractions for state/MPO/dual-state
for j in range(N_MPO):
# contractions with state
chi_s = chi_MPO * chi
tensor1 = np.tensordot(MPO[1][j],self[1][j],axes=[2,0])
tensor2 = np.reshape(np.swapaxes(np.swapaxes(tensor1,1,2),2,3),[d,chi_s,chi_s])
# contractions with dual state
chi_tot = chi_s * chi # total bond dimension
tensor3 = np.tensordot(self[1][j].conj(),tensor2,axes=[0,0])
# contracted matrices at each site
tensor4 = np.reshape(np.swapaxes(np.swapaxes(np.swapaxes(tensor3,0,3),0,1),2,3),
[chi_tot,chi_tot])
contraction_list.append(tensor4)
# contraction of rest of state and its dual if N_MPO different than N
for j in range(N-N_MPO):
# contraction state/dual state
tensor5 = np.tensordot(self[1][j].conj(),self[1][j],axes=[0,0])
# reshaping into matrix
# contraction (transfer) matrix at each site
tensor6 = np.reshape(np.swapaxes(np.swapaxes(np.swapaxes(tensor5,0,3),0,1),2,3),
[chi**2,chi**2])
contraction_list.append(tensor6)
# contracted network structure at each site for density matrix
# must include MPO (e.g. Hamiltonian MPO)
elif state_type == 'density_matrix':
N = len(self[1]) # number of sites in main network chain.
N_MPO = len(MPO[1]) # number of sites for inserted MPO structure.
# tensor dimensions
d = np.shape(self[1][0])[0] # density matrix physical leg dimension
d_MPO = MPO[1][0][:,0,0,0].size # MPO physical leg dimension
chi = np.shape(self[1][0])[1] # density matrix bond leg dimension
chi_MPO = MPO[1][0][0,:,0,0].size # MPO bond leg dimension
chi_tot = chi**2 * chi_MPO # total bond leg dimension
tensor_list = []
# confirming the label of structure
if self[3] == 'MPDO' or self[3] == 'iMPDO':
if self[3] == 'MPDO': # left and right contractions for finite structure
s_left = np.reshape(np.einsum('abcdee->abcd',self[1][0]),[d,chi**2,d])
#s_left = np.reshape(np.einsum('abcdej,ej->abcd',self[1][0],self[0][0]),[d,chi**2,d])
s_right = np.reshape(np.einsum('aeebcd->abcd',self[1][-1]),[d,d,chi**2])
#s_right = np.reshape(np.einsum('aejbcd,ej->abcd',self[1][-1],self[2][0]),[d,d,chi**2])
elif self[3] == 'iMPDO': # left and right contractions infinite structure
s_left = np.reshape(np.einsum('abcdee,ee->abcd',self[1][0],self[0][0]),[d,chi**2,d])
#s_right = np.reshape(np.einsum('aeebcd,ee->abcd',self[1][-1],self[3][0]),[d_s,d_s,chi_s**2])
s_right = np.reshape(np.einsum('aeebcd->abcd',self[1][-1]),[d,d,chi**2])
# MPO boundary contractions
MPO_left = np.einsum('abcd,d->abc',MPO[1][0],MPO[0][0])
MPO_right = np.einsum('abcd,b->acd',MPO[1][-1],MPO[2][0])
# contractions of boundary MPO and state and reshaping
left_tensor = np.einsum('bcde->dbec',np.einsum('abc,dea->bcde',s_left,MPO_left))
right_tensor = np.einsum('bcde->dbce',np.einsum('abc,dae->bcde',s_right,MPO_right))
bvecl = np.reshape(np.einsum('jbkj->bk',left_tensor),[chi_tot])
bvecr = np.reshape(np.einsum('aack->ck',right_tensor),[chi_tot])
# bulk contractions
for j in range(1,N-1):
# MPO and density matrix contractions
tensor1 = np.einsum('abcd,ijak->bcdijk',np.reshape(self[1][j],[d,chi**2,d,chi**2]),
MPO[1][j])
#tensor1 = np.einsum('abcd,cijk->abdijk',self[1][j],MPO[1][j])
tensor2 = np.einsum('bcdijk->ibjcdk',tensor1) # index reordering
#tensor2 = np.einsum('abdijk->abijdk',tensor1)
tensor3 = np.reshape(tensor2,[d,chi_tot,d_MPO,chi_tot])
# tracing over p_out and p_in
tensor4 = np.trace(tensor3,axis1=0,axis2=2)
tensor_list.append(np.reshape(tensor4,[chi_tot,chi_tot]))
contraction_list = [bvecl] + tensor_list + [bvecr]
else:
raise ValueError('structure label must be "MPDO" or "iMPDO"')
else:
raise ValueError('only one of "random_state", "circuit_MPS", or "density_matrix" options')
return contraction_list
def expectation_value(self, state_type,
L, chi_MPO=None,
MPO=None, params=None,
method=None, T=None):
"""
Returns the numerical result of full contractions of <MPS|MPS>,
<MPS|MPO|MPS>, or <MPO|DMPO> networks.
MPS: holographic-matrix-product-state-based structures.
MPO: matrix product operator.
DMPO: density matrix product operator.
--------------
Inputs:
--the input assumes thermal_state_class-based holoMPS, DMPO structures,
and the list of unitary tensors at each unit-cell--
state_type: str
One of "random_state", "circuit_MPS", or "density_matrix" options.
chi_MPO: int
Bond leg dimension for MPO-based structures.
MPO: thermal_state_class-based MPO structure.
Set to None for pure wave function simulations.
L: int
Length (number) of repetitions of unit cell in the main network chain.
params: numpy.ndarray
Optimized parameters for unitary structure and probability weights.
(only required for density matrix-method II).
method: str
One of "method_I" or "method_II" options. This option is available
for "density_matrix".
T: float
Temperature
Note:
-Left boundary condition is set by the given holoMPS boundary vectors, and the right
boundary is averaged over (as consistent with holographic-based simulations).
-If MPO is not inserted (for MPS structures), the function computes the expectation value
        for the state wave function (<MPS|MPS>).
-The expectation value could be computed with two different methods using density matrix-
based structures. If method is set to "method_II", the list of unitary tensors at each
unit-cell must be passed as input.
-If "method_II" is selected for density matrix operations, the MPO must have the same
        index ordering as TenPy (virtual left, virtual right, physical out, physical in).
"""
# for holoMPS and random holoMPS-based structures:
if state_type == 'random_state' or state_type == 'circuit_MPS':
# list of contracted matrices
con_mat = thermal_state.network_site_contraction(self,state_type,
chi_MPO,MPO)
# accumulation of contracted matrices defined at each site
con_mat0 = con_mat[0]
for j in range(1,len(con_mat)):
con_mat0 = con_mat[j] @ con_mat0
# tensor dimensions (consistent with rank-3 structure)
# index ordering consistent with holoMPS-based structures
tensor = self[1][0]
d = tensor[:,0,0].size # physical leg dimension (for holoMPS)
chi = tensor[0,:,0].size # bond leg dimension (for holoMPS)
# w/out MPO inserted
if MPO == None:
bvecl = np.kron(self[0][0].conj(),self[0][0]) # left boundary contraction
# right boundary contraction:
if np.array(self[2][0] == None).all():
# summing over right vector if right boundary condition is not specified
con_mat_on_rvec = np.reshape(con_mat0 @ bvecl,[chi**2])
rvec = np.reshape(np.eye(chi),[chi**2])
expect_val = np.dot(rvec,con_mat_on_rvec)
else:
bvecr = np.kron(self[2][0].conj(),self[2][0])
expect_val = bvecr.conj().T @ con_mat0 @ bvecl
# w/ MPO inserted
else:
# left boundary contraction
bvecl = np.kron(self[0][0].conj(),np.kron(MPO[0][0],self[0][0]))
                # right boundary contraction:
if np.array(self[2][0] == None).all():
# summing over right vectors if right boundary condition is not specified
# employ the specified right boundary vector of MPO.
con_vleft = np.reshape((con_mat0 @ bvecl),[chi,chi_MPO,chi]) # con_mat on left vector
MPO_rvec_contracted = np.reshape(np.tensordot(MPO[2][0],con_vleft,axes=[0,1]),[chi**2])
rvec = np.reshape(np.eye(chi),[chi**2])
expect_val = np.dot(rvec,MPO_rvec_contracted)
else:
bvecr = np.kron(self[2][0].conj(),np.kron(MPO[2][0],self[2][0]))
expect_val = bvecr.conj().T @ con_mat0 @ bvecl
# for density-matrix-based structures:
# must include MPO (e.g. Hamiltonian MPO)
elif state_type == 'density_matrix':
# first method of contracting density matrix structures
if method == 'method_I':
chi_MPO = MPO[1][0][0,:,0,0].size # MPO bond leg dimension
chi = np.shape(self[1][0])[1] # density matrix bond leg dimension
# list of contracted matrices
con_mat = thermal_state.network_site_contraction(self,state_type,
chi_MPO,MPO)
con_mat0 = con_mat[1]
# boundary vectors
bvecl = con_mat[0]
bvecr = con_mat[-1]
# checking structure label
if self[3] == "MPDO": # finite structures
if len(con_mat) == 2:
expect_val = (bvecr.T @ bvecl)/chi
else:
#M = np.linalg.matrix_power(con_mat0,len(con_mat)-2)
for j in range(2,len(con_mat)-1):
con_mat0 = con_mat[j] @ con_mat0
expect_val = (bvecr.T @ con_mat0 @ bvecl)/chi
elif self[3] == "iMPDO": # infinite structures
if len(con_mat) == 2:
expect_val = bvecr.T @ bvecl
else: # case of l_uc > 1
#M = np.linalg.matrix_power(con_mat0,len(con_mat)-2)
for j in range(2,len(con_mat)-1):
con_mat0 = con_mat[j] @ con_mat0
expect_val = bvecr.T @ con_mat0 @ bvecl
else:
raise ValueError('structure label must be "MPDO" or "iMPDO"')
# second method of contracting density matrix structures
elif method == 'method_II':
l_uc = len(self) # length of each unit-cell
N = L * l_uc # total number of sites
# changing index ordering to: p_out, b_out, p_in, b_in
MPO_list = [np.einsum('abcd->cadb',MPO[1][0]) for j in range(l_uc)]
# list of probabilities for each unit-cell
prob_list = thermal_state.prob_list(self,params,T)
# list of contracted matrices
con_mat = [np.einsum('arbs,bick,c,ajcl->rijskl',MPO_list[m],self[m],
prob_list[m],self[m].conj()) for m in range(l_uc)]
# contraction with left boundary vector
bvecl = np.einsum('rijskk,s->rij',con_mat[0],MPO[0][0])
for n in range(1,N):
bvecl = np.einsum('rijskl,skl->rij',con_mat[n % l_uc],bvecl)
expect_val = np.einsum('rii,r',bvecl,MPO[2][0])
            else:
                raise ValueError('only one of "method_I" or "method_II" options')
else:
raise ValueError('only one of "random_state", "circuit_MPS", or "density_matrix" options')
return (expect_val).real
def entropy(self):
"""
Returns the von Neumann entropy of a given probability
weight list (in the form of Shannon entropy) for each unit-cell.
--------------
--the input assumes thermal_state_class-based prob_list--
"""
# physical leg dimension list (for all circuits in unit-cell)
d_list = [len(j) for j in self]
l_uc = len(self) # unit-cell length
# avoiding NaN in numpy.log() function
new_prob_list = [np.array(j)[np.array(j) > 1.e-30] for j in self]
s_list1 = []
for j in range(len(new_prob_list)):
for p in new_prob_list[j]:
s_list1.append(-p*np.log(p)) # converting to form of Shannon entropy
        # list of entropies at each site (within unit-cell);
        # chunk sizes follow the filtered per-site dimensions
        d_list = [len(j) for j in new_prob_list]
        offsets = np.cumsum([0] + d_list[:-1])
        s_list2 = [sum(s_list1[j:j+d]) for d,j in zip(d_list,offsets)]
s = sum(s_list2)/l_uc # entropy
return s
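    # Illustrative check: a maximally mixed qubit unit cell, prob_list = [[0.5, 0.5]],
    # gives S = -2*(0.5*np.log(0.5)) = np.log(2), about 0.693.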
def free_energy(self, params,
state_type, L,
Hamiltonian, T,
chi_H=None, bdry_vecs1=None,
bdry_vecs2=None, method=None,
N_sample=None,
S=None):
"""
Returns the Helmholtz free energy of a thermal density matrix structure
or thermal holographic matrix product state.
--------------
Inputs:
--the input assumes the list of unitary tensors at each unit-cell--
state_type: str
One of "density_matrix" or "random_state" options.
params: numpy.ndarray
Optimized parameters for unitary structure and probability weights.
L: int
Length (number) of repetitions of unit cell in the main network chain.
Hamiltonian: numpy.ndarray
The Hamiltonian MPO of model.
T: float
Temperature
chi_H: int
Bond leg dimension for Hamiltonian MPO structure.
bdry_vecs1 and bdry_vecs2: list
List of left (first element) and right (second element) boundary vectors for
state and Hamiltonian networks, respectively (set to [None,None] by default
for finite structures and None for infinte structures).
method: str
            For density matrix: one of "method_I" or "method_II" options.
For random-holoMPS: one of 'thermal_state_class', 'tenpy', or 'MPO_tenpy' options.
("MPO_tenpy" is set when MPO structure is already constructed in TenPy).
N_sample: int
Number of samples for averaging energy of thermal random holoMPS (only for
"random_state" option.
S: float
Optional entropy setting (if set to None, returns thermal_state-based entropy).
"""
l_uc = len(self) # length of unit-cell
N = l_uc * L # total number of sites
# for density-matrix-based structures:
if state_type == 'density_matrix':
if S == None:
S = thermal_state.entropy(thermal_state.prob_list(self,params,T)) # entropy
if method == 'method_I': # first method of computing density matrix structures free energy
density_mat = thermal_state.density_matrix(self,params,
T,L,
bdry_vecs1) # density matrix
MPO_Hamiltonian = thermal_state.network_from_cells(Hamiltonian,'MPO',
N,chi_H,
None,bdry_vecs2,
'thermal_state_class',T) # Hamiltonian MPO
E = thermal_state.expectation_value(density_mat,'density_matrix',
L,chi_H,
MPO_Hamiltonian,params,
'method_I',T) # energy of system
F = E - T*S # total Helmholtz free energy
elif method == 'method_II':
MPO_Hamiltonian = thermal_state.network_from_cells(Hamiltonian,'MPO',
N,chi_H,
None,bdry_vecs2,
'thermal_state_class',T) # Hamiltonian MPO
E = thermal_state.expectation_value(self,'density_matrix',
L,chi_H,
MPO_Hamiltonian,params,
'method_II',T) # energy of system
F = (E/(N-l_uc)) - T*S # Helmholtz free energy
        else:
            raise ValueError('only one of "method_I" or "method_II" options')
# for random-holoMPS-based structures:
elif state_type == 'random_state':
N = l_uc * L # total number of sites
if S == None:
S = thermal_state.entropy(thermal_state.prob_list(self,params,T)) # entropy
# computing free energy using thermal state library built-in functions
if method == 'thermal_state_class':
random_state = thermal_state.network_from_cells(self,'random_state',
L,None,
params,bdry_vecs1,
'thermal_state_class',T) # random_state MPS
MPO_Hamiltonian = thermal_state.network_from_cells(Hamiltonian,'MPO',
N,chi_H,
None,bdry_vecs2,
'thermal_state_class',T) # Hamiltonian MPO
# sampling over different runs
Es = [thermal_state.expectation_value(random_state,'random_state',
L,chi_H,
MPO_Hamiltonian,params,
method,T) for j in range(N_sample)]
E = np.mean(Es) # energy of system
F = (E/N) - T*S # Helmholtz free energy
        elif method == 'tenpy':
            random_state = thermal_state.network_from_cells(self,'random_state',
                                                           L,None,
                                                           params,bdry_vecs1,
                                                           'tenpy',T) # random_state MPS
            MPO_Hamiltonian = thermal_state.network_from_cells(Hamiltonian,'MPO',
                                                           N,chi_H,
                                                           None,bdry_vecs2,
                                                           'tenpy',T) # Hamiltonian MPO
            # sampling over different runs
            Es = [(MPO_Hamiltonian.expectation_value(random_state)).real for j in range(N_sample)]
            E = np.mean(Es) # energy of system
F = (E/N) - T*S # Helmholtz free energy
elif method == 'MPO_tenpy':
random_state = thermal_state.network_from_cells(self,'random_state',
L,None,
params,bdry_vecs1,
'tenpy',T) # random_state MPS
# sampling over different runs
Es = [(Hamiltonian.expectation_value(random_state)).real for j in range(N_sample)]
E = np.mean(Es) # energy of system
F = (E/N) - T*S # Helmholtz free energy
else:
raise ValueError('only one of "thermal_state_class", "tenpy", or MPO_tenpy options')
else:
raise ValueError('only one of "random_state" or "density_matrix" options')
return F
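    # A minimal sketch (hypothetical call): `state` stands for an instance of the
    # enclosing class holding the unit-cell unitaries, and `H_mpo` for a pre-built
    # Hamiltonian MPO; neither is defined in this fragment.
    #
    #   F = state.free_energy(opt_params, 'density_matrix', L=10,
    #                         Hamiltonian=H_mpo, T=0.5, chi_H=5,
    #                         bdry_vecs2=[None, None], method='method_II')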
| [
"numpy.trace",
"numpy.einsum",
"numpy.shape",
"numpy.mean",
"numpy.exp",
"tenpy.networks.mps.MPS.canonical_form_finite",
"numpy.eye",
"numpy.swapaxes",
"numpy.reshape",
"numpy.kron",
"numpy.tensordot",
"scipy.linalg.eig",
"tenpy.networks.site.SpinHalfSite",
"numpy.dot",
"tenpy.networks.m... | [((17097, 17128), 'numpy.einsum', 'np.einsum', (['"""abdeik->abiedk"""', 'W1'], {}), "('abdeik->abiedk', W1)\n", (17106, 17128), True, 'import numpy as np\n'), ((17375, 17386), 'numpy.eye', 'np.eye', (['chi'], {}), '(chi)\n', (17381, 17386), True, 'import numpy as np\n'), ((17407, 17418), 'numpy.eye', 'np.eye', (['chi'], {}), '(chi)\n', (17413, 17418), True, 'import numpy as np\n'), ((18510, 18530), 'scipy.linalg.eig', 'la.eig', (['transfer_mat'], {}), '(transfer_mat)\n', (18516, 18530), True, 'import scipy.linalg as la\n'), ((18643, 18683), 'numpy.reshape', 'np.reshape', (['eig_vecs[:, idx]', '[chi, chi]'], {}), '(eig_vecs[:, idx], [chi, chi])\n', (18653, 18683), True, 'import numpy as np\n'), ((18910, 18921), 'numpy.eye', 'np.eye', (['chi'], {}), '(chi)\n', (18916, 18921), True, 'import numpy as np\n'), ((4977, 5023), 'numpy.swapaxes', 'np.swapaxes', (['unitary_list[0][:, :, 0, :]', '(1)', '(2)'], {}), '(unitary_list[0][:, :, 0, :], 1, 2)\n', (4988, 5023), True, 'import numpy as np\n'), ((13197, 13211), 'numpy.exp', 'np.exp', (['(-k / T)'], {}), '(-k / T)\n', (13203, 13211), True, 'import numpy as np\n'), ((18375, 18408), 'numpy.einsum', 'np.einsum', (['"""abad->bd"""', 'W_list1[0]'], {}), "('abad->bd', W_list1[0])\n", (18384, 18408), True, 'import numpy as np\n'), ((18742, 18762), 'numpy.trace', 'np.trace', (['steady_den'], {}), '(steady_den)\n', (18750, 18762), True, 'import numpy as np\n'), ((35268, 35279), 'numpy.array', 'np.array', (['j'], {}), '(j)\n', (35276, 35279), True, 'import numpy as np\n'), ((6115, 6161), 'numpy.swapaxes', 'np.swapaxes', (['unitary_list[0][:, :, 0, :]', '(1)', '(2)'], {}), '(unitary_list[0][:, :, 0, :], 1, 2)\n', (6126, 6161), True, 'import numpy as np\n'), ((7551, 7569), 'tenpy.networks.site.SpinHalfSite', 'SpinHalfSite', (['None'], {}), '(None)\n', (7563, 7569), False, 'from tenpy.networks.site import SpinHalfSite\n'), ((7595, 7674), 'tenpy.networks.mps.MPS.from_Bflat', 'MPS.from_Bflat', (['([site] * N)', 'tensor_list1'], {'bc': '"""finite"""', 'dtype': 'complex', 'form': 'None'}), "([site] * N, tensor_list1, bc='finite', dtype=complex, form=None)\n", (7609, 7674), False, 'from tenpy.networks.mps import MPS\n'), ((7693, 7751), 'tenpy.networks.mps.MPS.canonical_form_finite', 'MPS.canonical_form_finite', (['M'], {'renormalize': '(True)', 'cutoff': '(0.0)'}), '(M, renormalize=True, cutoff=0.0)\n', (7718, 7751), False, 'from tenpy.networks.mps import MPS\n'), ((10626, 10644), 'tenpy.networks.site.SpinHalfSite', 'SpinHalfSite', (['None'], {}), '(None)\n', (10638, 10644), False, 'from tenpy.networks.site import SpinHalfSite\n'), ((10665, 10725), 'tenpy.networks.mpo.MPO.from_grids', 'MPO.from_grids', (['([site] * L)', 'self'], {'bc': '"""finite"""', 'IdL': '(0)', 'IdR': '(-1)'}), "([site] * L, self, bc='finite', IdL=0, IdR=-1)\n", (10679, 10725), False, 'from tenpy.networks.mpo import MPO\n'), ((17749, 17804), 'numpy.reshape', 'np.reshape', (['contractions[0]', '[d, chi ** 2, d, chi ** 2]'], {}), '(contractions[0], [d, chi ** 2, d, chi ** 2])\n', (17759, 17804), True, 'import numpy as np\n'), ((17906, 17961), 'numpy.reshape', 'np.reshape', (['contractions[j]', '[d, chi ** 2, d, chi ** 2]'], {}), '(contractions[j], [d, chi ** 2, d, chi ** 2])\n', (17916, 17961), True, 'import numpy as np\n'), ((21967, 22015), 'numpy.tensordot', 'np.tensordot', (['MPO[1][j]', 'self[1][j]'], {'axes': '[2, 0]'}), '(MPO[1][j], self[1][j], axes=[2, 0])\n', (21979, 22015), True, 'import numpy as np\n'), ((23566, 23586), 'numpy.shape', 'np.shape', (['self[1][0]'], 
{}), '(self[1][0])\n', (23574, 23586), True, 'import numpy as np\n'), ((23721, 23741), 'numpy.shape', 'np.shape', (['self[1][0]'], {}), '(self[1][0])\n', (23729, 23741), True, 'import numpy as np\n'), ((25074, 25120), 'numpy.einsum', 'np.einsum', (['"""abcd,d->abc"""', 'MPO[1][0]', 'MPO[0][0]'], {}), "('abcd,d->abc', MPO[1][0], MPO[0][0])\n", (25083, 25120), True, 'import numpy as np\n'), ((25147, 25194), 'numpy.einsum', 'np.einsum', (['"""abcd,b->acd"""', 'MPO[1][-1]', 'MPO[2][0]'], {}), "('abcd,b->acd', MPO[1][-1], MPO[2][0])\n", (25156, 25194), True, 'import numpy as np\n'), ((30268, 30308), 'numpy.reshape', 'np.reshape', (['(con_mat0 @ bvecl)', '[chi ** 2]'], {}), '(con_mat0 @ bvecl, [chi ** 2])\n', (30278, 30308), True, 'import numpy as np\n'), ((30399, 30428), 'numpy.dot', 'np.dot', (['rvec', 'con_mat_on_rvec'], {}), '(rvec, con_mat_on_rvec)\n', (30405, 30428), True, 'import numpy as np\n'), ((30733, 30763), 'numpy.kron', 'np.kron', (['MPO[0][0]', 'self[0][0]'], {}), '(MPO[0][0], self[0][0])\n', (30740, 30763), True, 'import numpy as np\n'), ((31083, 31132), 'numpy.reshape', 'np.reshape', (['(con_mat0 @ bvecl)', '[chi, chi_MPO, chi]'], {}), '(con_mat0 @ bvecl, [chi, chi_MPO, chi])\n', (31093, 31132), True, 'import numpy as np\n'), ((31358, 31391), 'numpy.dot', 'np.dot', (['rvec', 'MPO_rvec_contracted'], {}), '(rvec, MPO_rvec_contracted)\n', (31364, 31391), True, 'import numpy as np\n'), ((35280, 35291), 'numpy.array', 'np.array', (['j'], {}), '(j)\n', (35288, 35291), True, 'import numpy as np\n'), ((41389, 41400), 'numpy.mean', 'np.mean', (['Es'], {}), '(Es)\n', (41396, 41400), True, 'import numpy as np\n'), ((5295, 5333), 'numpy.swapaxes', 'np.swapaxes', (['unitary[:, :, 0, :]', '(1)', '(2)'], {}), '(unitary[:, :, 0, :], 1, 2)\n', (5306, 5333), True, 'import numpy as np\n'), ((5588, 5604), 'random.choice', 'random.choice', (['p'], {}), '(p)\n', (5601, 5604), False, 'import random\n'), ((5751, 5789), 'numpy.swapaxes', 'np.swapaxes', (['unitary[:, :, j, :]', '(1)', '(2)'], {}), '(unitary[:, :, j, :], 1, 2)\n', (5762, 5789), True, 'import numpy as np\n'), ((6437, 6475), 'numpy.swapaxes', 'np.swapaxes', (['unitary[:, :, 0, :]', '(1)', '(2)'], {}), '(unitary[:, :, 0, :], 1, 2)\n', (6448, 6475), True, 'import numpy as np\n'), ((13859, 13870), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (13867, 13870), True, 'import numpy as np\n'), ((25327, 25371), 'numpy.einsum', 'np.einsum', (['"""abc,dea->bcde"""', 's_left', 'MPO_left'], {}), "('abc,dea->bcde', s_left, MPO_left)\n", (25336, 25371), True, 'import numpy as np\n'), ((25425, 25471), 'numpy.einsum', 'np.einsum', (['"""abc,dae->bcde"""', 's_right', 'MPO_right'], {}), "('abc,dae->bcde', s_right, MPO_right)\n", (25434, 25471), True, 'import numpy as np\n'), ((25506, 25540), 'numpy.einsum', 'np.einsum', (['"""jbkj->bk"""', 'left_tensor'], {}), "('jbkj->bk', left_tensor)\n", (25515, 25540), True, 'import numpy as np\n'), ((25586, 25621), 'numpy.einsum', 'np.einsum', (['"""aack->ck"""', 'right_tensor'], {}), "('aack->ck', right_tensor)\n", (25595, 25621), True, 'import numpy as np\n'), ((26052, 26088), 'numpy.einsum', 'np.einsum', (['"""bcdijk->ibjcdk"""', 'tensor1'], {}), "('bcdijk->ibjcdk', tensor1)\n", (26061, 26088), True, 'import numpy as np\n'), ((26204, 26253), 'numpy.reshape', 'np.reshape', (['tensor2', '[d, chi_tot, d_MPO, chi_tot]'], {}), '(tensor2, [d, chi_tot, d_MPO, chi_tot])\n', (26214, 26253), True, 'import numpy as np\n'), ((26330, 26365), 'numpy.trace', 'np.trace', (['tensor3'], {'axis1': '(0)', 'axis2': '(2)'}), '(tensor3, 
axis1=0, axis2=2)\n', (26338, 26365), True, 'import numpy as np\n'), ((30100, 30128), 'numpy.array', 'np.array', (['(self[2][0] == None)'], {}), '(self[2][0] == None)\n', (30108, 30128), True, 'import numpy as np\n'), ((30344, 30355), 'numpy.eye', 'np.eye', (['chi'], {}), '(chi)\n', (30350, 30355), True, 'import numpy as np\n'), ((30831, 30859), 'numpy.array', 'np.array', (['(self[2][0] == None)'], {}), '(self[2][0] == None)\n', (30839, 30859), True, 'import numpy as np\n'), ((31210, 31257), 'numpy.tensordot', 'np.tensordot', (['MPO[2][0]', 'con_vleft'], {'axes': '[0, 1]'}), '(MPO[2][0], con_vleft, axes=[0, 1])\n', (31222, 31257), True, 'import numpy as np\n'), ((31303, 31314), 'numpy.eye', 'np.eye', (['chi'], {}), '(chi)\n', (31309, 31314), True, 'import numpy as np\n'), ((31467, 31497), 'numpy.kron', 'np.kron', (['MPO[2][0]', 'self[2][0]'], {}), '(MPO[2][0], self[2][0])\n', (31474, 31497), True, 'import numpy as np\n'), ((31915, 31935), 'numpy.shape', 'np.shape', (['self[1][0]'], {}), '(self[1][0])\n', (31923, 31935), True, 'import numpy as np\n'), ((34243, 34292), 'numpy.einsum', 'np.einsum', (['"""rijskk,s->rij"""', 'con_mat[0]', 'MPO[0][0]'], {}), "('rijskk,s->rij', con_mat[0], MPO[0][0])\n", (34252, 34292), True, 'import numpy as np\n'), ((34455, 34491), 'numpy.einsum', 'np.einsum', (['"""rii,r"""', 'bvecl', 'MPO[2][0]'], {}), "('rii,r', bvecl, MPO[2][0])\n", (34464, 34491), True, 'import numpy as np\n'), ((35456, 35465), 'numpy.log', 'np.log', (['p'], {}), '(p)\n', (35462, 35465), True, 'import numpy as np\n'), ((42042, 42053), 'numpy.mean', 'np.mean', (['Es'], {}), '(Es)\n', (42049, 42053), True, 'import numpy as np\n'), ((18158, 18211), 'numpy.einsum', 'np.einsum', (['"""abcd,ijkb->acdijk"""', 'W_list1[0]', 'W_list[j]'], {}), "('abcd,ijkb->acdijk', W_list1[0], W_list[j])\n", (18167, 18211), True, 'import numpy as np\n'), ((22066, 22092), 'numpy.swapaxes', 'np.swapaxes', (['tensor1', '(1)', '(2)'], {}), '(tensor1, 1, 2)\n', (22077, 22092), True, 'import numpy as np\n'), ((24211, 24248), 'numpy.einsum', 'np.einsum', (['"""abcdee->abcd"""', 'self[1][0]'], {}), "('abcdee->abcd', self[1][0])\n", (24220, 24248), True, 'import numpy as np\n'), ((24409, 24447), 'numpy.einsum', 'np.einsum', (['"""aeebcd->abcd"""', 'self[1][-1]'], {}), "('aeebcd->abcd', self[1][-1])\n", (24418, 24447), True, 'import numpy as np\n'), ((25843, 25893), 'numpy.reshape', 'np.reshape', (['self[1][j]', '[d, chi ** 2, d, chi ** 2]'], {}), '(self[1][j], [d, chi ** 2, d, chi ** 2])\n', (25853, 25893), True, 'import numpy as np\n'), ((26403, 26442), 'numpy.reshape', 'np.reshape', (['tensor4', '[chi_tot, chi_tot]'], {}), '(tensor4, [chi_tot, chi_tot])\n', (26413, 26442), True, 'import numpy as np\n'), ((33760, 33794), 'numpy.einsum', 'np.einsum', (['"""abcd->cadb"""', 'MPO[1][0]'], {}), "('abcd->cadb', MPO[1][0])\n", (33769, 33794), True, 'import numpy as np\n'), ((34373, 34427), 'numpy.einsum', 'np.einsum', (['"""rijskl,skl->rij"""', 'con_mat[n % l_uc]', 'bvecl'], {}), "('rijskl,skl->rij', con_mat[n % l_uc], bvecl)\n", (34382, 34427), True, 'import numpy as np\n'), ((42684, 42695), 'numpy.mean', 'np.mean', (['Es'], {}), '(Es)\n', (42691, 42695), True, 'import numpy as np\n'), ((8102, 8132), 'numpy.array', 'np.array', (['(bdry_vecs[0] == None)'], {}), '(bdry_vecs[0] == None)\n', (8110, 8132), True, 'import numpy as np\n'), ((8173, 8186), 'numpy.zeros', 'np.zeros', (['chi'], {}), '(chi)\n', (8181, 8186), True, 'import numpy as np\n'), ((8586, 8616), 'numpy.array', 'np.array', (['(bdry_vecs[1] == None)'], {}), 
'(bdry_vecs[1] == None)\n', (8594, 8616), True, 'import numpy as np\n'), ((21422, 21448), 'numpy.swapaxes', 'np.swapaxes', (['tensor1', '(0)', '(3)'], {}), '(tensor1, 0, 3)\n', (21433, 21448), True, 'import numpy as np\n'), ((22431, 22457), 'numpy.swapaxes', 'np.swapaxes', (['tensor3', '(0)', '(3)'], {}), '(tensor3, 0, 3)\n', (22442, 22457), True, 'import numpy as np\n'), ((23034, 23060), 'numpy.swapaxes', 'np.swapaxes', (['tensor5', '(0)', '(3)'], {}), '(tensor5, 0, 3)\n', (23045, 23060), True, 'import numpy as np\n'), ((24714, 24766), 'numpy.einsum', 'np.einsum', (['"""abcdee,ee->abcd"""', 'self[1][0]', 'self[0][0]'], {}), "('abcdee,ee->abcd', self[1][0], self[0][0])\n", (24723, 24766), True, 'import numpy as np\n'), ((24934, 24972), 'numpy.einsum', 'np.einsum', (['"""aeebcd->abcd"""', 'self[1][-1]'], {}), "('aeebcd->abcd', self[1][-1])\n", (24943, 24972), True, 'import numpy as np\n'), ((9661, 9691), 'numpy.array', 'np.array', (['(bdry_vecs[j] != None)'], {}), '(bdry_vecs[j] != None)\n', (9669, 9691), True, 'import numpy as np\n'), ((9901, 9931), 'numpy.array', 'np.array', (['(bdry_vecs[j] == None)'], {}), '(bdry_vecs[j] == None)\n', (9909, 9931), True, 'import numpy as np\n'), ((9972, 9985), 'numpy.zeros', 'np.zeros', (['chi'], {}), '(chi)\n', (9980, 9985), True, 'import numpy as np\n'), ((9040, 9070), 'numpy.array', 'np.array', (['(bdry_vecs[j] != None)'], {}), '(bdry_vecs[j] != None)\n', (9048, 9070), True, 'import numpy as np\n'), ((11128, 11158), 'numpy.array', 'np.array', (['(bdry_vecs[j] != None)'], {}), '(bdry_vecs[j] != None)\n', (11136, 11158), True, 'import numpy as np\n'), ((11368, 11398), 'numpy.array', 'np.array', (['(bdry_vecs[j] == None)'], {}), '(bdry_vecs[j] == None)\n', (11376, 11398), True, 'import numpy as np\n'), ((11439, 11456), 'numpy.zeros', 'np.zeros', (['chi_MPO'], {}), '(chi_MPO)\n', (11447, 11456), True, 'import numpy as np\n')] |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the number operator, S^2 operator, Sz operator, and time-reveral
operator."""
import copy
from typing import TYPE_CHECKING
import numpy as np
from fqe.util import alpha_beta_electrons, vdot
from fqe.fqe_ops import fqe_operator
if TYPE_CHECKING:
from fqe.wavefunction import Wavefunction
class NumberOperator(fqe_operator.FqeOperator):
"""The number operator."""
def contract(self, brastate: "Wavefunction",
ketstate: "Wavefunction") -> complex:
"""Given two wavefunctions, generate the expectation value of the
operator according to its representation.
Args:
brastate: Wavefunction on the bra side.
ketstate: Wavefunction on the ket side.
"""
out = copy.deepcopy(ketstate)
for _, sector in out._civec.items():
sector.scale(sector.nalpha() + sector.nbeta())
return vdot(brastate, out)
def representation(self):
"""Returns the representation of the number operator, which is 'N'."""
return "N"
def rank(self):
"""Returns the rank of the number operator."""
return 2
class S2Operator(fqe_operator.FqeOperator):
r"""The :math:`S^2` operator."""
def contract(self, brastate: "Wavefunction",
ketstate: "Wavefunction") -> complex:
"""Given two wavefunctions, generate the expectation value of the
operator according to its representation.
Args:
brastate: Wavefunction on the bra side.
ketstate: Wavefunction on the ket side.
"""
out = copy.deepcopy(ketstate)
for _, sector in out._civec.items():
sector.apply_inplace_s2()
return vdot(brastate, out)
def representation(self):
"""Returns the representation of the operator."""
return "s_2"
def rank(self):
"""Returns rank of the operator."""
return 2
class SzOperator(fqe_operator.FqeOperator):
r"""The :math:`S_z` operator."""
def contract(self, brastate: "Wavefunction",
ketstate: "Wavefunction") -> complex:
"""Given two wavefunctions, generate the expectation value of the
operator according to its representation.
Args:
brastate: Wavefunction on the bra side.
ketstate: Wavefunction on the ket side.
"""
out = copy.deepcopy(ketstate)
for _, sector in out._civec.items():
sector.scale((sector.nalpha() - sector.nbeta()) * 0.5)
return vdot(brastate, out)
def representation(self):
"""Returns the representation of the Sz operator."""
return "s_z"
def rank(self):
"""Returns the rank of the Sz operator."""
return 2
class TimeReversalOp(fqe_operator.FqeOperator):
"""The time-reversal operator.
The program assumes the Kramers-paired storage for the wavefunction.
"""
def contract(self, brastate: "Wavefunction",
ketstate: "Wavefunction") -> complex:
"""Given two wavefunctions, generate the expectation value of the
operator according to its representation.
Args:
brastate: Wavefunction on the bra side.
ketstate: Wavefunction on the ket side.
"""
out = copy.deepcopy(ketstate)
for (nele, nab), sector in out._civec.items():
nalpha, nbeta = alpha_beta_electrons(nele, nab)
if nalpha < nbeta:
if not (nele, nbeta - nalpha) in out._civec.keys():
raise ValueError(
"The wavefunction space is not closed under "
"time reversal.")
sector2 = out._civec[(nele, nbeta - nalpha)]
tmp = np.copy(sector.coeff)
phase = (-1)**(nbeta * (nalpha + 1))
phase2 = (-1)**(nalpha * (nbeta + 1))
sector.coeff = sector2.coeff.T.conj() * phase2
sector2.coeff = tmp.T.conj() * phase
elif nalpha > nbeta:
if not (nele, nbeta - nalpha) in out._civec.keys():
raise ValueError(
"The wavefunction space is not closed under "
"time reversal.")
elif nalpha == nbeta:
sector.coeff = sector.coeff.T.conj()
return vdot(brastate, out)
def representation(self):
"""Returns the representation of the operator."""
return "T"
def rank(self):
"""Returns the rank of the operator."""
return 2
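# A minimal sketch (hypothetical usage, assuming the fqe package is available):
# evaluate <psi| N |psi> for a 2-electron, Sz = 0, 2-orbital wavefunction.
#
#   import fqe
#   wfn = fqe.Wavefunction([[2, 0, 2]])
#   wfn.set_wfn(strategy='random')
#   n_elec = NumberOperator().contract(wfn, wfn)  # ~2.0 for a normalized state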
| [
"fqe.util.alpha_beta_electrons",
"copy.deepcopy",
"fqe.util.vdot",
"numpy.copy"
] | [((1354, 1377), 'copy.deepcopy', 'copy.deepcopy', (['ketstate'], {}), '(ketstate)\n', (1367, 1377), False, 'import copy\n'), ((1497, 1516), 'fqe.util.vdot', 'vdot', (['brastate', 'out'], {}), '(brastate, out)\n', (1501, 1516), False, 'from fqe.util import alpha_beta_electrons, vdot\n'), ((2196, 2219), 'copy.deepcopy', 'copy.deepcopy', (['ketstate'], {}), '(ketstate)\n', (2209, 2219), False, 'import copy\n'), ((2318, 2337), 'fqe.util.vdot', 'vdot', (['brastate', 'out'], {}), '(brastate, out)\n', (2322, 2337), False, 'from fqe.util import alpha_beta_electrons, vdot\n'), ((2987, 3010), 'copy.deepcopy', 'copy.deepcopy', (['ketstate'], {}), '(ketstate)\n', (3000, 3010), False, 'import copy\n'), ((3138, 3157), 'fqe.util.vdot', 'vdot', (['brastate', 'out'], {}), '(brastate, out)\n', (3142, 3157), False, 'from fqe.util import alpha_beta_electrons, vdot\n'), ((3901, 3924), 'copy.deepcopy', 'copy.deepcopy', (['ketstate'], {}), '(ketstate)\n', (3914, 3924), False, 'import copy\n'), ((4970, 4989), 'fqe.util.vdot', 'vdot', (['brastate', 'out'], {}), '(brastate, out)\n', (4974, 4989), False, 'from fqe.util import alpha_beta_electrons, vdot\n'), ((4008, 4039), 'fqe.util.alpha_beta_electrons', 'alpha_beta_electrons', (['nele', 'nab'], {}), '(nele, nab)\n', (4028, 4039), False, 'from fqe.util import alpha_beta_electrons, vdot\n'), ((4372, 4393), 'numpy.copy', 'np.copy', (['sector.coeff'], {}), '(sector.coeff)\n', (4379, 4393), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.metrics import pairwise_distances_argmin_min
def resample(path, spacing=1):
"""Resample a path (linearly) according to maximum distance between points
Args:
path (np.array): points of path, shaped as number points x number of features.
spacing (int, optional): maximum distance between points. Defaults to 1.
Returns:
np.array: points of resampled path, shaped as number points x number of features.
"""
    new_path = [path[0:1, :]]  # keep the first point
    for n in np.arange(1, path.shape[0]):
        pt1 = path[n - 1 : n, :]
        pt2 = path[n : n + 1, :]
        dist = np.linalg.norm(pt1 - pt2)
        if dist > spacing:
            # insert evenly spaced intermediate points along this segment
            ts = np.arange(0, dist, spacing)
            mid = np.zeros((len(ts) - 1, path.shape[1]))
            for i, t in enumerate(ts[1:]):
                mid[i, :] = pt1 + (t / dist) * (pt2 - pt1)
            new_path.append(mid)
        new_path.append(pt2)
    new_path = np.concatenate(new_path)
return new_path
def sd(pts1, pts2, substantial=False):
"""Compute spatial distance between two paths according to Peng et. al. 2010.
Args:
pts1 (np.array): points of first path, shaped as number points x number of features.
pts2 (np.array): points of second path, shaped as number points x number of features.
substantial (bool, optional): whether to compute substantial spatial distance which ignores all points that have a closest point within 2 voxels. Defaults to False.
verbose (bool, optional): [description]. Defaults to False.
Returns:
float : spatial distance or substantial spatial distance
"""
_, dists1 = pairwise_distances_argmin_min(pts1, pts2)
_, dists2 = pairwise_distances_argmin_min(pts2, pts1)
if substantial:
if any(dists1 > 2):
ddiv1 = np.mean(dists1[dists1 > 2])
else:
ddiv1 = 0
if any(dists2 > 2):
ddiv2 = np.mean(dists2[dists2 > 2])
else:
ddiv2 = 0
return np.mean([ddiv1, ddiv2])
else:
ddiv1 = np.mean(dists1)
ddiv2 = np.mean(dists2)
return np.mean([ddiv1, ddiv2])
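# A minimal sketch (hypothetical data): densify a coarse 3D polyline, then
# compare it with a shifted copy of itself.
#
#   pts = np.array([[0.0, 0.0, 0.0], [5.0, 0.0, 0.0], [5.0, 5.0, 0.0]])
#   dense = resample(pts, spacing=1)     # points at most 1 unit apart
#   print(sd(dense, dense + 0.5))        # small mean closest-point distance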
| [
"sklearn.metrics.pairwise_distances_argmin_min",
"numpy.mean",
"numpy.linalg.norm",
"numpy.arange",
"numpy.concatenate"
] | [((511, 535), 'numpy.arange', 'np.arange', (['path.shape[0]'], {}), '(path.shape[0])\n', (520, 535), True, 'import numpy as np\n'), ((967, 991), 'numpy.concatenate', 'np.concatenate', (['new_path'], {}), '(new_path)\n', (981, 991), True, 'import numpy as np\n'), ((1676, 1717), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1705, 1717), False, 'from sklearn.metrics import pairwise_distances_argmin_min\n'), ((1734, 1775), 'sklearn.metrics.pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', (['pts2', 'pts1'], {}), '(pts2, pts1)\n', (1763, 1775), False, 'from sklearn.metrics import pairwise_distances_argmin_min\n'), ((648, 673), 'numpy.linalg.norm', 'np.linalg.norm', (['(pt1 - pt2)'], {}), '(pt1 - pt2)\n', (662, 673), True, 'import numpy as np\n'), ((2036, 2059), 'numpy.mean', 'np.mean', (['[ddiv1, ddiv2]'], {}), '([ddiv1, ddiv2])\n', (2043, 2059), True, 'import numpy as np\n'), ((2086, 2101), 'numpy.mean', 'np.mean', (['dists1'], {}), '(dists1)\n', (2093, 2101), True, 'import numpy as np\n'), ((2118, 2133), 'numpy.mean', 'np.mean', (['dists2'], {}), '(dists2)\n', (2125, 2133), True, 'import numpy as np\n'), ((2149, 2172), 'numpy.mean', 'np.mean', (['[ddiv1, ddiv2]'], {}), '([ddiv1, ddiv2])\n', (2156, 2172), True, 'import numpy as np\n'), ((719, 746), 'numpy.arange', 'np.arange', (['(0)', 'dist', 'spacing'], {}), '(0, dist, spacing)\n', (728, 746), True, 'import numpy as np\n'), ((1844, 1871), 'numpy.mean', 'np.mean', (['dists1[dists1 > 2]'], {}), '(dists1[dists1 > 2])\n', (1851, 1871), True, 'import numpy as np\n'), ((1956, 1983), 'numpy.mean', 'np.mean', (['dists2[dists2 > 2]'], {}), '(dists2[dists2 > 2])\n', (1963, 1983), True, 'import numpy as np\n')] |
import numba
from numba import jit, prange
import numpy as np
import nltk
from ProcessEntropy.Preprocessing import *
@jit(nopython=True, fastmath=True, parallel=True)
def get_all_self_lambdas(source, lambdas):
"""
Internal function.
Finds the Lambda value for each index in the source.
Lambda value denotes the longest subsequence of the source,
starting from the index, that in contained contiguously in the source,
before the index.
Args:
source: Arry of ints, usually corresponding to hashed words.
lambdas: A premade array of length(target), usually filled with zeros.
Used for efficiency reasons.
Return:
A list of ints, denoting the value for Lambda for each index in the target.
"""
N = len(source)
for i in prange(1, N):
# The target process is everything ahead of i.
t_max = 0
c_max = 0
for j in range(0, i): # Look back at the past
if source[j] == source[i]: # Check if matches future's next element
c_max = 1
for k in range(1,min(N-i, i-j)): # Look through more of future
if source[j+k] != source[i+k]:
break
else:
c_max = c_max+1
if c_max > t_max:
t_max = c_max
lambdas[i] = t_max+1
return lambdas
def self_entropy_rate(source, get_lambdas = False):
"""
Args:
source: The source is an array of ints.
Returns:
The non-parametric estimate of the entropy rate based on match lengths.
        $$
        \hat{h}(S)=\frac{N \log_{2} N}{\sum_{i=1}^{N} \Lambda_{i}(S)}
        $$
        This estimator is described mathematically in [1].
        [1] <NAME>, <NAME>, <NAME>, and <NAME>. Nonparametric entropy
        estimation for stationary processes and random fields, with applications to English text.
        IEEE Transactions on Information Theory, 44(3):1319–1327, May 1998.
"""
N = len(source)
source = np.array(source)
lambdas = np.zeros(N)
lambdas = get_all_self_lambdas(source, lambdas)
if get_lambdas:
return lambdas
else:
return N*np.log2(N) / np.sum(lambdas)
def text_array_self_entropy(token_source):
"""
This is a wrapper for `self_entropy_rate' to allow for raw text to be used.
Args:
token_source: A list of token strings (hint: a list of words).
Returns:
The non-parametric estimate of the entropy rate based on match lengths.
"""
return self_entropy_rate(np.array([fnv(word) for word in token_source]))
def tweet_self_entropy(tweets_source):
"""
This is a wrapper for `self_entropy_rate' to allow for raw tweets to be used.
Args:
tweets_source: A list of long strings (hint: a list of tweets).
If it detects that you have added a list of (time, tweet) tuple pairs
(as in timeseries_cross_entropy) it will recover.
Returns:
The non-parametric estimate of the entropy rate based on match lengths.
"""
source = []
if type(tweets_source[0]) == tuple:
for time, text in tweets_source:
source.extend(tweet_to_hash_array(text))
else:
for text in tweets_source:
source.extend(tweet_to_hash_array(text))
return self_entropy_rate(source)
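# A minimal sketch (hypothetical data): a highly repetitive token sequence
# yields a low entropy rate estimate.
#
#   tokens = ["the", "cat", "sat", "the", "cat", "sat", "the", "cat", "sat"]
#   print(text_array_self_entropy(tokens))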
def convergence(tweets_source, plot_for_me = False):
"""Calculates the entropy rate of a process at every point in time along the sequence. This is very useful for plotting / checking the convergence of a sequence. Uses the same interface as `tweet_self_entropy`. We recommend this result is ploted against it's index.
Args:
tweets_source: A list of long strings (hint: a list of tweets).
If it detects that you have added a list of (time, tweet) tuple pairs
(as in timeseries_cross_entropy) it will recover.
Returns:
The non-parametric estimate of the entropy rate at every point along the sequence.
"""
    # Build the token sequence exactly as in tweet_self_entropy.
source = []
if type(tweets_source[0]) == tuple:
for time, text in tweets_source:
source.extend(tweet_to_hash_array(text))
else:
for text in tweets_source:
source.extend(tweet_to_hash_array(text))
lambdas = self_entropy_rate(source, get_lambdas=True)
entropies = [(N*np.log2(N)/np.sum(lambdas[:N])) for N in range(2,len(lambdas))]
if plot_for_me:
import matplotlib.pyplot as plt
plt.plot(range(2,len(entropies)), entropies)
plt.show()
else:
return entropies | [
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.log2",
"numpy.zeros",
"numpy.array",
"numba.jit",
"numba.prange"
] | [((121, 169), 'numba.jit', 'jit', ([], {'nopython': '(True)', 'fastmath': '(True)', 'parallel': '(True)'}), '(nopython=True, fastmath=True, parallel=True)\n', (124, 169), False, 'from numba import jit, prange\n'), ((854, 866), 'numba.prange', 'prange', (['(1)', 'N'], {}), '(1, N)\n', (860, 866), False, 'from numba import jit, prange\n'), ((2145, 2161), 'numpy.array', 'np.array', (['source'], {}), '(source)\n', (2153, 2161), True, 'import numpy as np\n'), ((2176, 2187), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (2184, 2187), True, 'import numpy as np\n'), ((4710, 4720), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4718, 4720), True, 'import matplotlib.pyplot as plt\n'), ((2328, 2343), 'numpy.sum', 'np.sum', (['lambdas'], {}), '(lambdas)\n', (2334, 2343), True, 'import numpy as np\n'), ((4535, 4554), 'numpy.sum', 'np.sum', (['lambdas[:N]'], {}), '(lambdas[:N])\n', (4541, 4554), True, 'import numpy as np\n'), ((2315, 2325), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (2322, 2325), True, 'import numpy as np\n'), ((4524, 4534), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (4531, 4534), True, 'import numpy as np\n')] |
import argparse
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
import csv
from pathlib import Path
import urllib.request
import urllib.parse
import astropy.io.fits.header
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from reproject import reproject_exact, mosaicking, reproject_interp, reproject_adaptive
#from reproject.utils import reproject_blocked
from astropy.wcs import WCS
from astropy.wcs import utils as wutils
from astropy import units as u
from astropy import coordinates
from timeit import default_timer as timer
from astroquery.astrometry_net import AstrometryNet
def calc_sky_area(header):
return wutils.proj_plane_pixel_area(WCS(header)) * header['IMAGEH'] * header['IMAGEW']
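# A minimal sketch (hypothetical numbers): proj_plane_pixel_area returns the
# pixel area in deg^2, so a 1000 x 1000 image platesolved at 2 arcsec/px covers
# roughly (2/3600)**2 * 1000 * 1000 ~= 0.31 deg^2 of sky.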
def prepare_image(img_url: str, img_name: str, img_author: str, imgs_path: Path, astrometry_net_key):
img_file_name = urllib.parse.quote_plus(img_url)
print(img_file_name)
img_prep_folder = imgs_path / Path(urllib.parse.quote_plus(img_name) + "_"+ urllib.parse.quote_plus(img_author)+"_" +urllib.parse.quote_plus(img_url))
img_prep_folder.mkdir(exist_ok=True, parents=True)
downloaded_img_path = img_prep_folder / urllib.parse.quote_plus(img_url)
if downloaded_img_path.exists():
print("Already downloaded file from: " + img_url + " - skipping download!")
else:
print("Preparing image from: " + img_url + " and downloading to: " + str(downloaded_img_path))
urllib.request.urlretrieve(img_url, downloaded_img_path)
wcs_file_name = str(downloaded_img_path.stem) + ".wcs"
wcs_file_path = img_prep_folder / wcs_file_name
if wcs_file_path.exists():
print("Found WCS data at: " + str(wcs_file_path))
fp = open(wcs_file_path)
wcs_header = astropy.io.fits.header.Header()
wcs_header = wcs_header.fromtextfile(fp)
fp.close()
else:
print("Couldn't find WCS data at: " + str(wcs_file_path) + " - beginning Astrometry.net platesolving")
ast = AstrometryNet()
ast.api_key = astrometry_net_key
try_again = True
submission_id = None
while try_again:
try:
if not submission_id:
wcs_header = ast.solve_from_image(str(downloaded_img_path),
submission_id=submission_id,
solve_timeout=1200)
else:
wcs_header = ast.monitor_submission(submission_id,
solve_timeout=600)
except TimeoutError as e:
submission_id = e.args[1]
else:
# got a result, so terminate
try_again = False
fp = open(wcs_file_path, "wb")
wcs_header.totextfile(fp)
fp.close()
    # combine the channels into one FITS file
img_data = np.array(Image.open(downloaded_img_path))
if img_data.ndim != 3:
img_data = np.stack((img_data,) * 3, axis=-1)
channel_names = ['r','g','b']
for i in range(0, len(channel_names)):
channel_fits = astropy.io.fits.PrimaryHDU(data=img_data[:,:,i], header=wcs_header)
channel_fits_path_parent = downloaded_img_path.parent
channel_fits_path_filename = downloaded_img_path.stem
channel_fits_path = Path(str(channel_fits_path_parent / channel_fits_path_filename) +"_"+channel_names[i]+".fits")
channel_fits.writeto(channel_fits_path, overwrite=True)
return
def load_all_wcs_meta(img_dir_path):
wcs_glob = img_dir_path.glob("**/*.wcs")
data = []
for w in wcs_glob:
img_dict = {}
fp = open(w)
wcs_h = astropy.io.fits.header.Header()
wcs_h = wcs_h.fromtextfile(fp)
img_dict['wcs'] = wcs_h
red_path = Path(str(w.parent / w.stem) + "_r.fits")
green_path = Path(str(w.parent / w.stem) + "_g.fits")
blue_path = Path(str(w.parent / w.stem) + "_b.fits")
img_channels = [red_path, green_path, blue_path]
img_dict['paths'] = (img_channels)
img_dict['area'] = calc_sky_area(wcs_h)
data.append(img_dict)
fp.close()
return data
def main():
ap = argparse.ArgumentParser()
ap.add_argument("--imgs_csv", help="CSV file containing links to patch work images", default="patchwork.csv")
ap.add_argument("--arc_sec_per_px", help="An override for final arc sec/px, if no value is given the smallest "
"values in all images will be used")
ap.add_argument("--imgs_dir", help="Directory for images to be downloaded to and prepared in, defaults to the same name as CSV file used")
ap.add_argument("--astronometry_net_api_key", "-ast_key")
ap.add_argument("--swap_dir", help="Directory memory mapped ararys will be saved during stacking. "
"A fast SSD with lots of space will increase speed, however having enough space is more important",
default="patchwork_swap")
args = ap.parse_args()
px_scale = None
if args.arc_sec_per_px is not None:
px_scale = float(args.arc_sec_per_px) * u.arcsec
imgs_csv_path = Path(args.imgs_csv)
# check args
img_dir_path = None
if args.imgs_dir is not None:
img_dir_path = Path(args.imgs_dir)
        if not img_dir_path.exists() or not img_dir_path.is_dir():
            raise NotADirectoryError("Couldn't find directory: " + str(args.imgs_dir))
else:
img_dir_path = (imgs_csv_path.resolve().parent / imgs_csv_path.stem)
img_dir_path.mkdir(exist_ok=True, parents=True)
csv_rows = []
with open(imgs_csv_path, newline='') as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in csvreader:
print(row)
if (row[0][0] != '#' ):
csv_rows.append(row)
prep_pool = ProcessPoolExecutor()
for row in csv_rows[1:]:
url = row[0]
name = row[1]
author = row[2]
#prep_pool.submit(prepare_image, url, name, author, img_dir_path, args.astronometry_net_api_key)
prepare_image(url, name, author, img_dir_path, args.astronometry_net_api_key)
print("all images submitted for prep, waiting for all to be done...")
prep_pool.shutdown()
all_data = load_all_wcs_meta(img_dir_path)
#sort based upon sky area,
all_data = sorted(all_data,key=lambda x: x['area'], reverse=True)
for d in all_data:
print(wutils.proj_plane_pixel_area(WCS(d['wcs'])))
#open all the red fits and hope we have enough RAM I guess....
red_hdus = []
for img in all_data:
red_hdus.append(astropy.io.fits.open(img['paths'][0]))
#TODO: add a base coordinate frame to the center of the patchwork area
final_wcs, final_shape = mosaicking.find_optimal_celestial_wcs(red_hdus, resolution=px_scale,
auto_rotate=False, projection='MER')
fp = open('final.wcs', "wb")
final_wcs.to_header().totextfile(fp)
fp.close()
print("Final pixel dimensions will be: " + str(final_shape))
print("Final image scale: " + str(px_scale) + " \"/px ")
swap_path = Path(args.swap_dir)
print("Saving intermediate files at: " + str(swap_path.absolute()))
swap_path.mkdir(exist_ok=True, parents=True)
    final_image = np.memmap(filename=swap_path / "final_patchwork.mmap",
                            shape=(final_shape[0],final_shape[1],3), mode='w+', dtype=np.float64)
## project and align
channel_names = ['r', 'g', 'b']
all_channel_start_time = timer()
for c in range(0, len(channel_names)): #for RGB, once per channel
print("#####")
print("Reprojecting channel:" + str(c))
print("#####")
channel_start_time = timer()
        channel_canvas_array = np.memmap(filename=swap_path / str(channel_names[c]+".mmap"),
                                    shape=final_shape, mode='w+', dtype=np.float64)
for img in all_data:
current_image_start_time = timer()
curent_img_path = str(img['paths'][c])
print(curent_img_path)
patch = astropy.io.fits.open(curent_img_path)
patch.info()
#plt.subplot(projection=final_wcs)
#plt.imshow(channel_canvas_array)
#plt.grid(color='white', ls='solid')
#plt.show()
            mmapped_patch = np.memmap(filename=swap_path / "current_patch.mmap",
                                 shape=patch[0].data.shape, mode='w+', dtype=np.float64)
            mmapped_patch[:] = (patch[0].data.astype(np.float64) / 255)[:]
            #this should probs be reproject_exact for the final
            array = np.memmap(filename=swap_path / "current_img.mmap",
                                 shape=final_shape, mode='w+', dtype=np.float64)
            footprint = np.memmap(filename=swap_path / "current_footprint.mmap",
                                 shape=final_shape, mode='w+', dtype=np.float64)
method = "interp"
if method == "interp":
reproject_interp((mmapped_patch,patch[0].header), final_wcs, shape_out=final_shape, output_array=array, return_footprint = True, output_footprint=footprint,
block_size=(700,700), parallel=True)
print("footprint created")
# now we need to do this without allocating any new arrays
# channel_canvas_array[:] = channel_canvas_array * (footprint-1)*-1
elif method == "exact":
                array = reproject_exact((patch[0].data.astype(np.float64) / 255,patch[0].header),
output_projection=final_wcs,shape_out=final_shape,
parallel=True, return_footprint=False)
elif method == 'blocked':
reproject_blocked(reproject_interp, input_data=(mmapped_patch,patch[0].header), shape_out=final_shape,
output_projection=final_wcs,
output_array=array,output_footprint=footprint, return_footprint=True, parallel=True, block_size=(700,700))
print("Reprojection took: " + str(timer() - current_image_start_time) + "\n")
#footprint -= 1
#print('subtracted')
#
#
#footprint *= -1
#print("multiplied")
#
#
#
#channel_canvas_array *= footprint
#print("cleared")
channel_canvas_array[:] = channel_canvas_array * (footprint-1)*-1
np.nan_to_num(array, copy=False)
print("de-NAN'd")
channel_canvas_array += array
print("inserted")
#let go of all file handles so they can be overwitten for the next image
del mmapped_patch, array, footprint
print("Reprojection and insertion took: " + str(timer() - current_image_start_time) + "\n")
print("Reprojection for " + channel_names[c] +"_channel took: "+ str(timer() - channel_start_time))
#plt.imshow(channel_canvas_array)
#plt.show()
final_image[:,:,c] = channel_canvas_array[:,:]
del channel_canvas_array
final_fits = astropy.io.fits.PrimaryHDU(data=final_image[:,:,0], header=final_wcs.to_header())
final_fits_path = Path("final_image.fits")
final_fits.writeto(final_fits_path, overwrite=True)
plt.imsave('final_image.png', np.flipud(np.clip(final_image, a_min=0, a_max=1)))
plt.subplot(projection=final_wcs)
plt.imshow(final_image)
plt.grid(color='white', ls='dotted')
plt.show()
if __name__ == "__main__":
multiprocessing.freeze_support()
Image.MAX_IMAGE_PIXELS = None #keep PIL happy when opening stupidly 3x drizzled files :P
main() | [
"csv.reader",
"argparse.ArgumentParser",
"numpy.nan_to_num",
"concurrent.futures.ProcessPoolExecutor",
"numpy.clip",
"pathlib.Path",
"astroquery.astrometry_net.AstrometryNet",
"matplotlib.pyplot.imshow",
"numpy.stack",
"matplotlib.pyplot.show",
"reproject.mosaicking.find_optimal_celestial_wcs",
... | [((4259, 4284), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4282, 4284), False, 'import argparse\n'), ((5258, 5277), 'pathlib.Path', 'Path', (['args.imgs_csv'], {}), '(args.imgs_csv)\n', (5262, 5277), False, 'from pathlib import Path\n'), ((5989, 6010), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {}), '()\n', (6008, 6010), False, 'from concurrent.futures import ProcessPoolExecutor\n'), ((6910, 7019), 'reproject.mosaicking.find_optimal_celestial_wcs', 'mosaicking.find_optimal_celestial_wcs', (['red_hdus'], {'resolution': 'px_scale', 'auto_rotate': '(False)', 'projection': '"""MER"""'}), "(red_hdus, resolution=px_scale,\n auto_rotate=False, projection='MER')\n", (6947, 7019), False, 'from reproject import reproject_exact, mosaicking, reproject_interp, reproject_adaptive\n'), ((7314, 7333), 'pathlib.Path', 'Path', (['args.swap_dir'], {}), '(args.swap_dir)\n', (7318, 7333), False, 'from pathlib import Path\n'), ((7475, 7604), 'numpy.memmap', 'np.memmap', ([], {'filename': "(swap_path / 'final_patchwork.mmap')", 'shape': '(final_shape[0], final_shape[1], 3)', 'mode': '"""w+"""', 'dtype': 'np.float'}), "(filename=swap_path / 'final_patchwork.mmap', shape=(final_shape[0\n ], final_shape[1], 3), mode='w+', dtype=np.float)\n", (7484, 7604), True, 'import numpy as np\n'), ((7731, 7738), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7736, 7738), True, 'from timeit import default_timer as timer\n'), ((11504, 11528), 'pathlib.Path', 'Path', (['"""final_image.fits"""'], {}), "('final_image.fits')\n", (11508, 11528), False, 'from pathlib import Path\n'), ((11676, 11709), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': 'final_wcs'}), '(projection=final_wcs)\n', (11687, 11709), True, 'import matplotlib.pyplot as plt\n'), ((11714, 11737), 'matplotlib.pyplot.imshow', 'plt.imshow', (['final_image'], {}), '(final_image)\n', (11724, 11737), True, 'import matplotlib.pyplot as plt\n'), ((11742, 11778), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""white"""', 'ls': '"""dotted"""'}), "(color='white', ls='dotted')\n", (11750, 11778), True, 'import matplotlib.pyplot as plt\n'), ((11783, 11793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11791, 11793), True, 'import matplotlib.pyplot as plt\n'), ((11828, 11860), 'multiprocessing.freeze_support', 'multiprocessing.freeze_support', ([], {}), '()\n', (11858, 11860), False, 'import multiprocessing\n'), ((2015, 2030), 'astroquery.astrometry_net.AstrometryNet', 'AstrometryNet', ([], {}), '()\n', (2028, 2030), False, 'from astroquery.astrometry_net import AstrometryNet\n'), ((2941, 2972), 'PIL.Image.open', 'Image.open', (['downloaded_img_path'], {}), '(downloaded_img_path)\n', (2951, 2972), False, 'from PIL import Image\n'), ((3020, 3054), 'numpy.stack', 'np.stack', (['((img_data,) * 3)'], {'axis': '(-1)'}), '((img_data,) * 3, axis=-1)\n', (3028, 3054), True, 'import numpy as np\n'), ((5377, 5396), 'pathlib.Path', 'Path', (['args.imgs_dir'], {}), '(args.imgs_dir)\n', (5381, 5396), False, 'from pathlib import Path\n'), ((5794, 5843), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""|"""'}), "(csvfile, delimiter=',', quotechar='|')\n", (5804, 5843), False, 'import csv\n'), ((7933, 7940), 'timeit.default_timer', 'timer', ([], {}), '()\n', (7938, 7940), True, 'from timeit import default_timer as timer\n'), ((8189, 8196), 'timeit.default_timer', 'timer', ([], {}), '()\n', (8194, 8196), True, 'from timeit import default_timer as timer\n'), 
((8563, 8674), 'numpy.memmap', 'np.memmap', ([], {'filename': "(swap_path / 'current_patch.mmap')", 'shape': 'patch[0].data.shape', 'mode': '"""w+"""', 'dtype': 'np.float'}), "(filename=swap_path / 'current_patch.mmap', shape=patch[0].data.\n shape, mode='w+', dtype=np.float)\n", (8572, 8674), True, 'import numpy as np\n'), ((8859, 8960), 'numpy.memmap', 'np.memmap', ([], {'filename': "(swap_path / 'current_img.mmap')", 'shape': 'final_shape', 'mode': '"""w+"""', 'dtype': 'np.float'}), "(filename=swap_path / 'current_img.mmap', shape=final_shape, mode=\n 'w+', dtype=np.float)\n", (8868, 8960), True, 'import numpy as np\n'), ((9021, 9127), 'numpy.memmap', 'np.memmap', ([], {'filename': "(swap_path / 'current_footprint.mmap')", 'shape': 'final_shape', 'mode': '"""w+"""', 'dtype': 'np.float'}), "(filename=swap_path / 'current_footprint.mmap', shape=final_shape,\n mode='w+', dtype=np.float)\n", (9030, 9127), True, 'import numpy as np\n'), ((10747, 10779), 'numpy.nan_to_num', 'np.nan_to_num', (['array'], {'copy': '(False)'}), '(array, copy=False)\n', (10760, 10779), True, 'import numpy as np\n'), ((11631, 11669), 'numpy.clip', 'np.clip', (['final_image'], {'a_min': '(0)', 'a_max': '(1)'}), '(final_image, a_min=0, a_max=1)\n', (11638, 11669), True, 'import numpy as np\n'), ((703, 714), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (706, 714), False, 'from astropy.wcs import WCS\n'), ((6614, 6627), 'astropy.wcs.WCS', 'WCS', (["d['wcs']"], {}), "(d['wcs'])\n", (6617, 6627), False, 'from astropy.wcs import WCS\n'), ((9247, 9449), 'reproject.reproject_interp', 'reproject_interp', (['(mmapped_patch, patch[0].header)', 'final_wcs'], {'shape_out': 'final_shape', 'output_array': 'array', 'return_footprint': '(True)', 'output_footprint': 'footprint', 'block_size': '(700, 700)', 'parallel': '(True)'}), '((mmapped_patch, patch[0].header), final_wcs, shape_out=\n final_shape, output_array=array, return_footprint=True,\n output_footprint=footprint, block_size=(700, 700), parallel=True)\n', (9263, 9449), False, 'from reproject import reproject_exact, mosaicking, reproject_interp, reproject_adaptive\n'), ((11201, 11208), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11206, 11208), True, 'from timeit import default_timer as timer\n'), ((10387, 10394), 'timeit.default_timer', 'timer', ([], {}), '()\n', (10392, 10394), True, 'from timeit import default_timer as timer\n'), ((11079, 11086), 'timeit.default_timer', 'timer', ([], {}), '()\n', (11084, 11086), True, 'from timeit import default_timer as timer\n')] |
# Importing the basic Image library
from PIL import Image
import numpy, serial, time
BAUDRATE = 9600
def openSerial(port):
    ser = serial.Serial(port, baudrate = BAUDRATE, parity = serial.PARITY_NONE,
                        stopbits = 1, bytesize = 8)
    print("Using [ " + ser.name + " ]")
    return ser
def processImage(imageLoc):
# Opening up the target image
im = Image.open(imageLoc)
#displaying the target image for checking function
im.show()
#converting the image to greyscale and resizing
grey = im.convert('L')
# size = 248, 128
size = 128, 128
grey = grey.resize(size,Image.LANCZOS)
#displaying the final to ensure it worked well
grey.show()
#format is [width][height]
pixels = numpy.asarray(grey, dtype=numpy.uint8)
#cycling through and writing all pixel values to the PIC
print("[ %s ] dimension of array" % str(pixels.shape))
return pixels
def sendPixels(ser, pixelArray):
    height, width = pixelArray.shape
    count = 0
    for i in range(height):
        for j in range(width):
            count += 1
            print("[ %d ] out of [ %d ] uploading" % (count, height * width))
            # protocol: "p <pixel value>\r" writes one pixel to the PIC
            val = "p " + str(pixelArray[i][j]) + "\r"
            print("Writing [ %s ]" % val[:-1])
            ser.write(str.encode(val))
            time.sleep(0.03)
    # protocol: "e\r" tells the PIC the upload is finished
    exit = "e\r"
    ser.write(str.encode(exit))
    time.sleep(1)
    ser.close()
    print("[ Done ] All pixels Loaded")
##########################
def main():
    ser = openSerial("/dev/ttyUSB0")
    pixelArray = processImage("/home/ho-jung/Downloads/black_c.jpg")
    sendPixels(ser, pixelArray)
if __name__ == "__main__":
print("******[ Sculpt Start ]******")
main()
| [
"serial.Serial",
"numpy.asarray",
"time.sleep",
"PIL.Image.open"
] | [((134, 228), 'serial.Serial', 'serial.Serial', (['port'], {'baudrate': 'BAUDRATE', 'parity': 'serial.PARITY_NONE', 'stopbits': '(1)', 'bytesize': '(8)'}), '(port, baudrate=BAUDRATE, parity=serial.PARITY_NONE, stopbits=\n 1, bytesize=8)\n', (147, 228), False, 'import numpy, serial, time\n'), ((352, 372), 'PIL.Image.open', 'Image.open', (['imageLoc'], {}), '(imageLoc)\n', (362, 372), False, 'from PIL import Image\n'), ((737, 775), 'numpy.asarray', 'numpy.asarray', (['grey'], {'dtype': 'numpy.uint8'}), '(grey, dtype=numpy.uint8)\n', (750, 775), False, 'import numpy, serial, time\n'), ((1311, 1324), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1321, 1324), False, 'import numpy, serial, time\n'), ((1241, 1257), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (1251, 1257), False, 'import numpy, serial, time\n')] |
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pandas as pd
from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort
from numpy import linalg as LA
from sklearn.preprocessing import normalize
class DataPreprocessor:
"""Performs converting of the dataset to NumPy format for feeding into the machine learning algorithm."""
_float_precision = float64 # 64
def __init__(self, path_to_feature_values, path_to_labels, path_to_feature_graph=None):
        self.path_feat_val = path_to_feature_values
self.path_feat_graph = path_to_feature_graph
self.path_to_labels = path_to_labels
self.feature_values = pd.read_csv(path_to_feature_values)
        ## converting (transposing) the data frame: each sample (patient) corresponds to a row
cols = self.feature_values.columns.tolist()
self.feature_names = list(self.feature_values[cols[-1]])
self.feature_values = self.feature_values[cols[:-1]]
self.feature_values = self.feature_values.transpose()
self.feature_values.columns = self.feature_names
if self.path_feat_graph is not None:
self.adj_feature_graph = pd.read_csv(path_to_feature_graph)
else:
self.adj_feature_graph = None
# labels are in a row
self.labels = pd.read_csv(path_to_labels)
# if self.feature_values.shape[1] != self.adj_feature_graph.shape[0]:
# print("The graph dimensionality is not equal to the number of features in the dataset")
# raise
if self.feature_values.shape[0] != self.labels.shape[1]:
print("The number of patients %d is not equal to the number of the labels %d" % (
self.feature_values.shape[0], self.labels.shape[1]))
raise
def get_feature_values_as_np_array(self, columns=None):
"""Can extract values from fixed columns."""
if not columns:
return self.feature_values.values.astype(self._float_precision) # as_matrix()
else:
return self.feature_values[columns].values.astype(self._float_precision)
def get_labels_as_np_array(self):
return self.labels.values[0].astype(int) # [0] because wrapped in an additional dimension
def get_adj_feature_graph_as_np_array(self):
if self.adj_feature_graph is not None:
return self.adj_feature_graph.values.astype(self._float_precision)
else:
print("The adjacency matrix of the graph was not provided by the user")
raise
def get_data_frame_for_mRMR_method(self):
"""
Returns the data frame of feature values and labels.
        The first row: "class" "feat1" "feat2"...
        The second row: "label" "value of feat1" "value of feat2"
"""
values = self.get_feature_values_as_np_array() # *10e+7
print(max(values))
df = pd.DataFrame(data=values) # .astype(int))
feat_list = list(['class'])
feat_list.extend(self.feature_names)
df[len(df.columns)] = self.get_labels_as_np_array() # add labels to the right last side
df = df[[len(df.columns) - 1] + list(range(0, len(df.columns) - 1))] # put the labels into the left side
df.columns = feat_list # keeping order of columns for the next concatenation
return df
@staticmethod
def normalize_data_0_1(X, eps=5e-7):
"""
Normalize in 0_1 interval, each column of X is a feature, each row is a sample.
Returns three variables:
X normalized,
array of min
array of max values.
"""
column_max = max(X, axis=0).astype(float64)
column_min = min(X, axis=0).astype(float64)
non_zero_ind = column_max > eps
column_min = column_min[non_zero_ind]
column_max = column_max[non_zero_ind]
X_norm = X[:, non_zero_ind]
X_norm = (X_norm - column_min) / (column_max - column_min)
return X_norm, column_min, column_max, non_zero_ind
@staticmethod
def scale_data_0_1(X_val, column_min, column_max, non_zero_ind):
"""
Scaling the validation data according to the min and max of features in the training data.
Returns:
X_val scaled.
"""
return (X_val[:, non_zero_ind] - column_min) / (column_max - column_min)
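    # A minimal sketch (hypothetical data): fit the 0-1 scaling on training data
    # and reuse the same column statistics on validation data.
    #
    #   X_tr, X_val = random.rand(100, 5), random.rand(20, 5)
    #   X_tr_n, cmin, cmax, keep = DataPreprocessor.normalize_data_0_1(X_tr)
    #   X_val_n = DataPreprocessor.scale_data_0_1(X_val, cmin, cmax, keep)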
@staticmethod
def normalize_data(X, eps=5e-7):
"""
Z score calculation, each column of X is a feature, each row is a sample.
Returns three variables:
X normalized,
array of mean values
array of std values.
"""
column_std = std(X, axis=0, ddof=1).astype(float64) # along rows
print("column_std.shape:", column_std.shape)
print("column_std, num of el less than 1:", sum(column_std < 1))
column_mean = mean(X, axis=0).astype(float64)
non_zero_ind = column_std > eps
column_std = column_std[non_zero_ind]
column_mean = column_mean[non_zero_ind]
X_norm = X[:, non_zero_ind]
X_norm = (X_norm - column_mean) / column_std
return X_norm, column_mean, column_std, non_zero_ind
@staticmethod
def scale_data(X_val, column_mean, column_std, non_zero_ind):
"""
Scaling the validation data according to the mean and std of features in the training data.
Returns:
X_val scaled.
"""
return (X_val[:, non_zero_ind] - column_mean) / column_std
@staticmethod
def generate_Q(transition_matrix, q_power):
"""Generate Q^{q_power} for n variables."""
n = transition_matrix.shape[0]
q_tmp = tr_0 = identity(n)
for k in range(1, q_power + 1):
q_tmp += LA.matrix_power(transition_matrix, k)
        return array(normalize(q_tmp, norm='l1', axis=1), dtype='float64')  # float32 behaves incorrectly here
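    # A minimal sketch (hypothetical values): for a 3-state transition matrix P,
    # generate_Q(P, 2) returns the row-normalized sum I + P + P^2.
    #
    #   P = array([[0.0, 0.5, 0.5],
    #              [1.0, 0.0, 0.0],
    #              [0.0, 1.0, 0.0]])
    #   Q = DataPreprocessor.generate_Q(P, q_power=2)  # rows sum to 1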
| [
"pandas.DataFrame",
"pandas.read_csv",
"numpy.std",
"numpy.identity",
"numpy.max",
"numpy.linalg.matrix_power",
"numpy.min",
"sklearn.preprocessing.normalize",
"numpy.mean"
] | [((1741, 1776), 'pandas.read_csv', 'pd.read_csv', (['path_to_feature_values'], {}), '(path_to_feature_values)\n', (1752, 1776), True, 'import pandas as pd\n'), ((2398, 2425), 'pandas.read_csv', 'pd.read_csv', (['path_to_labels'], {}), '(path_to_labels)\n', (2409, 2425), True, 'import pandas as pd\n'), ((3979, 4004), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'values'}), '(data=values)\n', (3991, 4004), True, 'import pandas as pd\n'), ((6735, 6746), 'numpy.identity', 'identity', (['n'], {}), '(n)\n', (6743, 6746), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n'), ((2255, 2289), 'pandas.read_csv', 'pd.read_csv', (['path_to_feature_graph'], {}), '(path_to_feature_graph)\n', (2266, 2289), True, 'import pandas as pd\n'), ((3953, 3964), 'numpy.max', 'max', (['values'], {}), '(values)\n', (3956, 3964), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n'), ((6808, 6845), 'numpy.linalg.matrix_power', 'LA.matrix_power', (['transition_matrix', 'k'], {}), '(transition_matrix, k)\n', (6823, 6845), True, 'from numpy import linalg as LA\n'), ((6867, 6902), 'sklearn.preprocessing.normalize', 'normalize', (['q_tmp'], {'norm': '"""l1"""', 'axis': '(1)'}), "(q_tmp, norm='l1', axis=1)\n", (6876, 6902), False, 'from sklearn.preprocessing import normalize\n'), ((4716, 4730), 'numpy.max', 'max', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4719, 4730), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n'), ((4768, 4782), 'numpy.min', 'min', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (4771, 4782), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n'), ((5721, 5743), 'numpy.std', 'std', (['X'], {'axis': '(0)', 'ddof': '(1)'}), '(X, axis=0, ddof=1)\n', (5724, 5743), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n'), ((5923, 5938), 'numpy.mean', 'mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (5927, 5938), False, 'from numpy import uint8, float32, float64, std, mean, min, max, random, identity, array, argsort\n')] |
import numpy as np
import pandas as pd
def build_chart(array, min_label_percentage=0.05):
sizes = pd.value_counts(array)
N = array.shape[0]
out_labels = []
out_sizes = []
others_size = 0
    for index, values in sizes.items():
if (values/N) > min_label_percentage:
out_labels.append(index)
out_sizes.append(values)
else:
others_size += values
out_labels.append("Others")
out_sizes.append(others_size)
out_percentage = (np.array(out_sizes)/N).tolist()
return out_labels, out_percentage | [
"numpy.array",
"pandas.value_counts"
] | [((103, 125), 'pandas.value_counts', 'pd.value_counts', (['array'], {}), '(array)\n', (118, 125), True, 'import pandas as pd\n'), ((518, 537), 'numpy.array', 'np.array', (['out_sizes'], {}), '(out_sizes)\n', (526, 537), True, 'import numpy as np\n')] |
import numpy as np
import warnings
from .explainer import Explainer
class LinearExplainer(Explainer):
""" Computes SHAP values for a linear model, optionally accounting for inter-feature correlations.
This computes the SHAP values for a linear model and can account for the correlations among
the input features. Assuming features are independent leads to interventional SHAP values which
for a linear model are coef[i] * (x[i] - X.mean(0)[i]) for the ith feature. If instead we account
for correlations then we prevent any problems arising from colinearity and share credit among
correlated features. Accounting for correlations can be computationally challenging, but
LinearExplainer uses sampling to estimate a transform that can then be applied to explain
any prediction of the model.
Parameters
----------
model : (coef, intercept) or sklearn.linear_model.*
        User supplied linear model, either as a parameter pair or an sklearn object.
data : (mean, cov), numpy.array, pandas.DataFrame, or iml.DenseData
The background dataset to use for computing conditional expectations. Note that only the
mean and covariance of the dataset are used. This means passing a raw data matrix is just
        a convenient alternative to passing the mean and covariance directly.
nsamples : int
Number of samples to use when estimating the transformation matrix used to account for
feature correlations.
feature_dependence : "correlation" (default) or "interventional"
There are two ways we might want to compute SHAP values, either the full conditional SHAP
values or the interventional SHAP values. For interventional SHAP values we break any
dependence structure in the model and so uncover how the model would behave if we
intervened and changed some of the inputs. For the full conditional SHAP values we respect
the correlations among the input features, so if the model depends on one input but that
input is correlated with another input, then both get some credit for the model's behavior.
"""
def __init__(self, model, data, nsamples=1000, feature_dependence="correlation"):
self.nsamples = nsamples
self.feature_dependence = feature_dependence
# raw coefficents
if type(model) == tuple and len(model) == 2:
self.coef = model[0]
self.intercept = model[1]
# sklearn style model
elif hasattr(model, "coef_") and hasattr(model, "intercept_"):
# work around for multi-class with a single class
            if len(model.coef_.shape) > 1 and model.coef_.shape[0] == 1:
self.coef = model.coef_[0]
self.intercept = model.intercept_[0]
else:
self.coef = model.coef_
self.intercept = model.intercept_
else:
raise Exception("An unknown model type was passed: " + str(type(model)))
# convert DataFrame's to numpy arrays
if str(type(data)).endswith("'pandas.core.frame.DataFrame'>"):
data = data.values
# get the mean and covariance of the model
if type(data) == tuple and len(data) == 2:
self.mean = data[0]
self.cov = data[1]
elif str(type(data)).endswith("'numpy.ndarray'>"):
self.mean = data.mean(0)
self.cov = np.cov(data, rowvar=False)
elif data is None:
raise Exception("A background data distribution must be provided!")
self.expected_value = np.dot(self.coef, self.mean) + self.intercept
# if needed, estimate the transform matrices
if feature_dependence == "correlation":
mean_transform, x_transform = self._estimate_transforms(nsamples)
self.mean_transformed = np.matmul(mean_transform, self.mean)
self.x_transform = x_transform
elif feature_dependence == "interventional":
if nsamples != 1000:
warnings.warn("Setting nsamples has no effect when feature_dependence = 'interventional'!")
else:
raise Exception("Unknown type of feature_dependence provided: " + feature_dependence)
def _estimate_transforms(self, nsamples):
""" Uses block matrix inversion identities to quickly estimate transforms.
        After a bit of matrix math we can isolate a transform matrix (# features x # features)
that is independent of any sample we are explaining. It is the result of averaging over
all feature permutations, but we just use a fixed number of samples to estimate the value.
TODO: Do a brute force enumeration when # feature subsets is less than nsamples. This could
happen through a recursive method that uses the same block matrix inversion as below.
"""
coef = self.coef
cov = self.cov
M = len(self.coef)
mean_transform = np.zeros((M,M))
x_transform = np.zeros((M,M))
        inds = np.arange(M, dtype=np.int64)
for _ in range(nsamples):
np.random.shuffle(inds)
cov_inv_SiSi = np.zeros((0,0))
cov_Si = np.zeros((M,0))
for j in range(M):
i = inds[j]
# use the last Si as the new S
cov_S = cov_Si
cov_inv_SS = cov_inv_SiSi
# get the new cov_Si
cov_Si = self.cov[:,inds[:j+1]]
# compute the new cov_inv_SiSi from cov_inv_SS
d = cov_Si[i,:-1].T
t = np.matmul(cov_inv_SS, d)
Z = cov[i, i]
u = Z - np.matmul(t.T, d)
cov_inv_SiSi = np.zeros((j+1, j+1))
if j > 0:
cov_inv_SiSi[:-1, :-1] = cov_inv_SS + np.outer(t, t) / u
cov_inv_SiSi[:-1, -1] = cov_inv_SiSi[-1,:-1] = -t / u
cov_inv_SiSi[-1, -1] = 1 / u
# + coef @ (Q(bar(Sui)) - Q(bar(S)))
mean_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
coef_R_Si = np.matmul(self.coef[inds[j+1:]], np.matmul(cov_Si, cov_inv_SiSi)[inds[j+1:]])
mean_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
coef_R_S = np.matmul(self.coef[inds[j:]], np.matmul(cov_S, cov_inv_SS)[inds[j:]])
mean_transform[i, inds[:j]] -= coef_R_S
# - coef @ (Q(Sui) - Q(S))
x_transform[i, i] += self.coef[i]
# + coef @ R(Sui)
x_transform[i, inds[:j+1]] += coef_R_Si
# - coef @ R(S)
x_transform[i, inds[:j]] -= coef_R_S
mean_transform /= nsamples
x_transform /= nsamples
return mean_transform, x_transform
def shap_values(self, X):
""" Estimate the SHAP values for a set of samples.
Parameters
----------
X : numpy.array or pandas.DataFrame
A matrix of samples (# samples x # features) on which to explain the model's output.
Returns
-------
For models with a single output this returns a matrix of SHAP values
(# samples x # features). Each row sums to the difference between the model output for that
sample and the expected value of the model output (which is stored as expected_value
attribute of the explainer).
"""
# convert dataframes
if str(type(X)).endswith("pandas.core.series.Series'>"):
X = X.values
elif str(type(X)).endswith("'pandas.core.frame.DataFrame'>"):
X = X.values
assert str(type(X)).endswith("'numpy.ndarray'>"), "Unknown instance type: " + str(type(X))
assert len(X.shape) == 1 or len(X.shape) == 2, "Instance must have 1 or 2 dimensions!"
if self.feature_dependence == "correlation":
return np.matmul(X, self.x_transform.T) - self.mean_transformed
elif self.feature_dependence == "interventional":
return self.coef * (X - self.mean)
| [
"numpy.outer",
"numpy.zeros",
"numpy.arange",
"numpy.matmul",
"numpy.dot",
"numpy.cov",
"warnings.warn",
"numpy.random.shuffle"
] | [((4987, 5003), 'numpy.zeros', 'np.zeros', (['(M, M)'], {}), '((M, M))\n', (4995, 5003), True, 'import numpy as np\n'), ((5025, 5041), 'numpy.zeros', 'np.zeros', (['(M, M)'], {}), '((M, M))\n', (5033, 5041), True, 'import numpy as np\n'), ((5056, 5082), 'numpy.arange', 'np.arange', (['M'], {'dtype': 'np.int'}), '(M, dtype=np.int)\n', (5065, 5082), True, 'import numpy as np\n'), ((3603, 3631), 'numpy.dot', 'np.dot', (['self.coef', 'self.mean'], {}), '(self.coef, self.mean)\n', (3609, 3631), True, 'import numpy as np\n'), ((3865, 3901), 'numpy.matmul', 'np.matmul', (['mean_transform', 'self.mean'], {}), '(mean_transform, self.mean)\n', (3874, 3901), True, 'import numpy as np\n'), ((5129, 5152), 'numpy.random.shuffle', 'np.random.shuffle', (['inds'], {}), '(inds)\n', (5146, 5152), True, 'import numpy as np\n'), ((5180, 5196), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {}), '((0, 0))\n', (5188, 5196), True, 'import numpy as np\n'), ((5217, 5233), 'numpy.zeros', 'np.zeros', (['(M, 0)'], {}), '((M, 0))\n', (5225, 5233), True, 'import numpy as np\n'), ((3438, 3464), 'numpy.cov', 'np.cov', (['data'], {'rowvar': '(False)'}), '(data, rowvar=False)\n', (3444, 3464), True, 'import numpy as np\n'), ((5619, 5643), 'numpy.matmul', 'np.matmul', (['cov_inv_SS', 'd'], {}), '(cov_inv_SS, d)\n', (5628, 5643), True, 'import numpy as np\n'), ((5747, 5771), 'numpy.zeros', 'np.zeros', (['(j + 1, j + 1)'], {}), '((j + 1, j + 1))\n', (5755, 5771), True, 'import numpy as np\n'), ((7971, 8003), 'numpy.matmul', 'np.matmul', (['X', 'self.x_transform.T'], {}), '(X, self.x_transform.T)\n', (7980, 8003), True, 'import numpy as np\n'), ((4047, 4148), 'warnings.warn', 'warnings.warn', (['"""Setting nsamples has no effect when feature_dependence = \'interventional\'!"""'], {}), '(\n "Setting nsamples has no effect when feature_dependence = \'interventional\'!"\n )\n', (4060, 4148), False, 'import warnings\n'), ((5698, 5715), 'numpy.matmul', 'np.matmul', (['t.T', 'd'], {}), '(t.T, d)\n', (5707, 5715), True, 'import numpy as np\n'), ((6193, 6224), 'numpy.matmul', 'np.matmul', (['cov_Si', 'cov_inv_SiSi'], {}), '(cov_Si, cov_inv_SiSi)\n', (6202, 6224), True, 'import numpy as np\n'), ((6388, 6416), 'numpy.matmul', 'np.matmul', (['cov_S', 'cov_inv_SS'], {}), '(cov_S, cov_inv_SS)\n', (6397, 6416), True, 'import numpy as np\n'), ((5852, 5866), 'numpy.outer', 'np.outer', (['t', 't'], {}), '(t, t)\n', (5860, 5866), True, 'import numpy as np\n')] |
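
A small numeric check of the interventional case described in the docstring,
where the attribution for feature i is coef[i] * (x[i] - mean[i]); the model
and background data below are made up:

    import numpy as np

    coef = np.array([2.0, -1.0])
    intercept = 0.5
    background = np.array([[0.0, 0.0], [2.0, 4.0]])  # column means [1, 2]
    explainer = LinearExplainer((coef, intercept), background,
                                feature_dependence="interventional")
    phi = explainer.shap_values(np.array([3.0, 1.0]))
    # phi == coef * (x - mean) == [2*(3-1), -1*(1-2)] == [4.0, 1.0]
    # phi.sum() + explainer.expected_value recovers the model output 5.5
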
"""utility functions for the models module"""
from concurrent.futures import ProcessPoolExecutor as Pool
from itertools import islice
from typing import Callable, Iterable, Iterator, List, TypeVar
import numpy as np
from tqdm import tqdm
T = TypeVar('T')
def batches(it: Iterable[T], chunk_size: int) -> Iterator[List]:
"""Batch an iterable into batches of size chunk_size, with the final
batch potentially being smaller"""
it = iter(it)
return iter(lambda: list(islice(it, chunk_size)), [])
def get_model_types() -> List[str]:
return ['rf', 'gp', 'nn', 'mpn']
def feature_matrix(xs: Iterable[T], featurize: Callable[[T], np.ndarray],
ncpu: int = 0) -> np.ndarray:
"""Calculate the feature matrix of xs with the given featurization
function"""
if ncpu <= 1:
X = [featurize(x) for x in tqdm(xs, desc='Featurizing', smoothing=0.)]
else:
with Pool(max_workers=ncpu) as pool:
X = list(tqdm(pool.map(featurize, xs), desc='Featurizing'))
return np.array(X)
| [
"tqdm.tqdm",
"concurrent.futures.ProcessPoolExecutor",
"numpy.array",
"itertools.islice",
"typing.TypeVar"
] | [((244, 256), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (251, 256), False, 'from typing import Callable, Iterable, Iterator, List, TypeVar\n'), ((1036, 1047), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1044, 1047), True, 'import numpy as np\n'), ((916, 938), 'concurrent.futures.ProcessPoolExecutor', 'Pool', ([], {'max_workers': 'ncpu'}), '(max_workers=ncpu)\n', (920, 938), True, 'from concurrent.futures import ProcessPoolExecutor as Pool\n'), ((482, 504), 'itertools.islice', 'islice', (['it', 'chunk_size'], {}), '(it, chunk_size)\n', (488, 504), False, 'from itertools import islice\n'), ((849, 892), 'tqdm.tqdm', 'tqdm', (['xs'], {'desc': '"""Featurizing"""', 'smoothing': '(0.0)'}), "(xs, desc='Featurizing', smoothing=0.0)\n", (853, 892), False, 'from tqdm import tqdm\n')] |
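
Two quick illustrations of the helpers above; the squared-term featurizer is
made up for the example:

    import numpy as np

    chunks = list(batches(range(7), 3))
    # [[0, 1, 2], [3, 4, 5], [6]] -- the final batch may be short

    X = feature_matrix(range(4), lambda x: np.array([x, x * x]), ncpu=0)
    # X.shape == (4, 2); ncpu <= 1 keeps featurization in-process, which also
    # avoids the fact that a lambda cannot be pickled for the process pool
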
import argparse
import logging
import numpy as np
from human import Man, Child
from robot import Qolo, Wheelchair, Pepper
from controller import NoControl, AdmittanceController, PassiveDSController
from simulator import Simulator
human_class = {
"man": Man,
"child": Child,
}
robot_class = {
"qolo": Qolo,
"wheelchair": Wheelchair,
"pepper": Pepper,
}
controller_class = {
"no_control": NoControl,
"admittance": AdmittanceController,
"passive_ds": PassiveDSController,
}
def parse_arguments():
parser = argparse.ArgumentParser(
prog="HRC",
description="""Simulation of robot and human collision"""
)
parser.add_argument("-b", "--human",
choices=[key for key in human_class],
default="man",
help="Human to collide with the robot (default = man)")
parser.add_argument("-r", "--robot",
choices=[key for key in robot_class],
default="qolo",
help="Robot to collide with the human (default = qolo)")
parser.add_argument("-c", "--controller",
choices=[key for key in controller_class],
default="no_control",
help="Adaptive controller to use (default = no_control)")
parser.add_argument("-g", "--gui",
action="store_true",
help="Set to show GUI")
parser.add_argument("-v", "--video",
action="store_true",
help="Set to record video")
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_arguments()
simulator = Simulator(
Robot=robot_class[args.robot],
Human=human_class[args.human],
Controller=controller_class[args.controller],
show_GUI=args.gui,
make_video=args.video,
)
robot_angles = np.linspace(0, np.pi*2, 16, False)
human_angles = np.linspace(0, np.pi*2, 16, False)
robot_speed_factors = [0.5] # np.linspace(0.6, 1.4, 3, True)
human_speed_factors = [1.0]
gait_phases = [0] # np.linspace(0, 1, 4, False)
result = [
simulator.simulate(
robot_angle=robot_angle,
human_angle=human_angle,
gait_phase=gait_phase,
robot_speed_factor=robot_speed_factor,
human_speed_factor=human_speed_factor,
)
for robot_angle in robot_angles
for human_angle in human_angles
for robot_speed_factor in robot_speed_factors
for human_speed_factor in human_speed_factors
for gait_phase in gait_phases
]
np.save("controlled_collision.npy", result)
| [
"numpy.save",
"argparse.ArgumentParser",
"numpy.linspace",
"simulator.Simulator"
] | [((545, 640), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""HRC"""', 'description': '"""Simulation of robot and human collision"""'}), "(prog='HRC', description=\n 'Simulation of robot and human collision')\n", (568, 640), False, 'import argparse\n'), ((1741, 1908), 'simulator.Simulator', 'Simulator', ([], {'Robot': 'robot_class[args.robot]', 'Human': 'human_class[args.human]', 'Controller': 'controller_class[args.controller]', 'show_GUI': 'args.gui', 'make_video': 'args.video'}), '(Robot=robot_class[args.robot], Human=human_class[args.human],\n Controller=controller_class[args.controller], show_GUI=args.gui,\n make_video=args.video)\n', (1750, 1908), False, 'from simulator import Simulator\n'), ((1968, 2004), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(16)', '(False)'], {}), '(0, np.pi * 2, 16, False)\n', (1979, 2004), True, 'import numpy as np\n'), ((2022, 2058), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(16)', '(False)'], {}), '(0, np.pi * 2, 16, False)\n', (2033, 2058), True, 'import numpy as np\n'), ((2710, 2753), 'numpy.save', 'np.save', (['"""controlled_collision.npy"""', 'result'], {}), "('controlled_collision.npy', result)\n", (2717, 2753), True, 'import numpy as np\n')] |
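
The sweep above stores one entry per (robot_angle, human_angle, speed, gait)
combination, nested in that order, so 16 x 16 x 1 x 1 x 1 = 256 entries. A
sketch of reading the file back; allow_pickle is an assumption that holds if
simulate() returns Python objects rather than plain numeric arrays:

    import numpy as np

    results = np.load("controlled_collision.npy", allow_pickle=True)
    assert len(results) == 256
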
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
class ToyPlot(object):
def __init__(self, x_range, y_range):
range_x = np.linspace(x_range[0], x_range[1], 100)
range_y = np.linspace(y_range[0], y_range[1], 100)
self.extent = [x_range[0], x_range[1], y_range[0], y_range[1]]
self.X, self.Y = np.meshgrid(range_x, range_y)
pylab.rcParams['figure.figsize'] = 5, 5
self.repcolordict = {0: 'k-', 1: 'r-', 2: 'g-', 3: 'b-', 4: 'r-'}
self.contour_range = np.arange(0.0, 1.5, 0.1)
self._states = None
self._pes = None
self._interfaces = None
self._initcond = None
def add_pes(self, pes):
if self._pes is None:
self._pes = np.vectorize(CallablePES(pes))(self.X, self.Y)
def plot(self, trajectories=[]):
fig, ax = plt.subplots()
if self._pes is not None:
plt.contour(
self.X, self.Y, self._pes, levels=self.contour_range,
colors='k')
for traj in trajectories:
plt.plot(
traj.xyz[:, 0, 0], traj.xyz[:, 0, 1],
self.repcolordict[trajectories.index(traj) % 5], zorder=2)
return fig
def reset(self):
self._pes = None
self._interfaces = None
self._initcond = None
self._states = None
class CallablePES(object):
def __init__(self, pes):
self.pes = pes
def __call__(self, x, y):
self.positions = [x, y]
return self.pes.V(self) | [
"numpy.meshgrid",
"matplotlib.pyplot.contour",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((169, 209), 'numpy.linspace', 'np.linspace', (['x_range[0]', 'x_range[1]', '(100)'], {}), '(x_range[0], x_range[1], 100)\n', (180, 209), True, 'import numpy as np\n'), ((228, 268), 'numpy.linspace', 'np.linspace', (['y_range[0]', 'y_range[1]', '(100)'], {}), '(y_range[0], y_range[1], 100)\n', (239, 268), True, 'import numpy as np\n'), ((365, 394), 'numpy.meshgrid', 'np.meshgrid', (['range_x', 'range_y'], {}), '(range_x, range_y)\n', (376, 394), True, 'import numpy as np\n'), ((546, 570), 'numpy.arange', 'np.arange', (['(0.0)', '(1.5)', '(0.1)'], {}), '(0.0, 1.5, 0.1)\n', (555, 570), True, 'import numpy as np\n'), ((872, 886), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (884, 886), True, 'import matplotlib.pyplot as plt\n'), ((933, 1010), 'matplotlib.pyplot.contour', 'plt.contour', (['self.X', 'self.Y', 'self._pes'], {'levels': 'self.contour_range', 'colors': '"""k"""'}), "(self.X, self.Y, self._pes, levels=self.contour_range, colors='k')\n", (944, 1010), True, 'import matplotlib.pyplot as plt\n')] |
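
A minimal sketch of driving ToyPlot with a hand-rolled potential. The only
contract CallablePES relies on is a V(snapshot) method that reads
snapshot.positions, so the harmonic potential below is a made-up stand-in:

    import numpy as np

    class HarmonicPES:
        def V(self, snapshot):
            x, y = snapshot.positions
            return 0.5 * (x ** 2 + y ** 2)

    plot = ToyPlot(x_range=(-1.0, 1.0), y_range=(-1.0, 1.0))
    plot.add_pes(HarmonicPES())
    fig = plot.plot()  # draws contour levels 0.0 .. 1.4 in steps of 0.1
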
#! /usr/bin/env python
# Copyright 2019. IBM All Rights Reserved.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Inference client batch request example running TensorRT-optimized model
Usage: python resnet_v1_50_inference_client_batch_dataset.py --data_dir <dir> \
--max_test_images <number> --batch_size <number>
"""
import os
import argparse
import grpc
import requests
import numpy
import urllib.request, json
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras.preprocessing.image import load_img
from tensorflow.python.keras.preprocessing.image import img_to_array
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.compat.v1 import make_tensor_proto
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
_IMAGENET_SYNSET = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid=n02958343'
_IMAGES_DIR = 'car'
_IMAGENET_CLASS_INDEX_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json'
_IMAGE_HEIGHT = 256
_IMAGE_WIDTH = 256
_IMAGE_CHANNELS = 3
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default=None, help='The directory where to save downloaded files.')
parser.add_argument('--max_test_images', type=int, default=50, help='Limit number of loops.')
parser.add_argument('--batch_size', type=int, default=10, help='The number of samples in each batch.')
imagenet_class_index = None
# Test data is image-net images
def download_testdata(data_dir):
testdata_dir = os.path.join(data_dir, _IMAGES_DIR)
if not os.path.isdir(testdata_dir):
os.makedirs(testdata_dir)
r = requests.get(_IMAGENET_SYNSET)
image_url_list = r.text
image_urls = image_url_list.split()
actual_number_of_images_downloaded = 0
    for url in image_urls:
filename = url.split('/')[-1]
try:
r = requests.get(url, timeout=30)
        except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError) as err:
            print('Exception: ', err, 'url: ', url)
            continue  # r would be stale or unbound after a failed request
        if r.status_code == 200:
filename_and_path = os.path.join(testdata_dir, filename)
with open(filename_and_path, 'wb') as f:
f.write(r.content)
actual_number_of_images_downloaded += 1
if actual_number_of_images_downloaded >= args.max_test_images:
break
return testdata_dir
# Get file names of downloaded images
def get_filenames(testdata_dir):
photo_filenames = []
for filename in os.listdir(testdata_dir):
path = os.path.join(testdata_dir, filename)
photo_filenames.append(path)
return photo_filenames
# Convert predictions to image-net class label
def decode_predictions(predictions, top=3):
global imagenet_class_index
if imagenet_class_index is None:
with urllib.request.urlopen(_IMAGENET_CLASS_INDEX_URL) as url:
imagenet_class_index = json.loads(url.read())
top_predictions = predictions.argsort()
    result_classes = []
    i = 0
for prediction in reversed(list(top_predictions)):
result_class = tuple(imagenet_class_index[str(prediction-1)]) + (predictions[prediction],)
result_classes.append(result_class)
i += 1
if i >= top:
break
return result_classes
def main(args):
if args.batch_size > 64:
print('The maximum batch size for the model is 64.')
return
testdata_dir = download_testdata(args.data_dir)
photo_filenames = get_filenames(testdata_dir)
    num_test_images = args.max_test_images
    num_batch_images = args.batch_size
    predict_images = []
for filename in photo_filenames:
try:
# Load the image and resize to (224, 224)
image = load_img(filename, target_size=(224, 224))
except:
print('load_img exception - skipping image')
num_test_images -= 1
continue
# Add channels (224, 224, 3)
image = img_to_array(image)
# Scale pixels for Tensorflow
image = preprocess_input(image)
num_test_images -= 1
predict_images.append(image)
num_batch_images -= 1
if num_batch_images == 0 or num_test_images == 0:
predict_images = numpy.array(predict_images)
server = 'server:8500' # gRPC port
channel = grpc.insecure_channel(server)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'resnet_v1_50_fp32' # Model name from example
request.model_spec.signature_name = 'predict' # See saved_model_cli show command
request.inputs['input'].CopyFrom(
tf.compat.v1.make_tensor_proto(predict_images))
result = stub.Predict(request, 30.0) # 30 secs timeout
num_batch_probabilities = result.outputs['probabilities'].tensor_shape.dim[0].size
num_probabilities = result.outputs['probabilities'].tensor_shape.dim[1].size
probabilities_shape = (num_batch_probabilities, num_probabilities)
probabilities = numpy.array(result.outputs['probabilities'].float_val)
probabilities = numpy.reshape(probabilities, probabilities_shape)
for probability_batch in probabilities:
print("predictions: ", decode_predictions(probability_batch, top=3)) # Convert probabilities to class labels
            num_batch_images = args.batch_size
            predict_images = []
if num_test_images == 0:
break
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| [
"os.makedirs",
"argparse.ArgumentParser",
"tensorflow_serving.apis.predict_pb2.PredictRequest",
"os.path.isdir",
"tensorflow.python.keras.preprocessing.image.img_to_array",
"grpc.insecure_channel",
"tensorflow.python.keras.preprocessing.image.load_img",
"tensorflow.python.keras.applications.vgg16.prep... | [((1717, 1742), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1740, 1742), False, 'import argparse\n'), ((2156, 2191), 'os.path.join', 'os.path.join', (['data_dir', '_IMAGES_DIR'], {}), '(data_dir, _IMAGES_DIR)\n', (2168, 2191), False, 'import os\n'), ((3274, 3298), 'os.listdir', 'os.listdir', (['testdata_dir'], {}), '(testdata_dir)\n', (3284, 3298), False, 'import os\n'), ((2204, 2231), 'os.path.isdir', 'os.path.isdir', (['testdata_dir'], {}), '(testdata_dir)\n', (2217, 2231), False, 'import os\n'), ((2241, 2266), 'os.makedirs', 'os.makedirs', (['testdata_dir'], {}), '(testdata_dir)\n', (2252, 2266), False, 'import os\n'), ((2280, 2310), 'requests.get', 'requests.get', (['_IMAGENET_SYNSET'], {}), '(_IMAGENET_SYNSET)\n', (2292, 2310), False, 'import requests\n'), ((3315, 3351), 'os.path.join', 'os.path.join', (['testdata_dir', 'filename'], {}), '(testdata_dir, filename)\n', (3327, 3351), False, 'import os\n'), ((4738, 4757), 'tensorflow.python.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), '(image)\n', (4750, 4757), False, 'from tensorflow.python.keras.preprocessing.image import img_to_array\n'), ((4812, 4835), 'tensorflow.python.keras.applications.vgg16.preprocess_input', 'preprocess_input', (['image'], {}), '(image)\n', (4828, 4835), False, 'from tensorflow.python.keras.applications.vgg16 import preprocess_input\n'), ((4515, 4557), 'tensorflow.python.keras.preprocessing.image.load_img', 'load_img', (['filename'], {'target_size': '(224, 224)'}), '(filename, target_size=(224, 224))\n', (4523, 4557), False, 'from tensorflow.python.keras.preprocessing.image import load_img\n'), ((5022, 5049), 'numpy.array', 'numpy.array', (['predict_images'], {}), '(predict_images)\n', (5033, 5049), False, 'import numpy\n'), ((5132, 5161), 'grpc.insecure_channel', 'grpc.insecure_channel', (['server'], {}), '(server)\n', (5153, 5161), False, 'import grpc\n'), ((5181, 5239), 'tensorflow_serving.apis.prediction_service_pb2_grpc.PredictionServiceStub', 'prediction_service_pb2_grpc.PredictionServiceStub', (['channel'], {}), '(channel)\n', (5230, 5239), False, 'from tensorflow_serving.apis import prediction_service_pb2_grpc\n'), ((5263, 5291), 'tensorflow_serving.apis.predict_pb2.PredictRequest', 'predict_pb2.PredictRequest', ([], {}), '()\n', (5289, 5291), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((5939, 5993), 'numpy.array', 'numpy.array', (["result.outputs['probabilities'].float_val"], {}), "(result.outputs['probabilities'].float_val)\n", (5950, 5993), False, 'import numpy\n'), ((6022, 6071), 'numpy.reshape', 'numpy.reshape', (['probabilities', 'probabilities_shape'], {}), '(probabilities, probabilities_shape)\n', (6035, 6071), False, 'import numpy\n'), ((2576, 2605), 'requests.get', 'requests.get', (['url'], {'timeout': '(30)'}), '(url, timeout=30)\n', (2588, 2605), False, 'import requests\n'), ((2844, 2880), 'os.path.join', 'os.path.join', (['testdata_dir', 'filename'], {}), '(testdata_dir, filename)\n', (2856, 2880), False, 'import os\n'), ((5532, 5578), 'tensorflow.compat.v1.make_tensor_proto', 'tf.compat.v1.make_tensor_proto', (['predict_images'], {}), '(predict_images)\n', (5562, 5578), True, 'import tensorflow as tf\n')] |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from typing import List
import numpy as np
import torch.nn
import torch
from nboost.models.rerank.base import RerankModel
from nboost import defaults
class PtBertModel(RerankModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.logger.info('Loading from checkpoint %s' % self.model_dir)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if self.device == torch.device("cpu"):
self.logger.info("RUNNING ON CPU")
else:
self.logger.info("RUNNING ON CUDA")
torch.cuda.synchronize(self.device)
self.rerank_model = AutoModelForSequenceClassification.from_pretrained(self.model_dir)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_dir)
self.rerank_model.to(self.device, non_blocking=True)
def rank(self, query: str, choices: List[str],
filter_results=defaults.filter_results):
if len(choices) == 0:
return []
input_ids, attention_mask, token_type_ids = self.encode(query, choices)
with torch.no_grad():
logits = self.rerank_model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids)[0]
scores = logits.detach().cpu().numpy()
if filter_results:
scores = np.extract(scores[:, 0] < scores[:, 1], scores)
if len(scores.shape) > 1 and scores.shape[1] == 2:
scores = np.squeeze(scores[:, 1])
return list(np.argsort(scores)[::-1])
def encode(self, query: str, choices: List[str]):
inputs = [self.tokenizer.encode_plus(query, choice, add_special_tokens=True)
for choice in choices]
max_len = min(max(len(t['input_ids']) for t in inputs), self.max_seq_len)
input_ids = [t['input_ids'][:max_len] +
[0] * (max_len - len(t['input_ids'][:max_len])) for t in inputs]
attention_mask = [[1] * len(t['input_ids'][:max_len]) +
[0] * (max_len - len(t['input_ids'][:max_len])) for t in inputs]
token_type_ids = [t['token_type_ids'][:max_len] +
[0] * (max_len - len(t['token_type_ids'][:max_len])) for t in inputs]
input_ids = torch.tensor(input_ids).to(self.device, non_blocking=True)
attention_mask = torch.tensor(attention_mask).to(self.device, non_blocking=True)
token_type_ids = torch.tensor(token_type_ids).to(self.device, non_blocking=True)
return input_ids, attention_mask, token_type_ids
| [
"torch.cuda.synchronize",
"numpy.extract",
"numpy.argsort",
"transformers.AutoTokenizer.from_pretrained",
"torch.cuda.is_available",
"transformers.AutoModelForSequenceClassification.from_pretrained",
"torch.device",
"numpy.squeeze",
"torch.no_grad",
"torch.tensor"
] | [((732, 798), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['self.model_dir'], {}), '(self.model_dir)\n', (782, 798), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer\n'), ((824, 869), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['self.model_dir'], {}), '(self.model_dir)\n', (853, 869), False, 'from transformers import AutoModelForSequenceClassification, AutoTokenizer\n'), ((525, 544), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (537, 544), False, 'import torch\n'), ((667, 702), 'torch.cuda.synchronize', 'torch.cuda.synchronize', (['self.device'], {}), '(self.device)\n', (689, 702), False, 'import torch\n'), ((1184, 1199), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1197, 1199), False, 'import torch\n'), ((460, 485), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (483, 485), False, 'import torch\n'), ((1501, 1548), 'numpy.extract', 'np.extract', (['(scores[:, 0] < scores[:, 1])', 'scores'], {}), '(scores[:, 0] < scores[:, 1], scores)\n', (1511, 1548), True, 'import numpy as np\n'), ((1637, 1661), 'numpy.squeeze', 'np.squeeze', (['scores[:, 1]'], {}), '(scores[:, 1])\n', (1647, 1661), True, 'import numpy as np\n'), ((2435, 2458), 'torch.tensor', 'torch.tensor', (['input_ids'], {}), '(input_ids)\n', (2447, 2458), False, 'import torch\n'), ((2519, 2547), 'torch.tensor', 'torch.tensor', (['attention_mask'], {}), '(attention_mask)\n', (2531, 2547), False, 'import torch\n'), ((2608, 2636), 'torch.tensor', 'torch.tensor', (['token_type_ids'], {}), '(token_type_ids)\n', (2620, 2636), False, 'import torch\n'), ((1686, 1704), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (1696, 1704), True, 'import numpy as np\n')] |
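
The encode step above pads or truncates every (query, choice) pair to the
longest sequence in the batch, capped at max_seq_len. The padding logic in
isolation, detached from the tokenizer:

    def pad(ids, max_len):
        ids = ids[:max_len]
        return ids + [0] * (max_len - len(ids))

    print(pad([101, 7592, 102], 5))                  # [101, 7592, 102, 0, 0]
    print(pad([101, 7592, 102, 2088, 999, 102], 5))  # truncated to 5 ids
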
#!/usr/bin/env python
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import boxcox, boxcox_normplot, boxcox_normmax
class BoxCox(object):
@staticmethod
def transform(samples, shape):
samples = np.asarray(samples)
return boxcox(samples, shape)
@staticmethod
def back_transform(data, shape):
data = np.asarray(data)
        if shape == 0.0:
return np.exp(data)
else:
for d in data:
if np.isnan(np.power(shape*d+1, 1/shape)):
assert False, 'data: {}'.format(d)
trans_data = np.power(shape*data+1, 1/shape)
return trans_data
@staticmethod
def find_lambda(samples):
return boxcox_normmax(samples)
@staticmethod
def test(samples, la=-20, lb=20):
fig = plt.figure()
ax = fig.add_subplot(111)
prob = boxcox_normplot(samples, la, lb, plot=ax)
best_lambda = boxcox_normmax(samples)
ax.axvline(best_lambda, color='r')
plt.show()
| [
"matplotlib.pyplot.show",
"scipy.stats.boxcox",
"numpy.power",
"numpy.asarray",
"scipy.stats.boxcox_normplot",
"matplotlib.pyplot.figure",
"scipy.stats.boxcox_normmax",
"numpy.exp"
] | [((257, 276), 'numpy.asarray', 'np.asarray', (['samples'], {}), '(samples)\n', (267, 276), True, 'import numpy as np\n'), ((292, 314), 'scipy.stats.boxcox', 'boxcox', (['samples', 'shape'], {}), '(samples, shape)\n', (298, 314), False, 'from scipy.stats import boxcox, boxcox_normplot, boxcox_normmax\n'), ((386, 402), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (396, 402), True, 'import numpy as np\n'), ((765, 788), 'scipy.stats.boxcox_normmax', 'boxcox_normmax', (['samples'], {}), '(samples)\n', (779, 788), False, 'from scipy.stats import boxcox, boxcox_normplot, boxcox_normmax\n'), ((860, 872), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (870, 872), True, 'import matplotlib.pyplot as plt\n'), ((922, 963), 'scipy.stats.boxcox_normplot', 'boxcox_normplot', (['samples', 'la', 'lb'], {'plot': 'ax'}), '(samples, la, lb, plot=ax)\n', (937, 963), False, 'from scipy.stats import boxcox, boxcox_normplot, boxcox_normmax\n'), ((986, 1009), 'scipy.stats.boxcox_normmax', 'boxcox_normmax', (['samples'], {}), '(samples)\n', (1000, 1009), False, 'from scipy.stats import boxcox, boxcox_normplot, boxcox_normmax\n'), ((1061, 1071), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1069, 1071), True, 'import matplotlib.pyplot as plt\n'), ((446, 458), 'numpy.exp', 'np.exp', (['data'], {}), '(data)\n', (452, 458), True, 'import numpy as np\n'), ((639, 676), 'numpy.power', 'np.power', (['(shape * data + 1)', '(1 / shape)'], {}), '(shape * data + 1, 1 / shape)\n', (647, 676), True, 'import numpy as np\n'), ((528, 562), 'numpy.power', 'np.power', (['(shape * d + 1)', '(1 / shape)'], {}), '(shape * d + 1, 1 / shape)\n', (536, 562), True, 'import numpy as np\n')] |
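
A round-trip sanity check for the transform/back-transform pair above; with an
explicit lmbda, scipy's boxcox returns just the transformed data, and
back_transform inverts y = (x**lam - 1)/lam via x = (lam*y + 1)**(1/lam):

    import numpy as np

    samples = np.array([1.0, 2.0, 5.0, 10.0])
    lam = BoxCox.find_lambda(samples)
    transformed = BoxCox.transform(samples, lam)
    restored = BoxCox.back_transform(transformed, lam)
    assert np.allclose(restored, samples)
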
from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp
import numpy
import pytest
from pathlib import Path
DATA_DIR = Path(__file__).absolute().parent.parent.parent / Path('data')
@pytest.fixture
def filename():
return str(DATA_DIR / Path('lfric_diag_wind.nc'))
@pytest.fixture
def grid(filename):
gr = Grid()
gr.setFlags(1, 1)
gr.loadFromUgrid2D(f'{filename}$Mesh2d')
return gr
@pytest.fixture
def target_points():
# target points
nx, ny = 16, 8
llon, llat = numpy.meshgrid(numpy.linspace(-170., 170., nx),
numpy.linspace(-80., 80., ny))
ntarget = llon.shape[0] * llon.shape[1]
target_points = numpy.zeros((ntarget, 3), numpy.float64)
target_points[:, 0] = llon.flat
target_points[:, 1] = llat.flat
return target_points
@pytest.fixture
def connectivity(filename):
import netCDF4
nc = netCDF4.Dataset(filename)
# longitudes and latitudes at cell vertices
lon = nc.variables['Mesh2d_node_x']
lat = nc.variables['Mesh2d_node_y']
# edge to node connectivity
edge_node_connect = nc.variables['Mesh2d_edge_nodes']
return {'lon': lon, 'lat': lat, 'edge_node_connect': edge_node_connect}
def test_east_w1_xyz(grid, target_points, connectivity):
"""
Test computation of edge integrals using an eastward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.ones((nedge,), numpy.float64) # east
u2 = numpy.zeros((nedge,), numpy.float64)
ue_integrated = getIntegralsInXYZ(lon, lat, edge_node_connect,
u1, u2, w1=True)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getEdgeVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 1]/vects[:, 0]))/ntarget
print(f'test_east_w1_xyz: error = {error}')
assert(error < 0.29)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_east_w1_xyz')
pylab.show()
def test_east_w1_lonlat(grid, target_points, connectivity):
"""
Test computation of edge integrals using an eastward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.ones((nedge,), numpy.float64) # east
u2 = numpy.zeros((nedge,), numpy.float64)
ue_integrated = getIntegralsInLonLat(lon, lat, edge_node_connect,
u1, u2, w1=True)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getEdgeVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 1]/vects[:, 0]))/ntarget
print(f'test_east_w1_lonlat: error = {error}')
assert(error < 2.e-8)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_east_w1_lonlat')
pylab.show()
def test_east_w2_xyz(grid, target_points, connectivity):
"""
Test computation of face integrals using an eastward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.ones((nedge,), numpy.float64) # east
u2 = numpy.zeros((nedge,), numpy.float64)
ue_integrated = getIntegralsInXYZ(lon, lat, edge_node_connect,
u1, u2, w1=False)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getFaceVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 1]/vects[:, 0]))/ntarget
print(f'test_east_w2_xyz: error = {error}')
assert(error < 0.29)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_east_w2_xyz')
pylab.show()
def test_east_w2_lonlat(grid, target_points, connectivity):
"""
Test computation of face integrals using an eastward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.ones((nedge,), numpy.float64) # east
u2 = numpy.zeros((nedge,), numpy.float64)
ue_integrated = getIntegralsInLonLat(lon, lat, edge_node_connect,
u1, u2, w1=False)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getFaceVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 1]/vects[:, 0]))/ntarget
print(f'test_east_w2_lonlat: error = {error}')
assert(error < 1.e-12)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_east_w2_lonlat')
pylab.show()
def test_north_w1_xyz(grid, target_points, connectivity):
"""
    Test computation of edge integrals using a northward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.zeros((nedge,), numpy.float64)
u2 = numpy.ones((nedge,), numpy.float64) # north
ue_integrated = getIntegralsInXYZ(lon, lat, edge_node_connect,
u1, u2, w1=True)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getEdgeVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 0]/vects[:, 1]))/ntarget
print(f'test_north_w1_xyz: error = {error}')
assert(error < 0.004)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_north_w1_xyz')
pylab.show()
def test_north_w1_lonlat(grid, target_points, connectivity):
"""
    Test computation of edge integrals using a northward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.zeros((nedge,), numpy.float64)
u2 = numpy.ones((nedge,), numpy.float64) # north
ue_integrated = getIntegralsInLonLat(lon, lat, edge_node_connect,
u1, u2, w1=True)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getEdgeVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 0]/vects[:, 1]))/ntarget
print(f'test_north_w1_lonlat: error = {error}')
assert(error < 1.e-12)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_north_w1_lonlat')
pylab.show()
def test_north_w2_xyz(grid, target_points, connectivity):
"""
    Test computation of face integrals using a northward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.zeros((nedge,), numpy.float64)
u2 = numpy.ones((nedge,), numpy.float64) # north
ue_integrated = getIntegralsInXYZ(lon, lat, edge_node_connect,
u1, u2, w1=False)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getFaceVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 0]/vects[:, 1]))/ntarget
print(f'test_north_w2_xyz: error = {error}')
assert(error < 0.29)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_north_w2_xyz')
pylab.show()
def test_north_w2_lonlat(grid, target_points, connectivity):
"""
    Test computation of face integrals using a northward vector field
"""
nedge = grid.getNumberOfEdges()
ntarget = target_points.shape[0]
lon = connectivity['lon']
lat = connectivity['lat']
edge_node_connect = connectivity['edge_node_connect']
# set the components
u1 = numpy.zeros((nedge,), numpy.float64)
u2 = numpy.ones((nedge,), numpy.float64) # north
ue_integrated = getIntegralsInLonLat(lon, lat, edge_node_connect,
u1, u2, w1=False)
vi = VectorInterp()
vi.setGrid(grid)
vi.buildLocator(numCellsPerBucket=100)
vi.findPoints(target_points, tol2=1.e-10)
vects = vi.getFaceVectors(ue_integrated, placement=1)
error = numpy.sum(numpy.fabs(vects[:, 0]/vects[:, 1]))/ntarget
print(f'test_north_w2_lonlat: error = {error}')
assert(error < 2.e-8)
if False:
from matplotlib import pylab
pylab.quiver(target_points[:, 0], target_points[:, 1],
vects[:, 0], vects[:, 1])
pylab.title('test_north_w2_lonlat')
pylab.show()
| [
"netCDF4.Dataset",
"mint.VectorInterp",
"matplotlib.pylab.title",
"numpy.zeros",
"numpy.ones",
"mint.getIntegralsInLonLat",
"pathlib.Path",
"matplotlib.pylab.quiver",
"numpy.fabs",
"numpy.linspace",
"mint.getIntegralsInXYZ",
"mint.Grid",
"matplotlib.pylab.show"
] | [((190, 202), 'pathlib.Path', 'Path', (['"""data"""'], {}), "('data')\n", (194, 202), False, 'from pathlib import Path\n'), ((338, 344), 'mint.Grid', 'Grid', ([], {}), '()\n', (342, 344), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((696, 736), 'numpy.zeros', 'numpy.zeros', (['(ntarget, 3)', 'numpy.float64'], {}), '((ntarget, 3), numpy.float64)\n', (707, 736), False, 'import numpy\n'), ((908, 933), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename'], {}), '(filename)\n', (923, 933), False, 'import netCDF4\n'), ((1601, 1636), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (1611, 1636), False, 'import numpy\n'), ((1655, 1691), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (1666, 1691), False, 'import numpy\n'), ((1712, 1775), 'mint.getIntegralsInXYZ', 'getIntegralsInXYZ', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(True)'}), '(lon, lat, edge_node_connect, u1, u2, w1=True)\n', (1729, 1775), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((1823, 1837), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (1835, 1837), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((2747, 2782), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (2757, 2782), False, 'import numpy\n'), ((2801, 2837), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (2812, 2837), False, 'import numpy\n'), ((2858, 2924), 'mint.getIntegralsInLonLat', 'getIntegralsInLonLat', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(True)'}), '(lon, lat, edge_node_connect, u1, u2, w1=True)\n', (2878, 2924), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((2975, 2989), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (2987, 2989), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((3903, 3938), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (3913, 3938), False, 'import numpy\n'), ((3957, 3993), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (3968, 3993), False, 'import numpy\n'), ((4014, 4078), 'mint.getIntegralsInXYZ', 'getIntegralsInXYZ', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(False)'}), '(lon, lat, edge_node_connect, u1, u2, w1=False)\n', (4031, 4078), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((4126, 4140), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (4138, 4140), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((5050, 5085), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (5060, 5085), False, 'import numpy\n'), ((5104, 5140), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (5115, 5140), False, 'import numpy\n'), ((5161, 5228), 'mint.getIntegralsInLonLat', 'getIntegralsInLonLat', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(False)'}), '(lon, lat, edge_node_connect, u1, u2, w1=False)\n', (5181, 5228), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((5279, 5293), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (5291, 5293), 
False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((6209, 6245), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (6220, 6245), False, 'import numpy\n'), ((6255, 6290), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (6265, 6290), False, 'import numpy\n'), ((6320, 6383), 'mint.getIntegralsInXYZ', 'getIntegralsInXYZ', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(True)'}), '(lon, lat, edge_node_connect, u1, u2, w1=True)\n', (6337, 6383), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((6431, 6445), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (6443, 6445), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((7359, 7395), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (7370, 7395), False, 'import numpy\n'), ((7405, 7440), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (7415, 7440), False, 'import numpy\n'), ((7470, 7536), 'mint.getIntegralsInLonLat', 'getIntegralsInLonLat', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(True)'}), '(lon, lat, edge_node_connect, u1, u2, w1=True)\n', (7490, 7536), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((7587, 7601), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (7599, 7601), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((8519, 8555), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (8530, 8555), False, 'import numpy\n'), ((8565, 8600), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (8575, 8600), False, 'import numpy\n'), ((8630, 8694), 'mint.getIntegralsInXYZ', 'getIntegralsInXYZ', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(False)'}), '(lon, lat, edge_node_connect, u1, u2, w1=False)\n', (8647, 8694), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((8742, 8756), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (8754, 8756), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((9670, 9706), 'numpy.zeros', 'numpy.zeros', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (9681, 9706), False, 'import numpy\n'), ((9716, 9751), 'numpy.ones', 'numpy.ones', (['(nedge,)', 'numpy.float64'], {}), '((nedge,), numpy.float64)\n', (9726, 9751), False, 'import numpy\n'), ((9781, 9848), 'mint.getIntegralsInLonLat', 'getIntegralsInLonLat', (['lon', 'lat', 'edge_node_connect', 'u1', 'u2'], {'w1': '(False)'}), '(lon, lat, edge_node_connect, u1, u2, w1=False)\n', (9801, 9848), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((9899, 9913), 'mint.VectorInterp', 'VectorInterp', ([], {}), '()\n', (9911, 9913), False, 'from mint import Grid, getIntegralsInXYZ, getIntegralsInLonLat, VectorInterp\n'), ((536, 569), 'numpy.linspace', 'numpy.linspace', (['(-170.0)', '(170.0)', 'nx'], {}), '(-170.0, 170.0, nx)\n', (550, 569), False, 'import numpy\n'), ((601, 632), 'numpy.linspace', 'numpy.linspace', (['(-80.0)', '(80.0)', 'ny'], {}), '(-80.0, 80.0, ny)\n', (615, 632), False, 'import numpy\n'), ((2208, 2293), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 
'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (2220, 2293), False, 'from matplotlib import pylab\n'), ((2318, 2349), 'matplotlib.pylab.title', 'pylab.title', (['"""test_east_w1_xyz"""'], {}), "('test_east_w1_xyz')\n", (2329, 2349), False, 'from matplotlib import pylab\n'), ((2358, 2370), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (2368, 2370), False, 'from matplotlib import pylab\n'), ((3364, 3449), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (3376, 3449), False, 'from matplotlib import pylab\n'), ((3474, 3508), 'matplotlib.pylab.title', 'pylab.title', (['"""test_east_w1_lonlat"""'], {}), "('test_east_w1_lonlat')\n", (3485, 3508), False, 'from matplotlib import pylab\n'), ((3517, 3529), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (3527, 3529), False, 'from matplotlib import pylab\n'), ((4511, 4596), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (4523, 4596), False, 'from matplotlib import pylab\n'), ((4621, 4652), 'matplotlib.pylab.title', 'pylab.title', (['"""test_east_w2_xyz"""'], {}), "('test_east_w2_xyz')\n", (4632, 4652), False, 'from matplotlib import pylab\n'), ((4661, 4673), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (4671, 4673), False, 'from matplotlib import pylab\n'), ((5669, 5754), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (5681, 5754), False, 'from matplotlib import pylab\n'), ((5779, 5813), 'matplotlib.pylab.title', 'pylab.title', (['"""test_east_w2_lonlat"""'], {}), "('test_east_w2_lonlat')\n", (5790, 5813), False, 'from matplotlib import pylab\n'), ((5822, 5834), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (5832, 5834), False, 'from matplotlib import pylab\n'), ((6818, 6903), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (6830, 6903), False, 'from matplotlib import pylab\n'), ((6928, 6960), 'matplotlib.pylab.title', 'pylab.title', (['"""test_north_w1_xyz"""'], {}), "('test_north_w1_xyz')\n", (6939, 6960), False, 'from matplotlib import pylab\n'), ((6969, 6981), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (6979, 6981), False, 'from matplotlib import pylab\n'), ((7978, 8063), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (7990, 8063), False, 'from matplotlib import pylab\n'), ((8088, 8123), 'matplotlib.pylab.title', 'pylab.title', (['"""test_north_w1_lonlat"""'], {}), "('test_north_w1_lonlat')\n", (8099, 8123), False, 'from matplotlib import pylab\n'), ((8132, 8144), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (8142, 8144), False, 'from matplotlib import pylab\n'), ((9128, 9213), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], 
target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (9140, 9213), False, 'from matplotlib import pylab\n'), ((9238, 9270), 'matplotlib.pylab.title', 'pylab.title', (['"""test_north_w2_xyz"""'], {}), "('test_north_w2_xyz')\n", (9249, 9270), False, 'from matplotlib import pylab\n'), ((9279, 9291), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (9289, 9291), False, 'from matplotlib import pylab\n'), ((10289, 10374), 'matplotlib.pylab.quiver', 'pylab.quiver', (['target_points[:, 0]', 'target_points[:, 1]', 'vects[:, 0]', 'vects[:, 1]'], {}), '(target_points[:, 0], target_points[:, 1], vects[:, 0], vects[:, 1]\n )\n', (10301, 10374), False, 'from matplotlib import pylab\n'), ((10399, 10434), 'matplotlib.pylab.title', 'pylab.title', (['"""test_north_w2_lonlat"""'], {}), "('test_north_w2_lonlat')\n", (10410, 10434), False, 'from matplotlib import pylab\n'), ((10443, 10455), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (10453, 10455), False, 'from matplotlib import pylab\n'), ((263, 289), 'pathlib.Path', 'Path', (['"""lfric_diag_wind.nc"""'], {}), "('lfric_diag_wind.nc')\n", (267, 289), False, 'from pathlib import Path\n'), ((2029, 2066), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 1] / vects[:, 0])'], {}), '(vects[:, 1] / vects[:, 0])\n', (2039, 2066), False, 'import numpy\n'), ((3181, 3218), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 1] / vects[:, 0])'], {}), '(vects[:, 1] / vects[:, 0])\n', (3191, 3218), False, 'import numpy\n'), ((4332, 4369), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 1] / vects[:, 0])'], {}), '(vects[:, 1] / vects[:, 0])\n', (4342, 4369), False, 'import numpy\n'), ((5485, 5522), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 1] / vects[:, 0])'], {}), '(vects[:, 1] / vects[:, 0])\n', (5495, 5522), False, 'import numpy\n'), ((6637, 6674), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 0] / vects[:, 1])'], {}), '(vects[:, 0] / vects[:, 1])\n', (6647, 6674), False, 'import numpy\n'), ((7793, 7830), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 0] / vects[:, 1])'], {}), '(vects[:, 0] / vects[:, 1])\n', (7803, 7830), False, 'import numpy\n'), ((8948, 8985), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 0] / vects[:, 1])'], {}), '(vects[:, 0] / vects[:, 1])\n', (8958, 8985), False, 'import numpy\n'), ((10105, 10142), 'numpy.fabs', 'numpy.fabs', (['(vects[:, 0] / vects[:, 1])'], {}), '(vects[:, 0] / vects[:, 1])\n', (10115, 10142), False, 'import numpy\n'), ((141, 155), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (145, 155), False, 'from pathlib import Path\n')] |
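
The target_points fixture above flattens a lon/lat mesh into an (ntarget, 3)
array whose vertical column stays zero; the same pattern in isolation, on a
smaller 4 x 2 grid:

    import numpy

    llon, llat = numpy.meshgrid(numpy.linspace(-170., 170., 4),
                               numpy.linspace(-80., 80., 2))
    pts = numpy.zeros((llon.size, 3))
    pts[:, 0] = llon.flat
    pts[:, 1] = llat.flat
    # pts.shape == (8, 3); column 2 is left at 0 for surface points
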
import cv2
import h5py
import scipy
from scipy import ndimage
import hashlib
import numpy as np
import os
import re
import sys
import time
import tifffile
from skimage import transform
from skimage import feature
import random
def find_majority_element_in_list(k):
    """Return the most frequently occurring element of the list `k`."""
    myMap = {}
    maximum = ('', 0)  # (occurring element, occurrences)
    for n in k:
        if n in myMap:
            myMap[n] += 1
        else:
            myMap[n] = 1
        # keep track of the maximum on the go
        if myMap[n] > maximum[1]:
            maximum = (n, myMap[n])
return maximum[0]
def _hist_match(source, template):
"""
Adjust the pixel values of a grayscale image such that its histogram
matches that of a target image
Arguments:
-----------
source: np.ndarray
Image to transform; the histogram is computed over the flattened
array
template: np.ndarray
Template image; can have different dimensions to source
Returns:
-----------
matched: np.ndarray
The transformed output image
"""
oldshape = source.shape
source = source.ravel()
template = template.ravel()
# get the set of unique pixel values and their corresponding indices and
# counts
s_values, bin_idx, s_counts = np.unique(source, return_inverse=True,
return_counts=True)
t_values, t_counts = np.unique(template, return_counts=True)
# take the cumsum of the counts and normalize by the number of pixels to
# get the empirical cumulative distribution functions for the source and
# template images (maps pixel value --> quantile)
s_quantiles = np.cumsum(s_counts).astype(np.float64)
s_quantiles /= s_quantiles[-1]
t_quantiles = np.cumsum(t_counts).astype(np.float64)
t_quantiles /= t_quantiles[-1]
# interpolate linearly to find the pixel values in the template image
# that correspond most closely to the quantiles in the source image
interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
return interp_t_values[bin_idx].reshape(oldshape)
def img_flip(im, flipH, flipV):
"""
Flip image.
"""
imFlip = im.copy()
if flipH:
imFlip = np.flip(imFlip, axis=1)
if flipV:
imFlip = np.flip(imFlip, axis=0)
return(imFlip)
def is_number(s):
"""
Check if a string is a number.
"""
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
# Define a matlab like gaussian 2D filter
def matlab_style_gauss2D(shape=(7,7),sigma=1):
"""
2D gaussian filter - should give the same result as:
MATLAB's fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
h = h*2.0
h = h.astype('float32')
return h
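# A minimal usage sketch of `matlab_style_gauss2D`, added for illustration
# (not part of the original module). Because of the final `h*2.0` above, the
# kernel entries sum to ~2.0 rather than the usual 1.0.
def _demo_gauss2D():
    kernel = matlab_style_gauss2D(shape=(7, 7), sigma=1)
    assert abs(kernel.sum() - 2.0) < 1e-4
    return kernel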
def print_checkpoint(message):
"""
Print message and current time
"""
print(message)
tabs = message.count("\t")
print(("\t" * tabs) + time.asctime(time.localtime(time.time())) + "\n")
sys.stdout.flush()
def print_warning(error_message=""):
sys.stderr.write("Warning:\n" + error_message)
| [
"numpy.flip",
"unicodedata.numeric",
"time.time",
"numpy.cumsum",
"numpy.finfo",
"sys.stdout.flush",
"numpy.exp",
"numpy.interp",
"sys.stderr.write",
"numpy.unique"
] | [((1272, 1330), 'numpy.unique', 'np.unique', (['source'], {'return_inverse': '(True)', 'return_counts': '(True)'}), '(source, return_inverse=True, return_counts=True)\n', (1281, 1330), True, 'import numpy as np\n'), ((1400, 1439), 'numpy.unique', 'np.unique', (['template'], {'return_counts': '(True)'}), '(template, return_counts=True)\n', (1409, 1439), True, 'import numpy as np\n'), ((2002, 2047), 'numpy.interp', 'np.interp', (['s_quantiles', 't_quantiles', 't_values'], {}), '(s_quantiles, t_quantiles, t_values)\n', (2011, 2047), True, 'import numpy as np\n'), ((2949, 2997), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2.0 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2.0 * sigma * sigma))\n', (2955, 2997), True, 'import numpy as np\n'), ((3392, 3410), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3408, 3410), False, 'import sys\n'), ((3457, 3503), 'sys.stderr.write', 'sys.stderr.write', (["('Warning:\\n' + error_message)"], {}), "('Warning:\\n' + error_message)\n", (3473, 3503), False, 'import sys\n'), ((2233, 2256), 'numpy.flip', 'np.flip', (['imFlip'], {'axis': '(1)'}), '(imFlip, axis=1)\n', (2240, 2256), True, 'import numpy as np\n'), ((2288, 2311), 'numpy.flip', 'np.flip', (['imFlip'], {'axis': '(0)'}), '(imFlip, axis=0)\n', (2295, 2311), True, 'import numpy as np\n'), ((2539, 2561), 'unicodedata.numeric', 'unicodedata.numeric', (['s'], {}), '(s)\n', (2558, 2561), False, 'import unicodedata\n'), ((1667, 1686), 'numpy.cumsum', 'np.cumsum', (['s_counts'], {}), '(s_counts)\n', (1676, 1686), True, 'import numpy as np\n'), ((1759, 1778), 'numpy.cumsum', 'np.cumsum', (['t_counts'], {}), '(t_counts)\n', (1768, 1778), True, 'import numpy as np\n'), ((3033, 3050), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (3041, 3050), True, 'import numpy as np\n'), ((3366, 3377), 'time.time', 'time.time', ([], {}), '()\n', (3375, 3377), False, 'import time\n')] |
#!/usr/bin/env python3
import argparse
from collections import Counter
from itertools import combinations
from math import lgamma, log, factorial
import numpy as np
import operator
import os
import pandas as pd
from functools import reduce
import sys
import time
import warnings
###############################
##### AUXILIARY FUNCTIONS #####
###############################
def nCr(n, r):
"""
Returns the number of combinations of r elements out a total of n.
"""
f = factorial
return f(n) // f(r) // f(n-r)
def lg(n,x):
    """Return log(Gamma(n+x)/Gamma(x)), i.e. the log rising factorial of x."""
    return lgamma(n+x) - lgamma(x)
def onepositive(dist):
"""
True if there only one positive count in `dist`.
"""
npos = 0
for n in dist:
if n > 0:
npos += 1
if npos > 1:
return False
return True
def ml(dist):
"""
Compute the maximum likelihood given full instantiation counts in dist.
"""
tot = sum(dist)
res = 0.0
for n in dist:
if n > 0:
res += n*log(n/tot)
return res
def ml_sum(distss):
"""
Compute the total maximum likelihood of the full instantiation counts
in distss.
"""
res = 0.0
for dists in distss:
res += sum([ml(d) for d in dists])
return res
###############################
####### BOUND FUNCTIONS #######
###############################
def diffa(dist, alpha, r):
"""
Compute the derivative of local-local BDeu score.
"""
res = 0.0
for n in dist:
for i in range(n):
res += 1.0/(i*r+alpha)
for i in range(sum(dist)):
res -= 1.0/(i+alpha)
return res
def g(dist, aq):
"""
    Compute function g (Lemma 5) for a given full parent instantiation.
Parameters
----------
dists: list ints
Counts of the child variable for a given full parent instantiation.
aq: float
Equivalent sample size divided by the product of parents arities.
"""
res = log(2*min(dist)/aq + 1)
for d in dist:
        res -= log(2*d/aq + 1)
return res
def h(dist, alpha, r):
"""
Computes function h (Lemma 8).
"""
res = -lg(sum(dist), alpha)
alphar = alpha/r
for n in dist:
if n > 0:
res += lg(n, alphar)
return res
def ubh_js(dists, alpha, r, counters=None):
"""
    Compute bound h for a single instantiation js of the parent set S.
See Theorem 3 for the definition of the bound.
Parameters
----------
dists: list of lists
Counts of the child variable for each full parent instantiation j
that is compatible with js.
alpha: float
The equivalent sample size (ESS).
r: int
Arity (number of possible values) of the child variable.
counters: dict (optional)
Dictionary used to store the number of times each function
(ml, f + g, h) was the minimum in the last part of the equation in
Theorem 3.
Returns
-------
    Upper bound h for a given instantiation of parent set S.
"""
is_g, is_h, is_ml = 0, 0, 1
mls = 0.0
best_diff = 0.0
for dist in dists:
ml_j = ml(dist)
mls += ml_j
ubg_plus_f = -len(dist)*log(r) + g(dist, alpha)
iffirst_ub = min(ubg_plus_f, ml_j)
ubh = 0
if not onepositive(dist) and diffa(dist, alpha, r) >= 0 and alpha <= 1:
ubh = h(dist, alpha/2, r)
iffirst_ub = min(iffirst_ub, ubh)
diff = iffirst_ub - ml_j
if diff < best_diff:
best_diff = diff
is_g, is_h, is_ml = 0, 0, 0
if iffirst_ub == ubg_plus_f:
is_g = 1
if iffirst_ub == ubh:
is_h = 1
if counters is not None:
counters['inner_ml'] += is_ml
counters['inner_g'] += is_g
counters['inner_h'] += is_h
counters['inner_total'] += 1
return best_diff + mls
###############################
######### DATA CLASS ##########
###############################
class Data:
"""
A dataset of complete discrete data.
This class holds all the information used during the experiments.
"""
def __init__(self, data, name):
""""
Attributes
----------
data: pandas dataframe or path to csv file.
The data to be used for the experiments.
name: str
Name used to save results (usually matching dataset name).
It is assumed that:
1. All values are separated by whitespace
2. Comment lines start with a '#'
3. The first line is a header stating the names of the variables
4. The second line states the arities of the variables
5. All other lines contain the actual data
"""
        if not isinstance(data, pd.DataFrame):
data = pd.read_csv(data,
delim_whitespace=True,
comment='#')
arities = [int(x) for x in data.iloc[0]]
self._name = name
self._data = data[1:]
self._arities = dict(zip(list(self._data), arities))
self._variables = list(data.columns)
self._varidx = {}
# Initialize all the counters to zero
self.counters = {}
self.reset_counters()
for i, v in enumerate(self._variables):
self._varidx[v] = i
self.get_atoms()
def reset_counters(self):
"""
There are a number of counters to keep track of the number of
scores and bounds computed. This function resets them to zero.
"""
# 'min' counters are used to keep track the number of times each of
# bound g and h is the tightest.
self.counters['min_ubg'] = 0
self.counters['min_ubh'] = 0
# 'inner' counters are used inside bound h. See ubh_js function in
# utils.py or Theorem 3 in the paper.
self.counters['inner_ml'] = 0
self.counters['inner_g'] = 0
self.counters['inner_h'] = 0
self.counters['inner_total'] = 0
def upper_bound_f(self, child, posfamilyinsts):
"""
Compute a weak upper bound on supersets of a given parent set.
Parameters
----------
child: int
Index of the child of the family.
posfamilyinsts: int
The number of instantiations of the family which occur at least
once in the data
Returns
-------
Upper bound h (float).
"""
return -posfamilyinsts * log(self._arities[child])
def upper_bound_g(self, child, parents, aq, posfamilyinsts, atoms_for_parents):
"""
Compute an upper bound on supersets of parents
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
aq: float
Equivalent sample size divided by the product of parents arities.
posfamilyinsts: int
The number of instantiations of the family which occur at least
once in the data
atoms_for_parents: dict
            For each instantiation of `parents` (keys in the dictionary), a
            list of lists of counts of the child variable. Each inner list
            corresponds to a full instantiation of all variables (except
            the child) that is compatible with the instantiation of the
            parents in the key. See the atoms_for_parents function.
Returns
-------
Upper bound g (float).
"""
m_final = 0
for dists in atoms_for_parents:
pens = []
# Compute g for each full instantiation.
for dist in dists:
pens.append(g(dist, aq))
m_min = min(pens)
m_final += m_min
if len(pens) > 1:
pens[pens.index(m_min)] = float('inf')
m_final += min(pens)
return -posfamilyinsts*log(self._arities[child]) + m_final
def upper_bound_h(self, child, parents, alpha, atoms_for_parents):
"""
Compute an upper bound on supersets of parents.
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
alpha: float
Equivalent sample size.
atoms_for_parents: dict
            For each instantiation of `parents` (keys in the dictionary), a
            list of lists of counts of the child variable. Each inner list
            corresponds to a full instantiation of all variables (except
            the child) that is compatible with the instantiation of the
            parents in the key. See the atoms_for_parents function.
Returns
-------
Upper bound h (float).
"""
for pa in parents:
alpha /= self._arities[pa]
r = self._arities[child]
this_ub = 0.0
# Compute ubh for each instantiation of parent set S
for dists in atoms_for_parents:
this_ub += ubh_js(dists, alpha, r, self.counters)
return this_ub
def upper_bound_min_min(self, child, parents, aq, counts, atoms_for_parents):
"""
Returns the best (min) of the two bounds (g and h).
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
aq: float
Equivalent sample size divided by the product of parents arities.
counts: pandas series
The counts for each of the full instantiations.
(Only the number of full instantations is actually needed).
atoms_for_parents: dict
            For each instantiation of `parents` (keys in the dictionary), a
            list of lists of counts of the child variable. Each inner list
            corresponds to a full instantiation of all variables (except
            the child) that is compatible with the instantiation of the
            parents in the key. See the atoms_for_parents function.
Returns
-------
Upper bound min(g, h) (float).
"""
r = self._arities[child]
this_ub = 0.0
m_final = 0
for child_counts in atoms_for_parents:
# Upper bound h
this_ub += ubh_js(child_counts, aq, r)
# Upper bound g
pens = []
for cc in child_counts:
                pen = log(2*min(cc)/aq + 1)
                for c in cc:
                    pen -= log(2*c/aq + 1)
pens.append(pen)
m_min = min(pens)
m_final += m_min
if len(pens) > 1:
pens[pens.index(m_min)] = float('inf')
m_final += min(pens)
ubg = -len(counts)*log(self._arities[child]) + m_final
if this_ub < ubg:
self.counters['min_ubh'] += 1
elif this_ub > ubg:
self.counters['min_ubg'] += 1
else:
self.counters['min_ubh'] += 1
self.counters['min_ubg'] += 1
        return min(this_ub, ubg)
def bdeu_score(self, child, parents, alpha=None, bound=None):
"""
Computes the (local) score of a given child and a parent set.
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
alpha: float
Equivalent sample size.
Returns
-------
A tuple (score, ubs) where
- score is the BDeu score of a particular child and parent set
- ubs is a dictionary of mapping the names of upper bounds to
upper bounds on the BDeu scores of supersets of the parent set.
"""
if alpha is None:
alpha = 1.0
warnings.warn('ESS (alpha) not defined. Defaulting to alpha=1.0.')
aq = alpha
for parent in parents:
aq /= self._arities[parent]
aqr = aq / self._arities[child]
counts = self._data.groupby(list(parents)+[child], sort=True).size()
posfamilyinsts = len(counts)
bdeu_score = 0.0
if len(parents) == 0:
nij = 0
for nijk in counts:
bdeu_score += lg(nijk,aqr)
nij += nijk
bdeu_score -= lg(nij,aq)
else:
cnt = Counter()
            for idx, nijk in counts.items():
cnt[idx[:-1]] += nijk
bdeu_score += lg(nijk,aqr)
for nij in cnt.values():
bdeu_score -= lg(nij,aq)
atoms_for_parents = self.atoms_for_parents(child, parents).values()
if bound == 'f':
bounds = {'f': self.upper_bound_f(child, posfamilyinsts)}
elif bound == 'g':
bounds = {'g': self.upper_bound_g(child, parents, aq, posfamilyinsts, atoms_for_parents)}
elif bound == 'h':
bounds = {'h': self.upper_bound_h(child, parents, alpha, atoms_for_parents)}
elif bound == 'min':
bounds = {'min': self.upper_bound_min_min(child, parents, aq, counts, atoms_for_parents)}
elif bound == 'all':
bounds = {'f': self.upper_bound_f(child, posfamilyinsts),
'g': self.upper_bound_g(child, parents, aq, posfamilyinsts, atoms_for_parents),
'h': self.upper_bound_h(child, parents, alpha, atoms_for_parents),
'min': self.upper_bound_min_min(child, parents, aq, counts, atoms_for_parents)}
elif bound is None:
return bdeu_score
return bdeu_score, bounds
def pen_ll_score(self, child, parents, pen_type):
"""
        Returns the penalised log-likelihood (AIC/BIC) score of a particular child and parent set.
Parameters
----------
child: int
Index of the child of the family.
parents: list
The parents of the family (an iterable of indices)
pen_type: str or float
Either a type of score ('BIC' or 'AIC') or a penalisation
coefficient.
"""
counts = self._data.groupby(list(parents)+[child],sort=True).size()
posfamilyinsts = len(counts)
LL = 0
if len(parents) == 0:
nij = counts.sum()
for nijk in counts:
LL += nijk*np.log(nijk/nij)
pen = (self._arities[child] -1)
else:
cnt = Counter()
# Compute nij for each parent configuration
            for idx, nijk in counts.items():
                cnt[idx[:-1]] += nijk
            # Compute the loglikelihood
            for idx, nijk in counts.items():
LL += nijk*np.log(nijk/cnt[idx[:-1]])
# Compute the penalization for AIC
pen = 1
for parent in parents:
pen *= self._arities[parent]
pen *= self._arities[child] -1
if pen_type == 'AIC':
score = LL - pen
elif pen_type == 'BIC':
pen *= 0.5*np.log(counts.sum())
score = LL - pen
elif isinstance(pen_type, (int, float)):
score = LL - pen_type*pen
else:
            raise ValueError(f'{pen_type} is not supported yet. Please use BIC or AIC.')
return score
def all_bdeu_scores(self, alpha=None, palim=None, bound=None, filepath=None):
"""
Exhaustively compute all BDeu scores and upper bounds for all families
up to `palim`
Parameters
----------
child: int
Index of the child of the family.
alpha: float
Equivalent sample size.
palim: int
The maximum number of parents.
bound: str
The bound to compute. Either 'f', 'g', 'h', 'min'.
If bound == 'all' computes all bounds.
filepath: str
Path to file where to save the scores. If left to None, the scores
are not saved.
Returns
-------
score_dict: dict
A dictionary dkt where dkt[child][parents] = bdeu_score
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
if alpha is None:
alpha = 1.0
warnings.warn('ESS (alpha) not defined. Defaulting to alpha=1.0.')
score_dict = {}
for i, child in enumerate(self._variables):
potential_parents = frozenset(self._variables[:i]+self._variables[i+1:])
child_dkt = {}
for pasize in range(palim+1):
for parents in combinations(potential_parents,pasize):
child_dkt[frozenset(parents)] = self.bdeu_score(child,parents,alpha,bound=bound)
score_dict[child] = child_dkt
if filepath is not None:
self.write_scores(filepath, score_dict)
return score_dict
def all_pen_ll_scores(self, score_type, filepath=None, palim=None):
"""
        Exhaustively compute all penalised log-likelihood scores for all
        families up to `palim`
Parameters
----------
score_type: str or float
Either a type of score ('BIC' or 'AIC') or a penalisation
coefficient.
filepath: str
Path to file where to save the scores. If left to None, the scores
are not saved.
palim: int
Maximum number of parents.
Returns
-------
score_dict: dict
A dictionary dkt where dkt[child][parents] = bdeu_score
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
score_dict = {}
for i, child in enumerate(self._variables):
potential_parents = frozenset(self._variables[:i]+self._variables[i+1:])
child_dkt = {}
for pasize in range(palim+1):
for parents in combinations(potential_parents,pasize):
child_dkt[frozenset(parents)] = self.pen_ll_score(child, parents, score_type)
score_dict[child] = child_dkt
if filepath is not None:
self.write_scores(filepath, score_dict)
return score_dict
def write_scores(self, filepath, score_dict):
"""
Saves a dictionary of scores to filepath.
See all_pen_ll_scores or all_bdeu_scores.
"""
score_info = '{}\n'.format(len(self._variables))
for child, parents in score_dict.items():
score_info += child + ' {}\n'.format(len(score_dict[child]))
for parent, score in parents.items():
score_info += str(score) + ' ' + str(len(parent)) + ' ' + ' '.join(parent) + '\n'
with open(filepath, 'w') as w:
w.write(score_info)
def atoms_for_parents(self, child, parents):
"""
Return a dictionary whose keys are instantiations of `parents`
with positive counts in the data and whose values are lists of lists
of child counts.
Parameters
----------
child: int
The (index of) child variable.
parents: list of ints
The list of indices of the parent variables.
Returns
-------
dkt: dict
            [parents instantiations] = [[child counts full instantiation 1],
                                        [child counts full instantiation 2],
                                        ...
                                        [child counts full instantiation n]]
Example
-------
        If dkt is the returned dictionary and dkt[0,1,0] = [[1,2], [0,4]],
        then there are 3 variables in `parents` and there are 2 full parent
        instantiations for the instantiation (0,1,0): one with child counts
        [1,2] and one with child counts [0,4].
        A full instantiation means that all variables (but the child) have
        a value assigned to them. The full instantiations in each key are
        the ones compatible with the corresponding instantiation of `parents`
        in that key. In the example, if we have 4 variables (plus the child),
        that means there are two possible instantiations of the 4th variable:
        one where the child is distributed as [1, 2], and another where it is
        distributed as [0, 4]. The 4th variable might have more than 2
        states, but those are not observed (zero counts) in this example.
"""
# Get indices of parents in vector of all possible parents for child
child_idx = self._varidx[child]
parentset = frozenset(parents)
pa_idxs = []
for i, parent in enumerate(self._variables[:child_idx]+self._variables[child_idx+1:]):
if parent in parentset:
pa_idxs.append(i)
# As we open the tree of variables following the index order, we only
        # look at full instantiations of parents and variables of higher index.
upper_pa_idxs = list(range(max(pa_idxs + [-1]) + 1, len(self._variables)-1))
upper_dkt = {}
for fullinst, childcounts in self._atoms[child].items():
inst = tuple([fullinst[i] for i in pa_idxs + upper_pa_idxs])
try:
upper_dkt[inst] = list(np.array(upper_dkt[inst]) + np.array(childcounts))
except KeyError:
upper_dkt[inst] = childcounts
# The counts for instantations that differ only on variables of lower
# index can be safely summed to improve the bounds.
dkt = {}
posfamilyinsts = 0
for fullinst, childcounts in upper_dkt.items():
inst = tuple([fullinst[i] for i in range(len(pa_idxs))])
# In this step, we have to remove the zero counts!
non_zeros = [x for x in childcounts if x>0]
posfamilyinsts += len(non_zeros)
try:
dkt[inst].append(non_zeros)
except KeyError:
dkt[inst] = [non_zeros]
return dkt
def get_atoms(self):
"""
Compute a dictionary whose keys are child variables and whose values
are dictionaries mapping instantiations of all the other parents to a
list of counts for the child variable for that instantiation.
        Only parent set instantiations with a positive count in the data are
included.
The dictionary is stored as the value of self._atoms
"""
# Create the counts as a pandas DataFrame with a new column 'counts'
counts = pd.DataFrame({'counts' : self._data.groupby(self._variables).size()}).reset_index()
# Save the counts inside a list to facilitate concatenation
listfy = lambda x : [x]
counts['counts'] = counts['counts'].apply(listfy)
dktall = {}
for child in self._variables:
all_but_child = [var for var in self._variables if var != child]
# The sum operation concatenate the lists of counts
# for rows that differ only on the child variable
            # The unstack operation fills in the full instantiations
# which do not have all possible values of child in the data
# so that we can keep the zeros in place
child_counts = counts.groupby(by=self._variables).agg({'counts': 'sum'}).unstack(child, fill_value=[0]).stack().reset_index()
child_counts = child_counts.groupby(by=all_but_child).agg({'counts': 'sum'}).reset_index()
dkt_child = child_counts.set_index(all_but_child).to_dict('index')
for cc in dkt_child:
dkt_child[cc] = dkt_child[cc]['counts']
dktall[child] = dkt_child
self._atoms = dktall
def pruned_bdeu_scores_per_child(self, child, bound, timeout, alpha=1.0, palim=None, verbose=False, save_step=False):
"""
Return a dictionary for the child variable mapping parent sets to
BDeu scores.
        Not all parent sets are included. Only parent sets of cardinality
        at most `palim` can be included. Also, if it can be established that
        a parent set cannot be a parent set for the child in an optimal
        Bayesian network, then it is not included.
        Also, outputs a pandas DataFrame with the number of scores computed.
        The DataFrame is saved to disk after each iteration over palim so as
        not to lose results if the process is terminated.
Parameters
----------
child: int
The (index of) child variable.
bound: str
The type of bound to use.
timeout: int
The maximum amount of time the function has to run (secs).
alpha: float
The effective sample size (prior parameter).
palim: int
The maximum number of parents.
verbose: boolean
Whether messages on progress should be printed.
save_step: boolean
Whether to save a csv per child
"""
if palim is None:
palim = 3
warnings.warn('Maximum number of parents (palim) not defined. Defaulting to palim=3.')
if bound == 'h':
scores_per_palim = pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'inner_ml', 'inner_g', 'inner_h', 'inner_total', 'best_pa'])
elif bound == 'min':
scores_per_palim = pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'min_ubg', 'min_ubh', 'best_pa'])
else:
scores_per_palim = pd.DataFrame(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' + bound, 'inf_n_scores', 'best_pa'])
p = len(self._variables)
palim = min(palim, p-1)
child_idx = self._variables.index(child)
n_scores = 1 # number of times we calculate the score
unnecessary_scores = 0 # counts how many scores were unnecessary
all_scores = 1 # all combinations of parents
self.reset_counters()
score, ubdkt = self.bdeu_score(child, [], alpha, bound=bound)
ub = min(ubdkt.values()) # take the best bound
child_dkt = {(): score}
previous_layer = {(): (score, True) }
best_score = score
best_pa = frozenset()
if not os.path.exists(self._name):
os.makedirs(self._name)
        # `timeout` is assumed to be given in seconds
start = time.time()
for pasize in range(palim):
new_layer = {}
all_scores += nCr(len(self._variables)-1, pasize+1) # all combinations with pasize+1 parents
for oldpa in previous_layer:
last_idx = -1 if oldpa == () else self._varidx[oldpa[-1]]
for newpa_idx in range(last_idx+1, p):
# We check the time in the innermost loop
end = time.time()
if end - start > timeout:
elapsed_time = end - start
print('End of time! Last palim: ', pasize)
scores_per_palim.to_csv(self._name + '/' + child + '_' + bound + '.csv', index=False)
return child_dkt, scores_per_palim
if newpa_idx == child_idx:
continue
parents = oldpa + (self._variables[newpa_idx], )
                    # Best score among already-scored subsets of `parents`.
                    # (If the layers were complete, this linear scan could be
                    # replaced by looking up the len(parents) immediate
                    # subsets in previous_layer.)
                    bss = None
                    parents_set = set(parents)
                    for (parenttmp, scoretmp) in child_dkt.items():
                        # proper-subset test; a plain tuple comparison would
                        # be lexicographic, not a subset check
                        if set(parenttmp) < parents_set:
                            bss = scoretmp if bss is None else max(bss, scoretmp)
if bss is None:
# some subset is exponentially pruned, so don't score
# or: best we can hope for 'parents' is worse than some existing subset
# of parents
if verbose:
                            print('Prune!', child, parents, bss, ub)
continue
score, ubdkt = self.bdeu_score(child, parents, alpha, bound=bound)
if score > best_score:
best_pa = parents
best_score = score
ub = min(ubdkt.values()) # take the best bound
n_scores = n_scores + 1
# if the previous layer had a better score
# the last computation was unnecessary
if (score <= bss):
unnecessary_scores += 1
if bss >= ub and bss >= score:
if verbose:
                            print('Prune!', child, parents, bss, ub)
continue
new_layer[parents] = (max(score, bss), bss < score)
child_dkt.update({parents:val[0] for (parents, val) in new_layer.items() if val[1]})
previous_layer = new_layer
elapsed_time = time.time() - start
            row = {'child': child,
                   'palim': pasize+1,
                   'alpha': alpha,
                   'all_scores': all_scores,
                   'n_scores_' + bound: n_scores,
                   'time_' + bound: elapsed_time,
                   'inf_n_scores': n_scores - unnecessary_scores,
                   'best_pa': best_pa}
            if bound == 'h':
                row.update({'inner_ml': self.counters['inner_ml'],
                            'inner_g': self.counters['inner_g'],
                            'inner_h': self.counters['inner_h'],
                            'inner_total': self.counters['inner_total']})
            elif bound == 'min':
                row.update({'min_ubg': self.counters['min_ubg'],
                            'min_ubh': self.counters['min_ubh']})
            # DataFrame.append was removed in pandas 2.0; concat is equivalent here
            scores_per_palim = pd.concat([scores_per_palim, pd.DataFrame([row])],
                                         ignore_index=True)
if save_step:
scores_per_palim.to_csv(self._name + '/' + child + '_' + bound + '.csv', index=False)
return child_dkt, scores_per_palim
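# A minimal usage sketch of the Data class, added for illustration (not part
# of the original module). The frame mimics the expected file layout: column
# names as header, a first row of arities, then the data rows.
def _demo_data_scores():
    raw = pd.DataFrame({'A': [2, 0, 1, 1, 0, 1],
                        'B': [2, 0, 1, 0, 1, 1],
                        'C': [2, 0, 1, 1, 1, 0]})
    d = Data(raw, name='demo')
    # local BDeu score of C with parent set {A} and ESS alpha = 1.0
    return d.bdeu_score('C', ['A'], alpha=1.0)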
| [
"pandas.DataFrame",
"os.makedirs",
"numpy.log",
"pandas.read_csv",
"collections.Counter",
"os.path.exists",
"time.time",
"itertools.combinations",
"numpy.array",
"warnings.warn",
"math.log",
"math.lgamma"
] | [((562, 575), 'math.lgamma', 'lgamma', (['(n + x)'], {}), '(n + x)\n', (568, 575), False, 'from math import lgamma, log, factorial\n'), ((576, 585), 'math.lgamma', 'lgamma', (['x'], {}), '(x)\n', (582, 585), False, 'from math import lgamma, log, factorial\n'), ((27521, 27532), 'time.time', 'time.time', ([], {}), '()\n', (27530, 27532), False, 'import time\n'), ((2084, 2103), 'math.log', 'log', (['(2 * d / aq + 1)'], {}), '(2 * d / aq + 1)\n', (2087, 2103), False, 'from math import lgamma, log, factorial\n'), ((5031, 5084), 'pandas.read_csv', 'pd.read_csv', (['data'], {'delim_whitespace': '(True)', 'comment': '"""#"""'}), "(data, delim_whitespace=True, comment='#')\n", (5042, 5084), True, 'import pandas as pd\n'), ((6792, 6817), 'math.log', 'log', (['self._arities[child]'], {}), '(self._arities[child])\n', (6795, 6817), False, 'from math import lgamma, log, factorial\n'), ((12610, 12676), 'warnings.warn', 'warnings.warn', (['"""ESS (alpha) not defined. Defaulting to alpha=1.0."""'], {}), "('ESS (alpha) not defined. Defaulting to alpha=1.0.')\n", (12623, 12676), False, 'import warnings\n'), ((13170, 13179), 'collections.Counter', 'Counter', ([], {}), '()\n', (13177, 13179), False, 'from collections import Counter\n'), ((15228, 15237), 'collections.Counter', 'Counter', ([], {}), '()\n', (15235, 15237), False, 'from collections import Counter\n'), ((16962, 17053), 'warnings.warn', 'warnings.warn', (['"""Maximum number of parents (palim) not defined. Defaulting to palim=3."""'], {}), "(\n 'Maximum number of parents (palim) not defined. Defaulting to palim=3.')\n", (16975, 17053), False, 'import warnings\n'), ((17111, 17177), 'warnings.warn', 'warnings.warn', (['"""ESS (alpha) not defined. Defaulting to alpha=1.0."""'], {}), "('ESS (alpha) not defined. Defaulting to alpha=1.0.')\n", (17124, 17177), False, 'import warnings\n'), ((18473, 18564), 'warnings.warn', 'warnings.warn', (['"""Maximum number of parents (palim) not defined. Defaulting to palim=3."""'], {}), "(\n 'Maximum number of parents (palim) not defined. Defaulting to palim=3.')\n", (18486, 18564), False, 'import warnings\n'), ((26074, 26165), 'warnings.warn', 'warnings.warn', (['"""Maximum number of parents (palim) not defined. Defaulting to palim=3."""'], {}), "(\n 'Maximum number of parents (palim) not defined. 
Defaulting to palim=3.')\n", (26087, 26165), False, 'import warnings\n'), ((26217, 26402), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' +\n bound, 'inf_n_scores', 'inner_ml', 'inner_g', 'inner_h', 'inner_total',\n 'best_pa']"}), "(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' +\n bound, 'time_' + bound, 'inf_n_scores', 'inner_ml', 'inner_g',\n 'inner_h', 'inner_total', 'best_pa'])\n", (26229, 26402), True, 'import pandas as pd\n'), ((27395, 27421), 'os.path.exists', 'os.path.exists', (['self._name'], {}), '(self._name)\n', (27409, 27421), False, 'import os\n'), ((27435, 27458), 'os.makedirs', 'os.makedirs', (['self._name'], {}), '(self._name)\n', (27446, 27458), False, 'import os\n'), ((1030, 1042), 'math.log', 'log', (['(n / tot)'], {}), '(n / tot)\n', (1033, 1042), False, 'from math import lgamma, log, factorial\n'), ((3332, 3338), 'math.log', 'log', (['r'], {}), '(r)\n', (3335, 3338), False, 'from math import lgamma, log, factorial\n'), ((8360, 8385), 'math.log', 'log', (['self._arities[child]'], {}), '(self._arities[child])\n', (8363, 8385), False, 'from math import lgamma, log, factorial\n'), ((11443, 11468), 'math.log', 'log', (['self._arities[child]'], {}), '(self._arities[child])\n', (11446, 11468), False, 'from math import lgamma, log, factorial\n'), ((17439, 17478), 'itertools.combinations', 'combinations', (['potential_parents', 'pasize'], {}), '(potential_parents, pasize)\n', (17451, 17478), False, 'from itertools import combinations\n'), ((18821, 18860), 'itertools.combinations', 'combinations', (['potential_parents', 'pasize'], {}), '(potential_parents, pasize)\n', (18833, 18860), False, 'from itertools import combinations\n'), ((26455, 26609), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' +\n bound, 'inf_n_scores', 'min_ubg', 'min_ubh', 'best_pa']"}), "(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' +\n bound, 'time_' + bound, 'inf_n_scores', 'min_ubg', 'min_ubh', 'best_pa'])\n", (26467, 26609), True, 'import pandas as pd\n'), ((26651, 26783), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['child', 'palim', 'alpha', 'all_scores', 'n_scores_' + bound, 'time_' +\n bound, 'inf_n_scores', 'best_pa']"}), "(columns=['child', 'palim', 'alpha', 'all_scores', 'n_scores_' +\n bound, 'time_' + bound, 'inf_n_scores', 'best_pa'])\n", (26663, 26783), True, 'import pandas as pd\n'), ((30848, 30859), 'time.time', 'time.time', ([], {}), '()\n', (30857, 30859), False, 'import time\n'), ((11758, 11783), 'math.log', 'log', (['self._arities[child]'], {}), '(self._arities[child])\n', (11761, 11783), False, 'from math import lgamma, log, factorial\n'), ((15135, 15153), 'numpy.log', 'np.log', (['(nijk / nij)'], {}), '(nijk / nij)\n', (15141, 15153), True, 'import numpy as np\n'), ((15497, 15525), 'numpy.log', 'np.log', (['(nijk / cnt[idx[:-1]])'], {}), '(nijk / cnt[idx[:-1]])\n', (15503, 15525), True, 'import numpy as np\n'), ((27961, 27972), 'time.time', 'time.time', ([], {}), '()\n', (27970, 27972), False, 'import time\n'), ((11185, 11204), 'math.log', 'log', (['(2 * c / aq + 1)'], {}), '(2 * c / aq + 1)\n', (11188, 11204), False, 'from math import lgamma, log, factorial\n'), ((22288, 22313), 'numpy.array', 'np.array', (['upper_dkt[inst]'], {}), '(upper_dkt[inst])\n', (22296, 22313), True, 'import numpy as np\n'), ((22316, 22337), 'numpy.array', 'np.array', (['childcounts'], {}), '(childcounts)\n', 
(22324, 22337), True, 'import numpy as np\n')] |
import numpy as np
class Dense:
def __init__(self, size, activation=None, init_method=None):
"""
size: Tuple holding number of neurons in the input and the output
activation: Activation function
            -'elu': Exponential Linear Unit function
            -'relu': Rectified Linear Unit function
            -'sigmoid': logistic sigmoid function
            -None: linear (identity) activation
        init_method: method for initializing the weights
            -'He' initialization
            -'Xavier' (Glorot) initialization
"""
self.size = size
self.in_size = size[0]
self.out_size = size[1]
self.__init_weights(init_method)
self.Z = None
self.A = None
if activation is None:
self.nonlin = lambda x: x
self.nonlin_deriv = lambda x: np.ones(x.shape)
elif activation == 'elu':
self.nonlin = Dense.elu
self.nonlin_deriv = Dense.dElu
elif activation == 'relu':
self.nonlin = Dense.relu
self.nonlin_deriv = Dense.dRelu
elif activation == 'sigmoid':
self.nonlin = Dense.sigmoid
self.nonlin_deriv = Dense.dSigmoid
def __init_weights(self, init_method):
if init_method == 'He':
            # He initialization scales by sqrt(2 / fan_in)
            self.W = np.random.randn(self.size[0], self.size[1]) * np.sqrt(2/self.size[0])
self.b = np.zeros((1, self.out_size))
elif init_method == 'Xavier':
self.W = np.random.randn(self.size[0], self.size[1]) * np.sqrt(2/(self.size[0] + self.size[1]))
self.b = np.zeros((1, self.out_size))
else:
self.W = np.random.normal(0.0, 0.1, size=self.size)
self.b = np.random.normal(0.0, 0.1, size=(1, self.out_size))
def forward(self, X):
self.X = X
self.Z = self.X.dot(self.W) + self.b
self.A = self.nonlin(self.Z)
return self.A
def backward(self, dA):
m = self.X.shape[0]
dZ = dA * self.nonlin_deriv(self.Z)
dW = (1/m) * self.X.T.dot(dZ)
db = (1/m) * np.sum(dZ, axis=0, keepdims=True)
dX = dZ.dot(self.W.T)
self.dZ = dZ
self.dW = dW
self.db = db
return dX
def update(self, lr):
self.W += lr * self.dW
self.b += lr * self.db
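    # A hypothetical usage sketch (not part of the original class): one
    # forward/backward pass and a parameter step on random data. Note that
    # update() *adds* lr * dW, so it assumes the `dA` passed to backward()
    # is the negative gradient of the loss.
    #
    #   layer = Dense((3, 2), activation='sigmoid', init_method='Xavier')
    #   A = layer.forward(np.random.randn(5, 3))   # activations, shape (5, 2)
    #   dX = layer.backward(np.ones_like(A))       # stand-in upstream gradient
    #   layer.update(lr=0.1)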
@staticmethod
def sigmoid(X):
return 1/(1+np.exp(-X))
@staticmethod
def dSigmoid(Z):
s = 1/(1+np.exp(-Z))
dZ = s * (1-s)
return dZ
@staticmethod
def relu(Z):
return np.maximum(0,Z)
@staticmethod
def dRelu(x):
        # build the derivative mask on a fresh array so the cached
        # pre-activations in self.Z are not mutated in place
        return (x > 0).astype(float)
@staticmethod
def elu(x, alpha = 0.01):
        return np.where(x>=0, x, alpha*(np.exp(x)-1))
@staticmethod
def dElu(x, alpha = 0.01):
        return np.where(x>0, 1, alpha*np.exp(x))
| [
"numpy.maximum",
"numpy.sum",
"numpy.random.randn",
"numpy.zeros",
"numpy.ones",
"numpy.exp",
"numpy.random.normal",
"numpy.sqrt"
] | [((2563, 2579), 'numpy.maximum', 'np.maximum', (['(0)', 'Z'], {}), '(0, Z)\n', (2573, 2579), True, 'import numpy as np\n'), ((1373, 1401), 'numpy.zeros', 'np.zeros', (['(1, self.out_size)'], {}), '((1, self.out_size))\n', (1381, 1401), True, 'import numpy as np\n'), ((2081, 2114), 'numpy.sum', 'np.sum', (['dZ'], {'axis': '(0)', 'keepdims': '(True)'}), '(dZ, axis=0, keepdims=True)\n', (2087, 2114), True, 'import numpy as np\n'), ((806, 822), 'numpy.ones', 'np.ones', (['x.shape'], {}), '(x.shape)\n', (813, 822), True, 'import numpy as np\n'), ((1282, 1325), 'numpy.random.randn', 'np.random.randn', (['self.size[0]', 'self.size[1]'], {}), '(self.size[0], self.size[1])\n', (1297, 1325), True, 'import numpy as np\n'), ((1328, 1353), 'numpy.sqrt', 'np.sqrt', (['(1 / self.size[0])'], {}), '(1 / self.size[0])\n', (1335, 1353), True, 'import numpy as np\n'), ((1570, 1598), 'numpy.zeros', 'np.zeros', (['(1, self.out_size)'], {}), '((1, self.out_size))\n', (1578, 1598), True, 'import numpy as np\n'), ((1635, 1677), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)'], {'size': 'self.size'}), '(0.0, 0.1, size=self.size)\n', (1651, 1677), True, 'import numpy as np\n'), ((1699, 1750), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)'], {'size': '(1, self.out_size)'}), '(0.0, 0.1, size=(1, self.out_size))\n', (1715, 1750), True, 'import numpy as np\n'), ((2378, 2388), 'numpy.exp', 'np.exp', (['(-X)'], {}), '(-X)\n', (2384, 2388), True, 'import numpy as np\n'), ((2447, 2457), 'numpy.exp', 'np.exp', (['(-Z)'], {}), '(-Z)\n', (2453, 2457), True, 'import numpy as np\n'), ((2869, 2878), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2875, 2878), True, 'import numpy as np\n'), ((1462, 1505), 'numpy.random.randn', 'np.random.randn', (['self.size[0]', 'self.size[1]'], {}), '(self.size[0], self.size[1])\n', (1477, 1505), True, 'import numpy as np\n'), ((1508, 1550), 'numpy.sqrt', 'np.sqrt', (['(2 / (self.size[0] + self.size[1]))'], {}), '(2 / (self.size[0] + self.size[1]))\n', (1515, 1550), True, 'import numpy as np\n'), ((2764, 2773), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2770, 2773), True, 'import numpy as np\n')] |
import streamlit as st
import numpy as np
import pandas as pd
import math
st.title("Graphs")
def square(x):
return x**2
def exp_dec(x):
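    # exponentially damped cosine: e^(-x/8) * cos(x)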
return math.e ** (x/-8) * math.cos(x)
x = []
for loop in np.arange(-2, 2.1, .1):
x.append(loop)
y = [square(v) for v in x]
df = pd.DataFrame(zip(x, y), columns=['x', 'y'])
df = df.set_index('x')
st.line_chart(df)
x = []
for loop in np.arange(-2*math.pi, 2*math.pi, .1):
x.append(loop)
y = [math.sin(v) for v in x]
df = pd.DataFrame(zip(x, y), columns=['x', 'y'])
df = df.set_index('x')
st.line_chart(df)
x = []
for loop in np.arange(0, 10, .1):
x.append(loop)
y = [exp_dec(v) for v in x]
df = pd.DataFrame(zip(x, y), columns=['x', 'y'])
df = df.set_index('x')
st.line_chart(df)
| [
"streamlit.line_chart",
"math.sin",
"streamlit.title",
"numpy.arange",
"math.cos"
] | [((75, 93), 'streamlit.title', 'st.title', (['"""Graphs"""'], {}), "('Graphs')\n", (83, 93), True, 'import streamlit as st\n'), ((205, 228), 'numpy.arange', 'np.arange', (['(-2)', '(2.1)', '(0.1)'], {}), '(-2, 2.1, 0.1)\n', (214, 228), True, 'import numpy as np\n'), ((347, 364), 'streamlit.line_chart', 'st.line_chart', (['df'], {}), '(df)\n', (360, 364), True, 'import streamlit as st\n'), ((385, 426), 'numpy.arange', 'np.arange', (['(-2 * math.pi)', '(2 * math.pi)', '(0.1)'], {}), '(-2 * math.pi, 2 * math.pi, 0.1)\n', (394, 426), True, 'import numpy as np\n'), ((543, 560), 'streamlit.line_chart', 'st.line_chart', (['df'], {}), '(df)\n', (556, 560), True, 'import streamlit as st\n'), ((581, 602), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {}), '(0, 10, 0.1)\n', (590, 602), True, 'import numpy as np\n'), ((722, 739), 'streamlit.line_chart', 'st.line_chart', (['df'], {}), '(df)\n', (735, 739), True, 'import streamlit as st\n'), ((447, 458), 'math.sin', 'math.sin', (['v'], {}), '(v)\n', (455, 458), False, 'import math\n'), ((173, 184), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (181, 184), False, 'import math\n')] |
import os
import re
import csv
import numpy as np
from itertools import groupby
from copy import deepcopy
def numpy_kl_div(p, q):
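    # KL(p || q) = sum_i p_i * log(p_i / q_i), with the 0 * log(0) terms
    # treated as 0 via the np.where mask; assumes q > 0 wherever p > 0.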
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def flatten(nested_elems):
return [elem for elems in nested_elems for elem in elems]
def stratify(data, classes, ratios, one_hot=False):
"""Stratifying procedure. Borrowed from https://vict0rs.ch/2018/05/24/sample-multilabel-dataset/
data is a list of lists: a list of labels, for each sample.
Each sample's labels should be ints, if they are one-hot encoded, use one_hot=True
classes is the list of classes each label can take
ratios is a list, summing to 1, of how the dataset should be split
"""
# one-hot decoding
if one_hot:
temp = [[] for _ in range(len(data))]
indexes, values = np.where(np.array(data).astype(int) == 1)
for k, v in zip(indexes, values):
temp[k].append(v)
data = temp
# Organize data per label: for each label l, per_label_data[l] contains the list of samples
# in data which have this label
per_label_data = {c: set() for c in classes}
for i, d in enumerate(data):
for l in d:
per_label_data[l].add(i)
# number of samples
size = len(data)
# In order not to compute lengths each time, they are tracked here.
subset_sizes = [r * size for r in ratios]
target_subset_sizes = deepcopy(subset_sizes)
per_label_subset_sizes = {
c: [r * len(per_label_data[c]) for r in ratios]
for c in classes
}
# For each subset we want, the set of sample-ids which should end up in it
stratified_data_ids = [set() for _ in range(len(ratios))]
# For each sample in the data set
while size > 0:
# Compute |Di|
lengths = {
l: len(label_data)
for l, label_data in per_label_data.items()
}
try:
# Find label of smallest |Di|
label = min(
{k: v for k, v in lengths.items() if v > 0}, key=lengths.get
)
except ValueError:
# If the dictionary in `min` is empty we get a Value Error.
# This can happen if there are unlabeled samples.
# In this case, `size` would be > 0 but only samples without label would remain.
# "No label" could be a class in itself: it's up to you to format your data accordingly.
break
current_length = lengths[label]
# For each sample with label `label`
while per_label_data[label]:
# Select such a sample
current_id = per_label_data[label].pop()
subset_sizes_for_label = per_label_subset_sizes[label]
# Find argmax clj i.e. subset in greatest need of the current label
largest_subsets = np.argwhere(
subset_sizes_for_label == np.amax(subset_sizes_for_label)
).flatten()
if len(largest_subsets) == 1:
subset = largest_subsets[0]
# If there is more than one such subset, find the one in greatest need
# of any label
else:
largest_subsets = np.argwhere(
subset_sizes == np.amax(subset_sizes)
).flatten()
if len(largest_subsets) == 1:
subset = largest_subsets[0]
else:
# If there is more than one such subset, choose at random
subset = np.random.choice(largest_subsets)
# Store the sample's id in the selected subset
stratified_data_ids[subset].add(current_id)
# There is one fewer sample to distribute
size -= 1
# The selected subset needs one fewer sample
subset_sizes[subset] -= 1
# In the selected subset, there is one more example for each label
# the current sample has
for l in data[current_id]:
per_label_subset_sizes[l][subset] -= 1
# Remove the sample from the dataset, meaning from all per_label dataset created
for l, label_data in per_label_data.items():
if current_id in label_data:
label_data.remove(current_id)
# Create the stratified dataset as a list of subsets, each containing the orginal labels
stratified_data_ids = [sorted(strat) for strat in stratified_data_ids]
stratified_data = [
[data[i] for i in strat] for strat in stratified_data_ids
]
# Return both the stratified indexes, to be used to sample the `features` associated with your labels
# And the stratified labels dataset
return stratified_data_ids, stratified_data
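# A minimal usage sketch of `stratify`, added for illustration (not part of
# the original module); the toy multilabel data below is an assumption.
def _demo_stratify():
    labels = [[0], [0, 1], [1], [0], [1, 2], [2], [0, 2], [1]]
    ids, strat = stratify(labels, classes=[0, 1, 2], ratios=[0.75, 0.25])
    # `ids` holds the original sample indices for each split; `strat` holds
    # the corresponding label lists
    return ids, strat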
def get_utterance_cmi(utterance_labels, languages={'mixed', 'lang1', 'lang2', 'fw'}):
token_count = len(utterance_labels)
label_counts = {'other': 0}
utterance_CMI = 0.0
for label in utterance_labels:
if label in languages:
label_counts[label] = label_counts.get(label, 0) + 1
else:
# u -> 'ne', 'other', 'ambiguous', 'unk', etc.
label_counts['other'] = label_counts.get('other', 0) + 1
    # note: compare with != (the substring test `key not in 'other'` would
    # misclassify short labels)
    lang_label_counts = [label_counts[key] for key in label_counts if key != 'other']
if lang_label_counts: # if has language labels
max_lang_count = float(max(lang_label_counts))
else:
max_lang_count = 0.0
if token_count > label_counts['other']:
tmp = max_lang_count / float(token_count - label_counts['other']) # max{wi}/n-u
cmi = (1 - tmp) * 100
else:
cmi = 0.0
return cmi
def assert_cmi_implementation():
"""Test cases taken from the CMI paper: https://pdfs.semanticscholar.org/c82c/9ea0073129904738fbc051c06188c02f4f6b.pdf"""
test1_langs = {'hi', 'en'} # 'univ' and 'acro' are treated as 'other'
test1_labels = 'hi hi univ hi hi hi univ en hi en en univ univ hi hi hi hi univ en acro acro acro acro acro acro univ hi en en hi acro acro acro univ en univ en hi hi hi hi hi hi univ en en hi hi hi hi univ hi en hi acro acro en en en en hi hi en en hi hi en en en univ en hi hi hi univ univ univ en hi hi en univ en en en hi hi acro univ hi hi acro acro hi hi en hi univ univ hi hi hi acro acro hi en en acro acro hi univ '.split()
assert 39.19 == round(get_utterance_cmi(test1_labels, test1_langs), 2), 'Test #1 failed'
test2_langs = {'hi', 'en'}
test2_labels = 'hi hi hi univ hi hi univ'.split()
assert 0 == get_utterance_cmi(test2_labels, test2_langs), 'Test #2 failed'
def is_code_mixed(utterance_labels, langs):
lang_in_utterance = [lang for lang in langs if lang in utterance_labels]
return len(lang_in_utterance) >= 2 # at least two languages
def get_dataset_cmi(dataset_labels, langs):
all_sents_count = float(len(dataset_labels))
all_tokens_count = sum([len(sents) for sents in dataset_labels])
cm_sents_count = float(len([labels for labels in dataset_labels if is_code_mixed(labels, langs)]))
CMI = 0.0
for labels in dataset_labels:
CMI += get_utterance_cmi(labels, langs)
CMI_all = round(CMI / all_sents_count, 3)
CMI_cm = round(float(CMI) / cm_sents_count, 3)
stats = {'cmi_all': CMI_all,
'cmi_cm': CMI_cm,
'all_sents': all_sents_count,
'all_tokens': all_tokens_count,
'cm_sents': cm_sents_count}
return stats
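# A minimal usage sketch of the CMI helpers, added for illustration (not part
# of the original module); the toy LID tags below are assumptions.
def _demo_cmi():
    langs = {'lang1', 'lang2'}
    sents = [['lang1', 'lang1', 'lang2', 'ne'],  # code-mixed: CMI = (1 - 2/3)*100
             ['lang1', 'lang1', 'lang1']]        # monolingual: CMI = 0
    per_sent = [get_utterance_cmi(s, langs) for s in sents]
    return per_sent, get_dataset_cmi(sents, langs)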
def get_corpus_cmi(corpus, langs):
    return {split: get_dataset_cmi(corpus[split]['lid'], langs) for split in corpus}
| [
"copy.deepcopy",
"numpy.log",
"numpy.amax",
"numpy.array",
"numpy.random.choice"
] | [((1455, 1477), 'copy.deepcopy', 'deepcopy', (['subset_sizes'], {}), '(subset_sizes)\n', (1463, 1477), False, 'from copy import deepcopy\n'), ((190, 203), 'numpy.log', 'np.log', (['(p / q)'], {}), '(p / q)\n', (196, 203), True, 'import numpy as np\n'), ((3553, 3586), 'numpy.random.choice', 'np.random.choice', (['largest_subsets'], {}), '(largest_subsets)\n', (3569, 3586), True, 'import numpy as np\n'), ((867, 881), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (875, 881), True, 'import numpy as np\n'), ((2926, 2957), 'numpy.amax', 'np.amax', (['subset_sizes_for_label'], {}), '(subset_sizes_for_label)\n', (2933, 2957), True, 'import numpy as np\n'), ((3280, 3301), 'numpy.amax', 'np.amax', (['subset_sizes'], {}), '(subset_sizes)\n', (3287, 3301), True, 'import numpy as np\n')] |
import unittest
import timeout_decorator
from gradescope_utils.autograder_utils.decorators import weight
import numpy as np
from manipulation.utils import LoadDataResource
class TestSegmentationAndGrasp(unittest.TestCase):
def __init__(self, test_name, notebook_locals):
super().__init__(test_name)
self.notebook_locals = notebook_locals
@weight(4)
@timeout_decorator.timeout(10.)
def test_get_merged_masked_pcd(self):
"""Test find_antipodal_pts"""
predictions = self.notebook_locals["predictions"]
cameras = self.notebook_locals["cameras"]
get_merged_masked_pcd = self.notebook_locals["get_merged_masked_pcd"]
rgb_ims = [c.rgb_im for c in cameras]
depth_ims = [c.depth_im for c in cameras]
project_depth_to_pC_funcs = [c.project_depth_to_pC for c in cameras]
X_WCs = [c.X_WC for c in cameras]
pcd_eval = get_merged_masked_pcd(predictions, rgb_ims, depth_ims, project_depth_to_pC_funcs, X_WCs)
pcd_pts_eval = np.asarray(pcd_eval.points)
pcd_colors_eval = np.asarray(pcd_eval.colors)
num_points_eval = pcd_pts_eval.shape[0]
data_target = np.load(
LoadDataResource("segmentation_and_grasp_soln.npz"))
pcd_pts_target = data_target['points']
pcd_colors_target = data_target['colors']
num_points_target = pcd_pts_target.shape[0]
# Allow some deviation in the number of points
self.assertLessEqual(
np.linalg.norm(num_points_target - num_points_eval), 200,
"Wrong number of points returned.")
# Make sure the sizes match
min_num_pts = min(num_points_eval, num_points_target)
self.assertLessEqual(
np.linalg.norm(pcd_pts_target[:min_num_pts,:] - pcd_pts_eval[:min_num_pts,:]), 1e-4,
"Point cloud points are not close enough to the solution values.")
self.assertLessEqual(
np.linalg.norm(pcd_colors_target[:min_num_pts,:] - pcd_colors_eval[:min_num_pts,:]), 1e-4,
"Point cloud colors are not close enough to the solution values.")
| [
"manipulation.utils.LoadDataResource",
"numpy.asarray",
"numpy.linalg.norm",
"gradescope_utils.autograder_utils.decorators.weight",
"timeout_decorator.timeout"
] | [((367, 376), 'gradescope_utils.autograder_utils.decorators.weight', 'weight', (['(4)'], {}), '(4)\n', (373, 376), False, 'from gradescope_utils.autograder_utils.decorators import weight\n'), ((382, 413), 'timeout_decorator.timeout', 'timeout_decorator.timeout', (['(10.0)'], {}), '(10.0)\n', (407, 413), False, 'import timeout_decorator\n'), ((1027, 1054), 'numpy.asarray', 'np.asarray', (['pcd_eval.points'], {}), '(pcd_eval.points)\n', (1037, 1054), True, 'import numpy as np\n'), ((1081, 1108), 'numpy.asarray', 'np.asarray', (['pcd_eval.colors'], {}), '(pcd_eval.colors)\n', (1091, 1108), True, 'import numpy as np\n'), ((1201, 1252), 'manipulation.utils.LoadDataResource', 'LoadDataResource', (['"""segmentation_and_grasp_soln.npz"""'], {}), "('segmentation_and_grasp_soln.npz')\n", (1217, 1252), False, 'from manipulation.utils import LoadDataResource\n'), ((1501, 1552), 'numpy.linalg.norm', 'np.linalg.norm', (['(num_points_target - num_points_eval)'], {}), '(num_points_target - num_points_eval)\n', (1515, 1552), True, 'import numpy as np\n'), ((1749, 1828), 'numpy.linalg.norm', 'np.linalg.norm', (['(pcd_pts_target[:min_num_pts, :] - pcd_pts_eval[:min_num_pts, :])'], {}), '(pcd_pts_target[:min_num_pts, :] - pcd_pts_eval[:min_num_pts, :])\n', (1763, 1828), True, 'import numpy as np\n'), ((1956, 2046), 'numpy.linalg.norm', 'np.linalg.norm', (['(pcd_colors_target[:min_num_pts, :] - pcd_colors_eval[:min_num_pts, :])'], {}), '(pcd_colors_target[:min_num_pts, :] - pcd_colors_eval[:\n min_num_pts, :])\n', (1970, 2046), True, 'import numpy as np\n')] |
import numpy as np
from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN
from .base import CrossSectionBase
class ResponseFunction(object):
"""Redistribution Function based on Crossection model
The response function is the angular average crosssection
"""
def __init__(self, cross_section):
self.cross_section = cross_section
self.xcenters = cross_section.xcenters
# Copy indices from CrossSection Model
self.nonel_idcs = cross_section.nonel_idcs
self.incl_idcs = cross_section.incl_idcs
self.incl_diff_idcs = cross_section.incl_diff_idcs
# Dictionary of reponse function interpolators
self.nonel_intp = {}
self.incl_intp = {}
self.incl_diff_intp = {}
self.incl_diff_intp_integral = {}
self._precompute_interpolators()
# forward is_differential() to CrossSectionBase
# that might break in the future...
def is_differential(self, mother, daughter):
return CrossSectionBase.is_differential(self, mother, daughter)
def get_full(self, mother, daughter, ygrid, xgrid=None):
"""Return the full response function :math:`f(y) + g(y) + h(x,y)`
on the grid that is provided. xgrid is ignored if `h(x,y)` not in the channel.
"""
if xgrid is not None and ygrid.shape != xgrid.shape:
raise Exception('ygrid and xgrid do not have the same shape!!')
if get_AZN(mother)[0] < get_AZN(daughter)[0]:
info(
3,
'WARNING: channel {:} -> {:} with daughter heavier than mother!'
.format(mother, daughter))
res = np.zeros(ygrid.shape)
if (mother, daughter) in self.incl_intp:
res += self.incl_intp[(mother, daughter)](ygrid)
elif (mother, daughter) in self.incl_diff_intp:
#incl_diff_res = self.incl_diff_intp[(mother, daughter)](
# xgrid, ygrid, grid=False)
#if mother == 101:
# incl_diff_res = np.where(xgrid < 0.9, incl_diff_res, 0.)
#res += incl_diff_res
#if not(mother == daughter):
res += self.incl_diff_intp[(mother, daughter)].inteval(xgrid,
ygrid,
grid=False)
if mother == daughter and mother in self.nonel_intp:
# nonel cross section leads to absorption, therefore the minus
if xgrid is None:
res -= self.nonel_intp[mother](ygrid)
else:
diagonal = xgrid == 1.
res[diagonal] -= self.nonel_intp[mother](ygrid[diagonal])
return res
def get_channel(self, mother, daughter=None):
"""Reponse function :math:`f(y)` or :math:`g(y)` as
defined in the note.
Returns :math:`f(y)` or :math:`g(y)` if a daughter
index is provided. If the inclusive channel has a redistribution,
:math:`h(x,y)` will be returned
Args:
mother (int): mother nucleus(on)
daughter (int, optional): daughter nucleus(on)
Returns:
            (numpy.array) Response function on self._ygrid_tab
"""
from scipy import integrate
cs_model = self.cross_section
egrid, cross_section = None, None
if daughter is not None:
if (mother, daughter) in self.incl_diff_idcs:
egrid, cross_section = cs_model.incl_diff(mother, daughter)
elif (mother, daughter) in self.incl_idcs:
egrid, cross_section = cs_model.incl(mother, daughter)
else:
raise Exception(
'Unknown inclusive channel {:} -> {:} for this model'.
format(mother, daughter))
else:
egrid, cross_section = cs_model.nonel(mother)
# note that cumtrapz works also for 2d-arrays and will integrate along axis = 1
integral = integrate.cumtrapz(egrid * cross_section, x=egrid)
ygrid = egrid[1:] / 2.
return ygrid, integral / (2 * ygrid**2)
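    # For reference, get_channel above evaluates (a sketch of the relation
    # implied by the code, with y = eps / 2 on the tabulated energy grid):
    #     f(y) = 1 / (2 * y**2) * cumtrapz(eps * sigma(eps), eps)
    # i.e. the angle-averaged response built from the cross section sigma.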
def get_channel_scale(self, mother, daughter=None, scale='A'):
"""Returns the reponse function scaled by `scale`.
Convenience funtion for plotting, where it is important to
compare the cross section/response function per nucleon.
Args:
mother (int): Mother nucleus(on)
scale (float): If `A` then nonel/A is returned, otherwise
scale can be any float.
Returns:
(numpy.array, numpy.array): Tuple of Energy grid in GeV,
scale * inclusive cross section
in :math:`cm^{-2}`
"""
ygr, cs = self.get_channel(mother, daughter)
if scale == 'A':
scale = 1. / get_AZN(mother)[0]
return ygr, scale * cs
def _precompute_interpolators(self):
"""Interpolate each response function and store interpolators.
Uses :func:`prince_cr.util.get_interp_object` as interpolator.
This might result in too many knots and can be subject to
future optimization.
"""
info(2, 'Computing interpolators for response functions')
info(5, 'Nonelastic response functions f(y)')
self.nonel_intp = {}
for mother in self.nonel_idcs:
self.nonel_intp[mother] = get_interp_object(
*self.get_channel(mother))
info(5, 'Inclusive (boost conserving) response functions g(y)')
self.incl_intp = {}
for mother, daughter in self.incl_idcs:
self.incl_intp[(mother, daughter)] = get_interp_object(
*self.get_channel(mother, daughter))
info(5, 'Inclusive (redistributed) response functions h(y)')
        from scipy.integrate import cumtrapz
        self.incl_diff_intp = {}
        for mother, daughter in self.incl_diff_idcs:
            ygr, rfunc = self.get_channel(mother, daughter)
            self.incl_diff_intp[(mother, daughter)] = get_2Dinterp_object(
                self.xcenters, ygr, rfunc, self.cross_section.xbins)
            integral = cumtrapz(rfunc, ygr, axis=1, initial=0)
            integral = cumtrapz(integral, self.xcenters, axis=0, initial=0)
self.incl_diff_intp_integral[(mother, daughter)] = get_2Dinterp_object(
                self.xcenters, ygr, integral, self.cross_section.xbins)
| [
"prince_cr.util.info",
"scipy.integrate.cumtrapz",
"numpy.zeros",
"prince_cr.util.get_2Dinterp_object",
"prince_cr.util.get_AZN"
] | [((1695, 1716), 'numpy.zeros', 'np.zeros', (['ygrid.shape'], {}), '(ygrid.shape)\n', (1703, 1716), True, 'import numpy as np\n'), ((4060, 4110), 'scipy.integrate.cumtrapz', 'integrate.cumtrapz', (['(egrid * cross_section)'], {'x': 'egrid'}), '(egrid * cross_section, x=egrid)\n', (4078, 4110), False, 'from scipy import integrate\n'), ((5319, 5376), 'prince_cr.util.info', 'info', (['(2)', '"""Computing interpolators for response functions"""'], {}), "(2, 'Computing interpolators for response functions')\n", (5323, 5376), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((5386, 5431), 'prince_cr.util.info', 'info', (['(5)', '"""Nonelastic response functions f(y)"""'], {}), "(5, 'Nonelastic response functions f(y)')\n", (5390, 5431), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((5609, 5672), 'prince_cr.util.info', 'info', (['(5)', '"""Inclusive (boost conserving) response functions g(y)"""'], {}), "(5, 'Inclusive (boost conserving) response functions g(y)')\n", (5613, 5672), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((5879, 5939), 'prince_cr.util.info', 'info', (['(5)', '"""Inclusive (redistributed) response functions h(y)"""'], {}), "(5, 'Inclusive (redistributed) response functions h(y)')\n", (5883, 5939), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((6140, 6212), 'prince_cr.util.get_2Dinterp_object', 'get_2Dinterp_object', (['self.xcenters', 'ygr', 'rfunc', 'self.cross_section.xbins'], {}), '(self.xcenters, ygr, rfunc, self.cross_section.xbins)\n', (6159, 6212), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((6303, 6342), 'scipy.integrate.cumtrapz', 'cumtrapz', (['rfunc', 'ygr'], {'axis': '(1)', 'initial': '(0)'}), '(rfunc, ygr, axis=1, initial=0)\n', (6311, 6342), False, 'from scipy.integrate import cumtrapz\n'), ((6365, 6417), 'scipy.integrate.cumtrapz', 'cumtrapz', (['integral', 'self.xcenters'], {'axis': '(0)', 'initial': '(0)'}), '(integral, self.xcenters, axis=0, initial=0)\n', (6373, 6417), False, 'from scipy.integrate import cumtrapz\n'), ((6481, 6556), 'prince_cr.util.get_2Dinterp_object', 'get_2Dinterp_object', (['self.xcenters', 'ygr', 'integral', 'self.cross_section.xbins'], {}), '(self.xcenters, ygr, integral, self.cross_section.xbins)\n', (6500, 6556), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((1476, 1491), 'prince_cr.util.get_AZN', 'get_AZN', (['mother'], {}), '(mother)\n', (1483, 1491), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((1497, 1514), 'prince_cr.util.get_AZN', 'get_AZN', (['daughter'], {}), '(daughter)\n', (1504, 1514), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n'), ((4967, 4982), 'prince_cr.util.get_AZN', 'get_AZN', (['mother'], {}), '(mother)\n', (4974, 4982), False, 'from prince_cr.util import get_2Dinterp_object, get_interp_object, info, get_AZN\n')] |
import numpy as np
import base64
from enum import IntEnum
import io
from collections import defaultdict, namedtuple
import itertools
from pathlib import Path
import json
from sklearn.metrics import f1_score
from forager_server_api.models import Dataset, DatasetItem, Annotation
import calendar
import time
import math
OUTPUT_FILENAME = "accuracy.txt"
PAUSE_THRESHOLD = 120 # seconds
DATASET_NAME = "waymo"
INDEX_DIR = Path("/home/fpoms/forager/indexes/2d2b13f9-3b30-4e51-8ab9-4e8a03ba1f03")
MODEL_OUTPUTS_PARENT_DIR = Path("~/forager/model_outputs").expanduser().resolve()
EMBEDDING_DIM = 2048
CATEGORY = "zebra crossing"
LOG_FILENAME = "zebra1.log"
VAL_IDENTIFIERS = json.load((INDEX_DIR / "val_identifiers.json").open())
ALL_LABELS = json.load((INDEX_DIR / "labels.json").open())
class LabelValue(IntEnum):
TOMBSTONE = -1
POSITIVE = 1
NEGATIVE = 2
HARD_NEGATIVE = 3
UNSURE = 4
CUSTOM = 5
def base64_to_numpy(nda_base64):
if not nda_base64:
return None
nda_bytes = base64.b64decode(nda_base64)
with io.BytesIO(nda_bytes) as nda_buffer:
nda = np.load(nda_buffer, allow_pickle=False)
return nda
Tag = namedtuple("Tag", "category value")
def nest_anns(anns, nest_category=True, nest_lf=True):
if nest_category and nest_lf:
data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for ann in anns:
# Only use most recent perframe ann
k = ann.dataset_item.pk
c = ann.label_category
u = ann.label_function
data[k][c][u].append(ann)
elif nest_category:
data = defaultdict(lambda: defaultdict(list))
for ann in anns:
# Only use most recent perframe ann
k = ann.dataset_item.pk
c = ann.label_category
data[k][c].append(ann)
elif nest_lf:
data = defaultdict(lambda: defaultdict(list))
for ann in anns:
# Only use most recent perframe ann
k = ann.dataset_item.pk
u = ann.label_function
data[k][u].append(ann)
else:
data = defaultdict(list)
for ann in anns:
# Only use most recent perframe ann
k = ann.dataset_item.pk
data[k].append(ann)
return data
def filter_fn(anns):
filt_anns = []
most_recent = None
for ann in anns:
if ann.label_type == "klabel_frame":
if most_recent is None or ann.created > most_recent.created:
most_recent = ann
else:
filt_anns.append(ann)
if most_recent:
filt_anns.append(most_recent)
return filt_anns
def filter_most_recent_anns(nested_anns):
if len(nested_anns) == 0:
return {}
if isinstance(next(iter(nested_anns.items()))[1], list):
data = defaultdict(list)
for pk, anns in nested_anns.items():
data[pk] = filter_fn(anns)
elif isinstance(next(iter(next(iter(nested_anns.items()))[1].items()))[1], list):
data = defaultdict(lambda: defaultdict(list))
for pk, label_fns_data in nested_anns.items():
for label_fn, anns in label_fns_data.items():
data[pk][label_fn] = filter_fn(anns)
elif isinstance(
next(iter(next(iter(next(iter(nested_anns.items()))[1].items()))[1].items()))[
1
],
list,
):
data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for pk, cat_fns_data in nested_anns.items():
for cat, label_fns_data in cat_fns_data.items():
for label_fn, anns in label_fns_data.items():
data[pk][cat][label_fn] = filter_fn(anns)
return data
def parse_tag_set_from_query_v2(s):
if isinstance(s, list):
parts = s
elif isinstance(s, str) and s:
parts = s.split(",")
else:
parts = []
ts = set()
for part in parts:
if not part:
continue
category, value_str = part.split(":")
ts.add(Tag(category, value_str))
return ts
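# Illustrative sketch only (hypothetical query string, not from a real request):
# the parser accepts either a list or a comma-separated "category:value" string.
_demo_tags = parse_tag_set_from_query_v2(
    "zebra crossing:POSITIVE,zebra crossing:NEGATIVE"
)
assert _demo_tags == {Tag("zebra crossing", "POSITIVE"),
                      Tag("zebra crossing", "NEGATIVE")}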
def tag_sets_to_category_list_v2(*tagsets):
categories = set()
for ts in tagsets:
for tag in ts:
categories.add(tag.category)
return list(categories)
def serialize_tag_set_for_client_v2(ts):
return [{"category": t.category, "value": t.value} for t in sorted(list(ts))]
def get_tags_from_annotations_v2(annotations):
# [image][category][#]
anns = filter_most_recent_anns(nest_anns(annotations, nest_lf=False))
tags_by_pk = defaultdict(list)
for di_pk, anns_by_cat in anns.items():
for cat, ann_list in anns_by_cat.items():
if not cat:
continue
assert len(ann_list) == 1 # should only be one latest per-frame annotation
ann = ann_list[0]
label_data = json.loads(ann.label_data)
value = LabelValue(label_data["value"])
if value == LabelValue.TOMBSTONE:
continue
value_str = (
label_data["custom_value"] if value == LabelValue.CUSTOM else value.name
)
tags_by_pk[di_pk].append(Tag(cat, value_str))
return tags_by_pk
def get_val_examples_v2(dataset):
# Get positive and negative categories
pos_tags = set([Tag(CATEGORY, LabelValue.POSITIVE.name)])
neg_tags = set([Tag(CATEGORY, LabelValue.NEGATIVE.name)])
# Limit to validation set
eligible_dataset_items = DatasetItem.objects.filter(
dataset=dataset,
google=False,
is_val=True,
)
# Get positives and negatives matching these categories
annotations = Annotation.objects.filter(
dataset_item__in=eligible_dataset_items,
label_category__in=tag_sets_to_category_list_v2(pos_tags, neg_tags),
label_type="klabel_frame",
)
tags_by_pk = get_tags_from_annotations_v2(annotations)
pos_dataset_item_pks = []
neg_dataset_item_pks = []
for pk, tags in tags_by_pk.items():
if any(t in pos_tags for t in tags):
pos_dataset_item_pks.append(pk)
elif any(t in neg_tags for t in tags):
neg_dataset_item_pks.append(pk)
return pos_dataset_item_pks, neg_dataset_item_pks
# Get validation labels
dataset = Dataset.objects.get(name=DATASET_NAME)
pos_dataset_item_pks, neg_dataset_item_pks = get_val_examples_v2(dataset)
# Construct paths, identifiers, and labels
dataset_items_by_pk = DatasetItem.objects.in_bulk(
pos_dataset_item_pks + neg_dataset_item_pks
)
identifiers = []
labels = []
for pk, label in itertools.chain(
((pk, True) for pk in pos_dataset_item_pks),
((pk, False) for pk in neg_dataset_item_pks),
):
di = dataset_items_by_pk[pk]
identifiers.append(di.identifier)
labels.append(label)
# Read log file
lines = []
with open(LOG_FILENAME, "r") as f:
for line in f:
parts = line.strip().split(" - ")
timestamp = parts[0]
timestamp = time.strptime(timestamp[: timestamp.find(",")], "%Y-%m-%d %H:%M:%S")
timestamp = calendar.timegm(timestamp)
lines.append([timestamp] + parts[1:])
start_time = lines[0][0]
end_time = lines[-1][0]
def get_svm_accuracy(vector_b64, model, val_identifiers, val_labels):
vector = base64_to_numpy(vector_b64)
# Read embeddings from disk
embeddings = np.memmap(
str(INDEX_DIR / "local" / model / "embeddings.npy"),
dtype="float32",
mode="r",
shape=(len(ALL_LABELS), EMBEDDING_DIM),
)
# Get relevant embeddings
inds = []
for identifier in val_identifiers:
inds.append(VAL_IDENTIFIERS[identifier])
relevant_embeddings = embeddings[inds]
# Compute scores
scores = relevant_embeddings @ vector
# Compute labels
print(sum(scores > 0))
return f1_score(val_labels, scores > 0)
def get_dnn_accuracy(model, val_identifiers, val_labels):
all_scores = np.load(str(MODEL_OUTPUTS_PARENT_DIR / model / "scores.npy"))
# Get relevant scores
inds = []
for identifier in val_identifiers:
inds.append(VAL_IDENTIFIERS[identifier])
scores = all_scores[inds]
# Compute labels
return f1_score(val_labels, scores > 0.5)
# Track the best model F1 over session time and write one value per second
i = 0
best_accuracy = 0
seen_dnns = set()
last_incremented = 0
with open(OUTPUT_FILENAME, "w") as f:
for t in range(math.floor(start_time), math.ceil(end_time) + 1):
while i < len(lines) and lines[i][0] < t:
# Parse this line
l = lines[i]
timestamp, activity_type = l[:2]
if activity_type == "NEW SVM":
svm_accuracy = get_svm_accuracy(
l[2], l[3] if len(l) > 3 else "imagenet", identifiers, labels
)
best_accuracy = max(best_accuracy, svm_accuracy)
elif activity_type == "NEW AVAILABLE DNN":
dnn_id = l[2]
if dnn_id not in seen_dnns:
dnn_accuracy = get_dnn_accuracy(dnn_id, identifiers, labels)
best_accuracy = max(best_accuracy, dnn_accuracy)
seen_dnns.add(dnn_id)
i += 1
last_incremented = t
if t - last_incremented > PAUSE_THRESHOLD:
continue
f.write(f"{best_accuracy}\n")
| [
"forager_server_api.models.Dataset.objects.get",
"forager_server_api.models.DatasetItem.objects.in_bulk",
"io.BytesIO",
"numpy.load",
"json.loads",
"math.ceil",
"forager_server_api.models.DatasetItem.objects.filter",
"math.floor",
"calendar.timegm",
"base64.b64decode",
"collections.defaultdict",... | [((421, 493), 'pathlib.Path', 'Path', (['"""/home/fpoms/forager/indexes/2d2b13f9-3b30-4e51-8ab9-4e8a03ba1f03"""'], {}), "('/home/fpoms/forager/indexes/2d2b13f9-3b30-4e51-8ab9-4e8a03ba1f03')\n", (425, 493), False, 'from pathlib import Path\n'), ((1166, 1201), 'collections.namedtuple', 'namedtuple', (['"""Tag"""', '"""category value"""'], {}), "('Tag', 'category value')\n", (1176, 1201), False, 'from collections import defaultdict, namedtuple\n'), ((6279, 6317), 'forager_server_api.models.Dataset.objects.get', 'Dataset.objects.get', ([], {'name': 'DATASET_NAME'}), '(name=DATASET_NAME)\n', (6298, 6317), False, 'from forager_server_api.models import Dataset, DatasetItem, Annotation\n'), ((6458, 6530), 'forager_server_api.models.DatasetItem.objects.in_bulk', 'DatasetItem.objects.in_bulk', (['(pos_dataset_item_pks + neg_dataset_item_pks)'], {}), '(pos_dataset_item_pks + neg_dataset_item_pks)\n', (6485, 6530), False, 'from forager_server_api.models import Dataset, DatasetItem, Annotation\n'), ((6583, 6693), 'itertools.chain', 'itertools.chain', (['((pk, True) for pk in pos_dataset_item_pks)', '((pk, False) for pk in neg_dataset_item_pks)'], {}), '(((pk, True) for pk in pos_dataset_item_pks), ((pk, False) for\n pk in neg_dataset_item_pks))\n', (6598, 6693), False, 'import itertools\n'), ((1014, 1042), 'base64.b64decode', 'base64.b64decode', (['nda_base64'], {}), '(nda_base64)\n', (1030, 1042), False, 'import base64\n'), ((4551, 4568), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4562, 4568), False, 'from collections import defaultdict, namedtuple\n'), ((5476, 5546), 'forager_server_api.models.DatasetItem.objects.filter', 'DatasetItem.objects.filter', ([], {'dataset': 'dataset', 'google': '(False)', 'is_val': '(True)'}), '(dataset=dataset, google=False, is_val=True)\n', (5502, 5546), False, 'from forager_server_api.models import Dataset, DatasetItem, Annotation\n'), ((7811, 7843), 'sklearn.metrics.f1_score', 'f1_score', (['val_labels', '(scores > 0)'], {}), '(val_labels, scores > 0)\n', (7819, 7843), False, 'from sklearn.metrics import f1_score\n'), ((8173, 8207), 'sklearn.metrics.f1_score', 'f1_score', (['val_labels', '(scores > 0.5)'], {}), '(val_labels, scores > 0.5)\n', (8181, 8207), False, 'from sklearn.metrics import f1_score\n'), ((1052, 1073), 'io.BytesIO', 'io.BytesIO', (['nda_bytes'], {}), '(nda_bytes)\n', (1062, 1073), False, 'import io\n'), ((1103, 1142), 'numpy.load', 'np.load', (['nda_buffer'], {'allow_pickle': '(False)'}), '(nda_buffer, allow_pickle=False)\n', (1110, 1142), True, 'import numpy as np\n'), ((2826, 2843), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2837, 2843), False, 'from collections import defaultdict, namedtuple\n'), ((7060, 7086), 'calendar.timegm', 'calendar.timegm', (['timestamp'], {}), '(timestamp)\n', (7075, 7086), False, 'import calendar\n'), ((8366, 8388), 'math.floor', 'math.floor', (['start_time'], {}), '(start_time)\n', (8376, 8388), False, 'import math\n'), ((4855, 4881), 'json.loads', 'json.loads', (['ann.label_data'], {}), '(ann.label_data)\n', (4865, 4881), False, 'import json\n'), ((8390, 8409), 'math.ceil', 'math.ceil', (['end_time'], {}), '(end_time)\n', (8399, 8409), False, 'import math\n'), ((521, 552), 'pathlib.Path', 'Path', (['"""~/forager/model_outputs"""'], {}), "('~/forager/model_outputs')\n", (525, 552), False, 'from pathlib import Path\n'), ((2118, 2135), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2129, 2135), False, 'from 
collections import defaultdict, namedtuple\n'), ((1644, 1661), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1655, 1661), False, 'from collections import defaultdict, namedtuple\n'), ((3049, 3066), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3060, 3066), False, 'from collections import defaultdict, namedtuple\n'), ((1348, 1365), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1359, 1365), False, 'from collections import defaultdict, namedtuple\n'), ((1895, 1912), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1906, 1912), False, 'from collections import defaultdict, namedtuple\n'), ((3443, 3460), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3454, 3460), False, 'from collections import defaultdict, namedtuple\n')] |
import hydroDL
from hydroDL.data import dbCsv
from hydroDL.utils import grid
from hydroDL.post import plot
import numpy as np
import matplotlib.pyplot as plt
rootDB = hydroDL.pathSMAP['DB_L3_NA']
tRange = [20150401, 20160401]
df = dbCsv.DataframeCsv(
rootDB=rootDB, subset='CONUSv2f1', tRange=tRange)
lat, lon = df.getGeo()
fieldLst = ['ecoRegionL'+str(x+1) for x in range(3)]
codeLst = df.getDataConst(fieldLst, doNorm=False, rmNan=False).astype(int)
# print code list
ngrid = len(codeLst)
u1Lst, c1Lst = np.unique(codeLst[:, 0], return_counts=True)
for (u1, c1) in zip(u1Lst, c1Lst):
# print('{:02d}-xx-xx {:.2f}% {:d}'.format(u1, c1/ngrid*100, c1))
ind = np.where(codeLst[:, 0] == u1)[0]
u2Lst, c2Lst = np.unique(codeLst[ind, 1], return_counts=True)
for (u2, c2) in zip(u2Lst, c2Lst):
# print('{:02d}-{:02d}-xx {:.2f}% {:d}'.format(u1, u2, c2/ngrid*100, c2))
ind = np.where((codeLst[:, 0] == u1) & (codeLst[:, 1] == u2))[0]
u3Lst, c3Lst = np.unique(codeLst[ind, 2], return_counts=True)
for (u3, c3) in zip(u3Lst, c3Lst):
print('{:02d}-{:02d}-{:02d} {:.2f}% {:d}'.format(u1,
u2, u3, c3/ngrid*100, c3))
# plot code maps
def indReg(l1, l2, l3):
    # build the index set and a legend label from the hierarchical eco-region code
    legStr = str(l1).zfill(2)
    if l2 == 0:
        ind = np.where(codeLst[:, 0] == l1)[0]
    else:
        legStr = legStr + '-' + str(l2).zfill(2)
        if l3 == 0:
            ind = np.where((codeLst[:, 0] == l1) & (
                codeLst[:, 1] == l2))[0]
        else:
            legStr = legStr + '-' + str(l3).zfill(2)
            ind = np.where((codeLst[:, 0] == l1) & (
                codeLst[:, 1] == l2) & (codeLst[:, 2] == l3))[0]
    return ind, legStr
def codeReg(regLst):
data = np.zeros(lat.shape)
legLst = list()
indLst = list()
for (k, reg) in zip(range(len(regLst)), regLst):
ind, legStr = indReg(reg[0], reg[1], reg[2])
data[ind] = k+1
legLst.append(legStr)
indLst.append(ind)
return data, legLst, indLst
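# Quick sanity check (illustrative): indReg(10, 1, 0) selects every grid cell
# whose level-1/level-2 eco-region code is 10-01, with a level-3 wildcard.
_ind, _leg = indReg(10, 1, 0)
print(_leg, 'contains', len(_ind), 'grid cells')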
regLst = [
    [8, 3, 0], [8, 4, 0], [9, 2, 0], [9, 3, 0], [10, 1, 0], [10, 2, 0]
]
# level-3 sub-regions; this assignment overrides the coarser list above
regLst = [
    [10, 1, 1], [10, 1, 2], [10, 1, 3]
]
fig, ax = plt.subplots(figsize=(8, 6))
data, legLst, indLst = codeReg(regLst)
import matplotlib
matplotlib.rcParams.update({'lines.markersize': 20})
plot.plotMap(data, lat=lat, lon=lon, ax=ax, cRange=[0, len(legLst)],)
fig.show()
| [
"matplotlib.rcParams.update",
"numpy.zeros",
"numpy.where",
"hydroDL.data.dbCsv.DataframeCsv",
"matplotlib.pyplot.subplots",
"numpy.unique"
] | [((234, 302), 'hydroDL.data.dbCsv.DataframeCsv', 'dbCsv.DataframeCsv', ([], {'rootDB': 'rootDB', 'subset': '"""CONUSv2f1"""', 'tRange': 'tRange'}), "(rootDB=rootDB, subset='CONUSv2f1', tRange=tRange)\n", (252, 302), False, 'from hydroDL.data import dbCsv\n'), ((514, 558), 'numpy.unique', 'np.unique', (['codeLst[:, 0]'], {'return_counts': '(True)'}), '(codeLst[:, 0], return_counts=True)\n', (523, 558), True, 'import numpy as np\n'), ((2135, 2163), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2147, 2163), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2273), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'lines.markersize': 20}"], {}), "({'lines.markersize': 20})\n", (2247, 2273), False, 'import matplotlib\n'), ((726, 772), 'numpy.unique', 'np.unique', (['codeLst[ind, 1]'], {'return_counts': '(True)'}), '(codeLst[ind, 1], return_counts=True)\n', (735, 772), True, 'import numpy as np\n'), ((1709, 1728), 'numpy.zeros', 'np.zeros', (['lat.shape'], {}), '(lat.shape)\n', (1717, 1728), True, 'import numpy as np\n'), ((674, 703), 'numpy.where', 'np.where', (['(codeLst[:, 0] == u1)'], {}), '(codeLst[:, 0] == u1)\n', (682, 703), True, 'import numpy as np\n'), ((990, 1036), 'numpy.unique', 'np.unique', (['codeLst[ind, 2]'], {'return_counts': '(True)'}), '(codeLst[ind, 2], return_counts=True)\n', (999, 1036), True, 'import numpy as np\n'), ((908, 963), 'numpy.where', 'np.where', (['((codeLst[:, 0] == u1) & (codeLst[:, 1] == u2))'], {}), '((codeLst[:, 0] == u1) & (codeLst[:, 1] == u2))\n', (916, 963), True, 'import numpy as np\n'), ((1337, 1366), 'numpy.where', 'np.where', (['(codeLst[:, 0] == l1)'], {}), '(codeLst[:, 0] == l1)\n', (1345, 1366), True, 'import numpy as np\n'), ((1444, 1499), 'numpy.where', 'np.where', (['((codeLst[:, 0] == l1) & (codeLst[:, 1] == l2))'], {}), '((codeLst[:, 0] == l1) & (codeLst[:, 1] == l2))\n', (1452, 1499), True, 'import numpy as np\n'), ((1552, 1631), 'numpy.where', 'np.where', (['((codeLst[:, 0] == l1) & (codeLst[:, 1] == l2) & (codeLst[:, 2] == l3))'], {}), '((codeLst[:, 0] == l1) & (codeLst[:, 1] == l2) & (codeLst[:, 2] == l3))\n', (1560, 1631), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append("../")
import pickle
import numpy as np
from tensorflow.keras.utils import Sequence
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from official.nlp import optimization
from sklearn.utils import shuffle
from neurallog.models import NeuralLog
from neurallog import data_loader
from neurallog.utils import classification_report
log_file = "../logs/BGL.log"
embed_dim = 768 # Embedding size for each token
max_len = 75
class BatchGenerator(Sequence):
def __init__(self, X, Y, batch_size):
self.X, self.Y = X, Y
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.X) / float(self.batch_size)))
def __getitem__(self, idx):
# print(self.batch_size)
x = self.X[idx * self.batch_size:min((idx + 1) * self.batch_size, len(self.X))]
X = np.zeros((len(x), max_len, embed_dim))
Y = np.zeros((len(x), 2))
item_count = 0
for i in range(idx * self.batch_size, min((idx + 1) * self.batch_size, len(self.X))):
x = self.X[i]
if len(x) > max_len:
x = x[-max_len:]
x = np.pad(np.array(x), pad_width=((max_len - len(x), 0), (0, 0)), mode='constant',
constant_values=0)
X[item_count] = np.reshape(x, [max_len, embed_dim])
Y[item_count] = self.Y[i]
item_count += 1
return X[:], Y[:, 0]
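# Shape check with synthetic sequences (random vectors, not real log embeddings):
# sequences longer than max_len are truncated, shorter ones are left-padded.
_gen = BatchGenerator([np.random.rand(40, embed_dim) for _ in range(4)],
                      np.zeros((4, 2)), batch_size=2)
_bx, _by = _gen[0]
print(_bx.shape, _by.shape)  # (2, 75, 768) and (2,)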
def train_generator(training_generator, validate_generator, num_train_samples, num_val_samples, batch_size,
epoch_num, model_name=None):
epochs = epoch_num
steps_per_epoch = num_train_samples
num_train_steps = steps_per_epoch * epochs
num_warmup_steps = int(0.1 * num_train_steps)
init_lr = 3e-4
optimizer = optimization.create_optimizer(init_lr=init_lr,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
optimizer_type='adamw')
loss_object = SparseCategoricalCrossentropy()
model = NeuralLog(768, ff_dim=2048, max_len=75, num_heads=12, dropout=0.1)
# model.load_weights("hdfs_transformer.hdf5")
model.compile(loss=loss_object, metrics=['accuracy'],
optimizer=optimizer)
print(model.summary())
# checkpoint
filepath = model_name
checkpoint = ModelCheckpoint(filepath,
monitor='val_accuracy',
verbose=1,
save_best_only=True,
mode='max',
save_weights_only=True)
early_stop = EarlyStopping(
monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto',
baseline=None, restore_best_weights=True
)
callbacks_list = [checkpoint, early_stop]
model.fit_generator(generator=training_generator,
steps_per_epoch=int(num_train_samples / batch_size),
epochs=epoch_num,
verbose=1,
validation_data=validate_generator,
validation_steps=int(num_val_samples / batch_size),
workers=16,
max_queue_size=32,
callbacks=callbacks_list,
shuffle=True
)
return model
def train(X, Y, epoch_num, batch_size, model_file=None):
X, Y = shuffle(X, Y)
n_samples = len(X)
train_x, train_y = X[:int(n_samples * 90 / 100)], Y[:int(n_samples * 90 / 100)]
val_x, val_y = X[int(n_samples * 90 / 100):], Y[int(n_samples * 90 / 100):]
training_generator, num_train_samples = BatchGenerator(train_x, train_y, batch_size), len(train_x)
validate_generator, num_val_samples = BatchGenerator(val_x, val_y, batch_size), len(val_x)
print("Number of training samples: {0} - Number of validating samples: {1}".format(num_train_samples,
num_val_samples))
model = train_generator(training_generator, validate_generator, num_train_samples, num_val_samples, batch_size,
epoch_num, model_name=model_file)
return model
def test_model(model, x, y, batch_size):
x, y = shuffle(x, y)
x, y = x[: len(x) // batch_size * batch_size], y[: len(y) // batch_size * batch_size]
test_loader = BatchGenerator(x, y, batch_size)
prediction = model.predict_generator(test_loader, steps=(len(x) // batch_size), workers=16, max_queue_size=32,
verbose=1)
prediction = np.argmax(prediction, axis=1)
y = y[:len(prediction)]
report = classification_report(np.array(y), prediction)
print(report)
if __name__ == '__main__':
(x_tr, y_tr), (x_te, y_te) = data_loader.load_supercomputers(
log_file, train_ratio=0.8, windows_size=20,
step_size=20, e_type='bert', mode='balance')
model = train(x_tr, y_tr, 10, 256, "bgl_transformer.hdf5")
test_model(model, x_te, y_te, batch_size=1024)
| [
"sys.path.append",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"neurallog.data_loader.load_supercomputers",
"numpy.argmax",
"neurallog.models.NeuralLog",
"numpy.zeros",
"official.nlp.optimization.create_optimizer",
"tensorflow.keras.callbacks.ModelCheckpoint",
"numpy.array",
"numpy.re... | [((22, 44), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (37, 44), False, 'import sys\n'), ((1926, 2069), 'official.nlp.optimization.create_optimizer', 'optimization.create_optimizer', ([], {'init_lr': 'init_lr', 'num_train_steps': 'num_train_steps', 'num_warmup_steps': 'num_warmup_steps', 'optimizer_type': '"""adamw"""'}), "(init_lr=init_lr, num_train_steps=\n num_train_steps, num_warmup_steps=num_warmup_steps, optimizer_type='adamw')\n", (1955, 2069), False, 'from official.nlp import optimization\n'), ((2222, 2253), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'SparseCategoricalCrossentropy', ([], {}), '()\n', (2251, 2253), False, 'from tensorflow.keras.losses import SparseCategoricalCrossentropy\n'), ((2267, 2333), 'neurallog.models.NeuralLog', 'NeuralLog', (['(768)'], {'ff_dim': '(2048)', 'max_len': '(75)', 'num_heads': '(12)', 'dropout': '(0.1)'}), '(768, ff_dim=2048, max_len=75, num_heads=12, dropout=0.1)\n', (2276, 2333), False, 'from neurallog.models import NeuralLog\n'), ((2572, 2694), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""', 'save_weights_only': '(True)'}), "(filepath, monitor='val_accuracy', verbose=1, save_best_only\n =True, mode='max', save_weights_only=True)\n", (2587, 2694), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((2872, 3001), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None, restore_best_weights=True)\n", (2885, 3001), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3689, 3702), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {}), '(X, Y)\n', (3696, 3702), False, 'from sklearn.utils import shuffle\n'), ((4552, 4565), 'sklearn.utils.shuffle', 'shuffle', (['x', 'y'], {}), '(x, y)\n', (4559, 4565), False, 'from sklearn.utils import shuffle\n'), ((4891, 4920), 'numpy.argmax', 'np.argmax', (['prediction'], {'axis': '(1)'}), '(prediction, axis=1)\n', (4900, 4920), True, 'import numpy as np\n'), ((5089, 5213), 'neurallog.data_loader.load_supercomputers', 'data_loader.load_supercomputers', (['log_file'], {'train_ratio': '(0.8)', 'windows_size': '(20)', 'step_size': '(20)', 'e_type': '"""bert"""', 'mode': '"""balance"""'}), "(log_file, train_ratio=0.8, windows_size=20,\n step_size=20, e_type='bert', mode='balance')\n", (5120, 5213), False, 'from neurallog import data_loader\n'), ((863, 891), 'numpy.zeros', 'np.zeros', ([], {'shape': '(embed_dim,)'}), '(shape=(embed_dim,))\n', (871, 891), True, 'import numpy as np\n'), ((4984, 4995), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4992, 4995), True, 'import numpy as np\n'), ((1440, 1475), 'numpy.reshape', 'np.reshape', (['x', '[max_len, embed_dim]'], {}), '(x, [max_len, embed_dim])\n', (1450, 1475), True, 'import numpy as np\n'), ((1297, 1308), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1305, 1308), True, 'import numpy as np\n')] |
"""
Basic spatial utility functions.
"""
import numpy as np
from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs
import itertools
import logging
from pyrolite.util.math import isclose
logging.getLogger(__name__).addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
def _spherical_law_cosines_GC_distance(ps):
    """
    Spherical law of cosines calculation of distance between two points. Suffers
    from rounding errors for closer points.
    Parameters
    ----------
    ps : :class:`numpy.ndarray`
        Numpy array with latitudes and longitudes [x1, x2, y1, y2]
    """
φ1, φ2 = ps[2:] # latitude
λ1, λ2 = ps[:2] # longitude
Δλ = abs(λ1 - λ2)
Δφ = abs(φ1 - φ2)
return arccos(sin(φ1) * sin(φ2) + cos(φ1) * cos(φ2) * cos(Δλ))
def _vicenty_GC_distance(ps):
"""
Vicenty formula for an ellipsoid with equal major and minor axes.
<NAME> (1975) Direct and Inverse Solutions of Geodesics on the Ellipsoid with
Application of Nested Equations. Survey Review 23:88–93.
doi: 10.1179/SRE.1975.23.176.88
    Parameters
    ----------
    ps : :class:`numpy.ndarray`
        Numpy array with latitudes and longitudes [x1, x2, y1, y2]
    """
φ1, φ2 = ps[2:] # latitude
λ1, λ2 = ps[:2] # longitude
Δλ = abs(λ1 - λ2)
Δφ = abs(φ1 - φ2)
_S = sqrt(
(cos(φ2) * sin(Δλ)) ** 2
+ (cos(φ1) * sin(φ2) - sin(φ1) * cos(φ2) * cos(Δλ)) ** 2
)
_C = sin(φ1) * sin(φ2) + cos(φ1) * cos(φ2) * cos(Δλ)
return np.abs(arctan2(_S, _C))
def _haversine_GC_distance(ps):
"""
Haversine formula for great circle distance. Suffers from rounding errors for
antipodal points.
Parameters
----------
ps : :class:`numpy.ndarray`
        Numpy array with latitudes and longitudes [x1, x2, y1, y2]
"""
φ1, φ2 = ps[2:] # latitude
λ1, λ2 = ps[:2] # longitude
Δλ = abs(λ1 - λ2)
Δφ = abs(φ1 - φ2)
return 2 * arcsin(sqrt(sin(Δφ / 2) ** 2 + cos(φ1) * cos(φ2) * sin(Δλ / 2) ** 2))
def great_circle_distance(
p1, p2, absolute=False, degrees=True, r=6371.0088, method=None
):
"""
Calculate the great circle distance between two lat, long points.
Parameters
----------
p1, p2 : :class:`float`
Lat-Long points to calculate distance between.
absolute : :class:`bool`, :code:`False`
Whether to return estimates of on-sphere distances [True], or simply return the
central angle between the points.
degrees : :class:`bool`, :code:`True`
Whether lat-long coordinates are in degrees [True] or radians [False].
r : :class:`float`
        Earth radius used for estimating absolute distances (default in km).
method : :class:`str`, :code:`{'vicenty', 'cosines', 'haversine'}`
Which method to use for great circle distance calculation. Defaults to the
Vicenty formula.
"""
x1, y1 = p1
x2, y2 = p2
    ps = np.array([x1, x2, y1, y2]).astype(float)
if degrees:
ps = deg2rad(ps)
if method is None:
f = _vicenty_GC_distance
else:
if method.lower().startswith("cos"):
            f = _spherical_law_cosines_GC_distance
elif method.lower().startswith("hav"):
f = _haversine_GC_distance
else: # Default to most precise
f = _vicenty_GC_distance
angle = f(ps)
if np.isnan(angle) and f != _vicenty_GC_distance: # fallback for cos failure @ 0.
angle = _vicenty_GC_distance(ps)
    if absolute:
        # arc length = central angle (in radians) * radius
        return angle * r
    else:
        return np.rad2deg(angle)
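# Illustrative check (a quarter turn is the same whichever lat/long ordering is
# assumed, so it sidesteps the unpacking convention above): the central angle is
# 90 degrees and the arc length is roughly 10,007 km for r = 6371.0088.
if __name__ == "__main__":
    print(great_circle_distance((0.0, 0.0), (90.0, 0.0)))
    print(great_circle_distance((0.0, 0.0), (90.0, 0.0), absolute=True))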
def piecewise(segment_ranges: list, segments=2, output_fmt=float):
    """
    Generator to provide values of quantizable parameters which define a grid,
    here used to split up queries from databases to reduce load.
    """
    outf = np.vectorize(output_fmt)
    if isinstance(segments, int):
        segments = list(np.ones(len(segment_ranges), dtype=int) * segments)
seg_width = [
(x2 - x1) / segments[ix] # can have negative steps
for ix, (x1, x2) in enumerate(segment_ranges)
]
separators = [
np.linspace(x1, x2, segments[ix] + 1)[:-1]
for ix, (x1, x2) in enumerate(segment_ranges)
]
pieces = list(itertools.product(*separators))
for ix, i in enumerate(pieces):
i = np.array(i)
out = np.vstack((i, i + np.array(seg_width)))
yield outf(out)
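# Illustrative: a 2x2 split of two 0-10 ranges yields four cells, each returned
# as a stacked array [[x1, y1], [x1 + dx, y1 + dy]].
if __name__ == "__main__":
    for cell in piecewise([(0.0, 10.0), (0.0, 10.0)], segments=2):
        print(cell)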
def spatiotemporal_split(
segments=4,
nan_lims=[np.nan, np.nan],
# usebounds=False,
# order=['minx', 'miny', 'maxx', 'maxy'],
**kwargs
):
"""
Creates spatiotemporal grid using piecewise function and arbitrary
ranges for individial kw-parameters (e.g. age=(0., 450.)), and
sequentially returns individial grid cell attributes.
"""
part = 0
for item in piecewise(kwargs.values(), segments=segments):
x1s, x2s = item
part += 1
params = {}
for vix, var in enumerate(kwargs.keys()):
vx1, vx2 = x1s[vix], x2s[vix]
params[var] = (vx1, vx2)
items = dict(
south=params.get("lat", nan_lims)[0],
north=params.get("lat", nan_lims)[1],
west=params.get("long", nan_lims)[0],
east=params.get("long", nan_lims)[1],
)
if "age" in params:
items.update(
dict(
minage=params.get("age", nan_lims)[0],
maxage=params.get("age", nan_lims)[1],
)
)
items = {k: v for (k, v) in items.items() if not np.isnan(v)}
# if usebounds:
# bounds = NSEW_2_bounds(items, order=order)
# yield bounds
# else:
yield items
def NSEW_2_bounds(cardinal, order=["minx", "miny", "maxx", "maxy"]):
"""
Translates cardinal points to xy points in the form of bounds.
Useful for converting to the format required for WFS from REST
style queries.
"""
tnsltr = {
xy: c
for xy, c in zip(
["minx", "miny", "maxx", "maxy"], ["west", "south", "east", "north"]
)
}
bnds = [cardinal.get(tnsltr[o]) for o in order]
return bnds
| [
"numpy.vectorize",
"numpy.abs",
"numpy.arctan2",
"numpy.deg2rad",
"numpy.isnan",
"numpy.rad2deg",
"numpy.sin",
"numpy.array",
"logging.NullHandler",
"itertools.product",
"numpy.cos",
"numpy.linspace",
"logging.getLogger"
] | [((283, 310), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (300, 310), False, 'import logging\n'), ((251, 272), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (270, 272), False, 'import logging\n'), ((678, 690), 'numpy.abs', 'abs', (['(λ1 - λ2)'], {}), '(λ1 - λ2)\n', (681, 690), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((700, 712), 'numpy.abs', 'abs', (['(φ1 - φ2)'], {}), '(φ1 - φ2)\n', (703, 712), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1256, 1268), 'numpy.abs', 'abs', (['(λ1 - λ2)'], {}), '(λ1 - λ2)\n', (1259, 1268), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1278, 1290), 'numpy.abs', 'abs', (['(φ1 - φ2)'], {}), '(φ1 - φ2)\n', (1281, 1290), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1860, 1872), 'numpy.abs', 'abs', (['(λ1 - λ2)'], {}), '(λ1 - λ2)\n', (1863, 1872), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1882, 1894), 'numpy.abs', 'abs', (['(φ1 - φ2)'], {}), '(φ1 - φ2)\n', (1885, 1894), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((3772, 3796), 'numpy.vectorize', 'np.vectorize', (['output_fmt'], {}), '(output_fmt)\n', (3784, 3796), True, 'import numpy as np\n'), ((212, 239), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (229, 239), False, 'import logging\n'), ((1484, 1499), 'numpy.arctan2', 'arctan2', (['_S', '_C'], {}), '(_S, _C)\n', (1491, 1499), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((2944, 2955), 'numpy.deg2rad', 'deg2rad', (['ps'], {}), '(ps)\n', (2951, 2955), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((3310, 3325), 'numpy.isnan', 'np.isnan', (['angle'], {}), '(angle)\n', (3318, 3325), True, 'import numpy as np\n'), ((3511, 3528), 'numpy.rad2deg', 'np.rad2deg', (['angle'], {}), '(angle)\n', (3521, 3528), True, 'import numpy as np\n'), ((4218, 4248), 'itertools.product', 'itertools.product', (['*separators'], {}), '(*separators)\n', (4235, 4248), False, 'import itertools\n'), ((4298, 4309), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (4306, 4309), True, 'import numpy as np\n'), ((1418, 1425), 'numpy.sin', 'sin', (['φ1'], {}), '(φ1)\n', (1421, 1425), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1429, 1436), 'numpy.sin', 'sin', (['φ2'], {}), '(φ2)\n', (1432, 1436), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1462, 1469), 'numpy.cos', 'cos', (['Δλ'], {}), '(Δλ)\n', (1465, 1469), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((2870, 2896), 'numpy.array', 'np.array', (['[x1, x2, y1, y2]'], {}), '([x1, x2, y1, y2])\n', (2878, 2896), True, 'import numpy as np\n'), ((3464, 3481), 'numpy.rad2deg', 'np.rad2deg', (['angle'], {}), '(angle)\n', (3474, 3481), True, 'import numpy as np\n'), ((4097, 4134), 'numpy.linspace', 'np.linspace', (['x1', 'x2', '(segments[ix] + 1)'], {}), '(x1, x2, segments[ix] + 1)\n', (4108, 4134), True, 'import numpy as np\n'), ((729, 736), 'numpy.sin', 'sin', (['φ1'], {}), '(φ1)\n', (732, 736), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((740, 747), 'numpy.sin', 'sin', 
(['φ2'], {}), '(φ2)\n', (743, 747), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((773, 780), 'numpy.cos', 'cos', (['Δλ'], {}), '(Δλ)\n', (776, 780), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1440, 1447), 'numpy.cos', 'cos', (['φ1'], {}), '(φ1)\n', (1443, 1447), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1451, 1458), 'numpy.cos', 'cos', (['φ2'], {}), '(φ2)\n', (1454, 1458), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((751, 758), 'numpy.cos', 'cos', (['φ1'], {}), '(φ1)\n', (754, 758), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((762, 769), 'numpy.cos', 'cos', (['φ2'], {}), '(φ2)\n', (765, 769), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1314, 1321), 'numpy.cos', 'cos', (['φ2'], {}), '(φ2)\n', (1317, 1321), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1325, 1332), 'numpy.sin', 'sin', (['Δλ'], {}), '(Δλ)\n', (1328, 1332), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((4342, 4361), 'numpy.array', 'np.array', (['seg_width'], {}), '(seg_width)\n', (4350, 4361), True, 'import numpy as np\n'), ((5545, 5556), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (5553, 5556), True, 'import numpy as np\n'), ((1349, 1356), 'numpy.cos', 'cos', (['φ1'], {}), '(φ1)\n', (1352, 1356), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1360, 1367), 'numpy.sin', 'sin', (['φ2'], {}), '(φ2)\n', (1363, 1367), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1393, 1400), 'numpy.cos', 'cos', (['Δλ'], {}), '(Δλ)\n', (1396, 1400), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1920, 1931), 'numpy.sin', 'sin', (['(Δφ / 2)'], {}), '(Δφ / 2)\n', (1923, 1931), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1371, 1378), 'numpy.sin', 'sin', (['φ1'], {}), '(φ1)\n', (1374, 1378), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1382, 1389), 'numpy.cos', 'cos', (['φ2'], {}), '(φ2)\n', (1385, 1389), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1941, 1948), 'numpy.cos', 'cos', (['φ1'], {}), '(φ1)\n', (1944, 1948), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1952, 1959), 'numpy.cos', 'cos', (['φ2'], {}), '(φ2)\n', (1955, 1959), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n'), ((1963, 1974), 'numpy.sin', 'sin', (['(Δλ / 2)'], {}), '(Δλ / 2)\n', (1966, 1974), False, 'from numpy import cos, sin, deg2rad, arctan, arctan2, arcsin, arccos, sqrt, abs\n')] |
import json
from climpy.interactive import ValueChanger
import panel as pn
import xarray as xr
import numpy
from numpy import array
import holoviews as hv
# Used for selection_expr as string
from holoviews.util.transform import dim # noqa: F401
from holoviews.element.selection import spatial_select
from climate_simulation_platform.db import load_file, save_revision, save_step
from climate_simulation_platform.message_broker import send_preprocessing_message
colormaps = hv.plotting.list_cmaps()
class SubBasins(ValueChanger):
file_type = "bathy"
step = "subbasins"
def _default_ocean_values(self, bathy, dims):
# Atlantic = 1
# Pacific = 2
# Indian = 3
arr = numpy.zeros(bathy.shape)
# Define x indices that will delimit the default sub basin areas
ind_1 = numpy.rint(dims[1] * (1/3)).astype(numpy.int64)
ind_2 = numpy.rint(dims[1] * (2/3)).astype(numpy.int64)
# generate sub basin areas
arr[:, 0 : ind_1] = 2
arr[:, ind_1 : ind_2] = 1
arr[:, ind_2 : dims[1]] = 3
# bathy values of 0 -> Land
arr[bathy <= 0] = 0
return arr
def _load_ds(self, _id):
self.loaded = False
with self.app.app_context():
ds_sub_basins = load_file(_id, "sub_basins") # load sub_basins file
        if ds_sub_basins is None:  # no sub-basins file yet: initialise with the default configuration
with self.app.app_context():
ds = load_file(_id, self.file_type)
            # assert ds.Bathymetry.shape == (149, 182)  # would raise if the grid were not (149, 182)
ds.Bathymetry.values = self._default_ocean_values(ds.Bathymetry.values, ds.Bathymetry.shape)
ds = ds.rename({"Bathymetry": "Oceans"})
else: # else if sub_basins file has been created, then use its data
# Load bathy file to extract mask
with self.app.app_context():
ds_bathy = load_file(_id, self.file_type)
mask = numpy.where(ds_bathy.Bathymetry.values <= 0, 0, 1)
ds_sub_basins["pacmsk"] = (('y', 'x'), numpy.where(ds_sub_basins.pacmsk == 1, 2, 0))
ds_sub_basins["indmsk"] = (('y', 'x'), numpy.where(ds_sub_basins.indmsk == 1, 3, 0))
ds = xr.Dataset({})
ds['Oceans'] = (ds_sub_basins.atlmsk +
ds_sub_basins.pacmsk +
ds_sub_basins.indmsk).astype(numpy.float64)* mask
ds['nav_lon'] = ds_sub_basins.navlon
ds['nav_lat'] = ds_sub_basins.navlat
# If lat and lon are in varaibles move them to coords
d = {}
for var in ds.data_vars:
if "lat" in var.lower() or "lon" in var.lower():
d[var] = var
ds = ds.set_coords(d)
self._lat_lon_ori = d
self.curvilinear_coordinates = None
number_coordinates_in_system = len(list(ds.coords.variables.values())[0].dims)
# Standard Grid
if number_coordinates_in_system == 1:
pass
# Curvilinear coordinates
elif number_coordinates_in_system == 2:
dims = list(ds[list(ds.coords)[0]].dims)
# Store the true coordinates for export
self.curvilinear_coordinates = list(ds.coords)
# Add the dimension into the coordinates this results in an ij indexing
ds.coords[dims[0]] = ds[dims[0]]
ds.coords[dims[1]] = ds[dims[1]]
# Remove the curvilinear coordinates from the original coordinates
ds = ds.reset_coords()
else:
raise ValueError("Unknown number of Coordinates")
self.ds = ds
attributes = list(ds.keys())
self.attribute.options = attributes
self.attribute.value = attributes[0]
self._original_ds = ds.copy(deep=True)
self.loaded = True
return True
def _options_pane_setup(self):
self.options_pane.clear()
self.options_pane.extend(
[
pn.pane.Markdown("""### Colormaps"""),
pn.Column(
self.colormap,
pn.Column(
pn.Row(
self.colormap_min, pn.layout.HSpacer(), self.colormap_max
),
self.colormap_range_slider,
),
self.colormap_delta,
),
pn.pane.Markdown("""### Change Values"""),
pn.Column(
self.spinner,
self.apply,
pn.Row(self.undo_button, self.redo_button),
),
]
)
def _set_values(self, value, calculation_type, selection_expr, *args, **kwargs):
# If the selection_expr is in string representation then
# Convert to object code
if isinstance(selection_expr, str):
selection_expr = eval(selection_expr)
hvds = hv.Dataset(
self.ds.to_dataframe(
dim_order=[*list(self.ds[self.attribute.value].dims)]
).reset_index()
)
land_indexs = hvds[self.attribute.value] == 0
hvds.data[self.attribute.value].loc[
hvds.select(selection_expr).data.index
] = value
hvds.data[self.attribute.value].loc[land_indexs] = 0
self.ds[self.attribute.value] = tuple(
(
list(self.ds[self.attribute.value].dims),
hvds.data[self.attribute.value].values.reshape(
*self.ds[self.attribute.value].shape
),
)
)
ds = self.ds.copy(deep=True)
self.ds = ds
def _get_graphs(self):
default_graphs = super()._get_graphs()
self.colormap.value = "viridis"
self.colormap_delta.value = 0.75
# Only allow values 1 to 3
self.spinner.value = 1
self.spinner.start = 1
self.spinner.end = 3
return default_graphs
def save(self, event):
atlmsk = numpy.where(self.ds.Oceans.values == 1, 1, 0)
pacmsk = numpy.where(self.ds.Oceans.values == 2, 1, 0)
indmsk = numpy.where(self.ds.Oceans.values == 3, 1, 0)
ds = xr.Dataset(
coords={},
data_vars={
"navlat": (["y", "x"], self.ds.nav_lat.values),
"navlon": (["y", "x"], self.ds.nav_lon.values),
"atlmsk": (["y", "x"], atlmsk),
"pacmsk": (["y", "x"], pacmsk),
"indmsk": (["y", "x"], indmsk),
},
)
ds["navlon"].attrs = {"units": "degrees_east"}
ds["navlat"].attrs = {"units": "degrees_north"}
ds["atlmsk"].attrs = {}
ds["pacmsk"].attrs = {}
ds["indmsk"].attrs = {}
with self.app.app_context():
save_revision(self.data_file_id, ds, "sub_basins")
if self.step is not None:
save_step(
self.data_file_id,
step=self.step,
parameters={
"id": self.data_file_id,
"undo_list": json.dumps(self._undo_list),
},
up_to_date=True,
)
send_preprocessing_message(
self.step + ".done", message={"id": self.data_file_id}
)
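# Minimal standalone sketch (illustrative; bypasses the panel app entirely).
# `_default_ocean_values` never touches `self`, so it can be exercised unbound:
# land (depth <= 0) stays 0, and the ocean splits into three longitude bands
# coded 2/1/3 (Pacific/Atlantic/Indian) from west to east.
if __name__ == "__main__":
    _toy_bathy = numpy.array([[-1.0, 5.0, 5.0, 5.0, 5.0, 5.0],
                             [5.0, 5.0, 5.0, 5.0, 5.0, 0.0]])
    print(SubBasins._default_ocean_values(None, _toy_bathy, _toy_bathy.shape))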
if "bokeh_app" in __name__:
sub_basins = SubBasins()
sub_basins.plot().servable("NetCDF Editor")
| [
"climate_simulation_platform.message_broker.send_preprocessing_message",
"holoviews.plotting.list_cmaps",
"numpy.zeros",
"climate_simulation_platform.db.load_file",
"json.dumps",
"xarray.Dataset",
"numpy.rint",
"numpy.where",
"panel.layout.HSpacer",
"climate_simulation_platform.db.save_revision",
... | [((477, 501), 'holoviews.plotting.list_cmaps', 'hv.plotting.list_cmaps', ([], {}), '()\n', (499, 501), True, 'import holoviews as hv\n'), ((712, 736), 'numpy.zeros', 'numpy.zeros', (['bathy.shape'], {}), '(bathy.shape)\n', (723, 736), False, 'import numpy\n'), ((6261, 6306), 'numpy.where', 'numpy.where', (['(self.ds.Oceans.values == 1)', '(1)', '(0)'], {}), '(self.ds.Oceans.values == 1, 1, 0)\n', (6272, 6306), False, 'import numpy\n'), ((6324, 6369), 'numpy.where', 'numpy.where', (['(self.ds.Oceans.values == 2)', '(1)', '(0)'], {}), '(self.ds.Oceans.values == 2, 1, 0)\n', (6335, 6369), False, 'import numpy\n'), ((6387, 6432), 'numpy.where', 'numpy.where', (['(self.ds.Oceans.values == 3)', '(1)', '(0)'], {}), '(self.ds.Oceans.values == 3, 1, 0)\n', (6398, 6432), False, 'import numpy\n'), ((6446, 6685), 'xarray.Dataset', 'xr.Dataset', ([], {'coords': '{}', 'data_vars': "{'navlat': (['y', 'x'], self.ds.nav_lat.values), 'navlon': (['y', 'x'],\n self.ds.nav_lon.values), 'atlmsk': (['y', 'x'], atlmsk), 'pacmsk': ([\n 'y', 'x'], pacmsk), 'indmsk': (['y', 'x'], indmsk)}"}), "(coords={}, data_vars={'navlat': (['y', 'x'], self.ds.nav_lat.\n values), 'navlon': (['y', 'x'], self.ds.nav_lon.values), 'atlmsk': ([\n 'y', 'x'], atlmsk), 'pacmsk': (['y', 'x'], pacmsk), 'indmsk': (['y',\n 'x'], indmsk)})\n", (6456, 6685), True, 'import xarray as xr\n'), ((1279, 1307), 'climate_simulation_platform.db.load_file', 'load_file', (['_id', '"""sub_basins"""'], {}), "(_id, 'sub_basins')\n", (1288, 1307), False, 'from climate_simulation_platform.db import load_file, save_revision, save_step\n'), ((2079, 2129), 'numpy.where', 'numpy.where', (['(ds_bathy.Bathymetry.values <= 0)', '(0)', '(1)'], {}), '(ds_bathy.Bathymetry.values <= 0, 0, 1)\n', (2090, 2129), False, 'import numpy\n'), ((2342, 2356), 'xarray.Dataset', 'xr.Dataset', (['{}'], {}), '({})\n', (2352, 2356), True, 'import xarray as xr\n'), ((7060, 7110), 'climate_simulation_platform.db.save_revision', 'save_revision', (['self.data_file_id', 'ds', '"""sub_basins"""'], {}), "(self.data_file_id, ds, 'sub_basins')\n", (7073, 7110), False, 'from climate_simulation_platform.db import load_file, save_revision, save_step\n'), ((826, 855), 'numpy.rint', 'numpy.rint', (['(dims[1] * (1 / 3))'], {}), '(dims[1] * (1 / 3))\n', (836, 855), False, 'import numpy\n'), ((890, 919), 'numpy.rint', 'numpy.rint', (['(dims[1] * (2 / 3))'], {}), '(dims[1] * (2 / 3))\n', (900, 919), False, 'import numpy\n'), ((1520, 1550), 'climate_simulation_platform.db.load_file', 'load_file', (['_id', 'self.file_type'], {}), '(_id, self.file_type)\n', (1529, 1550), False, 'from climate_simulation_platform.db import load_file, save_revision, save_step\n'), ((2029, 2059), 'climate_simulation_platform.db.load_file', 'load_file', (['_id', 'self.file_type'], {}), '(_id, self.file_type)\n', (2038, 2059), False, 'from climate_simulation_platform.db import load_file, save_revision, save_step\n'), ((2182, 2226), 'numpy.where', 'numpy.where', (['(ds_sub_basins.pacmsk == 1)', '(2)', '(0)'], {}), '(ds_sub_basins.pacmsk == 1, 2, 0)\n', (2193, 2226), False, 'import numpy\n'), ((2279, 2323), 'numpy.where', 'numpy.where', (['(ds_sub_basins.indmsk == 1)', '(3)', '(0)'], {}), '(ds_sub_basins.indmsk == 1, 3, 0)\n', (2290, 2323), False, 'import numpy\n'), ((4102, 4135), 'panel.pane.Markdown', 'pn.pane.Markdown', (['"""### Colormaps"""'], {}), "('### Colormaps')\n", (4118, 4135), True, 'import panel as pn\n'), ((4530, 4567), 'panel.pane.Markdown', 'pn.pane.Markdown', (['"""### Change Values"""'], {}), "('### 
Change Values')\n", (4546, 4567), True, 'import panel as pn\n'), ((7493, 7580), 'climate_simulation_platform.message_broker.send_preprocessing_message', 'send_preprocessing_message', (["(self.step + '.done')"], {'message': "{'id': self.data_file_id}"}), "(self.step + '.done', message={'id': self.\n data_file_id})\n", (7519, 7580), False, 'from climate_simulation_platform.message_broker import send_preprocessing_message\n'), ((4686, 4728), 'panel.Row', 'pn.Row', (['self.undo_button', 'self.redo_button'], {}), '(self.undo_button, self.redo_button)\n', (4692, 4728), True, 'import panel as pn\n'), ((4313, 4332), 'panel.layout.HSpacer', 'pn.layout.HSpacer', ([], {}), '()\n', (4330, 4332), True, 'import panel as pn\n'), ((7370, 7397), 'json.dumps', 'json.dumps', (['self._undo_list'], {}), '(self._undo_list)\n', (7380, 7397), False, 'import json\n')] |
import numpy as np
from scipy import stats
import sys, scipy, numpy;
print(scipy.__version__, numpy.__version__, sys.version_info)
a = np.array([0, 0, 0, 1, 1, 1, 1])
b = np.arange(7)
pearson1 = stats.pearsonr(a, b)
a1 = a * 1e90
b1 = b * 1e90
pearson2 = stats.pearsonr(a1, b1)
print(pearson1 , pearson2)
# see https://github.com/scipy/scipy/issues/8980
lNotFixed = True  # flip to False once scipy issue 8980 is fixed upstream; the asserts then become active
assert(np.allclose(pearson1[0] , pearson2[0]) or lNotFixed)
assert(np.allclose(pearson1[1] , pearson2[1]) or lNotFixed)
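# A hypothetical workaround sketch (not part of the issue report): pearsonr is
# invariant to affine rescaling, so standardising the huge inputs first keeps
# the intermediate sums inside float64 range.
a2 = (a1 - a1.mean()) / a1.std()
b2 = (b1 - b1.mean()) / b1.std()
print(stats.pearsonr(a2, b2))  # should match pearson1 up to rounding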
| [
"numpy.allclose",
"numpy.array",
"numpy.arange",
"scipy.stats.pearsonr"
] | [((138, 169), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 1]'], {}), '([0, 0, 0, 1, 1, 1, 1])\n', (146, 169), True, 'import numpy as np\n'), ((174, 186), 'numpy.arange', 'np.arange', (['(7)'], {}), '(7)\n', (183, 186), True, 'import numpy as np\n'), ((198, 218), 'scipy.stats.pearsonr', 'stats.pearsonr', (['a', 'b'], {}), '(a, b)\n', (212, 218), False, 'from scipy import stats\n'), ((258, 280), 'scipy.stats.pearsonr', 'stats.pearsonr', (['a1', 'b1'], {}), '(a1, b1)\n', (272, 280), False, 'from scipy import stats\n'), ((383, 420), 'numpy.allclose', 'np.allclose', (['pearson1[0]', 'pearson2[0]'], {}), '(pearson1[0], pearson2[0])\n', (394, 420), True, 'import numpy as np\n'), ((443, 480), 'numpy.allclose', 'np.allclose', (['pearson1[1]', 'pearson2[1]'], {}), '(pearson1[1], pearson2[1])\n', (454, 480), True, 'import numpy as np\n')] |
import torch
import numpy as np
from PIL import Image
import os
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def tensor2im(input_image, imtype=np.uint8):
if not isinstance(input_image, np.ndarray):
if isinstance(input_image, torch.Tensor):
image_tensor = input_image.data
else:
return input_image
image_numpy = image_tensor[0].cpu().float().numpy()
if image_numpy.shape[0] == 1:
image_numpy = np.tile(image_numpy, (3, 1, 1))
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = input_image
return image_numpy.astype(imtype)
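# Illustrative round trip (random tensor, assumed to follow the [-1, 1] value
# convention this helper expects): CHW float tensor in, HWC uint8 array out.
if __name__ == '__main__':
    _t = torch.rand(1, 3, 8, 8) * 2 - 1  # batch of one, values in [-1, 1]
    print(tensor2im(_t).shape, tensor2im(_t).dtype)  # (8, 8, 3) uint8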
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
def save_images(visuals, image_dir, image_path, name):
if not os.path.isdir(image_dir):
os.mkdir(image_dir)
for label, im_data in visuals.items():
im = tensor2im(im_data)
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
save_image(im, save_path)
def print_current_losses(epoch, iters, losses, t_comp, t_data, log_name='train_log'):
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message)
with open(log_name, "a") as log_file:
log_file.write('%s\n' % message) | [
"os.mkdir",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"numpy.transpose",
"numpy.tile",
"PIL.Image.fromarray",
"os.path.join"
] | [((922, 950), 'PIL.Image.fromarray', 'Image.fromarray', (['image_numpy'], {}), '(image_numpy)\n', (937, 950), False, 'from PIL import Image\n'), ((262, 282), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (276, 282), False, 'import os\n'), ((292, 309), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (303, 309), False, 'import os\n'), ((1088, 1112), 'os.path.isdir', 'os.path.isdir', (['image_dir'], {}), '(image_dir)\n', (1101, 1112), False, 'import os\n'), ((1122, 1141), 'os.mkdir', 'os.mkdir', (['image_dir'], {}), '(image_dir)\n', (1130, 1141), False, 'import os\n'), ((1291, 1326), 'os.path.join', 'os.path.join', (['image_dir', 'image_name'], {}), '(image_dir, image_name)\n', (1303, 1326), False, 'import os\n'), ((669, 700), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (676, 700), True, 'import numpy as np\n'), ((724, 760), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (736, 760), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 16:13:26 2020
@author: <NAME>
In this code a Hamiltonian Neural Network is designed and employed
to solve the system of two differential equations obtained from Hamilton's
equations for the Hamiltonian of a nonlinear oscillator.
"""
import numpy as np
import torch
import torch.optim as optim
from torch.autograd import grad
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import copy
from scipy.integrate import odeint
dtype=torch.float
## Define the Functions
# Define the sin() activation function
class mySin(torch.nn.Module):
@staticmethod
def forward(input):
return torch.sin(input)
# Use below in the Scipy Solver
def f(u, t, lam=1):
x, px = u # unpack current values of u
derivs = [px, -x - lam*x**3] # list of dy/dt=f functions
return derivs
# Scipy Solver
def NLosc_solution(N,t, x0, px0, lam=1):
u0 = [x0, px0]
# Call the ODE solver
solPend = odeint(f, u0, t, args=(lam,))
xP = solPend[:,0]; pxP = solPend[:,1];
return xP, pxP
# Energy of nonlinear oscillator
def energy(x, px, lam=1):
Nx=len(x);
x=x.reshape(Nx); px=px.reshape(Nx);
E = 0.5*px**2 + 0.5*x**2 + lam*x**4/4
E = E.reshape(Nx)
return E
# initial energy E0 and the exact (conserved) energy array
def NLosc_exact(N,x0, px0, lam):
E0 = 0.5*px0**2 + 0.5*x0**2 + lam*x0**4/4
E_ex = E0*np.ones(N);
return E0, E_ex
# Set the initial state. lam controls the nonlinearity
x0, px0, lam = 1.3, 1., 1;
t0, t_max, N = 0.,4*np.pi, 200; dt = t_max/N;
X0 = [t0, x0, px0, lam]
t_num = np.linspace(t0, t_max, N)
E0, E_ex = NLosc_exact(N,x0, px0, lam)
# Solution obtained by Scipy solver
x_num, px_num = NLosc_solution(N,t_num, x0, px0, lam)
E_num = energy( x_num, px_num, lam)
#####################################
# Hamiltonian Neural Network
####################################
# Define some more general functions
def dfx(x,f):
    # Calculate the derivative with automatic differentiation
return grad([f], [x], grad_outputs=torch.ones(x.shape, dtype=dtype), create_graph=True)[0]
def perturbPoints(grid,t0,tf,sig=0.5):
# stochastic perturbation of the evaluation points
# force t[0]=t0 & force points to be in the t-interval
delta_t = grid[1] - grid[0]
noise = delta_t * torch.randn_like(grid)*sig
t = grid + noise
t.data[2] = torch.ones(1,1)*(-1)
t.data[t<t0]=t0 - t.data[t<t0]
t.data[t>tf]=2*tf - t.data[t>tf]
t.data[0] = torch.ones(1,1)*t0
t.requires_grad = False
return t
def saveData(path, t, x, px, E, loss):
np.savetxt(path+"t.txt",t)
np.savetxt(path+"x.txt",x)
np.savetxt(path+"px.txt",px)
np.savetxt(path+"E.txt",E)
np.savetxt(path+"Loss.txt",loss)
# Define some functions used by the Hamiltonian network
def parametricSolutions(t, nn, X0):
# parametric solutions
t0, x0, px0, lam = X0[0],X0[1],X0[2],X0[3]
N1, N2 = nn(t)
dt =t-t0
    #### Two candidate parametric ansatz factors; uncomment f = dt for the linear one
f = (1-torch.exp(-dt))
# f = dt
x_hat = x0 + f*N1
px_hat = px0 + f*N2
return x_hat, px_hat
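# Sanity check of the ansatz (illustrative, using a stand-in "network" instead
# of a trained model): f(t0) = 0, so the trial solution satisfies the initial
# conditions x(t0) = x0, px(t0) = px0 exactly, whatever the network outputs.
_dummy_nn = lambda t: (torch.ones_like(t), -torch.ones_like(t))
_xh, _pxh = parametricSolutions(torch.full((1, 1), t0), _dummy_nn, X0)
print(_xh.item(), _pxh.item())  # 1.3, 1.0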
def hamEqs_Loss(t,x,px,lam):
    # Define the loss function from Hamilton's Eqs., writing the equations explicitly
xd,pxd= dfx(t,x),dfx(t,px)
fx = xd - px;
fpx = pxd + x + lam*x.pow(3)
Lx = (fx.pow(2)).mean(); Lpx = (fpx.pow(2)).mean();
L = Lx + Lpx
return L
def hamEqs_Loss_byH(t,x,px,lam):
# This is an alternative way to define the loss function:
# Define the loss function by Hamilton Eqs. directly from Hamiltonian H
#
# Potential and Kinetic Energy
V = 0.5*x.pow(2) + lam*x.pow(4)/4
K = 0.5*px.pow(2)
ham = K + V
xd,pxd= dfx(t,x),dfx(t,px)
# calculate the partial spatial derivatives of H
hx = grad([ham], [x], grad_outputs=torch.ones(x.shape, dtype=dtype), create_graph=True)[0]
hpx = grad([ham], [px], grad_outputs=torch.ones(px.shape, dtype=dtype), create_graph=True)[0]
# Hamilton Eqs
fx = xd - hpx; fpx = pxd + hx;
Lx = (fx.pow(2)).mean(); Lpx = (fpx.pow(2)).mean();
L = Lx + Lpx
return L
# NETWORK ARCHITECTURE
# A two hidden layer NN, 1 input & two output
class odeNet_NLosc_MM(torch.nn.Module):
def __init__(self, D_hid=10):
super(odeNet_NLosc_MM,self).__init__()
##### CHOOCE THE ACTIVATION FUNCTION
self.actF = mySin()
# self.actF = torch.nn.Sigmoid()
# define layers
self.Lin_1 = torch.nn.Linear(1, D_hid)
self.Lin_2 = torch.nn.Linear(D_hid, D_hid)
self.Lin_out = torch.nn.Linear(D_hid, 2)
def forward(self,t):
# layer 1
l = self.Lin_1(t); h = self.actF(l)
# layer 2
l = self.Lin_2(h); h = self.actF(l)
# output layer
r = self.Lin_out(h)
xN = (r[:,0]).reshape(-1,1); pxN = (r[:,1]).reshape(-1,1);
return xN, pxN
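# Minimal shape check (illustrative): one forward pass through an untrained net.
_net = odeNet_NLosc_MM(D_hid=8)
_xt, _pxt = _net(torch.linspace(0., 1., 5).reshape(-1, 1))
print(_xt.shape, _pxt.shape)  # torch.Size([5, 1]) twice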
# FUNCTION NETWORK TRAINING
def run_odeNet_NLosc_MM(X0, tf, neurons, epochs, n_train,lr,
minibatch_number = 1):
fc0 = odeNet_NLosc_MM(neurons)
fc1=0; # fc1 will be a deepcopy of the network with the lowest training loss
# optimizer
betas = [0.999, 0.9999]
optimizer = optim.Adam(fc0.parameters(), lr=lr, betas=betas)
Loss_history = []; Llim = 1
t0=X0[0];
grid = torch.linspace(t0, tf, n_train).reshape(-1,1)
## TRAINING ITERATION
TeP0 = time.time()
for tt in range(epochs):
# Perturbing the evaluation points & forcing t[0]=t0
t=perturbPoints(grid,t0,tf,sig=0.03*tf)
# BATCHING
batch_size = int(n_train/minibatch_number)
batch_start, batch_end = 0, batch_size
idx = np.random.permutation(n_train)
t_b = t[idx]
t_b.requires_grad = True
loss=0.0
for nbatch in range(minibatch_number):
# batch time set
t_mb = t_b[batch_start:batch_end]
# Network solutions
x,px =parametricSolutions(t_mb,fc0,X0)
            # LOSS
            # Loss defined by Hamilton's Eqs. (symplectic), written explicitly (faster)
            Ltot = hamEqs_Loss(t_mb,x,px,lam)
            # Alternative: obtain the equations by auto-differentiating H (slower)
            # Ltot = hamEqs_Loss_byH(t_mb,x,px,lam)
# OPTIMIZER
Ltot.backward(retain_graph=False); #True
optimizer.step(); loss += Ltot.data.numpy()
optimizer.zero_grad()
batch_start +=batch_size
batch_end +=batch_size
# keep the loss function history
Loss_history.append(loss)
#Keep the best model (lowest loss) by using a deep copy
if tt > 0.8*epochs and Ltot < Llim:
fc1 = copy.deepcopy(fc0)
Llim=Ltot
TePf = time.time()
runTime = TePf - TeP0
return fc1, Loss_history, runTime
# END OF FUNCTIONS DEFINITION
# TRAIN THE NETWORK.
# Here, we use one mini-batch. No significant difference when using more.
n_train, neurons, epochs, lr,mb = 200, 50, int(5e4), 8e-3, 1
model,loss,runTime = run_odeNet_NLosc_MM(X0, t_max,
neurons, epochs, n_train,lr,mb)
print('Training time (minutes):', runTime/60)
plt.figure()
plt.loglog(loss,'-b',alpha=0.975);
plt.tight_layout()
plt.ylabel('Loss');plt.xlabel('epoch')
#plt.savefig('../results/nonlinearOscillator_loss.png')
plt.savefig('nonlinearOscillator_loss.png')
# TEST THE PREDICTED SOLUTIONS
nTest = N ; tTest = torch.linspace(t0,t_max,nTest)
tTest = tTest.reshape(-1,1);
tTest.requires_grad=True
t_net = tTest.detach().numpy()
x,px = parametricSolutions(tTest,model,X0)
# HERE WE CALCULATE THE ell_max (maximum loss in time)
xd,pxd= dfx(tTest,x),dfx(tTest,px) # derivatives obtained by back-propagation
fx = xd - px;
fpx = pxd + x + lam*x.pow(3)  # use the same global lam as during training
ell_sq = fx.pow(2) + fpx.pow(2)
ell_max = np.max(np.sqrt( ell_sq.data.numpy() ) )
print('The maximum in time loss is ', ell_max)
###################
# Symplectic Euler
####################
def symEuler(Ns, x0,px0,t_max,lam):
t_s = np.linspace(t0, t_max, Ns+1)
x_s = np.zeros(Ns+1); p_s = np.zeros(Ns+1)
x_s[0], p_s[0] = x0, px0
dts = t_max/Ns;
for n in range(Ns):
x_s[n+1] = x_s[n] + dts*p_s[n]
p_s[n+1] = p_s[n] - dts*(x_s[n+1] + lam*x_s[n+1]**3)
    E_euler = energy(x_s, p_s, lam=lam)  # use the lam passed in, not a hardcoded 1
return E_euler, x_s, p_s, t_s
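# Semi-implicit (symplectic) Euler: the momentum update uses the already
# updated position x_s[n+1]. Unlike explicit Euler, the energy error of this
# map stays bounded instead of drifting, which is what the E_s curves test.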
Ns = n_train -1;
E_s, x_s, p_s, t_s = symEuler(Ns, x0,px0,t_max,lam)
Ns100 = 100*n_train ;
E_s100, x_s100, p_s100, t_s100 = symEuler(Ns100, x0,px0,t_max,lam)
################
# Make the plots
#################
x=x.data.numpy(); px=px.data.numpy();
E = energy(x, px, lam)
# Figure for trajectories: x(t), p(t), energy in time E(t),
# and phase space trajectory p(x)
lineW = 2 # Line thickness
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.plot(t_num,x_num,'-g',linewidth=lineW, label='Ground truth');
plt.plot(t_net, x,'--b', label='Neural Network')
plt.plot(t_s,x_s,':k',linewidth=lineW, label='Symplectic Euler')
plt.plot(t_s100,x_s100,'-.r',linewidth=lineW, label='Symplectic Euler X 100 points')
plt.ylabel('x');plt.xlabel('t')
#plt.legend()
plt.subplot(2,2,2)
plt.plot(t_num,E_ex,'-g',linewidth=lineW, label='Ground truth');
plt.plot(t_net, E_num,'--b', label='Neural Network')
plt.plot(t_s,E_s,':k',linewidth=lineW,label='Symplectic Euler');
plt.plot(t_s100,E_s100,'-.r',linewidth=lineW,label='Symplectic Euler x 100 points');
plt.ylabel('E');plt.xlabel('t')
plt.ylim([0.5*E0,1.5*E0])
plt.legend()
plt.subplot(2,2,3)
plt.plot(t_num,px_num,'-g',linewidth=lineW);
plt.plot(t_net, px,'--b')
plt.plot(t_s,p_s,':k',linewidth=lineW);
plt.plot(t_s100,p_s100,'-.r',linewidth=lineW);
plt.ylabel('px');plt.xlabel('t')
plt.subplot(2,2,4)
plt.plot(x_num,px_num,'-g',linewidth=lineW);
plt.plot(x, px,'--b')
plt.plot(x_s,p_s,':k',linewidth=lineW);
plt.plot(x_s,p_s,'-.r',linewidth=lineW);
plt.ylabel('p');plt.xlabel('x');
#plt.savefig('../results/nonlinearOscillator_trajectories.png')
plt.savefig('nonlinearOscillator_trajectories.png')
## Figure for the error in the predicted solutions: delta_x and delta_p,
# and the energy again
# calculate the errors for the solutions obtained by network and euler
dx_num = x_num - x_num; dp_num = px_num - px_num  # reference errors, identically zero
dx = x_num - x[:,0]; dp = px_num - px[:,0]
dx_s = x_num - x_s; dp_s = px_num - p_s
# find the exact solution for more points used in Euler x 100
x_num100, px_num100 = NLosc_solution(N,t_s100, x0, px0, lam)
dx_s100 = x_num100 - x_s100; dp_s100 = px_num100 - p_s100
plt.figure(figsize=(10,8))
plt.subplot(2,2,1)
plt.plot(dx_num,dp_num,'-g',linewidth=lineW);
#plt.plot(dx, dp,'--b')
plt.plot(dx_s,dp_s,':k',linewidth=lineW);
plt.plot(dx_s100,dp_s100,'-.r',linewidth=lineW);
plt.ylabel(r'$\delta_p$');plt.xlabel(r'$\delta_x$');
plt.ylim([-1e-2,1e-2])
plt.xlim([-1e-2,1e-2])
#plt.legend()
plt.subplot(2,2,2)
plt.plot(t_num,E_ex,'-g',linewidth=lineW, label='Ground truth');
plt.plot(t_net, E,'--b', label='Neural Network')
plt.plot(t_s,E_s,':k',linewidth=lineW,label='symplectic Euler');
plt.plot(t_s100,E_s100,'-.r',linewidth=lineW,label='symplectic Euler x 100 points');
plt.ylabel('E');plt.xlabel('t')
plt.ylim([0.9*E0,1.1*E0])
plt.legend()
plt.subplot(2,2,3)
plt.plot(t_num,dx_num,'-g',linewidth=lineW, label='Ground truth');
plt.plot(t_net, dx,'--b', label='Neural Network')
plt.plot(t_s,dx_s,':k',linewidth=lineW, label='symplectic Euler')
plt.plot(t_s100,dx_s100,'-.r',linewidth=lineW, label='symplectic Euler X 100 points')
plt.ylabel(r'$\delta_x$');plt.xlabel('t')
plt.subplot(2,2,4)
plt.plot(t_num,dp_num,'-g',linewidth=lineW);
plt.plot(t_net, dp,'--b')
plt.plot(t_s,dp_s,':k',linewidth=lineW);
plt.plot(t_s100,dp_s100,'-.r',linewidth=lineW);
plt.ylabel(r'$\delta_p$');plt.xlabel('t')
#plt.savefig('../results/nonlinearOscillator_error.png')
plt.savefig('nonlinearOscillator_error.png')
| [
"matplotlib.pyplot.loglog",
"numpy.ones",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"torch.ones",
"scipy.integrate.odeint",
"numpy.savetxt",
"torch.exp",
"numpy.linspace",
"torch.nn.Linear",
"copy.deepcopy",
"torch.randn_like",
"matplotlib.pyplot.ylim",
"matplotlib.pypl... | [((1646, 1671), 'numpy.linspace', 'np.linspace', (['t0', 't_max', 'N'], {}), '(t0, t_max, N)\n', (1657, 1671), True, 'import numpy as np\n'), ((7306, 7318), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7316, 7318), True, 'import matplotlib.pyplot as plt\n'), ((7319, 7354), 'matplotlib.pyplot.loglog', 'plt.loglog', (['loss', '"""-b"""'], {'alpha': '(0.975)'}), "(loss, '-b', alpha=0.975)\n", (7329, 7354), True, 'import matplotlib.pyplot as plt\n'), ((7354, 7372), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7370, 7372), True, 'import matplotlib.pyplot as plt\n'), ((7373, 7391), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (7383, 7391), True, 'import matplotlib.pyplot as plt\n'), ((7392, 7407), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (7402, 7407), True, 'import matplotlib.pyplot as plt\n'), ((7465, 7508), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nonlinearOscillator_loss.png"""'], {}), "('nonlinearOscillator_loss.png')\n", (7476, 7508), True, 'import matplotlib.pyplot as plt\n'), ((7564, 7596), 'torch.linspace', 'torch.linspace', (['t0', 't_max', 'nTest'], {}), '(t0, t_max, nTest)\n', (7578, 7596), False, 'import torch\n'), ((8875, 8902), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (8885, 8902), True, 'import matplotlib.pyplot as plt\n'), ((8902, 8922), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (8913, 8922), True, 'import matplotlib.pyplot as plt\n'), ((8921, 8988), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'x_num', '"""-g"""'], {'linewidth': 'lineW', 'label': '"""Ground truth"""'}), "(t_num, x_num, '-g', linewidth=lineW, label='Ground truth')\n", (8929, 8988), True, 'import matplotlib.pyplot as plt\n'), ((8988, 9037), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'x', '"""--b"""'], {'label': '"""Neural Network"""'}), "(t_net, x, '--b', label='Neural Network')\n", (8996, 9037), True, 'import matplotlib.pyplot as plt\n'), ((9037, 9104), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'x_s', '""":k"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler"""'}), "(t_s, x_s, ':k', linewidth=lineW, label='Symplectic Euler')\n", (9045, 9104), True, 'import matplotlib.pyplot as plt\n'), ((9102, 9194), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'x_s100', '"""-.r"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler X 100 points"""'}), "(t_s100, x_s100, '-.r', linewidth=lineW, label=\n 'Symplectic Euler X 100 points')\n", (9110, 9194), True, 'import matplotlib.pyplot as plt\n'), ((9187, 9202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x"""'], {}), "('x')\n", (9197, 9202), True, 'import matplotlib.pyplot as plt\n'), ((9203, 9218), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (9213, 9218), True, 'import matplotlib.pyplot as plt\n'), ((9234, 9254), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (9245, 9254), True, 'import matplotlib.pyplot as plt\n'), ((9253, 9319), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'E_ex', '"""-g"""'], {'linewidth': 'lineW', 'label': '"""Ground truth"""'}), "(t_num, E_ex, '-g', linewidth=lineW, label='Ground truth')\n", (9261, 9319), True, 'import matplotlib.pyplot as plt\n'), ((9319, 9372), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'E_num', '"""--b"""'], {'label': '"""Neural Network"""'}), "(t_net, E_num, '--b', label='Neural 
Network')\n", (9327, 9372), True, 'import matplotlib.pyplot as plt\n'), ((9372, 9439), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'E_s', '""":k"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler"""'}), "(t_s, E_s, ':k', linewidth=lineW, label='Symplectic Euler')\n", (9380, 9439), True, 'import matplotlib.pyplot as plt\n'), ((9438, 9530), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'E_s100', '"""-.r"""'], {'linewidth': 'lineW', 'label': '"""Symplectic Euler x 100 points"""'}), "(t_s100, E_s100, '-.r', linewidth=lineW, label=\n 'Symplectic Euler x 100 points')\n", (9446, 9530), True, 'import matplotlib.pyplot as plt\n'), ((9524, 9539), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""E"""'], {}), "('E')\n", (9534, 9539), True, 'import matplotlib.pyplot as plt\n'), ((9540, 9555), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (9550, 9555), True, 'import matplotlib.pyplot as plt\n'), ((9556, 9586), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.5 * E0, 1.5 * E0]'], {}), '([0.5 * E0, 1.5 * E0])\n', (9564, 9586), True, 'import matplotlib.pyplot as plt\n'), ((9582, 9594), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9592, 9594), True, 'import matplotlib.pyplot as plt\n'), ((9596, 9616), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (9607, 9616), True, 'import matplotlib.pyplot as plt\n'), ((9615, 9661), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'px_num', '"""-g"""'], {'linewidth': 'lineW'}), "(t_num, px_num, '-g', linewidth=lineW)\n", (9623, 9661), True, 'import matplotlib.pyplot as plt\n'), ((9661, 9687), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'px', '"""--b"""'], {}), "(t_net, px, '--b')\n", (9669, 9687), True, 'import matplotlib.pyplot as plt\n'), ((9687, 9728), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'p_s', '""":k"""'], {'linewidth': 'lineW'}), "(t_s, p_s, ':k', linewidth=lineW)\n", (9695, 9728), True, 'import matplotlib.pyplot as plt\n'), ((9728, 9776), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'p_s100', '"""-.r"""'], {'linewidth': 'lineW'}), "(t_s100, p_s100, '-.r', linewidth=lineW)\n", (9736, 9776), True, 'import matplotlib.pyplot as plt\n'), ((9776, 9792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""px"""'], {}), "('px')\n", (9786, 9792), True, 'import matplotlib.pyplot as plt\n'), ((9793, 9808), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (9803, 9808), True, 'import matplotlib.pyplot as plt\n'), ((9810, 9830), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (9821, 9830), True, 'import matplotlib.pyplot as plt\n'), ((9829, 9875), 'matplotlib.pyplot.plot', 'plt.plot', (['x_num', 'px_num', '"""-g"""'], {'linewidth': 'lineW'}), "(x_num, px_num, '-g', linewidth=lineW)\n", (9837, 9875), True, 'import matplotlib.pyplot as plt\n'), ((9875, 9897), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'px', '"""--b"""'], {}), "(x, px, '--b')\n", (9883, 9897), True, 'import matplotlib.pyplot as plt\n'), ((9897, 9938), 'matplotlib.pyplot.plot', 'plt.plot', (['x_s', 'p_s', '""":k"""'], {'linewidth': 'lineW'}), "(x_s, p_s, ':k', linewidth=lineW)\n", (9905, 9938), True, 'import matplotlib.pyplot as plt\n'), ((9938, 9980), 'matplotlib.pyplot.plot', 'plt.plot', (['x_s', 'p_s', '"""-.r"""'], {'linewidth': 'lineW'}), "(x_s, p_s, '-.r', linewidth=lineW)\n", (9946, 9980), True, 'import matplotlib.pyplot as plt\n'), ((9980, 9995), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""p"""'], {}), "('p')\n", (9990, 9995), True, 
'import matplotlib.pyplot as plt\n'), ((9996, 10011), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (10006, 10011), True, 'import matplotlib.pyplot as plt\n'), ((10078, 10129), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nonlinearOscillator_trajectories.png"""'], {}), "('nonlinearOscillator_trajectories.png')\n", (10089, 10129), True, 'import matplotlib.pyplot as plt\n'), ((10639, 10666), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (10649, 10666), True, 'import matplotlib.pyplot as plt\n'), ((10667, 10687), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (10678, 10687), True, 'import matplotlib.pyplot as plt\n'), ((10686, 10733), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_num', 'dp_num', '"""-g"""'], {'linewidth': 'lineW'}), "(dx_num, dp_num, '-g', linewidth=lineW)\n", (10694, 10733), True, 'import matplotlib.pyplot as plt\n'), ((10757, 10800), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_s', 'dp_s', '""":k"""'], {'linewidth': 'lineW'}), "(dx_s, dp_s, ':k', linewidth=lineW)\n", (10765, 10800), True, 'import matplotlib.pyplot as plt\n'), ((10800, 10850), 'matplotlib.pyplot.plot', 'plt.plot', (['dx_s100', 'dp_s100', '"""-.r"""'], {'linewidth': 'lineW'}), "(dx_s100, dp_s100, '-.r', linewidth=lineW)\n", (10808, 10850), True, 'import matplotlib.pyplot as plt\n'), ((10850, 10875), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_p$"""'], {}), "('$\\\\delta_p$')\n", (10860, 10875), True, 'import matplotlib.pyplot as plt\n'), ((10875, 10900), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta_x$"""'], {}), "('$\\\\delta_x$')\n", (10885, 10900), True, 'import matplotlib.pyplot as plt\n'), ((10901, 10924), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.01, 0.01]'], {}), '([-0.01, 0.01])\n', (10909, 10924), True, 'import matplotlib.pyplot as plt\n'), ((10924, 10947), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.01, 0.01]'], {}), '([-0.01, 0.01])\n', (10932, 10947), True, 'import matplotlib.pyplot as plt\n'), ((10962, 10982), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (10973, 10982), True, 'import matplotlib.pyplot as plt\n'), ((10981, 11047), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'E_ex', '"""-g"""'], {'linewidth': 'lineW', 'label': '"""Ground truth"""'}), "(t_num, E_ex, '-g', linewidth=lineW, label='Ground truth')\n", (10989, 11047), True, 'import matplotlib.pyplot as plt\n'), ((11047, 11096), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'E', '"""--b"""'], {'label': '"""Neural Network"""'}), "(t_net, E, '--b', label='Neural Network')\n", (11055, 11096), True, 'import matplotlib.pyplot as plt\n'), ((11096, 11163), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'E_s', '""":k"""'], {'linewidth': 'lineW', 'label': '"""symplectic Euler"""'}), "(t_s, E_s, ':k', linewidth=lineW, label='symplectic Euler')\n", (11104, 11163), True, 'import matplotlib.pyplot as plt\n'), ((11162, 11254), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'E_s100', '"""-.r"""'], {'linewidth': 'lineW', 'label': '"""symplectic Euler x 100 points"""'}), "(t_s100, E_s100, '-.r', linewidth=lineW, label=\n 'symplectic Euler x 100 points')\n", (11170, 11254), True, 'import matplotlib.pyplot as plt\n'), ((11248, 11263), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""E"""'], {}), "('E')\n", (11258, 11263), True, 'import matplotlib.pyplot as plt\n'), ((11264, 11279), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", 
(11274, 11279), True, 'import matplotlib.pyplot as plt\n'), ((11280, 11310), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.9 * E0, 1.1 * E0]'], {}), '([0.9 * E0, 1.1 * E0])\n', (11288, 11310), True, 'import matplotlib.pyplot as plt\n'), ((11306, 11318), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11316, 11318), True, 'import matplotlib.pyplot as plt\n'), ((11320, 11340), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (11331, 11340), True, 'import matplotlib.pyplot as plt\n'), ((11339, 11407), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'dx_num', '"""-g"""'], {'linewidth': 'lineW', 'label': '"""Ground truth"""'}), "(t_num, dx_num, '-g', linewidth=lineW, label='Ground truth')\n", (11347, 11407), True, 'import matplotlib.pyplot as plt\n'), ((11407, 11457), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'dx', '"""--b"""'], {'label': '"""Neural Network"""'}), "(t_net, dx, '--b', label='Neural Network')\n", (11415, 11457), True, 'import matplotlib.pyplot as plt\n'), ((11457, 11525), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'dx_s', '""":k"""'], {'linewidth': 'lineW', 'label': '"""symplectic Euler"""'}), "(t_s, dx_s, ':k', linewidth=lineW, label='symplectic Euler')\n", (11465, 11525), True, 'import matplotlib.pyplot as plt\n'), ((11523, 11616), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'dx_s100', '"""-.r"""'], {'linewidth': 'lineW', 'label': '"""symplectic Euler X 100 points"""'}), "(t_s100, dx_s100, '-.r', linewidth=lineW, label=\n 'symplectic Euler X 100 points')\n", (11531, 11616), True, 'import matplotlib.pyplot as plt\n'), ((11609, 11634), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_x$"""'], {}), "('$\\\\delta_x$')\n", (11619, 11634), True, 'import matplotlib.pyplot as plt\n'), ((11634, 11649), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (11644, 11649), True, 'import matplotlib.pyplot as plt\n'), ((11651, 11671), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (11662, 11671), True, 'import matplotlib.pyplot as plt\n'), ((11670, 11716), 'matplotlib.pyplot.plot', 'plt.plot', (['t_num', 'dp_num', '"""-g"""'], {'linewidth': 'lineW'}), "(t_num, dp_num, '-g', linewidth=lineW)\n", (11678, 11716), True, 'import matplotlib.pyplot as plt\n'), ((11716, 11742), 'matplotlib.pyplot.plot', 'plt.plot', (['t_net', 'dp', '"""--b"""'], {}), "(t_net, dp, '--b')\n", (11724, 11742), True, 'import matplotlib.pyplot as plt\n'), ((11742, 11784), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s', 'dp_s', '""":k"""'], {'linewidth': 'lineW'}), "(t_s, dp_s, ':k', linewidth=lineW)\n", (11750, 11784), True, 'import matplotlib.pyplot as plt\n'), ((11784, 11833), 'matplotlib.pyplot.plot', 'plt.plot', (['t_s100', 'dp_s100', '"""-.r"""'], {'linewidth': 'lineW'}), "(t_s100, dp_s100, '-.r', linewidth=lineW)\n", (11792, 11833), True, 'import matplotlib.pyplot as plt\n'), ((11833, 11858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\delta_p$"""'], {}), "('$\\\\delta_p$')\n", (11843, 11858), True, 'import matplotlib.pyplot as plt\n'), ((11858, 11873), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (11868, 11873), True, 'import matplotlib.pyplot as plt\n'), ((11933, 11977), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nonlinearOscillator_error.png"""'], {}), "('nonlinearOscillator_error.png')\n", (11944, 11977), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1059), 'scipy.integrate.odeint', 'odeint', (['f', 'u0', 't'], {'args': 
'(lam,)'}), '(f, u0, t, args=(lam,))\n', (1036, 1059), False, 'from scipy.integrate import odeint\n'), ((2634, 2663), 'numpy.savetxt', 'np.savetxt', (["(path + 't.txt')", 't'], {}), "(path + 't.txt', t)\n", (2644, 2663), True, 'import numpy as np\n'), ((2665, 2694), 'numpy.savetxt', 'np.savetxt', (["(path + 'x.txt')", 'x'], {}), "(path + 'x.txt', x)\n", (2675, 2694), True, 'import numpy as np\n'), ((2696, 2727), 'numpy.savetxt', 'np.savetxt', (["(path + 'px.txt')", 'px'], {}), "(path + 'px.txt', px)\n", (2706, 2727), True, 'import numpy as np\n'), ((2729, 2758), 'numpy.savetxt', 'np.savetxt', (["(path + 'E.txt')", 'E'], {}), "(path + 'E.txt', E)\n", (2739, 2758), True, 'import numpy as np\n'), ((2760, 2795), 'numpy.savetxt', 'np.savetxt', (["(path + 'Loss.txt')", 'loss'], {}), "(path + 'Loss.txt', loss)\n", (2770, 2795), True, 'import numpy as np\n'), ((5486, 5497), 'time.time', 'time.time', ([], {}), '()\n', (5495, 5497), False, 'import time\n'), ((6865, 6876), 'time.time', 'time.time', ([], {}), '()\n', (6874, 6876), False, 'import time\n'), ((8138, 8168), 'numpy.linspace', 'np.linspace', (['t0', 't_max', '(Ns + 1)'], {}), '(t0, t_max, Ns + 1)\n', (8149, 8168), True, 'import numpy as np\n'), ((8177, 8193), 'numpy.zeros', 'np.zeros', (['(Ns + 1)'], {}), '(Ns + 1)\n', (8185, 8193), True, 'import numpy as np\n'), ((8199, 8215), 'numpy.zeros', 'np.zeros', (['(Ns + 1)'], {}), '(Ns + 1)\n', (8207, 8215), True, 'import numpy as np\n'), ((706, 722), 'torch.sin', 'torch.sin', (['input'], {}), '(input)\n', (715, 722), False, 'import torch\n'), ((1449, 1459), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (1456, 1459), True, 'import numpy as np\n'), ((2421, 2437), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (2431, 2437), False, 'import torch\n'), ((2530, 2546), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (2540, 2546), False, 'import torch\n'), ((3067, 3081), 'torch.exp', 'torch.exp', (['(-dt)'], {}), '(-dt)\n', (3076, 3081), False, 'import torch\n'), ((4535, 4560), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', 'D_hid'], {}), '(1, D_hid)\n', (4550, 4560), False, 'import torch\n'), ((4584, 4613), 'torch.nn.Linear', 'torch.nn.Linear', (['D_hid', 'D_hid'], {}), '(D_hid, D_hid)\n', (4599, 4613), False, 'import torch\n'), ((4637, 4662), 'torch.nn.Linear', 'torch.nn.Linear', (['D_hid', '(2)'], {}), '(D_hid, 2)\n', (4652, 4662), False, 'import torch\n'), ((5786, 5816), 'numpy.random.permutation', 'np.random.permutation', (['n_train'], {}), '(n_train)\n', (5807, 5816), True, 'import numpy as np\n'), ((2357, 2379), 'torch.randn_like', 'torch.randn_like', (['grid'], {}), '(grid)\n', (2373, 2379), False, 'import torch\n'), ((5393, 5424), 'torch.linspace', 'torch.linspace', (['t0', 'tf', 'n_train'], {}), '(t0, tf, n_train)\n', (5407, 5424), False, 'import torch\n'), ((6811, 6829), 'copy.deepcopy', 'copy.deepcopy', (['fc0'], {}), '(fc0)\n', (6824, 6829), False, 'import copy\n'), ((2094, 2126), 'torch.ones', 'torch.ones', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (2104, 2126), False, 'import torch\n'), ((3877, 3909), 'torch.ones', 'torch.ones', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (3887, 3909), False, 'import torch\n'), ((3974, 4007), 'torch.ones', 'torch.ones', (['px.shape'], {'dtype': 'dtype'}), '(px.shape, dtype=dtype)\n', (3984, 4007), False, 'import torch\n')] |
from django.shortcuts import render
import pandas as pd
import pickle
import numpy as np
from sklearn.preprocessing import StandardScaler
# Create your views here.
def liverApp(request):
return render(request, 'liver-app.html', {})
def liverApp_result(request):
context = {}
df = {}
input_data = []
gender_array = [0, 0]
if request.method == "POST":
data = request.POST
for key in data:
if key == 'csrfmiddlewaretoken' or key == 'name' or key == 'gender':
pass
else:
df[key] = data[key]
gender = request.POST.get('gender')
if gender == '0':
gender_array[1] = 1
else:
gender_array[0] = 1
gender_array = np.array(gender_array)
input_data = list(df.values())
input_data = np.array(input_data)
input_data = input_data.astype(float)
gender_array = gender_array.astype(float)
#print(input_data)
#print(gender_array)
final = np.concatenate([input_data, gender_array])
#print(final)
final = final.reshape(1, len(final))
#input_data = StandardScaler().fit_transform(input_data)
#print(input_data)
model = pickle.load(open('ML/Liver_disease/liver.sav', 'rb'))
out = model.predict_proba(final)
#print(out)
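    # predict_proba returns an array of shape (1, 2); column 0 is taken to be
    # the "Normal" class and column 1 "Infected", matching the branching below.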
if out[0][0] > out[0][1]:
context['acc'] = round(out[0][0]*100, 2)
context['out'] = "Normal"
else:
context['acc'] = round(out[0][1]*100, 2)
context['out'] = "Infected"
return render(request, 'liver_result.html', context) | [
"django.shortcuts.render",
"numpy.array",
"numpy.concatenate"
] | [((200, 237), 'django.shortcuts.render', 'render', (['request', '"""liver-app.html"""', '{}'], {}), "(request, 'liver-app.html', {})\n", (206, 237), False, 'from django.shortcuts import render\n'), ((1633, 1678), 'django.shortcuts.render', 'render', (['request', '"""liver_result.html"""', 'context'], {}), "(request, 'liver_result.html', context)\n", (1639, 1678), False, 'from django.shortcuts import render\n'), ((776, 798), 'numpy.array', 'np.array', (['gender_array'], {}), '(gender_array)\n', (784, 798), True, 'import numpy as np\n'), ((859, 879), 'numpy.array', 'np.array', (['input_data'], {}), '(input_data)\n', (867, 879), True, 'import numpy as np\n'), ((1048, 1090), 'numpy.concatenate', 'np.concatenate', (['[input_data, gender_array]'], {}), '([input_data, gender_array])\n', (1062, 1090), True, 'import numpy as np\n')] |
import numpy as np
import base64
import json
from werkzeug.wrappers import Request, Response
import params as yamnet_params
import yamnet as yamnet_model
DEFAULT_TOP_N = 5
def decode_audio(audio_bytes):
return np.frombuffer(base64.b64decode(audio_bytes), dtype="float32")
def make_app(make_predict_func):
predict_func = None
def app(environ, start_response):
nonlocal predict_func
if predict_func is None:
predict_func = make_predict_func()
request = Request(environ)
inputs = json.loads(request.get_data())
top_n = int(request.args.get('top_n', 0)) or None
outputs = []
for inp in inputs:
try:
pred = predict_func(decode_audio(inp), top_n)
except Exception as e:
print(f"Error predicting classes for input {len(outputs)}: {e}")
pred = None
outputs.append(pred)
return Response(json.dumps(outputs))(environ, start_response)
return app
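# The model is built lazily on the first request (predict_func starts as None)
# rather than at import time, so each worker process forked below ends up
# loading its own copy of the model after the fork.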
def load_model():
params = yamnet_params.Params()
yamnet = yamnet_model.yamnet_frames_model(params)
yamnet.load_weights('yamnet.h5')
yamnet_classes = yamnet_model.class_names('yamnet_class_map.csv')
return yamnet, yamnet_classes
def predict_classes(audio, top_n=None, *, model, labels):
scores, embeddings, spectrogram = model(audio)
prediction = np.mean(scores, axis=0)
idxs_by_score = np.argsort(prediction)[::-1]
    if not top_n:
        top_n = DEFAULT_TOP_N
    idxs_by_score = idxs_by_score[:top_n]
return [(labels[i], float(prediction[i])) for i in idxs_by_score]
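# Hedged usage sketch (class names and scores below are illustrative only;
# `encoded` stands for a base64-encoded float32 waveform):
#   model, labels = load_model()
#   waveform = decode_audio(encoded)
#   top = predict_classes(waveform, top_n=3, model=model, labels=labels)
#   # -> e.g. [('Speech', 0.87), ('Music', 0.05), ('Silence', 0.02)]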
if __name__ == "__main__":
import argparse
import functools
import bjoern
import multiprocessing
parser = argparse.ArgumentParser()
parser.add_argument("--host", default="127.0.0.1", type=str, help="host to serve")
parser.add_argument("--port", required=True, type=int, help="port to serve")
parser.add_argument("--processes", default=1, type=int, help="number of processes to spawn")
args = parser.parse_args()
def make_predict_func():
model, labels = load_model()
return functools.partial(predict_classes, model=model, labels=labels)
app = make_app(make_predict_func)
if args.processes > 1:
procs = [multiprocessing.Process(target=functools.partial(bjoern.run, app, args.host, args.port, reuse_port=True))
for i in range(args.processes)]
for p in procs:
p.start()
print("Ready")
for p in procs:
p.join()
else:
bjoern.run(app, args.host, args.port)
| [
"functools.partial",
"argparse.ArgumentParser",
"params.Params",
"werkzeug.wrappers.Request",
"bjoern.run",
"base64.b64decode",
"json.dumps",
"numpy.argsort",
"yamnet.yamnet_frames_model",
"numpy.mean",
"yamnet.class_names"
] | [((1057, 1079), 'params.Params', 'yamnet_params.Params', ([], {}), '()\n', (1077, 1079), True, 'import params as yamnet_params\n'), ((1093, 1133), 'yamnet.yamnet_frames_model', 'yamnet_model.yamnet_frames_model', (['params'], {}), '(params)\n', (1125, 1133), True, 'import yamnet as yamnet_model\n'), ((1192, 1240), 'yamnet.class_names', 'yamnet_model.class_names', (['"""yamnet_class_map.csv"""'], {}), "('yamnet_class_map.csv')\n", (1216, 1240), True, 'import yamnet as yamnet_model\n'), ((1403, 1426), 'numpy.mean', 'np.mean', (['scores'], {'axis': '(0)'}), '(scores, axis=0)\n', (1410, 1426), True, 'import numpy as np\n'), ((1783, 1808), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1806, 1808), False, 'import argparse\n'), ((234, 263), 'base64.b64decode', 'base64.b64decode', (['audio_bytes'], {}), '(audio_bytes)\n', (250, 263), False, 'import base64\n'), ((509, 525), 'werkzeug.wrappers.Request', 'Request', (['environ'], {}), '(environ)\n', (516, 525), False, 'from werkzeug.wrappers import Request, Response\n'), ((1447, 1469), 'numpy.argsort', 'np.argsort', (['prediction'], {}), '(prediction)\n', (1457, 1469), True, 'import numpy as np\n'), ((2187, 2249), 'functools.partial', 'functools.partial', (['predict_classes'], {'model': 'model', 'labels': 'labels'}), '(predict_classes, model=model, labels=labels)\n', (2204, 2249), False, 'import functools\n'), ((2621, 2658), 'bjoern.run', 'bjoern.run', (['app', 'args.host', 'args.port'], {}), '(app, args.host, args.port)\n', (2631, 2658), False, 'import bjoern\n'), ((962, 981), 'json.dumps', 'json.dumps', (['outputs'], {}), '(outputs)\n', (972, 981), False, 'import json\n'), ((2365, 2438), 'functools.partial', 'functools.partial', (['bjoern.run', 'app', 'args.host', 'args.port'], {'reuse_port': '(True)'}), '(bjoern.run, app, args.host, args.port, reuse_port=True)\n', (2382, 2438), False, 'import functools\n')] |
# -*- coding: utf-8 -*-
"""
@created on: 27/03/19,
@author: <NAME>,
@version: v0.0.1
Description:
Sphinx Documentation Status:
..todo::
"""
import numpy as np
import pandas as pd
import traceback
pd.set_option('display.max_rows', 1000)
pd.set_option('display.max_columns', 1000)
def encode_continuous_column(data_column, category_count=10):
"""
Converts a continuous column into categorical based on category_count value
:param data_column: structure containing continuous data
:param category_count: number of buckets to create
:return: encoded column from continuous column to categorical column
"""
encoded_column = pd.cut(data_column, category_count, labels=['cat_' + str(x) for x in range(category_count)])
return encoded_column
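# e.g. category_count=10 splits the value range into 10 equal-width bins
# labelled 'cat_0' ... 'cat_9' (pd.cut bins by width, not by quantile).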
def calculate_woe(data, independent_var, dependent_var, is_continuous=None, category_count=10):
"""
    Calculates the weight of evidence (WoE) of an independent variable against a dependent variable
:param data: dataframe which contains feature a
:param independent_var: variable whose woe needs to be calculated
:param dependent_var: target variable
    :param is_continuous: Default None; Boolean indicating whether the independent_var passed is categorical or continuous
:param category_count: Default 10; If the independent variable is continuous, this parameter defines the number of categories to derive from the variable
    :return: dictionary containing woe and iv scores under the keys 'woe' and 'iv' for the independent variable
"""
# calculate total number of positive and negative samples in data
total_bads = data[dependent_var].sum()
total_goods = len(data) - total_bads
if total_bads == 0 or total_goods == 0:
raise Exception('Target variable does not contain two classes. ')
# check if column is continuous, if yes convert it to bucketize
if is_continuous:
data[independent_var] = encode_continuous_column(data[independent_var], category_count=category_count)
    elif np.issubdtype(data[independent_var].dtype, np.floating):  # np.float was removed in NumPy 1.24
data[independent_var] = encode_continuous_column(data[independent_var], category_count=category_count)
# pivot on independent variable to get counts of goods and bads
pivot = pd.pivot_table(data, index=independent_var, columns=dependent_var, aggfunc='count')
feature_uniques = data[independent_var].unique()
# dictionary to hold values required for iv calculation
values = {'category': [], 'goods_count': [], 'bads_count': [], 'goods_percentage': [], 'bads_percentage': [],
'woe': [], 'iv': []}
# iterate over all the unique categories in the independent variable
for f in feature_uniques:
values['category'].append(f)
goods_count = pivot.loc[f][0]
values['goods_count'].append(goods_count)
bads_count = pivot.loc[f][1]
values['bads_count'].append(bads_count)
goods_percentage = goods_count / total_goods
values['goods_percentage'].append(goods_percentage)
bads_percentage = bads_count / total_bads
values['bads_percentage'].append(bads_percentage)
woe = np.log(goods_percentage / bads_percentage)
values['woe'].append(woe)
iv = (woe * (goods_percentage - bads_percentage))
values['iv'].append(iv)
return values
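# Worked example: a bucket holding 30% of all goods and 10% of all bads gets
# woe = ln(0.30/0.10) ~= 1.10 and contributes 1.10 * (0.30 - 0.10) ~= 0.22
# to the information value of the variable.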
def calculate_iv(data, independent_var, dependent_var, is_continuous=None, category_count=10):
"""
This function assumes the data passed is treated for null values and any other irregularities
    Calculates the information value (IV) of an independent variable against a dependent variable
:param data: dataframe which contains feature a
:param independent_var: variable whose IV needs to be calculated
:param dependent_var: target variable
    :param is_continuous: Default None; Boolean indicating whether the independent_var passed is categorical or continuous
:param category_count: Default 10; If the independent variable is continuous, this parameter defines the number of categories to derive from the variable
:return: iv score of the independent variable
"""
try:
values = calculate_woe(data, independent_var, dependent_var, is_continuous, category_count)
df = pd.DataFrame(values)
return df['iv'].sum()
except Exception:
traceback.print_exc()
if __name__ == '__main__':
iv_scores = {}
csv_filepath = ''
data = pd.read_csv(csv_filepath)
data = data.fillna(0)
target_column = 'Actual Label'
id_column = 'Customer_ID'
cols_to_calculate_iv = [x for x in data.columns if x not in [target_column, id_column]]
for col in cols_to_calculate_iv:
print(col)
iv_score = calculate_iv(data, col, target_column)
iv_scores[col] = iv_score
print(iv_scores)
| [
"pandas.DataFrame",
"traceback.print_exc",
"numpy.log",
"pandas.pivot_table",
"pandas.read_csv",
"pandas.set_option"
] | [((202, 241), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1000)'], {}), "('display.max_rows', 1000)\n", (215, 241), True, 'import pandas as pd\n'), ((242, 284), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(1000)'], {}), "('display.max_columns', 1000)\n", (255, 284), True, 'import pandas as pd\n'), ((2244, 2332), 'pandas.pivot_table', 'pd.pivot_table', (['data'], {'index': 'independent_var', 'columns': 'dependent_var', 'aggfunc': '"""count"""'}), "(data, index=independent_var, columns=dependent_var, aggfunc=\n 'count')\n", (2258, 2332), True, 'import pandas as pd\n'), ((4430, 4455), 'pandas.read_csv', 'pd.read_csv', (['csv_filepath'], {}), '(csv_filepath)\n', (4441, 4455), True, 'import pandas as pd\n'), ((3145, 3187), 'numpy.log', 'np.log', (['(goods_percentage / bads_percentage)'], {}), '(goods_percentage / bads_percentage)\n', (3151, 3187), True, 'import numpy as np\n'), ((4246, 4266), 'pandas.DataFrame', 'pd.DataFrame', (['values'], {}), '(values)\n', (4258, 4266), True, 'import pandas as pd\n'), ((4327, 4348), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (4346, 4348), False, 'import traceback\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import pdb
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.python import detection_metrics
from waymo_open_dataset.protos import metrics_pb2
ERROR = 1e-6
class DetectionMetricsEstimatorTest(tf.test.TestCase):
def get_boxes_from_bin(self, file):
pd_bbox, pd_type, pd_frame_id, pd_score, difficulty = [], [], [], [], []
stuff1 = metrics_pb2.Objects()
with open(file, 'rb')as rf:
stuff1.ParseFromString(rf.read())
for i in range(len(stuff1.objects)):
obj = stuff1.objects[i].object
pd_frame_id.append(stuff1.objects[i].frame_timestamp_micros)
box = [obj.box.center_x, obj.box.center_y, obj.box.center_z,
obj.box.length, obj.box.width, obj.box.height, obj.box.heading]
pd_bbox.append(box)
pd_score.append(stuff1.objects[i].score)
pd_type.append(obj.type)
if obj.num_lidar_points_in_box and obj.num_lidar_points_in_box<=5:
difficulty.append(2)
else:
difficulty.append(1)
return np.array(pd_bbox), np.array(pd_type), np.array(pd_frame_id), np.array(pd_score), np.array(difficulty)
def get_boxes_from_txt(self, pd_set, gt_set, pd_dir, gt_dir):
__type_list = {'unknown': 0, 'Car': 1, 'Pedestrian': 2, 'Sign': 3, 'Cyclist': 4}
pd_bbox, pd_type, pd_frame_id, pd_score, difficulty = [], [], [], [], []
gt_bbox, gt_type, gt_frame_id, gt_score, gt_diff = [], [], [], [], []
f = open(gt_set, 'r')
lines = f.readlines()
f.close()
#import pdb; pdb.set_trace()
f = open(pd_set, 'r')
pred_lines = f.readlines()
f.close()
    for i in range(len(lines)):  # 39848 frames in the validation split
print('Current index:', i)
gt_seg, gt_id = lines[i].strip().split(' ')
gt_file_name = os.path.join(gt_dir , 'waymo2kitti', 'validation', gt_seg, 'label_0', gt_id + '.txt')
file_name = pred_lines[i].strip() + '.txt' #str('{0:06}'.format(i)) + '.txt'
file = os.path.join(pd_dir, file_name)
if not os.path.exists(file):
continue
# import pdb; pdb.set_trace()
with open(file, 'r')as f:
for line in f.readlines():
line = line.strip('\n').split()
#if float(line[15])==0:
# continue
pd_frame_id.append(gt_id)
box = [float(line[11]), float(line[12]), float(line[13]),
float(line[10]), float(line[9]), float(line[8]),float(line[14])]
pd_bbox.append(box)
pd_score.append(line[15])
pd_type.append(__type_list[line[0]])
difficulty.append(1)
#import pdb; pdb.set_trace()
with open(gt_file_name, 'r')as f:
for line in f.readlines():
line = line.strip('\n').split()
if line[0]!= 'Car' or float(line[15])==0 or (float(line[4])-float(line[6]))>=0 or (float(line[5])-float(line[7]))>=0:
# print('=========ignore', line[0], line[15], line[4:8])
continue
gt_frame_id.append(gt_id)
box = [float(line[11]), float(line[12]), float(line[13]),
float(line[10]), float(line[9]), float(line[8]),float(line[14])]
gt_bbox.append(box)
gt_score.append('0.5')
# else: # gt
# pd_score.append(0.5)
gt_type.append(__type_list[line[0]])
if float(line[15])>5:
gt_diff.append(1)
else:
gt_diff.append(2)
# import pdb; pdb.set_trace()
return np.array(pd_bbox), np.array(pd_type), np.array(pd_frame_id), np.array(pd_score), np.array(difficulty),np.array(gt_bbox), np.array(gt_type), np.array(gt_frame_id), np.array(gt_score), np.array(gt_diff)
def _BuildConfig(self):
config = metrics_pb2.Config()
# pdb.set_trace()
config_text = """
num_desired_score_cutoffs: 11
breakdown_generator_ids: OBJECT_TYPE
breakdown_generator_ids: RANGE
difficulties {
levels: 1
levels: 2
}
difficulties {
levels: 1
levels: 2
}
matcher_type: TYPE_HUNGARIAN
iou_thresholds: 0.0
iou_thresholds: 0.7
iou_thresholds: 0.5
iou_thresholds: 0.5
iou_thresholds: 0.5
box_type: TYPE_3D
"""
text_format.Merge(config_text, config)
return config
def _BuildGraph(self, graph):
with graph.as_default():
self._pd_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
self._pd_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
self._pd_type = tf.compat.v1.placeholder(dtype=tf.uint8)
self._pd_score = tf.compat.v1.placeholder(dtype=tf.float32)
self._gt_frame_id = tf.compat.v1.placeholder(dtype=tf.int64)
self._gt_bbox = tf.compat.v1.placeholder(dtype=tf.float32)
self._gt_type = tf.compat.v1.placeholder(dtype=tf.uint8)
self._gt_difficulty = tf.compat.v1.placeholder(dtype=tf.uint8)
metrics = detection_metrics.get_detection_metric_ops(
config=self._BuildConfig(),
prediction_frame_id=self._pd_frame_id,
prediction_bbox=self._pd_bbox,
prediction_type=self._pd_type,
prediction_score=self._pd_score,
prediction_overlap_nlz=tf.zeros_like(
self._pd_frame_id, dtype=tf.bool),
ground_truth_bbox=self._gt_bbox,
ground_truth_type=self._gt_type,
ground_truth_frame_id=self._gt_frame_id,
# ground_truth_difficulty=tf.ones_like(self._gt_frame_id, dtype=tf.uint8),
ground_truth_difficulty=self._gt_difficulty,
recall_at_precision=0.95,
)
return metrics
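  # As used here, `metrics` maps each metric name to a (value_op, update_op)
  # pair in the tf.metrics style: _EvalUpdateOps runs the update ops to
  # accumulate detections, and _EvalValueOps then reads the value ops.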
def _EvalUpdateOps(
self,
sess,
graph,
metrics,
prediction_frame_id,
prediction_bbox,
prediction_type,
prediction_score,
ground_truth_frame_id,
ground_truth_bbox,
ground_truth_type,
ground_truth_difficulty,
):
sess.run(
[tf.group([value[1] for value in metrics.values()])],
feed_dict={
self._pd_bbox: prediction_bbox,
self._pd_frame_id: prediction_frame_id,
self._pd_type: prediction_type,
self._pd_score: prediction_score,
self._gt_bbox: ground_truth_bbox,
self._gt_type: ground_truth_type,
self._gt_frame_id: ground_truth_frame_id,
self._gt_difficulty: ground_truth_difficulty,
})
def _EvalValueOps(self, sess, graph, metrics):
ddd = {}
for item in metrics.items():
#import pdb; pdb.set_trace()
ddd[item[0]] = sess.run([item[1][0]])
return ddd
def testAPBasic(self):
print("start")
print(pd_set)
print(gt_set)
pd_bbox, pd_type, pd_frame_id, pd_score, _, gt_bbox, gt_type, gt_frame_id, _, difficulty = self.get_boxes_from_txt(pd_set, gt_set, pd_dir, gt_dir)
# import pdb; pdb.set_trace()
# pd_bbox, pd_type, pd_frame_id, pd_score, difficulty = self.get_boxes_from_bin(pd_file)
# gt_bbox, gt_type, gt_frame_id = pd_bbox, pd_type, pd_frame_id
graph = tf.Graph()
metrics = self._BuildGraph(graph)
with self.test_session(graph=graph) as sess:
sess.run(tf.compat.v1.initializers.local_variables())
#import pdb; pdb.set_trace()
self._EvalUpdateOps(sess, graph, metrics, pd_frame_id, pd_bbox, pd_type,
pd_score, gt_frame_id, gt_bbox, gt_type, difficulty)
aps = self._EvalValueOps(sess, graph, metrics)
for key, value in aps.items():
print(key, ":", value)
if __name__ == '__main__':
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# pd_set and gt_set are the validation set index, pd_set is converted to the index pattern from gt_set using set_split.py
pd_set = '/home/ubuntu/drive2/code/pct/data/val.txt'
gt_set = '/home/ubuntu/drive2/code/pct/data/val_org.txt'
    # pd_dir is the path of the predicted data for the val set; gt_dir is the generated Waymo label in KITTI pattern
    pd_dir = '/home/ubuntu/drive2/code/pct/experiments/pct/output/data'
    gt_dir = '/home/ubuntu/drive2/code/waymo_kitti_converter/data'
tf.compat.v1.disable_eager_execution()
tf.test.main()
| [
"tensorflow.test.main",
"os.path.join",
"tensorflow.compat.v1.placeholder",
"os.path.exists",
"tensorflow.zeros_like",
"tensorflow.compat.v1.disable_eager_execution",
"google.protobuf.text_format.Merge",
"numpy.array",
"waymo_open_dataset.protos.metrics_pb2.Objects",
"tensorflow.Graph",
"tensorf... | [((9196, 9234), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (9232, 9234), True, 'import tensorflow as tf\n'), ((9239, 9253), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (9251, 9253), True, 'import tensorflow as tf\n'), ((554, 575), 'waymo_open_dataset.protos.metrics_pb2.Objects', 'metrics_pb2.Objects', ([], {}), '()\n', (573, 575), False, 'from waymo_open_dataset.protos import metrics_pb2\n'), ((4447, 4467), 'waymo_open_dataset.protos.metrics_pb2.Config', 'metrics_pb2.Config', ([], {}), '()\n', (4465, 4467), False, 'from waymo_open_dataset.protos import metrics_pb2\n'), ((4927, 4965), 'google.protobuf.text_format.Merge', 'text_format.Merge', (['config_text', 'config'], {}), '(config_text, config)\n', (4944, 4965), False, 'from google.protobuf import text_format\n'), ((8042, 8052), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (8050, 8052), True, 'import tensorflow as tf\n'), ((1332, 1349), 'numpy.array', 'np.array', (['pd_bbox'], {}), '(pd_bbox)\n', (1340, 1349), True, 'import numpy as np\n'), ((1351, 1368), 'numpy.array', 'np.array', (['pd_type'], {}), '(pd_type)\n', (1359, 1368), True, 'import numpy as np\n'), ((1370, 1391), 'numpy.array', 'np.array', (['pd_frame_id'], {}), '(pd_frame_id)\n', (1378, 1391), True, 'import numpy as np\n'), ((1393, 1411), 'numpy.array', 'np.array', (['pd_score'], {}), '(pd_score)\n', (1401, 1411), True, 'import numpy as np\n'), ((1413, 1433), 'numpy.array', 'np.array', (['difficulty'], {}), '(difficulty)\n', (1421, 1433), True, 'import numpy as np\n'), ((2101, 2189), 'os.path.join', 'os.path.join', (['gt_dir', '"""waymo2kitti"""', '"""validation"""', 'gt_seg', '"""label_0"""', "(gt_id + '.txt')"], {}), "(gt_dir, 'waymo2kitti', 'validation', gt_seg, 'label_0', gt_id +\n '.txt')\n", (2113, 2189), False, 'import os\n'), ((2308, 2339), 'os.path.join', 'os.path.join', (['pd_dir', 'file_name'], {}), '(pd_dir, file_name)\n', (2320, 2339), False, 'import os\n'), ((4200, 4217), 'numpy.array', 'np.array', (['pd_bbox'], {}), '(pd_bbox)\n', (4208, 4217), True, 'import numpy as np\n'), ((4219, 4236), 'numpy.array', 'np.array', (['pd_type'], {}), '(pd_type)\n', (4227, 4236), True, 'import numpy as np\n'), ((4238, 4259), 'numpy.array', 'np.array', (['pd_frame_id'], {}), '(pd_frame_id)\n', (4246, 4259), True, 'import numpy as np\n'), ((4261, 4279), 'numpy.array', 'np.array', (['pd_score'], {}), '(pd_score)\n', (4269, 4279), True, 'import numpy as np\n'), ((4281, 4301), 'numpy.array', 'np.array', (['difficulty'], {}), '(difficulty)\n', (4289, 4301), True, 'import numpy as np\n'), ((4302, 4319), 'numpy.array', 'np.array', (['gt_bbox'], {}), '(gt_bbox)\n', (4310, 4319), True, 'import numpy as np\n'), ((4321, 4338), 'numpy.array', 'np.array', (['gt_type'], {}), '(gt_type)\n', (4329, 4338), True, 'import numpy as np\n'), ((4340, 4361), 'numpy.array', 'np.array', (['gt_frame_id'], {}), '(gt_frame_id)\n', (4348, 4361), True, 'import numpy as np\n'), ((4363, 4381), 'numpy.array', 'np.array', (['gt_score'], {}), '(gt_score)\n', (4371, 4381), True, 'import numpy as np\n'), ((4383, 4400), 'numpy.array', 'np.array', (['gt_diff'], {}), '(gt_diff)\n', (4391, 4400), True, 'import numpy as np\n'), ((5088, 5128), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int64'}), '(dtype=tf.int64)\n', (5112, 5128), True, 'import tensorflow as tf\n'), ((5157, 5199), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32'}), 
'(dtype=tf.float32)\n', (5181, 5199), True, 'import tensorflow as tf\n'), ((5228, 5268), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (5252, 5268), True, 'import tensorflow as tf\n'), ((5298, 5340), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5322, 5340), True, 'import tensorflow as tf\n'), ((5373, 5413), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.int64'}), '(dtype=tf.int64)\n', (5397, 5413), True, 'import tensorflow as tf\n'), ((5442, 5484), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.float32'}), '(dtype=tf.float32)\n', (5466, 5484), True, 'import tensorflow as tf\n'), ((5513, 5553), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (5537, 5553), True, 'import tensorflow as tf\n'), ((5588, 5628), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (5612, 5628), True, 'import tensorflow as tf\n'), ((2359, 2379), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2373, 2379), False, 'import os\n'), ((8169, 8212), 'tensorflow.compat.v1.initializers.local_variables', 'tf.compat.v1.initializers.local_variables', ([], {}), '()\n', (8210, 8212), True, 'import tensorflow as tf\n'), ((5976, 6023), 'tensorflow.zeros_like', 'tf.zeros_like', (['self._pd_frame_id'], {'dtype': 'tf.bool'}), '(self._pd_frame_id, dtype=tf.bool)\n', (5989, 6023), True, 'import tensorflow as tf\n')] |
"""Module with functionalities shared among energy systems."""
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def create_multipage_pdf(file_name='plots.pdf', figs=None, dpi=300,
mute=False):
"""Save all open matplotlib figures into a multipage pdf-file.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>>
>>> df1 = pd.DataFrame(np.random.randn(24, 2))
>>> ax1 = df1.plot(kind='line')
>>>
>>> df2 = pd.DataFrame(np.random.randn(24, 2))
>>> ax2 = df2.plot(kind='scatter', x=0, y=1)
>>>
>>> # mute is set to true to surpress writing a pdf file
>>> create_multipage_pdf(file_name='plots.pdf', dpi=300, mute=True)
False
"""
if mute is True:
# set return flag to false if no output is written
flag = False
else:
pp = PdfPages(file_name)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
# close all existing figures
for fig in figs:
plt.close(fig)
# set return flag
flag = True
return flag
def znes_colors(n=None):
"""Return dict with ZNES colors.
Examples
--------
>>> znes_colors().keys() # doctest: +ELLIPSIS
dict_keys(['darkblue', 'red', 'lightblue', 'orange', 'grey',...
"""
colors = {
'darkblue': '#00395B',
'red': '#B54036',
'lightblue': '#74ADC0',
'orange': '#EC6707',
'grey': '#BFBFBF',
'dimgrey': 'dimgrey',
'lightgrey': 'lightgrey',
'slategrey': 'slategrey',
'darkgrey': '#A9A9A9'
}
# allow for a dict of n colors
if n is not None:
return {k: colors[k] for k in list(colors)[:n]}
else:
return colors
def znes_boxprops():
"""Return dict with ZNES boxplot properties.
Examples
--------
>>> znes_boxprops().keys() # doctest: +ELLIPSIS
dict_keys(['boxprops', 'flierprops', 'medianprops', 'whiskerprops',...
"""
znes = znes_colors()
props = {}
# boxplot properties as dict since rcparams doesn't show effect with pd
props['boxprops'] = dict(linewidth=3.5)
props['flierprops'] = dict(
linewidth=2, marker='D', markerfacecolor=znes['darkblue'])
props['medianprops'] = dict(
linewidth=3.5, markerfacecolor=znes['darkblue'])
props['whiskerprops'] = dict(
linewidth=3.5, markerfacecolor=znes['darkblue'])
props['capprops'] = dict(
linewidth=3.5, markerfacecolor=znes['darkblue'])
# passing patch_artist=True to a pd.boxplot fills the box
props['colorprops'] = dict(
boxes=znes['darkblue'], whiskers=znes['darkblue'],
medians=znes['darkblue'], caps=znes['darkblue'])
return props
def znes_style(plotting_function):
"""Decorator to create basic matplotlib configuration with ZNES style.
Set markers, lines and colors. Create a znes color palette.
"""
mpl.rcParams.update(mpl.rcParamsDefault) # reset to defaults
mpl.style.use('default')
znes = znes_colors()
znes_palette = [znes['darkblue'], znes['orange'], znes['grey'],
znes['red'], znes['lightblue'], znes['dimgrey'],
znes['lightgrey'], znes['slategrey']]
znes_palette_r = znes_palette[::-1]
cmap_znes = mpl.colors.ListedColormap(znes_palette)
cmap_znes_r = mpl.colors.ListedColormap(znes_palette_r)
mpl.cm.register_cmap(name='znes', cmap=cmap_znes)
mpl.cm.register_cmap(name='znes_r', cmap=cmap_znes_r)
plt.rcParams['image.cmap'] = 'znes_r'
# grid
plt.rcParams['grid.color'] = 'k' # grid color default b0b0b0
plt.rcParams['grid.linestyle'] = 'dotted' # solid
plt.rcParams['grid.linewidth'] = 2.0 # in points default 0.8
plt.rcParams['grid.alpha'] = 1.0 # transparency, between 0.0 and 1.
# lines and markers
plt.rcParams['lines.linewidth'] = 3.5
plt.rcParams['scatter.marker'] = 'o'
# axes, ticks
plt.rcParams['axes.linewidth'] = 2
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['xtick.color'] = 'k'
plt.rcParams['ytick.color'] = 'k'
plt.rcParams['text.color'] = 'k'
plt.rcParams['axes.labelcolor'] = 'k'
plt.rcParams['axes.grid'] = True
plt.rcParams['axes.axisbelow'] = True
# legend
plt.rcParams['legend.fontsize'] = 25
plt.rcParams['legend.loc'] = 'upper right'
plt.rcParams['legend.frameon'] = True
# use normal text for math (non-italic)
plt.rcParams.update({'mathtext.default': 'regular'})
# figure
plt.rcParams['figure.figsize'] = (20.0, 15.0) # inches = px/dpi
# font helvetica clone
mpl.rcParams['font.size'] = 35
mpl.rcParams['font.family'] = 'Carlito' # Liberation Sans
# mpl.rcParams['font.weight'] = 'light' # only for ticks and legend
mpl.rcParams['savefig.format'] = 'pdf'
mpl.rcParams['savefig.bbox'] = 'tight'
mpl.rcParams['savefig.pad_inches'] = 0.1 # padding when bbox is 'tight'
# deactivate warning for high amount of figures
mpl.rcParams.update({'figure.max_open_warning': 0})
return plotting_function
def znes_style_plot():
"""Test function to test abovementioned decorator function.
Checks if matplotlib figsize param is set locally.
"""
param = plt.rcParams['figure.figsize']
return param
def znes_style_test():
"""Test function to test abovementioned decorator function.
Examples
--------
>>> plot = znes_style(znes_style_plot)
>>> plot()
[20.0, 15.0]
"""
pass
def znes_linestyles(columns=None):
"""Return a list with line styles for a passed column list.
Examples
--------
>>> znes_linestyles(['foo', 'bar'])
['-', '-']
"""
linestyles = ['-', '-', '-', '-', '-', '-.', ':', '-.',
'-', '-', '-', '-', '-', '-.', ':', '-.']
return linestyles[:len(columns)]
def znes_markers(columns=None):
"""Return a list with marker styles for a passed column list.
Examples
--------
>>> znes_markers(['foo', 'bar'])
['o', 's']
"""
markers = ['o', 's', 'v', 'x', 'H', '^', 'v', 's', '3', '.', '1', '_',
'o', 's', 'v', 'x', 'H', '^', 'v', 's', '3', '.', '1', '_']
return markers[:len(columns)]
def znes_linear_colormap(name='znes_linear', bins=256,
colors=['#00395B', '#FFFFFF', '#EC6707']):
"""Return a linear segmented colormap from three passed colors.
Examples
--------
>>> cm = znes_linear_colormap(bins=127)
>>> cm.N
127
>>> print(cm.name)
znes_linear
"""
cm = mpl.colors.LinearSegmentedColormap.from_list(name, colors, N=bins)
return cm
def znes_linear_colormap2(name='znes_linear2', bins=20,
colors=['#00395B', '#EC6707', '#BFBFBF',
'#B54036', '#74ADC0']):
"""Return a linear segmented colormap from three passed colors.
Examples
--------
>>> cm = znes_linear_colormap2(bins=77)
>>> cm.N
77
>>> print(cm.name)
znes_linear2
"""
cm = mpl.colors.LinearSegmentedColormap.from_list(name, colors, N=bins)
return cm
def znes_sample_dataframe(length=25):
"""Return a sample DataFrame of defined length to test plots."""
df = pd.DataFrame()
df['Foo'] = np.arange(1, length)
df['Bar'] = max(df['Foo']) - np.sqrt(df['Foo'])
df['Foobar'] = df['Foo'] ** 2
    df = pd.concat([df, df * 2])  # DataFrame.append was removed in pandas 2.0
df[['A', 'B', 'C']] = pd.DataFrame(np.random.rand(df.shape[0], 3))
df['ndf'] = 0
df['ndf'] = df['ndf'].where(df['Bar'] < (max(df['Bar'])/2)+1, 1)
df = df.reset_index(drop=True)
    df.index = pd.date_range(start='2018-01-01 00:00:00',
                             periods=df.shape[0], freq='h')
return df
if __name__ == '__main__':
import doctest
doctest.testmod()
| [
"matplotlib.colors.LinearSegmentedColormap.from_list",
"pandas.DataFrame",
"matplotlib.backends.backend_pdf.PdfPages",
"matplotlib.style.use",
"matplotlib.pyplot.close",
"matplotlib.rcParams.update",
"pandas.DatetimeIndex",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.rcParams.update",
"numpy.ar... | [((3141, 3181), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (['mpl.rcParamsDefault'], {}), '(mpl.rcParamsDefault)\n', (3160, 3181), True, 'import matplotlib as mpl\n'), ((3207, 3231), 'matplotlib.style.use', 'mpl.style.use', (['"""default"""'], {}), "('default')\n", (3220, 3231), True, 'import matplotlib as mpl\n'), ((3508, 3547), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['znes_palette'], {}), '(znes_palette)\n', (3533, 3547), True, 'import matplotlib as mpl\n'), ((3566, 3607), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['znes_palette_r'], {}), '(znes_palette_r)\n', (3591, 3607), True, 'import matplotlib as mpl\n'), ((3612, 3661), 'matplotlib.cm.register_cmap', 'mpl.cm.register_cmap', ([], {'name': '"""znes"""', 'cmap': 'cmap_znes'}), "(name='znes', cmap=cmap_znes)\n", (3632, 3661), True, 'import matplotlib as mpl\n'), ((3666, 3719), 'matplotlib.cm.register_cmap', 'mpl.cm.register_cmap', ([], {'name': '"""znes_r"""', 'cmap': 'cmap_znes_r'}), "(name='znes_r', cmap=cmap_znes_r)\n", (3686, 3719), True, 'import matplotlib as mpl\n'), ((4667, 4719), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'mathtext.default': 'regular'}"], {}), "({'mathtext.default': 'regular'})\n", (4686, 4719), True, 'import matplotlib.pyplot as plt\n'), ((5221, 5272), 'matplotlib.rcParams.update', 'mpl.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (5240, 5272), True, 'import matplotlib as mpl\n'), ((6786, 6852), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['name', 'colors'], {'N': 'bins'}), '(name, colors, N=bins)\n', (6830, 6852), True, 'import matplotlib as mpl\n'), ((7267, 7333), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['name', 'colors'], {'N': 'bins'}), '(name, colors, N=bins)\n', (7311, 7333), True, 'import matplotlib as mpl\n'), ((7467, 7481), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7479, 7481), True, 'import pandas as pd\n'), ((7498, 7518), 'numpy.arange', 'np.arange', (['(1)', 'length'], {}), '(1, length)\n', (7507, 7518), True, 'import numpy as np\n'), ((7840, 7916), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', ([], {'start': '"""2018-01-01 00:00:00"""', 'periods': 'df.shape[0]', 'freq': '"""h"""'}), "(start='2018-01-01 00:00:00', periods=df.shape[0], freq='h')\n", (7856, 7916), True, 'import pandas as pd\n'), ((8016, 8033), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (8031, 8033), False, 'import doctest\n'), ((956, 975), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['file_name'], {}), '(file_name)\n', (964, 975), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((7552, 7570), 'numpy.sqrt', 'np.sqrt', (["df['Foo']"], {}), "(df['Foo'])\n", (7559, 7570), True, 'import numpy as np\n'), ((7671, 7701), 'numpy.random.rand', 'np.random.rand', (['df.shape[0]', '(3)'], {}), '(df.shape[0], 3)\n', (7685, 7701), True, 'import numpy as np\n'), ((1224, 1238), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1233, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1021, 1034), 'matplotlib.pyplot.figure', 'plt.figure', (['n'], {}), '(n)\n', (1031, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1061), 'matplotlib.pyplot.get_fignums', 'plt.get_fignums', ([], {}), '()\n', (1059, 1061), True, 'import matplotlib.pyplot as plt\n')] |
import math, time, random
import numpy as np
import tensorflow as tf
def train1():
feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size=4, num_epochs=None, shuffle=True)
train_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_train}, y_train, batch_size=4, num_epochs=1000, shuffle=False)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
{"x": x_eval}, y_eval, batch_size=4, num_epochs=1000, shuffle=False)
estimator.train(input_fn=input_fn, steps=1000)
train_metrics = estimator.evaluate(input_fn=train_input_fn)
eval_metrics = estimator.evaluate(input_fn=eval_input_fn)
print("train metrics: %r"% train_metrics)
print("eval metrics: %r"% eval_metrics)
def gradient():
sess = tf.Session()
x = tf.placeholder(tf.float32)
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
linear_model = W*x + b
init = tf.global_variables_initializer()
sess.run(init)
y = tf.placeholder(tf.float32)
loss = tf.reduce_sum(tf.square(linear_model - y) )
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
for i in range(10):
sess.run(train, feed_dict={x: [1, 2, 3, 4], y: [0, -1, -2, -3] } )
print(sess.run([W, b] ) )
g = tf.gradients(linear_model, [W, b], grad_ys=None, name='gradients', colocate_gradients_with_ops=False, gate_gradients=False, aggregation_method=None)
print(sess.run(g, feed_dict={x: [1], y: [1] } ) )
# r = sess.run(loss, feed_dict={x: [1, 2, 3, 4], y: [0, -1, -2, -3] } )
# print("r= {}".format(r) )
def neural_net():
input_size = 10
output_size = input_size
input_ph = tf.placeholder(tf.float32, shape=(None, input_size) )
  desired_output_ph = tf.placeholder(tf.float32, shape=(None, input_size) )  # defined but never fed/used: the loss below reconstructs input_ph directly
# Neural net
hidden1_size, hidden2_size = 10, 10
with tf.name_scope('hidden1'):
w = tf.Variable(
tf.truncated_normal([input_size, hidden1_size], stddev=1.0 / math.sqrt(float(input_size) ) ),
name='weights')
tf.summary.histogram('histogram', w)
b = tf.Variable(tf.zeros([hidden1_size] ), name='biases')
hidden1 = tf.nn.relu(tf.matmul(input_ph, w) + b)
with tf.name_scope('hidden2'):
w = tf.Variable(
      tf.truncated_normal([hidden1_size, hidden2_size], stddev=1.0 / math.sqrt(float(hidden1_size) ) ),  # scale by this layer's fan-in (was hidden2_size; numerically identical here since both are 10)
name='weights')
tf.summary.histogram('histogram', w)
b = tf.Variable(tf.zeros([hidden2_size] ), name='biases')
hidden2 = tf.nn.relu(tf.matmul(hidden1, w) + b)
with tf.name_scope('output'):
w = tf.Variable(
tf.truncated_normal([hidden2_size, output_size], stddev=1.0 / math.sqrt(float(hidden2_size) ) ),
name='weights')
tf.summary.histogram('histogram', w)
b = tf.Variable(tf.zeros([output_size] ), name='biases')
# output = tf.matmul(hidden2, w) + b
output = tf.nn.softmax(tf.matmul(hidden2, w) + b)
# loss = tf.norm(output - input_ph, ord=1) # tf.reduce_sum(tf.square(output - input_ph) )
# loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=input_ph, logits=output, name='xentropy') )
loss = tf.reduce_sum(input_ph * -tf.log(output) )
tf.summary.scalar('loss', loss)
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.minimize(loss, global_step=global_step)
# eval_correct = tf.nn.in_top_k(output, tf.argmax(input_ph), 1)
eval_correct = tf.equal(tf.argmax(output, axis=1), tf.argmax(input_ph, axis=1) )
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
summary = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter("/home/ubuntu/deep-scheduler/log", sess.graph)
def gen_in_data():
in_data = np.zeros((1, input_size) )
in_data[0, random.randint(0, input_size-1) ] = 1
# in_data = np.zeros((100, input_size) )
# for i in range(100):
# in_data[i, random.randint(0, input_size-1) ] = 1
return in_data
for step in range(10):
start_time = time.time()
feed_dict = {input_ph: gen_in_data() }
_, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
duration = time.time() - start_time
    if step % 100 == 0:  # with only 10 steps, this logs just step 0
print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration) )
# Update the events file.
summary_str = sess.run(summary, feed_dict=feed_dict)
summary_writer.add_summary(summary_str, step)
summary_writer.flush()
# Evaluate
succ = 0
for _ in range(5):
i = gen_in_data()
# print("i= {}".format(sess.run(tf.argmax(i, axis=1), feed_dict={input_ph: i} ) ) )
# print("o= {}".format(sess.run(tf.argmax(output, axis=1), feed_dict={input_ph: i} ) ) )
e = sess.run(eval_correct, feed_dict={input_ph: i} )
# print("e= {}".format(e) )
if e:
succ += 1
# hidden1_vs = sess.run(, feed_dict={input_ph: i} )
hidden1_vs = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden1')
hidden2_vs = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden2')
# for v in hidden1_vs:
# print("v= {}".format(v.name) )
# print("hidden1_vs= {}".format(hidden1_vs) )
# hidden1_gradient = sess.run(tf.gradients(loss, hidden1_vs),
# feed_dict={input_ph: np.ones((1, input_size) ) } )
# print("hidden1_gradient= {}".format(hidden1_gradient) )
hidden2_gradient = sess.run(tf.gradients(output, hidden2_vs),
feed_dict={input_ph: np.ones((1, input_size) ) } )
print("hidden2_gradient= {}".format(hidden2_gradient) )
print("success rate= {}".format(succ/100) )
if __name__ == "__main__":
# train1()
# gradient()
neural_net()
| [
"tensorflow.get_collection",
"numpy.ones",
"tensorflow.matmul",
"tensorflow.Variable",
"random.randint",
"tensorflow.placeholder",
"tensorflow.summary.FileWriter",
"tensorflow.summary.histogram",
"tensorflow.gradients",
"tensorflow.name_scope",
"tensorflow.summary.merge_all",
"tensorflow.summa... | [((172, 233), 'tensorflow.estimator.LinearRegressor', 'tf.estimator.LinearRegressor', ([], {'feature_columns': 'feature_columns'}), '(feature_columns=feature_columns)\n', (200, 233), True, 'import tensorflow as tf\n'), ((249, 279), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0]'], {}), '([1.0, 2.0, 3.0, 4.0])\n', (257, 279), True, 'import numpy as np\n'), ((288, 321), 'numpy.array', 'np.array', (['[0.0, -1.0, -2.0, -3.0]'], {}), '([0.0, -1.0, -2.0, -3.0])\n', (296, 321), True, 'import numpy as np\n'), ((329, 359), 'numpy.array', 'np.array', (['[2.0, 5.0, 8.0, 1.0]'], {}), '([2.0, 5.0, 8.0, 1.0])\n', (337, 359), True, 'import numpy as np\n'), ((367, 399), 'numpy.array', 'np.array', (['[-1.01, -4.1, -7, 0.0]'], {}), '([-1.01, -4.1, -7, 0.0])\n', (375, 399), True, 'import numpy as np\n'), ((412, 520), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (["{'x': x_train}", 'y_train'], {'batch_size': '(4)', 'num_epochs': 'None', 'shuffle': '(True)'}), "({'x': x_train}, y_train, batch_size=4,\n num_epochs=None, shuffle=True)\n", (446, 520), True, 'import tensorflow as tf\n'), ((543, 652), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (["{'x': x_train}", 'y_train'], {'batch_size': '(4)', 'num_epochs': '(1000)', 'shuffle': '(False)'}), "({'x': x_train}, y_train, batch_size=4,\n num_epochs=1000, shuffle=False)\n", (577, 652), True, 'import tensorflow as tf\n'), ((674, 781), 'tensorflow.estimator.inputs.numpy_input_fn', 'tf.estimator.inputs.numpy_input_fn', (["{'x': x_eval}", 'y_eval'], {'batch_size': '(4)', 'num_epochs': '(1000)', 'shuffle': '(False)'}), "({'x': x_eval}, y_eval, batch_size=4,\n num_epochs=1000, shuffle=False)\n", (708, 781), True, 'import tensorflow as tf\n'), ((1074, 1086), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1084, 1086), True, 'import tensorflow as tf\n'), ((1096, 1122), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1110, 1122), True, 'import tensorflow as tf\n'), ((1129, 1165), 'tensorflow.Variable', 'tf.Variable', (['[0.3]'], {'dtype': 'tf.float32'}), '([0.3], dtype=tf.float32)\n', (1140, 1165), True, 'import tensorflow as tf\n'), ((1171, 1208), 'tensorflow.Variable', 'tf.Variable', (['[-0.3]'], {'dtype': 'tf.float32'}), '([-0.3], dtype=tf.float32)\n', (1182, 1208), True, 'import tensorflow as tf\n'), ((1245, 1278), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1276, 1278), True, 'import tensorflow as tf\n'), ((1305, 1331), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1319, 1331), True, 'import tensorflow as tf\n'), ((1402, 1441), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['(0.01)'], {}), '(0.01)\n', (1435, 1441), True, 'import tensorflow as tf\n'), ((1995, 2047), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, input_size)'}), '(tf.float32, shape=(None, input_size))\n', (2009, 2047), True, 'import tensorflow as tf\n'), ((2071, 2123), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, input_size)'}), '(tf.float32, shape=(None, input_size))\n', (2085, 2123), True, 'import tensorflow as tf\n'), ((3509, 3540), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (3526, 3540), True, 'import tensorflow as tf\n'), ((3581, 3629), 'tensorflow.train.GradientDescentOptimizer', 
'tf.train.GradientDescentOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (3614, 3629), True, 'import tensorflow as tf\n'), ((3646, 3697), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (3657, 3697), True, 'import tensorflow as tf\n'), ((3925, 3958), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3956, 3958), True, 'import tensorflow as tf\n'), ((3968, 3980), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3978, 3980), True, 'import tensorflow as tf\n'), ((4013, 4035), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (4033, 4035), True, 'import tensorflow as tf\n'), ((4055, 4123), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['"""/home/ubuntu/deep-scheduler/log"""', 'sess.graph'], {}), "('/home/ubuntu/deep-scheduler/log', sess.graph)\n", (4076, 4123), True, 'import tensorflow as tf\n'), ((105, 153), 'tensorflow.feature_column.numeric_column', 'tf.feature_column.numeric_column', (['"""x"""'], {'shape': '[1]'}), "('x', shape=[1])\n", (137, 153), True, 'import tensorflow as tf\n'), ((1355, 1382), 'tensorflow.square', 'tf.square', (['(linear_model - y)'], {}), '(linear_model - y)\n', (1364, 1382), True, 'import tensorflow as tf\n'), ((1608, 1764), 'tensorflow.gradients', 'tf.gradients', (['linear_model', '[W, b]'], {'grad_ys': 'None', 'name': '"""gradients"""', 'colocate_gradients_with_ops': '(False)', 'gate_gradients': '(False)', 'aggregation_method': 'None'}), "(linear_model, [W, b], grad_ys=None, name='gradients',\n colocate_gradients_with_ops=False, gate_gradients=False,\n aggregation_method=None)\n", (1620, 1764), True, 'import tensorflow as tf\n'), ((2188, 2212), 'tensorflow.name_scope', 'tf.name_scope', (['"""hidden1"""'], {}), "('hidden1')\n", (2201, 2212), True, 'import tensorflow as tf\n'), ((2369, 2405), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'w'], {}), "('histogram', w)\n", (2389, 2405), True, 'import tensorflow as tf\n'), ((2528, 2552), 'tensorflow.name_scope', 'tf.name_scope', (['"""hidden2"""'], {}), "('hidden2')\n", (2541, 2552), True, 'import tensorflow as tf\n'), ((2713, 2749), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'w'], {}), "('histogram', w)\n", (2733, 2749), True, 'import tensorflow as tf\n'), ((2871, 2894), 'tensorflow.name_scope', 'tf.name_scope', (['"""output"""'], {}), "('output')\n", (2884, 2894), True, 'import tensorflow as tf\n'), ((3050, 3086), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'w'], {}), "('histogram', w)\n", (3070, 3086), True, 'import tensorflow as tf\n'), ((3856, 3881), 'tensorflow.argmax', 'tf.argmax', (['output'], {'axis': '(1)'}), '(output, axis=1)\n', (3865, 3881), True, 'import tensorflow as tf\n'), ((3883, 3910), 'tensorflow.argmax', 'tf.argmax', (['input_ph'], {'axis': '(1)'}), '(input_ph, axis=1)\n', (3892, 3910), True, 'import tensorflow as tf\n'), ((4162, 4187), 'numpy.zeros', 'np.zeros', (['(1, input_size)'], {}), '((1, input_size))\n', (4170, 4187), True, 'import numpy as np\n'), ((4435, 4446), 'time.time', 'time.time', ([], {}), '()\n', (4444, 4446), False, 'import math, time, random\n'), ((5322, 5390), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""hidden1"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden1')\n", (5339, 5390), True, 'import tensorflow as tf\n'), 
((5408, 5476), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""hidden2"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='hidden2')\n", (5425, 5476), True, 'import tensorflow as tf\n'), ((2426, 2450), 'tensorflow.zeros', 'tf.zeros', (['[hidden1_size]'], {}), '([hidden1_size])\n', (2434, 2450), True, 'import tensorflow as tf\n'), ((2770, 2794), 'tensorflow.zeros', 'tf.zeros', (['[hidden2_size]'], {}), '([hidden2_size])\n', (2778, 2794), True, 'import tensorflow as tf\n'), ((3107, 3130), 'tensorflow.zeros', 'tf.zeros', (['[output_size]'], {}), '([output_size])\n', (3115, 3130), True, 'import tensorflow as tf\n'), ((4578, 4589), 'time.time', 'time.time', ([], {}), '()\n', (4587, 4589), False, 'import math, time, random\n'), ((5843, 5875), 'tensorflow.gradients', 'tf.gradients', (['output', 'hidden2_vs'], {}), '(output, hidden2_vs)\n', (5855, 5875), True, 'import tensorflow as tf\n'), ((2493, 2515), 'tensorflow.matmul', 'tf.matmul', (['input_ph', 'w'], {}), '(input_ph, w)\n', (2502, 2515), True, 'import tensorflow as tf\n'), ((2837, 2858), 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'w'], {}), '(hidden1, w)\n', (2846, 2858), True, 'import tensorflow as tf\n'), ((3216, 3237), 'tensorflow.matmul', 'tf.matmul', (['hidden2', 'w'], {}), '(hidden2, w)\n', (3225, 3237), True, 'import tensorflow as tf\n'), ((3490, 3504), 'tensorflow.log', 'tf.log', (['output'], {}), '(output)\n', (3496, 3504), True, 'import tensorflow as tf\n'), ((4204, 4237), 'random.randint', 'random.randint', (['(0)', '(input_size - 1)'], {}), '(0, input_size - 1)\n', (4218, 4237), False, 'import math, time, random\n'), ((5930, 5954), 'numpy.ones', 'np.ones', (['(1, input_size)'], {}), '((1, input_size))\n', (5937, 5954), True, 'import numpy as np\n')] |
#! /usr/bin/python3
from numpy import array, zeros, mean, eye, squeeze, newaxis
from numpy.random import rand
from scipy.linalg import cho_factor, cho_solve, norm
from mat_util import load, save, save_text
from matplotlib import pyplot as plt
def shrinkage(a, k):
return (abs(a-k)-abs(a+k))/2 + a
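# Added sanity check: shrinkage() is the soft-thresholding operator
# sign(a)*max(|a|-k, 0), e.g. shrinkage(2, 1) == 1, shrinkage(-2, 1) == -1
# and shrinkage(0.5, 1) == 0.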
newmat = False
use_cholesky = True # Helper processes perform Cholesky factorization
datadir = 'data'
# for this implementation to make sense
# we need a tall thin matrix.
if newmat:
m = 1000
n = 15
A = rand(m,n)
b = rand(m,1)
save('A',A,datadir)
save('b',b,datadir)
save_text('A',A,datadir)
save_text('b',b,datadir)
else:
A = load('A',datadir)
b = load('b',datadir)
m,n = A.shape
print("m = {}, n = {}".format(m,n))
p = 50 # the number of pieces (helper processes)
lam = 10 # the weight on 1-norm of x
rho = 100
# split the original matrix and store cholesky-factored (Ai'*Ai + rho*I)
# matrices as well as b and u vectors for use by the p processes.
# todo: pre-process the original matrix so that each process
# just needs to load its own data from disk
alst = []
rlst = []
blst = []
for i in range(p):
    l = m//p  # rows per piece (constant; any remainder rows m % p are dropped)
Ai = A[i*l:(i+1)*l,:]
alst.append(Ai)
rlst.append(cho_factor(Ai.T.dot(Ai) + rho*eye(n)))
blst.append(b[i*l:(i+1)*l])
xs = zeros((n,p))
us = zeros((n,p))
curz = zeros(n)
# Main algorithm
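# For reference (added), the consensus-ADMM updates implemented below are
#   x_j <- (A_j'A_j + rho*I)^{-1} (A_j'b_j + rho*(z - u_j))   (local solves)
#   z   <- shrinkage(mean(x) + mean(u), lam/(rho*p))          (consensus)
#   u_j <- u_j + x_j - z                                      (dual update)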
iters=50
fs = zeros(iters)
nxs = zeros(iters)
for i in range(iters):
for j in range(p):
zj = curz
uj = us[:,j]
rj = rlst[j]
bj = blst[j]
aj = alst[j]
        if use_cholesky:
            # use the per-piece factor rj precomputed above
            xj = cho_solve(rj, squeeze(aj.T.dot(bj)) + rho*(zj-uj))
        else:
            # recompute this piece's factor on the fly (the original code
            # built an unused factor from the full matrix A here)
            r = cho_factor(aj.T.dot(aj) + rho*eye(n))
            xj = cho_solve(r, squeeze(aj.T.dot(bj)) + rho*(zj-uj))
xs[:,j]=xj # insert the x vector processed
xm = mean(xs,1)
um = mean(us,1)
curz = shrinkage(xm + um,lam/rho/p)
us = (us + xs) - curz[:,newaxis]
nx1 = norm(xm,1)
    fs[i] = 0.5*norm(A.dot(xm) - squeeze(b), 2)**2 + lam*nx1  # reuse nx1 computed above
nxs[i] = nx1
plt.plot(fs)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"mat_util.save_text",
"numpy.zeros",
"mat_util.load",
"numpy.mean",
"scipy.linalg.norm",
"numpy.random.rand",
"numpy.eye",
"numpy.squeeze",
"mat_util.save"
] | [((1326, 1339), 'numpy.zeros', 'zeros', (['(n, p)'], {}), '((n, p))\n', (1331, 1339), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1344, 1357), 'numpy.zeros', 'zeros', (['(n, p)'], {}), '((n, p))\n', (1349, 1357), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1364, 1372), 'numpy.zeros', 'zeros', (['n'], {}), '(n)\n', (1369, 1372), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1405, 1417), 'numpy.zeros', 'zeros', (['iters'], {}), '(iters)\n', (1410, 1417), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1424, 1436), 'numpy.zeros', 'zeros', (['iters'], {}), '(iters)\n', (1429, 1436), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((2057, 2069), 'matplotlib.pyplot.plot', 'plt.plot', (['fs'], {}), '(fs)\n', (2065, 2069), True, 'from matplotlib import pyplot as plt\n'), ((2070, 2080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2078, 2080), True, 'from matplotlib import pyplot as plt\n'), ((519, 529), 'numpy.random.rand', 'rand', (['m', 'n'], {}), '(m, n)\n', (523, 529), False, 'from numpy.random import rand\n'), ((537, 547), 'numpy.random.rand', 'rand', (['m', '(1)'], {}), '(m, 1)\n', (541, 547), False, 'from numpy.random import rand\n'), ((551, 572), 'mat_util.save', 'save', (['"""A"""', 'A', 'datadir'], {}), "('A', A, datadir)\n", (555, 572), False, 'from mat_util import load, save, save_text\n'), ((575, 596), 'mat_util.save', 'save', (['"""b"""', 'b', 'datadir'], {}), "('b', b, datadir)\n", (579, 596), False, 'from mat_util import load, save, save_text\n'), ((599, 625), 'mat_util.save_text', 'save_text', (['"""A"""', 'A', 'datadir'], {}), "('A', A, datadir)\n", (608, 625), False, 'from mat_util import load, save, save_text\n'), ((628, 654), 'mat_util.save_text', 'save_text', (['"""b"""', 'b', 'datadir'], {}), "('b', b, datadir)\n", (637, 654), False, 'from mat_util import load, save, save_text\n'), ((667, 685), 'mat_util.load', 'load', (['"""A"""', 'datadir'], {}), "('A', datadir)\n", (671, 685), False, 'from mat_util import load, save, save_text\n'), ((693, 711), 'mat_util.load', 'load', (['"""b"""', 'datadir'], {}), "('b', datadir)\n", (697, 711), False, 'from mat_util import load, save, save_text\n'), ((1841, 1852), 'numpy.mean', 'mean', (['xs', '(1)'], {}), '(xs, 1)\n', (1845, 1852), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1861, 1872), 'numpy.mean', 'mean', (['us', '(1)'], {}), '(us, 1)\n', (1865, 1872), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1959, 1970), 'scipy.linalg.norm', 'norm', (['xm', '(1)'], {}), '(xm, 1)\n', (1963, 1970), False, 'from scipy.linalg import cho_factor, cho_solve, norm\n'), ((2028, 2039), 'scipy.linalg.norm', 'norm', (['xm', '(1)'], {}), '(xm, 1)\n', (2032, 2039), False, 'from scipy.linalg import cho_factor, cho_solve, norm\n'), ((1277, 1283), 'numpy.eye', 'eye', (['n'], {}), '(n)\n', (1280, 1283), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1709, 1715), 'numpy.eye', 'eye', (['n'], {}), '(n)\n', (1712, 1715), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((1654, 1660), 'numpy.eye', 'eye', (['n'], {}), '(n)\n', (1657, 1660), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n'), ((2003, 2013), 'numpy.squeeze', 'squeeze', (['b'], {}), '(b)\n', (2010, 2013), False, 'from numpy import array, zeros, mean, eye, squeeze, newaxis\n')] |
import path_magic
from function_space import FunctionSpace
import numpy as np
from mesh import CrazyMesh
from forms import Form
from hodge import hodge
from coboundaries import d
from _assembling import assemble_, integral1d_
from assemble import assemble
import matplotlib.pyplot as plt
import scipy.io
from scipy import sparse
from inner_product import inner
# %% define the exact solution
def pfun(x, y):
return np.sin(np.pi * x) * np.sin(np.pi * y)
def uo_dy(x, y):
return np.pi * np.cos(np.pi * x) * np.sin(np.pi * y)
def uo_dx(x, y):
return -np.pi * np.sin(np.pi * x) * np.cos(np.pi * y)
def ffun(x, y):
return -2 * np.pi**2 * np.sin(np.pi * x) * np.sin(np.pi * y)
# %% define
p = 0
n = 2
c = 0.0
px = py = p
nx = ny = n
print("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
print("Start div grad solver @ p=", px)
print(" @ n=", nx)
print(" @ c=", c)
mesh = CrazyMesh(2, (nx, ny), ((-1, 1), (-1, 1)), c)
xi = eta = np.linspace(-1, 1, int(np.ceil(500 / (nx * ny))) + 1)  # linspace needs an integer sample count
# %% function spaces
func_space_eg0 = FunctionSpace(mesh, '0-ext_gauss', (px, py))
p0 = Form(func_space_eg0)
p0_exact = Form(func_space_eg0)
p0_exact.discretize(pfun)
#p0_exact.reconstruct(xi, eta)
#(x, y), data = p0_exact.export_to_plot()
#plt.contourf(x, y, data)
#plt.title('exact extended gauss 0-form, p0')
#plt.colorbar()
#plt.show()
#func_space_g0 = FunctionSpace(mesh, '0-gauss', (px, py))
# %%
func_space_eg1 = FunctionSpace(mesh, '1-ext_gauss', (px, py))
ui = Form(func_space_eg1)
# %%
func_space_gl1 = FunctionSpace(mesh, '1-lobatto', (px + 1, py + 1), is_inner=False)
uo = Form(func_space_gl1)
# %%
func_space_gl2 = FunctionSpace(mesh, '2-lobatto', (px + 1, py + 1), is_inner=False)
f2 = Form(func_space_gl2)
f2.discretize(ffun, ('gauss', px+5))
# f2.reconstruct(xi, eta)
# (x, y), data = f2.export_to_plot()
# plt.contourf(x, y, data)
# plt.title('exact lobatto 2-form, f2')
# plt.colorbar()
# plt.show()
# %%
#E10 = d(func_space_eg0)[0:ui.basis.num_basis]
#E10_assembled = assemble_(mesh, E10, ui.function_space.dof_map.dof_map_internal,
# p0.function_space.dof_map.dof_map, mode='replace')
## E10 = d(func_space_eg0)
#
#
#Wn0 = p0.basis.wedged(f2.basis)
#Wn0_assembled = assemble_(mesh, Wn0, f2.function_space.dof_map.dof_map,
# p0.function_space.dof_map.dof_map_internal,mode='add')
#
#Mn = inner ( f2.basis, f2.basis)
#Mn_assembled = assemble(Mn, f2.function_space, f2.function_space)
#
#M0 = inner ( p0.basis , p0.basis)
#M0_assembled = assemble(M0, p0.function_space, p0.function_space)
#
#H11 = hodge (func_space_gl1)
#H11_assembled = -assemble(H11, ui.function_space,uo.function_space)
#ui_num_dof_internal = ui.basis.num_basis * ui.mesh.num_elements
p0_num_dof_internal = p0.basis.num_basis * ui.mesh.num_elements
# %% LHS 11
Mnm1 = inner ( uo.basis , uo.basis)
Mnm1_assembled = assemble(Mnm1, uo.function_space, uo.function_space)
# %% LHS 21
W0n = f2.basis.wedged(p0.basis)
W0n_assembled = assemble_(mesh, W0n, p0.function_space.dof_map.dof_map_internal,
f2.function_space.dof_map.dof_map,mode='add')
E21 = d(func_space_gl1)
#E21_assembled = assemble_(mesh, E21, f2.function_space.dof_map.dof_map,
# uo.function_space.dof_map.dof_map, mode='replace')
LHS21_local = W0n.dot(E21)
LHS12_local = LHS21_local.T
LHS12_add = sparse.lil_matrix(( np.shape(LHS21_local)[1], (px+1)*4 ))
Wb = integral1d_(1, ('lobatto_edge',px+1), ('gauss_node', px+1), ('gauss', px+5))
P = (p+1) * (p+2)
Q = (p+1)**2
M = p+1
for i in range(p+1):
for j in range(p+1):
# left
LHS12_add[P+i , j] = - Wb[i,j]
# right
LHS12_add[-p-1+i , M + j] = + Wb[i,j]
# bottom
LHS12_add[i*(p+2) , 2*M+ j] = - Wb[i,j]
# top
LHS12_add[(i+1)*(p+2)-1 , 3*M+ j] = + Wb[i,j]
LHS12_local = sparse.hstack((LHS12_local, LHS12_add))
#print(np.shape(LHS21_local))
#print(np.shape(LHS12_local))
LHS21= assemble_(mesh, LHS21_local, p0.function_space.dof_map.dof_map_internal,
uo.function_space.dof_map.dof_map, mode='add')
LHS12= assemble_(mesh, LHS12_local.toarray(), uo.function_space.dof_map.dof_map,
p0.function_space.dof_map.dof_map, mode='add')
#Hn0 = sparse.linalg.inv(Mn_assembled).dot(Wn0_assembled)
#Hn0 = sparse.linalg.inv(W0n_assembled).dot(M0_assembled)
#f2.cochain = Hn0.dot(p0_exact.cochain_internal)
#f2.reconstruct(xi, eta)
#(x, y), data = f2.export_to_plot()
#plt.contourf(x, y, data)
#plt.title('Hodge p0_exact into lobatto 2form')
#plt.colorbar()
#plt.show()
# %%
# system:
# | Mnm1 (W0n*E21)^T | | uo | | 0 |
# | | | | | |
# | | * | | = | |
# | | | | | |
# | W0n*E21 0 | | p | | W0n*f |
LHS1 = sparse.hstack(( Mnm1_assembled, LHS12 ))
LHS2 = sparse.hstack(( LHS21, sparse.csc_matrix((f2.function_space.num_dof, p0.function_space.num_dof))))
RHS1 = np.zeros(shape=(uo.function_space.num_dof, 1))
RHS2 = W0n_assembled.dot(f2.cochain.reshape((f2.function_space.num_dof, 1)))
# %%
def dof_map_crazy_lobatto_edges(mesh, p):
nx, ny = mesh.n_x, mesh.n_y
global_numbering = np.zeros((nx * ny, 2 * p * (p + 1)), dtype=np.int32)
local_numbering = np.array([int(i) for i in range(2 * p * (p + 1))])
for i in range(nx):
for j in range(ny):
s = j + i * ny
global_numbering[s, :] = local_numbering + 2 * p * (p + 1) * s
interface_edge_pair = np.zeros((((nx - 1) * ny + nx * (ny - 1)) * p, 2), dtype=np.int32)
n = 0
for i in range(nx - 1):
for j in range(ny):
s1 = j + i * ny
s2 = j + (i + 1) * ny
for m in range(p):
interface_edge_pair[n, 0] = global_numbering[s1, p * (p + 1) + p**2 + m]
interface_edge_pair[n, 1] = global_numbering[s2, p * (p + 1) + m]
n += 1
for i in range(nx):
for j in range(ny - 1):
s1 = j + i * ny
s2 = j + 1 + i * ny
for m in range(p):
interface_edge_pair[n, 0] = global_numbering[s1, (m + 1) * (p + 1) - 1]
interface_edge_pair[n, 1] = global_numbering[s2, m * (p + 1)]
n += 1
return interface_edge_pair
interface_edge_pair = dof_map_crazy_lobatto_edges(mesh, px+1)
LItFuo = sparse.lil_matrix(( np.shape( interface_edge_pair )[0], uo.function_space.num_dof + p0.function_space.num_dof ) )
RItFuo = np.zeros( shape = ( np.shape( interface_edge_pair )[0], 1) )
for i in range( np.shape( interface_edge_pair )[0] ):
LItFuo[i, interface_edge_pair[i, 0]] = 1
LItFuo[i, interface_edge_pair[i, 1]] = -1
# %%
def CrazyMesh_2d_extended_gauss0_general_boundary_nodes(mesh, p, gathering_matrix):
p += 1
nx = mesh.n_x
ny = mesh.n_y
Left = np.zeros(shape=(ny * p), dtype=np.int16)
Right = np.zeros(shape=(ny * p), dtype=np.int16)
Bottom = np.zeros(shape=(nx * p), dtype=np.int16)
Top = np.zeros(shape=(nx * p), dtype=np.int16)
for J in range(ny):
eleidLeft = J
Left[J * p: J * p + p] = gathering_matrix[eleidLeft, p**2: p**2 + p]
eleidRight = (nx - 1) * ny + J
Right[J * p: J * p + p] = gathering_matrix[eleidRight, p**2 + p: p**2 + 2 * p]
for I in range(nx):
eleidBottom = I * ny
Bottom[I * p: I * p + p] = gathering_matrix[eleidBottom, p**2 + 2 * p: p**2 + 3 * p]
eleidTop = I * ny + ny - 1
Top[I * p: I * p + p] = gathering_matrix[eleidTop, p**2 + 3 * p: p**2 + 4 * p]
return Left, Right, Bottom, Top
Left, Right, Bottom, Top = CrazyMesh_2d_extended_gauss0_general_boundary_nodes(
mesh, px, p0.function_space.dof_map.dof_map)
Boundarypoint = np.hstack((Left, Right, Bottom, Top))
# LBCphi = np.zeros(shape=(np.size(Boundarypoint), ui_num_dof_internal +
# uo.function_space.num_dof + p0.function_space.num_dof))
LBCphi = sparse.lil_matrix((np.size(Boundarypoint), uo.function_space.num_dof + p0.function_space.num_dof))
RBCphi = np.zeros(shape=(np.size(Boundarypoint), 1))
#for i in range(np.size(Boundarypoint)):
# LBCphi[i, uo.function_space.num_dof + Boundarypoint[i]] = 1
# RBCphi[i] = p0_exact.cochain[Boundarypoint[i]]
i = 0
for j in range(np.size(Left)):  # assumes nx == ny, so Left/Right/Bottom/Top all have the same length
LBCphi[i, uo.function_space.num_dof + Left[j]] = - 1
RBCphi[i] = p0_exact.cochain[Left[j]]
i += 1
LBCphi[i, uo.function_space.num_dof + Right[j]] = + 1
RBCphi[i] = p0_exact.cochain[Right[j]]
i += 1
LBCphi[i, uo.function_space.num_dof + Bottom[j]] = - 1
RBCphi[i] = p0_exact.cochain[Bottom[j]]
i += 1
LBCphi[i, uo.function_space.num_dof + Top[j]] = + 1
RBCphi[i] = p0_exact.cochain[Top[j]]
i += 1
# %%
LHS = sparse.vstack((LHS1, LHS2, LItFuo, LBCphi))
RHS = np.vstack((RHS1, RHS2, RItFuo, RBCphi))
# %%
print("----------------------------------------------------")
print("LHS shape:", np.shape(LHS))
#
LHS = sparse.csr_matrix(LHS)
print("------ solve the square sparse system:......")
Res = sparse.linalg.spsolve(LHS,RHS)
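# Optional residual check (added illustration; uses only imports already
# present in this script):
# print("solver residual:", np.linalg.norm(LHS.dot(Res) - RHS.ravel()))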
# %% split into pieces
uo.cochain = Res[:uo.function_space.num_dof ].reshape(uo.function_space.num_dof)
#p0_cochain_internal = Res[-p0_num_dof_internal:].reshape(p0_num_dof_internal)
#p0.cochain = np.concatenate((p0_cochain_internal,np.zeros(p0.function_space.num_dof-p0_num_dof_internal)),axis=0)
p0.cochain = Res[-p0.function_space.num_dof:].reshape(p0.function_space.num_dof)
# %% plot the solution
p0.reconstruct(xi, eta)
(x, y), data_p0 = p0.export_to_plot()
plt.contourf(x, y, data_p0)
plt.title('solution extended gauss 0-form, p0')
plt.colorbar()
plt.show()
uo.reconstruct(xi, eta)
(x, y), data_dx, data_dy = uo.export_to_plot()
plt.contourf(x, y, data_dx)
plt.title('solution lobatto 1-form dx')
plt.colorbar()
plt.show()
plt.contourf(x, y, data_dy)
plt.title('solution lobatto 1-form dy')
plt.colorbar()
plt.show()
# %% L2_error
L2_error_p0 = p0.l_2_norm(pfun, ('gauss', px + 5))[0]
L2_error_uo = uo.l_2_norm((uo_dx, uo_dy), ('lobatto', px + 5))[0]
print("------ L2_error_p0 =", L2_error_p0)
print("------ L2_error_uo =", L2_error_uo)
print("vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv\n") | [
"matplotlib.pyplot.title",
"numpy.shape",
"numpy.sin",
"matplotlib.pyplot.contourf",
"_assembling.integral1d_",
"inner_product.inner",
"matplotlib.pyplot.colorbar",
"scipy.sparse.linalg.spsolve",
"numpy.size",
"matplotlib.pyplot.show",
"numpy.ceil",
"mesh.CrazyMesh",
"_assembling.assemble_",... | [((938, 983), 'mesh.CrazyMesh', 'CrazyMesh', (['(2)', '(nx, ny)', '((-1, 1), (-1, 1))', 'c'], {}), '(2, (nx, ny), ((-1, 1), (-1, 1)), c)\n', (947, 983), False, 'from mesh import CrazyMesh\n'), ((1082, 1126), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""0-ext_gauss"""', '(px, py)'], {}), "(mesh, '0-ext_gauss', (px, py))\n", (1095, 1126), False, 'from function_space import FunctionSpace\n'), ((1138, 1158), 'forms.Form', 'Form', (['func_space_eg0'], {}), '(func_space_eg0)\n', (1142, 1158), False, 'from forms import Form\n'), ((1170, 1190), 'forms.Form', 'Form', (['func_space_eg0'], {}), '(func_space_eg0)\n', (1174, 1190), False, 'from forms import Form\n'), ((1470, 1514), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-ext_gauss"""', '(px, py)'], {}), "(mesh, '1-ext_gauss', (px, py))\n", (1483, 1514), False, 'from function_space import FunctionSpace\n'), ((1520, 1540), 'forms.Form', 'Form', (['func_space_eg1'], {}), '(func_space_eg1)\n', (1524, 1540), False, 'from forms import Form\n'), ((1563, 1629), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""1-lobatto"""', '(px + 1, py + 1)'], {'is_inner': '(False)'}), "(mesh, '1-lobatto', (px + 1, py + 1), is_inner=False)\n", (1576, 1629), False, 'from function_space import FunctionSpace\n'), ((1635, 1655), 'forms.Form', 'Form', (['func_space_gl1'], {}), '(func_space_gl1)\n', (1639, 1655), False, 'from forms import Form\n'), ((1678, 1744), 'function_space.FunctionSpace', 'FunctionSpace', (['mesh', '"""2-lobatto"""', '(px + 1, py + 1)'], {'is_inner': '(False)'}), "(mesh, '2-lobatto', (px + 1, py + 1), is_inner=False)\n", (1691, 1744), False, 'from function_space import FunctionSpace\n'), ((1750, 1770), 'forms.Form', 'Form', (['func_space_gl2'], {}), '(func_space_gl2)\n', (1754, 1770), False, 'from forms import Form\n'), ((2958, 2983), 'inner_product.inner', 'inner', (['uo.basis', 'uo.basis'], {}), '(uo.basis, uo.basis)\n', (2963, 2983), False, 'from inner_product import inner\n'), ((3019, 3071), 'assemble.assemble', 'assemble', (['Mnm1', 'uo.function_space', 'uo.function_space'], {}), '(Mnm1, uo.function_space, uo.function_space)\n', (3027, 3071), False, 'from assemble import assemble\n'), ((3143, 3259), '_assembling.assemble_', 'assemble_', (['mesh', 'W0n', 'p0.function_space.dof_map.dof_map_internal', 'f2.function_space.dof_map.dof_map'], {'mode': '"""add"""'}), "(mesh, W0n, p0.function_space.dof_map.dof_map_internal, f2.\n function_space.dof_map.dof_map, mode='add')\n", (3152, 3259), False, 'from _assembling import assemble_, integral1d_\n'), ((3297, 3314), 'coboundaries.d', 'd', (['func_space_gl1'], {}), '(func_space_gl1)\n', (3298, 3314), False, 'from coboundaries import d\n'), ((3603, 3691), '_assembling.integral1d_', 'integral1d_', (['(1)', "('lobatto_edge', px + 1)", "('gauss_node', px + 1)", "('gauss', px + 5)"], {}), "(1, ('lobatto_edge', px + 1), ('gauss_node', px + 1), ('gauss', \n px + 5))\n", (3614, 3691), False, 'from _assembling import assemble_, integral1d_\n'), ((4063, 4102), 'scipy.sparse.hstack', 'sparse.hstack', (['(LHS12_local, LHS12_add)'], {}), '((LHS12_local, LHS12_add))\n', (4076, 4102), False, 'from scipy import sparse\n'), ((4171, 4295), '_assembling.assemble_', 'assemble_', (['mesh', 'LHS21_local', 'p0.function_space.dof_map.dof_map_internal', 'uo.function_space.dof_map.dof_map'], {'mode': '"""add"""'}), "(mesh, LHS21_local, p0.function_space.dof_map.dof_map_internal, uo\n .function_space.dof_map.dof_map, mode='add')\n", (4180, 4295), False, 
'from _assembling import assemble_, integral1d_\n'), ((5096, 5134), 'scipy.sparse.hstack', 'sparse.hstack', (['(Mnm1_assembled, LHS12)'], {}), '((Mnm1_assembled, LHS12))\n', (5109, 5134), False, 'from scipy import sparse\n'), ((5253, 5299), 'numpy.zeros', 'np.zeros', ([], {'shape': '(uo.function_space.num_dof, 1)'}), '(shape=(uo.function_space.num_dof, 1))\n', (5261, 5299), True, 'import numpy as np\n'), ((8067, 8104), 'numpy.hstack', 'np.hstack', (['(Left, Right, Bottom, Top)'], {}), '((Left, Right, Bottom, Top))\n', (8076, 8104), True, 'import numpy as np\n'), ((9099, 9142), 'scipy.sparse.vstack', 'sparse.vstack', (['(LHS1, LHS2, LItFuo, LBCphi)'], {}), '((LHS1, LHS2, LItFuo, LBCphi))\n', (9112, 9142), False, 'from scipy import sparse\n'), ((9153, 9192), 'numpy.vstack', 'np.vstack', (['(RHS1, RHS2, RItFuo, RBCphi)'], {}), '((RHS1, RHS2, RItFuo, RBCphi))\n', (9162, 9192), True, 'import numpy as np\n'), ((9308, 9330), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['LHS'], {}), '(LHS)\n', (9325, 9330), False, 'from scipy import sparse\n'), ((9391, 9422), 'scipy.sparse.linalg.spsolve', 'sparse.linalg.spsolve', (['LHS', 'RHS'], {}), '(LHS, RHS)\n', (9412, 9422), False, 'from scipy import sparse\n'), ((9888, 9915), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'data_p0'], {}), '(x, y, data_p0)\n', (9900, 9915), True, 'import matplotlib.pyplot as plt\n'), ((9916, 9963), 'matplotlib.pyplot.title', 'plt.title', (['"""solution extended gauss 0-form, p0"""'], {}), "('solution extended gauss 0-form, p0')\n", (9925, 9963), True, 'import matplotlib.pyplot as plt\n'), ((9964, 9978), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (9976, 9978), True, 'import matplotlib.pyplot as plt\n'), ((9979, 9989), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9987, 9989), True, 'import matplotlib.pyplot as plt\n'), ((10062, 10089), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'data_dx'], {}), '(x, y, data_dx)\n', (10074, 10089), True, 'import matplotlib.pyplot as plt\n'), ((10090, 10129), 'matplotlib.pyplot.title', 'plt.title', (['"""solution lobatto 1-form dx"""'], {}), "('solution lobatto 1-form dx')\n", (10099, 10129), True, 'import matplotlib.pyplot as plt\n'), ((10130, 10144), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10142, 10144), True, 'import matplotlib.pyplot as plt\n'), ((10145, 10155), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10153, 10155), True, 'import matplotlib.pyplot as plt\n'), ((10157, 10184), 'matplotlib.pyplot.contourf', 'plt.contourf', (['x', 'y', 'data_dy'], {}), '(x, y, data_dy)\n', (10169, 10184), True, 'import matplotlib.pyplot as plt\n'), ((10185, 10224), 'matplotlib.pyplot.title', 'plt.title', (['"""solution lobatto 1-form dy"""'], {}), "('solution lobatto 1-form dy')\n", (10194, 10224), True, 'import matplotlib.pyplot as plt\n'), ((10225, 10239), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10237, 10239), True, 'import matplotlib.pyplot as plt\n'), ((10240, 10250), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10248, 10250), True, 'import matplotlib.pyplot as plt\n'), ((5480, 5532), 'numpy.zeros', 'np.zeros', (['(nx * ny, 2 * p * (p + 1))'], {'dtype': 'np.int32'}), '((nx * ny, 2 * p * (p + 1)), dtype=np.int32)\n', (5488, 5532), True, 'import numpy as np\n'), ((5786, 5852), 'numpy.zeros', 'np.zeros', (['(((nx - 1) * ny + nx * (ny - 1)) * p, 2)'], {'dtype': 'np.int32'}), '((((nx - 1) * ny + nx * (ny - 1)) * p, 2), dtype=np.int32)\n', (5794, 5852), True, 'import 
numpy as np\n'), ((7134, 7172), 'numpy.zeros', 'np.zeros', ([], {'shape': '(ny * p)', 'dtype': 'np.int16'}), '(shape=ny * p, dtype=np.int16)\n', (7142, 7172), True, 'import numpy as np\n'), ((7187, 7225), 'numpy.zeros', 'np.zeros', ([], {'shape': '(ny * p)', 'dtype': 'np.int16'}), '(shape=ny * p, dtype=np.int16)\n', (7195, 7225), True, 'import numpy as np\n'), ((7241, 7279), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nx * p)', 'dtype': 'np.int16'}), '(shape=nx * p, dtype=np.int16)\n', (7249, 7279), True, 'import numpy as np\n'), ((7292, 7330), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nx * p)', 'dtype': 'np.int16'}), '(shape=nx * p, dtype=np.int16)\n', (7300, 7330), True, 'import numpy as np\n'), ((8608, 8621), 'numpy.size', 'np.size', (['Left'], {}), '(Left)\n', (8615, 8621), True, 'import numpy as np\n'), ((9281, 9294), 'numpy.shape', 'np.shape', (['LHS'], {}), '(LHS)\n', (9289, 9294), True, 'import numpy as np\n'), ((419, 436), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (425, 436), True, 'import numpy as np\n'), ((439, 456), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (445, 456), True, 'import numpy as np\n'), ((514, 531), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (520, 531), True, 'import numpy as np\n'), ((590, 607), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (596, 607), True, 'import numpy as np\n'), ((672, 689), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (678, 689), True, 'import numpy as np\n'), ((1014, 1038), 'numpy.ceil', 'np.ceil', (['(500 / (nx * ny))'], {}), '(500 / (nx * ny))\n', (1021, 1038), True, 'import numpy as np\n'), ((5169, 5242), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(f2.function_space.num_dof, p0.function_space.num_dof)'], {}), '((f2.function_space.num_dof, p0.function_space.num_dof))\n', (5186, 5242), False, 'from scipy import sparse\n'), ((6847, 6876), 'numpy.shape', 'np.shape', (['interface_edge_pair'], {}), '(interface_edge_pair)\n', (6855, 6876), True, 'import numpy as np\n'), ((8295, 8317), 'numpy.size', 'np.size', (['Boundarypoint'], {}), '(Boundarypoint)\n', (8302, 8317), True, 'import numpy as np\n'), ((494, 511), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (500, 511), True, 'import numpy as np\n'), ((570, 587), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (576, 587), True, 'import numpy as np\n'), ((652, 669), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (658, 669), True, 'import numpy as np\n'), ((3560, 3581), 'numpy.shape', 'np.shape', (['LHS21_local'], {}), '(LHS21_local)\n', (3568, 3581), True, 'import numpy as np\n'), ((6666, 6695), 'numpy.shape', 'np.shape', (['interface_edge_pair'], {}), '(interface_edge_pair)\n', (6674, 6695), True, 'import numpy as np\n'), ((8400, 8422), 'numpy.size', 'np.size', (['Boundarypoint'], {}), '(Boundarypoint)\n', (8407, 8422), True, 'import numpy as np\n'), ((6790, 6819), 'numpy.shape', 'np.shape', (['interface_edge_pair'], {}), '(interface_edge_pair)\n', (6798, 6819), True, 'import numpy as np\n')] |
import numpy as np
from ddCRP import statistics
from sklearn.metrics import normalized_mutual_info_score as NMI
def test_normalize():
"""
Test to check feature normalization.
"""
data = np.asarray(
[[-1, 0, 1],
[1, 2, 3]])
mu = data.mean(0)
stdev = data.std(0)
expected = (data-mu[None, :]) / stdev[None, :]
actual = statistics.Normalize(data)
    # elementwise array comparison is ambiguous in a bare assert
    assert np.allclose(expected, actual)
def test_NMI():
label_true = np.asarray([0, 1, 2, 3])
label_pred = np.asarray([0, 1, 2, 3])
expected = NMI(label_true, label_pred)
actual = statistics.NMI(label_true, label_pred)
assert expected == actual
label_true = np.asarray([0, 0, 0, 3])
label_pred = np.asarray([0, 2, 2, 2])
expected = NMI(label_true, label_pred)
actual = statistics.NMI(label_true, label_pred)
assert expected == actual
| [
"ddCRP.statistics.NMI",
"numpy.asarray",
"ddCRP.statistics.Normalize",
"sklearn.metrics.normalized_mutual_info_score"
] | [((206, 241), 'numpy.asarray', 'np.asarray', (['[[-1, 0, 1], [1, 2, 3]]'], {}), '([[-1, 0, 1], [1, 2, 3]])\n', (216, 241), True, 'import numpy as np\n'), ((383, 409), 'ddCRP.statistics.Normalize', 'statistics.Normalize', (['data'], {}), '(data)\n', (403, 409), False, 'from ddCRP import statistics\n'), ((477, 501), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (487, 501), True, 'import numpy as np\n'), ((519, 543), 'numpy.asarray', 'np.asarray', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (529, 543), True, 'import numpy as np\n'), ((560, 587), 'sklearn.metrics.normalized_mutual_info_score', 'NMI', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (563, 587), True, 'from sklearn.metrics import normalized_mutual_info_score as NMI\n'), ((601, 639), 'ddCRP.statistics.NMI', 'statistics.NMI', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (615, 639), False, 'from ddCRP import statistics\n'), ((689, 713), 'numpy.asarray', 'np.asarray', (['[0, 0, 0, 3]'], {}), '([0, 0, 0, 3])\n', (699, 713), True, 'import numpy as np\n'), ((731, 755), 'numpy.asarray', 'np.asarray', (['[0, 2, 2, 2]'], {}), '([0, 2, 2, 2])\n', (741, 755), True, 'import numpy as np\n'), ((772, 799), 'sklearn.metrics.normalized_mutual_info_score', 'NMI', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (775, 799), True, 'from sklearn.metrics import normalized_mutual_info_score as NMI\n'), ((813, 851), 'ddCRP.statistics.NMI', 'statistics.NMI', (['label_true', 'label_pred'], {}), '(label_true, label_pred)\n', (827, 851), False, 'from ddCRP import statistics\n')] |
import numpy as np
# TODO: update graph representation
def component_idx(tile_rank, idx):
"""
Inputs:
_______
tile_rank : int
Outputs:
_______
range : np.array (L,)
Vector with
"""
init = tile_rank[:idx].sum()
length = tile_rank[idx]
return np.arange(init, init + length, dtype='int')
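# Added example: component_idx(np.array([2, 3, 1]), 1) returns
# array([2, 3, 4]) -- the global indices of the 3 components in tile 1.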
def neigh_components(neigh_tiles, rank_tiles):
"""
Visualization
Inputs:
_______
Outputs:
_______
"""
twin_components = []
breaks = [0]
for idx in neigh_tiles:
twin_components = np.concatenate([twin_components,
component_idx(rank_tiles, idx)])
# print(component_idx(rank_tiles,idx))
breaks.append(component_idx(rank_tiles, idx).shape[0])
# print(component_idx(rank_tiles,idx).shape[0])
return twin_components.astype('int'), breaks
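# Added example: neigh_components([0, 2], np.array([2, 3, 1])) returns
# (array([0, 1, 5]), [0, 2, 1]) -- the components of tiles 0 and 2 plus
# the per-tile counts.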
def rank_tile(block_ranks, nblocks):
"""
Extract rank for a given input
Inputs:
_______
block_ranks :
nblocks :
Outputs:
_______
rank_tiles :
"""
# def overlapping_rank_reformat(block_ranks,nblocks):
d1, d2 = nblocks[:2]
num_no_offset = int(d1*d2)
num_row_offset = int((d1-1)*d2)
num_col_offset = int(d1*(d2-1))
num_diag_offset = int((d1-1)*(d2-1))
num_row_half_tiles = 2*d2
num_col_half_tiles = 2*d1
num_diag_rhalf_tiles = d1*2-2
num_diag_chalf_tiles = d2*2-2
num_diag_quarter_tiles = 4
num_total_tiles = num_no_offset + num_row_offset
num_total_tiles += num_diag_offset + num_row_half_tiles
num_total_tiles += num_col_offset + num_col_half_tiles
num_total_tiles += num_diag_rhalf_tiles + num_diag_chalf_tiles
num_total_tiles += num_diag_quarter_tiles
rank_tiles = np.zeros((num_total_tiles,))
cumsum = 0
for tile_count, tile_name in zip([num_no_offset,
num_row_offset,
num_row_half_tiles,
num_col_offset,
num_col_half_tiles,
num_diag_offset,
num_diag_rhalf_tiles,
num_diag_chalf_tiles,
num_diag_quarter_tiles],
[('no_skew', 'full'),
('vert_skew', 'full'),
('vert_skew', 'half'),
('horz_skew', 'full'),
('horz_skew', 'half'),
('diag_skew', 'full'),
('diag_skew', 'thalf'),
('diag_skew', 'whalf'),
('diag_skew', 'quarter'),
]):
# -- update half skew where components are added in different order
tmp_rank = block_ranks[tile_name[0]][tile_name[1]]
if tile_name[1] == 'half' and tile_name[0] == 'vert_skew':
rank_tiles[cumsum:cumsum + tile_count//2] = tmp_rank[::2]
rank_tiles[cumsum + tile_count//2: cumsum + tile_count] = tmp_rank[1::2]
# -- update not half skew where components are added in some order
elif tile_name[1] == 'whalf':
rank_tiles[cumsum:cumsum + tile_count//2] = tmp_rank[::2]
rank_tiles[cumsum + tile_count//2: cumsum +
tile_count] = tmp_rank[1::2]
else:
rank_tiles[cumsum:cumsum + tile_count] = tmp_rank
        cumsum += len(tmp_rank)  # equals tile_count
return rank_tiles
def cx_blocks(nblocks):
"""
Column major ordering for full offset matrices
Row major ordering for half offset matrices
Build test cases from which to create connectivity graph
Inputs:
________
nblocks : (d1,d2) tuple
indicated the dimensions over which a block was partitioned
Outputs:
________
M : (d1,d2) numpy array
indices of tiles for no skew matrix
Mr : (d1,d2) numpy array
indices of tiles for vert_skew full matrix
Mrh : (d1,d2) numpy array
indices of tiles for vert_skew half matrix
Mc : (d1,d2) numpy array
indices of tiles for vert_skew matrix
Mch : (d1,d2) numpy array
indices of tiles for vert_skew half matrix
Md : (d1,d2) numpy array
indices of tiles for diag_skew full matrix
Mdr : (d1,d2) numpy array
indices of tiles for diag_skew thalf matrix
Mdc : (d1,d2) numpy array
indices of tiles for diag_skew whalf matrix
Mdh : (d1,d2) numpy array
indices of tiles for diag_skew quarter matrix
"""
d1, d2 = nblocks[:2]
base = np.arange(1, d1*d2+1).reshape(d2, d1).T
base_r = np.arange(1, d2*(d1-1)+1).reshape(d2, d1-1).T
base_c = np.arange(1, d1*(d2-1)+1).reshape(d2-1, d1).T
base_rc = np.arange(1, (d1-1)*(d2-1)+1).reshape(d2-1, d1-1).T
M = np.repeat(np.repeat(base, 2, axis=1), 2, axis=0)
Mr = np.zeros(M.shape)
Mc = np.zeros(M.shape)
Md = np.zeros(M.shape)
Mr[1:-1, :] = np.repeat(np.repeat(base_r, 2, axis=1), 2, axis=0)
Mc[:, 1:-1] = np.repeat(np.repeat(base_c, 2, axis=1), 2, axis=0)
Md[1:-1, 1:-1] = np.repeat(np.repeat(base_rc, 2, axis=1), 2, axis=0)
# --- overcomplete tiles in row order
Mrh = np.zeros(M.shape)
Mch = np.zeros(M.shape)
Mdr = np.zeros(M.shape)
Mdc = np.zeros(M.shape)
Mdh = np.zeros(M.shape)
Mrh[0, :] = np.repeat(np.arange(1, d2+1), 2, axis=0)
Mrh[-1, :] = np.repeat(np.arange(d2+1, d2*2+1), 2, axis=0)
Mch[:, 0] = np.repeat(np.arange(1, d1+1), 2, axis=0)
Mch[:, -1] = np.repeat(np.arange(d1+1, d1*2+1), 2, axis=0)
# Mdr[0, 1:-1] = np.repeat(np.arange(1, d2*2-1)[::2], 2, axis=0)
# Mdr[-1, 1:-1] = np.repeat(np.arange(1, d2*2-1)[1::2], 2, axis=0)
Mdr[1:-1, 0] = np.repeat(np.arange(1, d1), 2, axis=0)
Mdr[1:-1, -1] = np.repeat(np.arange(d1, 2*d1-1), 2, axis=0)
# Mdc[1:-1, 0] = np.repeat(np.arange(1, d1*2-1)[::2], 2, axis=0)
# Mdc[1:-1, -1] = np.repeat(np.arange(1, d1*2-1)[1::2], 2, axis=0)
Mdc[0, 1:-1] = np.repeat(np.arange(1, d2), 2, axis=0)
Mdc[-1, 1:-1] = np.repeat(np.arange(d2, 2*d2-1), 2, axis=0)
Mdh[0, 0] = 1
Mdh[-1, 0] = 2
Mdh[0, -1] = 3
Mdh[-1, -1] = 4
# --- exclude 0 replace by nan
Mr[Mr == 0] = np.nan
Mc[Mc == 0] = np.nan
Md[Md == 0] = np.nan
Mrh[Mrh == 0] = np.nan
Mch[Mch == 0] = np.nan
Mdr[Mdr == 0] = np.nan
Mdc[Mdc == 0] = np.nan
Mdh[Mdh == 0] = np.nan
# --- python 0 indexing
M -= 1
Mr -= 1
Mc -= 1
Md -= 1
Mrh -= 1
Mch -= 1
Mdr -= 1
Mdc -= 1
Mdh -= 1
return M, Mr, Mrh, Mc, Mch, Md, Mdr, Mdc, Mdh
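# Added example: for nblocks=(2, 2) the column-major no-skew map is
# M = [[0., 0., 2., 2.],
#      [0., 0., 2., 2.],
#      [1., 1., 3., 3.],
#      [1., 1., 3., 3.]]
# while e.g. Md holds its single diagonal-offset tile (index 0) in the
# 2x2 interior, framed by nan borders.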
def rx_blocks(nblocks):
"""
Row major ordering
Inputs:
________
nblocks : (d1,d2) tuple
indicated the dimensions over which a block was partitioned
Outputs:
________
M : (d1,d2) numpy array
indices of tiles for no skew matrix
Mr : (d1,d2) numpy array
indices of tiles for vert_skew full matrix
Mrh : (d1,d2) numpy array
indices of tiles for vert_skew half matrix
Mc : (d1,d2) numpy array
indices of tiles for vert_skew matrix
Mch : (d1,d2) numpy array
indices of tiles for vert_skew half matrix
Md : (d1,d2) numpy array
indices of tiles for diag_skew full matrix
Mdr : (d1,d2) numpy array
indices of tiles for diag_skew thalf matrix
Mdc : (d1,d2) numpy array
indices of tiles for diag_skew whalf matrix
Mdh : (d1,d2) numpy array
indices of tiles for diag_skew quarter matrix
"""
d1, d2 = nblocks[:2]
base = np.arange(1, d1*d2+1).reshape(d1, d2)
base_c = np.arange(1, d1*(d2-1)+1).reshape(d1, d2-1)
base_rc = np.arange(1, (d1-1)*(d2-1)+1).reshape(d1-1, d2-1)
M = np.repeat(np.repeat(base, 2, axis=1), 2, axis=0)
Mr = np.zeros(M.shape)
Mc = np.zeros(M.shape)
Md = np.zeros(M.shape)
Mr[1:-1, :] = np.repeat(np.repeat(base[:d1-1], 2, axis=1), 2, axis=0)
Mc[:, 1:-1] = np.repeat(np.repeat(base_c, 2, axis=1), 2, axis=0)
Md[1:-1, 1:-1] = np.repeat(np.repeat(base_rc, 2, axis=1), 2, axis=0)
Mrh = np.zeros(M.shape)
Mch = np.zeros(M.shape)
Mdr = np.zeros(M.shape)
Mdc = np.zeros(M.shape)
Mdh = np.zeros(M.shape)
Mrh[0, :] = np.repeat(np.arange(1, d2+1), 2, axis=0)
Mrh[-1, :] = np.repeat(np.arange(d2+1, d2*2+1), 2, axis=0)
Mch[:, 0] = np.repeat(np.arange(1, d1+1), 2, axis=0)
Mch[:, -1] = np.repeat(np.arange(d1+1, d1*2+1), 2, axis=0)
# Mdr[0, 1:-1] = np.repeat(np.arange(1, d2*2-1)[::2], 2, axis=0)
# Mdr[-1, 1:-1] = np.repeat(np.arange(1, d2*2-1)[1::2], 2, axis=0)
    Mdr[1:-1, 0] = np.repeat(np.arange(1, d1), 2, axis=0)
    Mdr[1:-1, -1] = np.repeat(np.arange(d1, 2*d1-1), 2, axis=0)
    # Mdc[1:-1, 0] = np.repeat(np.arange(1, d1*2-1)[::2], 2, axis=0)
    # Mdc[1:-1, -1] = np.repeat(np.arange(1, d1*2-1)[1::2], 2, axis=0)
    Mdc[0, 1:-1] = np.repeat(np.arange(1, d2), 2, axis=0)
    Mdc[-1, 1:-1] = np.repeat(np.arange(d2, 2*d2-1), 2, axis=0)  # was arange(1, 2*d2-1): repeated, that is 2*(2*d2-2) values for 2*d2-2 slots (cf. cx_blocks)
Mdh[0, 0] = 1
Mdh[-1, 0] = 2
Mdh[0, -1] = 3
Mdh[-1, -1] = 4
# --- exclude 0 replace by nan
Mr[Mr == 0] = np.nan
Mc[Mc == 0] = np.nan
Md[Md == 0] = np.nan
Mrh[Mrh == 0] = np.nan
Mch[Mch == 0] = np.nan
Mdr[Mdr == 0] = np.nan
Mdc[Mdc == 0] = np.nan
Mdh[Mdh == 0] = np.nan
# --- python 0 indexing
M -= 1
Mr -= 1
Mc -= 1
Md -= 1
Mrh -= 1
Mch -= 1
Mdr -= 1
Mdc -= 1
Mdh -= 1
return M, Mr, Mrh, Mc, Mch, Md, Mdr, Mdc, Mdh
def rx_neigh(dims, M, Mr):
"""
Inputs:
_______
dims :
M :
Mr :
Outputs:
________
A :
"""
d1, d2 = dims[:2]
A = np.zeros((d1, d2))
for ii in range(d1):
nn = np.unique(M[np.nonzero(Mr == ii)])
nn = nn[~np.isnan(nn)].astype('int')
A[ii, nn] = 1
return A
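# Added example: with M, Mr from cx_blocks((2, 2)),
# rx_neigh((2, 4), M, Mr) marks A[0, [0, 1]] = A[1, [2, 3]] = 1, since each
# row-offset tile straddles two vertically adjacent no-skew tiles.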
def rx_graph(nblocks):
"""
Builds connectivity matrix to find temporal components per pixel
using the pixel components
Inputs:
_______
nblocks :
Outputs:
________
M3 :
"""
d1, d2 = nblocks[:2]
num_no_offset = int(d1*d2)
num_row_offset = int((d1-1)*d2)
num_col_offset = int(d1*(d2-1))
num_diag_offset = int((d1-1)*(d2-1))
num_row_half_tiles = 2*d2
num_col_half_tiles = 2*d1
num_diag_rhalf_tiles = d1*2-2
num_diag_chalf_tiles = d2*2-2
num_diag_quarter_tiles = 4
# -- master graph
M, \
Mr, Mrh, \
Mc, Mch, \
Md, Mdr, Mdc, Mdh = cx_blocks([d1, d2])
# individual graphs
A = rx_neigh((num_row_offset, num_no_offset), M, Mr)
B = rx_neigh((num_col_offset, num_no_offset), M, Mc)
C = rx_neigh((num_diag_offset, num_no_offset), M, Md)
D = rx_neigh((num_col_offset, num_row_offset), Mr, Mc)
E = rx_neigh((num_diag_offset, num_row_offset), Mr, Md)
F = rx_neigh((num_diag_offset, num_col_offset), Mc, Md)
# overcomplete graphs
r1o = rx_neigh((num_row_half_tiles, num_no_offset), M, Mrh)
c1o = rx_neigh((num_col_half_tiles, num_no_offset), M, Mch)
d1o = rx_neigh((num_diag_rhalf_tiles, num_no_offset), M, Mdr)
d2o = rx_neigh((num_diag_chalf_tiles, num_no_offset), M, Mdc)
d3o = rx_neigh((num_diag_quarter_tiles, num_no_offset), M, Mdh)
#
# c1r = rx_neigh((num_col_half_tiles, num_row_offset), Mch, Mch)
# d1r = rx_neigh((num_diag_rhalf_tiles, num_row_offset), Mch, Mdr)
# d2r = rx_neigh((num_diag_chalf_tiles, num_row_offset), Mch, Mdc)
# d3r = rx_neigh((num_diag_quarter_tiles, num_row_offset), Mch, Mdh)
c1r = rx_neigh((num_col_half_tiles, num_row_offset), Mr, Mch)
d1r = rx_neigh((num_diag_rhalf_tiles, num_row_offset), Mr, Mdr)
d2r = rx_neigh((num_diag_chalf_tiles, num_row_offset), Mr, Mdc)
d3r = rx_neigh((num_diag_quarter_tiles, num_row_offset), Mr, Mdh)
#
cr1 = rx_neigh((num_col_offset, num_row_half_tiles), Mrh, Mc)
c1r1 = rx_neigh((num_col_half_tiles, num_row_half_tiles), Mrh, Mch)
dr1 = rx_neigh((num_diag_offset, num_row_half_tiles), Mrh, Md)
d1r1 = rx_neigh((num_diag_rhalf_tiles, num_row_half_tiles), Mrh, Mdr)
d2r1 = rx_neigh((num_diag_chalf_tiles, num_row_half_tiles), Mrh, Mdc)
d3r1 = rx_neigh((num_diag_quarter_tiles, num_row_half_tiles), Mrh, Mdh)
#
d1c = rx_neigh((num_diag_rhalf_tiles, num_col_offset), Mc, Mdr)
d2c = rx_neigh((num_diag_chalf_tiles, num_col_offset), Mc, Mdc)
d3c = rx_neigh((num_diag_quarter_tiles, num_col_offset), Mc, Mdh)
#
dc1 = rx_neigh((num_diag_offset, num_col_half_tiles), Mch, Md)
d1c1 = rx_neigh((num_diag_rhalf_tiles, num_col_half_tiles), Mch, Mdr)
d2c1 = rx_neigh((num_diag_chalf_tiles, num_col_half_tiles), Mch, Mdc)
d3c1 = rx_neigh((num_diag_quarter_tiles, num_col_half_tiles), Mch, Mdh)
# --- merge all blocks into one single array
# | off | row | row1 | col | col1 | diag | diag1 | diag2 | diag3 |
# off | X | A.T | r1o.T| B.T | c1o.T | C.T | d1o.T | d2o.T | d3o.T |
# row | A | X | X | D.T | c1r.T | E.T | d1r.T | d2r.T | d3r.T |
# row1 | r1o | X | X | cr1.T | c1r1.T | dr1.T | d1r1.T | d2r1.T| d3r1.T |
# col | B | D | cr1 | X | X | F.T | d1c.T | d2c.T | d3c.T |
# col1 | c1o | c1r | c1r1 | X | X | dc1.T | d1c1.T | d2c1.T| d3c1.T |
# diag | C | E | dr1 | F | dc1 | X | X | X | X |
# diag1 | d1o | d1r | d1r1 | d1c | d1c1 | X | X | X | X |
# diag2 | d2o | d2r | d2r1 | d2c | d2c1 | X | X | X | X |
# diag3 | d3o | d3r | d3r1 | d3c | d3c1 | X | X | X | X |
# OFF COL
X = np.zeros((num_no_offset, num_no_offset))
COL1 = np.vstack([X, A, r1o, B, c1o, C, d1o, d2o, d3o])
# ROW COL
X1 = np.zeros((num_row_offset, num_row_offset))
X2 = np.zeros((num_row_half_tiles, num_row_offset))
COL2 = np.vstack([A.T, X1, X2, D, c1r, E, d1r, d2r, d3r])
# ROW1 COL
X1 = np.zeros((num_row_offset, num_row_half_tiles))
X2 = np.zeros((num_row_half_tiles, num_row_half_tiles))
COL3 = np.vstack([r1o.T, X1, X2, cr1, c1r1, dr1, d1r1, d2r1, d3r1])
# COL COL
X1 = np.zeros((num_col_offset, num_col_offset))
X2 = np.zeros((num_col_half_tiles, num_col_offset))
COL4 = np.vstack((B.T, D.T, cr1.T, X1, X2, F, d1c, d2c, d3c))
# COL1 COL
X1 = np.zeros((num_col_offset, num_col_half_tiles))
X2 = np.zeros((num_col_half_tiles, num_col_half_tiles))
COL5 = np.vstack([c1o.T, c1r.T, c1r1.T, X1, X2, dc1, d1c1, d2c1, d3c1])
# DIAG COL
X1 = np.zeros((num_diag_offset, num_diag_offset))
X2 = np.zeros((num_diag_rhalf_tiles, num_diag_offset))
X3 = np.zeros((num_diag_chalf_tiles, num_diag_offset))
X4 = np.zeros((num_diag_quarter_tiles, num_diag_offset))
COL6 = np.vstack([C.T, E.T, dr1.T, F.T, dc1.T, X1, X2, X3, X4])
# DIAG1 COL
X1 = np.zeros((num_diag_offset, num_diag_rhalf_tiles))
X2 = np.zeros((num_diag_rhalf_tiles, num_diag_rhalf_tiles))
X3 = np.zeros((num_diag_chalf_tiles, num_diag_rhalf_tiles))
X4 = np.zeros((num_diag_quarter_tiles, num_diag_rhalf_tiles))
COL7 = np.vstack([d1o.T, d1r.T, d1r1.T, d1c.T, d1c1.T, X1, X2, X3, X4])
# DIAG1 COL
X1 = np.zeros((num_diag_offset, num_diag_chalf_tiles))
X2 = np.zeros((num_diag_rhalf_tiles, num_diag_chalf_tiles))
X3 = np.zeros((num_diag_chalf_tiles, num_diag_chalf_tiles))
X4 = np.zeros((num_diag_quarter_tiles, num_diag_chalf_tiles))
COL8 = np.vstack([d2o.T, d2r.T, d2r1.T, d2c.T, d2c1.T, X1, X2, X3, X4])
# DIAG1 COL
X1 = np.zeros((num_diag_offset, num_diag_quarter_tiles))
X2 = np.zeros((num_diag_rhalf_tiles, num_diag_quarter_tiles))
X3 = np.zeros((num_diag_chalf_tiles, num_diag_quarter_tiles))
X4 = np.zeros((num_diag_quarter_tiles, num_diag_quarter_tiles))
COL9 = np.vstack([d3o.T, d3r.T, d3r1.T, d3c.T, d3c1.T, X1, X2, X3, X4])
M3 = np.hstack([COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8, COL9])
return M3
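# Added usage sketch: M3 = rx_graph((2, 2)) yields a square 0/1 matrix that
# should be symmetric by construction (each block appears alongside its
# transpose), so (M3 == M3.T).all() is a cheap consistency check.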
| [
"numpy.zeros",
"numpy.isnan",
"numpy.hstack",
"numpy.nonzero",
"numpy.arange",
"numpy.vstack",
"numpy.repeat"
] | [((334, 377), 'numpy.arange', 'np.arange', (['init', '(init + length)'], {'dtype': '"""int"""'}), "(init, init + length, dtype='int')\n", (343, 377), True, 'import numpy as np\n'), ((1814, 1842), 'numpy.zeros', 'np.zeros', (['(num_total_tiles,)'], {}), '((num_total_tiles,))\n', (1822, 1842), True, 'import numpy as np\n'), ((5378, 5395), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5386, 5395), True, 'import numpy as np\n'), ((5405, 5422), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5413, 5422), True, 'import numpy as np\n'), ((5432, 5449), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5440, 5449), True, 'import numpy as np\n'), ((5517, 5534), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5525, 5534), True, 'import numpy as np\n'), ((5544, 5561), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5552, 5561), True, 'import numpy as np\n'), ((5571, 5588), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5579, 5588), True, 'import numpy as np\n'), ((5854, 5871), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5862, 5871), True, 'import numpy as np\n'), ((5882, 5899), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5890, 5899), True, 'import numpy as np\n'), ((5910, 5927), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5918, 5927), True, 'import numpy as np\n'), ((5938, 5955), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5946, 5955), True, 'import numpy as np\n'), ((5966, 5983), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (5974, 5983), True, 'import numpy as np\n'), ((8687, 8704), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (8695, 8704), True, 'import numpy as np\n'), ((8714, 8731), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (8722, 8731), True, 'import numpy as np\n'), ((8741, 8758), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (8749, 8758), True, 'import numpy as np\n'), ((8986, 9003), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (8994, 9003), True, 'import numpy as np\n'), ((9014, 9031), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (9022, 9031), True, 'import numpy as np\n'), ((9042, 9059), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (9050, 9059), True, 'import numpy as np\n'), ((9070, 9087), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (9078, 9087), True, 'import numpy as np\n'), ((9098, 9115), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (9106, 9115), True, 'import numpy as np\n'), ((10578, 10596), 'numpy.zeros', 'np.zeros', (['(d1, d2)'], {}), '((d1, d2))\n', (10586, 10596), True, 'import numpy as np\n'), ((14622, 14662), 'numpy.zeros', 'np.zeros', (['(num_no_offset, num_no_offset)'], {}), '((num_no_offset, num_no_offset))\n', (14630, 14662), True, 'import numpy as np\n'), ((14674, 14722), 'numpy.vstack', 'np.vstack', (['[X, A, r1o, B, c1o, C, d1o, d2o, d3o]'], {}), '([X, A, r1o, B, c1o, C, d1o, d2o, d3o])\n', (14683, 14722), True, 'import numpy as np\n'), ((14747, 14789), 'numpy.zeros', 'np.zeros', (['(num_row_offset, num_row_offset)'], {}), '((num_row_offset, num_row_offset))\n', (14755, 14789), True, 'import numpy as np\n'), ((14799, 14845), 'numpy.zeros', 'np.zeros', (['(num_row_half_tiles, num_row_offset)'], {}), '((num_row_half_tiles, num_row_offset))\n', (14807, 14845), True, 'import numpy as np\n'), ((14857, 14907), 'numpy.vstack', 'np.vstack', (['[A.T, X1, X2, D, c1r, E, d1r, d2r, d3r]'], {}), '([A.T, X1, X2, D, c1r, E, d1r, d2r, d3r])\n', (14866, 14907), True, 'import numpy as np\n'), ((14933, 14979), 'numpy.zeros', 'np.zeros', (['(num_row_offset, num_row_half_tiles)'], {}), '((num_row_offset, num_row_half_tiles))\n', (14941, 14979), True, 'import numpy as np\n'), ((14989, 15039), 'numpy.zeros', 'np.zeros', (['(num_row_half_tiles, num_row_half_tiles)'], {}), '((num_row_half_tiles, num_row_half_tiles))\n', (14997, 15039), True, 'import numpy as np\n'), ((15051, 15111), 'numpy.vstack', 'np.vstack', (['[r1o.T, X1, X2, cr1, c1r1, dr1, d1r1, d2r1, d3r1]'], {}), '([r1o.T, X1, X2, cr1, c1r1, dr1, d1r1, d2r1, d3r1])\n', (15060, 15111), True, 'import numpy as np\n'), ((15136, 15178), 'numpy.zeros', 'np.zeros', (['(num_col_offset, num_col_offset)'], {}), '((num_col_offset, num_col_offset))\n', (15144, 15178), True, 'import numpy as np\n'), ((15188, 15234), 'numpy.zeros', 'np.zeros', (['(num_col_half_tiles, num_col_offset)'], {}), '((num_col_half_tiles, num_col_offset))\n', (15196, 15234), True, 'import numpy as np\n'), ((15246, 15300), 'numpy.vstack', 'np.vstack', (['(B.T, D.T, cr1.T, X1, X2, F, d1c, d2c, d3c)'], {}), '((B.T, D.T, cr1.T, X1, X2, F, d1c, d2c, d3c))\n', (15255, 15300), True, 'import numpy as np\n'), ((15326, 15372), 'numpy.zeros', 'np.zeros', (['(num_col_offset, num_col_half_tiles)'], {}), '((num_col_offset, num_col_half_tiles))\n', (15334, 15372), True, 'import numpy as np\n'), ((15382, 15432), 'numpy.zeros', 'np.zeros', (['(num_col_half_tiles, num_col_half_tiles)'], {}), '((num_col_half_tiles, num_col_half_tiles))\n', (15390, 15432), True, 'import numpy as np\n'), ((15444, 15508), 'numpy.vstack', 'np.vstack', (['[c1o.T, c1r.T, c1r1.T, X1, X2, dc1, d1c1, d2c1, d3c1]'], {}), '([c1o.T, c1r.T, c1r1.T, X1, X2, dc1, d1c1, d2c1, d3c1])\n', (15453, 15508), True, 'import numpy as np\n'), ((15534, 15578), 'numpy.zeros', 'np.zeros', (['(num_diag_offset, num_diag_offset)'], {}), '((num_diag_offset, num_diag_offset))\n', (15542, 15578), True, 'import numpy as np\n'), ((15588, 15637), 'numpy.zeros', 'np.zeros', (['(num_diag_rhalf_tiles, num_diag_offset)'], {}), '((num_diag_rhalf_tiles, num_diag_offset))\n', (15596, 15637), True, 'import numpy as np\n'), ((15647, 15696), 'numpy.zeros', 'np.zeros', (['(num_diag_chalf_tiles, num_diag_offset)'], {}), '((num_diag_chalf_tiles, num_diag_offset))\n', (15655, 15696), True, 'import numpy as np\n'), ((15706, 15757), 'numpy.zeros', 'np.zeros', (['(num_diag_quarter_tiles, num_diag_offset)'], {}), '((num_diag_quarter_tiles, num_diag_offset))\n', (15714, 15757), True, 'import numpy as np\n'), ((15769, 15825), 'numpy.vstack', 'np.vstack', (['[C.T, E.T, dr1.T, F.T, dc1.T, X1, X2, X3, X4]'], {}), '([C.T, E.T, dr1.T, F.T, dc1.T, X1, X2, X3, X4])\n', (15778, 15825), True, 'import numpy as np\n'), ((15852, 15901), 'numpy.zeros', 'np.zeros', (['(num_diag_offset, num_diag_rhalf_tiles)'], {}), '((num_diag_offset, num_diag_rhalf_tiles))\n', (15860, 15901), True, 'import numpy as np\n'), ((15911, 15965), 'numpy.zeros', 'np.zeros', (['(num_diag_rhalf_tiles, num_diag_rhalf_tiles)'], {}), '((num_diag_rhalf_tiles, num_diag_rhalf_tiles))\n', (15919, 15965), True, 'import numpy as np\n'), ((15975, 16029), 'numpy.zeros', 'np.zeros', (['(num_diag_chalf_tiles, num_diag_rhalf_tiles)'], {}), '((num_diag_chalf_tiles, num_diag_rhalf_tiles))\n', (15983, 16029), True, 'import numpy as np\n'), ((16039, 16095), 'numpy.zeros', 'np.zeros', (['(num_diag_quarter_tiles, num_diag_rhalf_tiles)'], {}), '((num_diag_quarter_tiles, num_diag_rhalf_tiles))\n', (16047, 16095), True, 'import numpy as np\n'), ((16107, 16171), 'numpy.vstack', 'np.vstack', (['[d1o.T, d1r.T, d1r1.T, d1c.T, d1c1.T, X1, X2, X3, X4]'], {}), '([d1o.T, d1r.T, d1r1.T, d1c.T, d1c1.T, X1, X2, X3, X4])\n', (16116, 16171), True, 'import numpy as np\n'), ((16198, 16247), 'numpy.zeros', 'np.zeros', (['(num_diag_offset, num_diag_chalf_tiles)'], {}), '((num_diag_offset, num_diag_chalf_tiles))\n', (16206, 16247), True, 'import numpy as np\n'), ((16257, 16311), 'numpy.zeros', 'np.zeros', (['(num_diag_rhalf_tiles, num_diag_chalf_tiles)'], {}), '((num_diag_rhalf_tiles, num_diag_chalf_tiles))\n', (16265, 16311), True, 'import numpy as np\n'), ((16321, 16375), 'numpy.zeros', 'np.zeros', (['(num_diag_chalf_tiles, num_diag_chalf_tiles)'], {}), '((num_diag_chalf_tiles, num_diag_chalf_tiles))\n', (16329, 16375), True, 'import numpy as np\n'), ((16385, 16441), 'numpy.zeros', 'np.zeros', (['(num_diag_quarter_tiles, num_diag_chalf_tiles)'], {}), '((num_diag_quarter_tiles, num_diag_chalf_tiles))\n', (16393, 16441), True, 'import numpy as np\n'), ((16453, 16517), 'numpy.vstack', 'np.vstack', (['[d2o.T, d2r.T, d2r1.T, d2c.T, d2c1.T, X1, X2, X3, X4]'], {}), '([d2o.T, d2r.T, d2r1.T, d2c.T, d2c1.T, X1, X2, X3, X4])\n', (16462, 16517), True, 'import numpy as np\n'), ((16544, 16595), 'numpy.zeros', 'np.zeros', (['(num_diag_offset, num_diag_quarter_tiles)'], {}), '((num_diag_offset, num_diag_quarter_tiles))\n', (16552, 16595), True, 'import numpy as np\n'), ((16605, 16661), 'numpy.zeros', 'np.zeros', (['(num_diag_rhalf_tiles, num_diag_quarter_tiles)'], {}), '((num_diag_rhalf_tiles, num_diag_quarter_tiles))\n', (16613, 16661), True, 'import numpy as np\n'), ((16671, 16727), 'numpy.zeros', 'np.zeros', (['(num_diag_chalf_tiles, num_diag_quarter_tiles)'], {}), '((num_diag_chalf_tiles, num_diag_quarter_tiles))\n', (16679, 16727), True, 'import numpy as np\n'), ((16737, 16795), 'numpy.zeros', 'np.zeros', (['(num_diag_quarter_tiles, num_diag_quarter_tiles)'], {}), '((num_diag_quarter_tiles, num_diag_quarter_tiles))\n', (16745, 16795), True, 'import numpy as np\n'), ((16807, 16871), 'numpy.vstack', 'np.vstack', (['[d3o.T, d3r.T, d3r1.T, d3c.T, d3c1.T, X1, X2, X3, X4]'], {}), '([d3o.T, d3r.T, d3r1.T, d3c.T, d3c1.T, X1, X2, X3, X4])\n', (16816, 16871), True, 'import numpy as np\n'), ((16882, 16947), 'numpy.hstack', 'np.hstack', (['[COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8, COL9]'], {}), '([COL1, COL2, COL3, COL4, COL5, COL6, COL7, COL8, COL9])\n', (16891, 16947), True, 'import numpy as np\n'), ((5330, 5356), 'numpy.repeat', 'np.repeat', (['base', '(2)'], {'axis': '(1)'}), '(base, 2, axis=1)\n', (5339, 5356), True, 'import numpy as np\n'), ((5469, 5495), 'numpy.repeat', 'np.repeat', (['base', '(2)'], {'axis': '(1)'}), '(base, 2, axis=1)\n', (5478, 5495), True, 'import numpy as np\n'), ((5618, 5646), 'numpy.repeat', 'np.repeat', (['base_r', '(2)'], {'axis': '(1)'}), '(base_r, 2, axis=1)\n', (5627, 5646), True, 'import numpy as np\n'), ((5687, 5715), 'numpy.repeat', 'np.repeat', (['base_c', '(2)'], {'axis': '(1)'}), '(base_c, 2, axis=1)\n', (5696, 5715), True, 'import numpy as np\n'), ((5759, 5788), 'numpy.repeat', 'np.repeat', (['base_rc', '(2)'], {'axis': '(1)'}), '(base_rc, 2, axis=1)\n', (5768, 5788), True, 'import numpy as np\n'), ((6011, 6031), 'numpy.arange', 'np.arange', (['(1)', '(d2 + 1)'], {}), '(1, d2 + 1)\n', (6020, 6031), True, 'import numpy as np\n'), ((6069, 6098), 'numpy.arange', 'np.arange', (['(d2 + 1)', '(d2 * 2 + 1)'], {}), '(d2 + 1, d2 * 2 + 1)\n', (6078, 6098), True, 'import numpy as np\n'), ((6131, 6151), 'numpy.arange', 'np.arange', (['(1)', '(d1 + 1)'], {}), '(1, d1 + 1)\n', (6140, 6151), True, 'import numpy as np\n'), ((6189, 6218), 'numpy.arange', 'np.arange', (['(d1 + 1)', '(d1 * 2 + 1)'], {}), '(d1 + 1, d1 * 2 + 1)\n', (6198, 6218), True, 'import numpy as np\n'), ((6396, 6412), 'numpy.arange', 'np.arange', (['(1)', 'd1'], {}), '(1, d1)\n', (6405, 6412), True, 'import numpy as np\n'), ((6455, 6480), 'numpy.arange', 'np.arange', (['d1', '(2 * d1 - 1)'], {}), '(d1, 2 * d1 - 1)\n', (6464, 6480), True, 'import numpy as np\n'), ((6664, 6680), 'numpy.arange', 'np.arange', (['(1)', 'd2'], {}), '(1, d2)\n', (6673, 6680), True, 'import numpy as np\n'), ((6723, 6748), 'numpy.arange', 'np.arange', (['d2', '(2 * d2 - 1)'], {}), '(d2, 2 * d2 - 1)\n', (6732, 6748), True, 'import numpy as np\n'), ((8639, 8665), 'numpy.repeat', 'np.repeat', (['base', '(2)'], {'axis': '(1)'}), '(base, 2, axis=1)\n', (8648, 8665), True, 'import numpy as np\n'), ((8787, 8822), 'numpy.repeat', 'np.repeat', (['base[:d1 - 1]', '(2)'], {'axis': '(1)'}), '(base[:d1 - 1], 2, axis=1)\n', (8796, 8822), True, 'import numpy as np\n'), ((8861, 8889), 'numpy.repeat', 'np.repeat', (['base_c', '(2)'], {'axis': '(1)'}), '(base_c, 2, axis=1)\n', (8870, 8889), True, 'import numpy as np\n'), ((8933, 8962), 'numpy.repeat', 'np.repeat', (['base_rc', '(2)'], {'axis': '(1)'}), '(base_rc, 2, axis=1)\n', (8942, 8962), True, 'import numpy as np\n'), ((9143, 9163), 'numpy.arange', 'np.arange', (['(1)', '(d2 + 1)'], {}), '(1, d2 + 1)\n', (9152, 9163), True, 'import numpy as np\n'), ((9201, 9230), 'numpy.arange', 'np.arange', (['(d2 + 1)', '(d2 * 2 + 1)'], {}), '(d2 + 1, d2 * 2 + 1)\n', (9210, 9230), True, 'import numpy as np\n'), ((9263, 9283), 'numpy.arange', 'np.arange', (['(1)', '(d1 + 1)'], {}), '(1, d1 + 1)\n', (9272, 9283), True, 'import numpy as np\n'), ((9321, 9350), 'numpy.arange', 'np.arange', (['(d1 + 1)', '(d1 * 2 + 1)'], {}), '(d1 + 1, d1 * 2 + 1)\n', (9330, 9350), True, 'import numpy as np\n'), ((9526, 9542), 'numpy.arange', 'np.arange', (['(1)', 'd1'], {}), '(1, d1)\n', (9535, 9542), True, 'import numpy as np\n'), ((9584, 9609), 'numpy.arange', 'np.arange', (['d1', '(2 * d1 - 1)'], {}), '(d1, 2 * d1 - 1)\n', (9593, 9609), True, 'import numpy as np\n'), ((9787, 9803), 'numpy.arange', 'np.arange', (['(1)', 'd2'], {}), '(1, d2)\n', (9796, 9803), True, 'import numpy as np\n'), ((9845, 9869), 'numpy.arange', 'np.arange', (['(1)', '(2 * d2 - 1)'], {}), '(1, 2 * d2 - 1)\n', (9854, 9869), True, 'import numpy as np\n'), ((8461, 8486), 'numpy.arange', 'np.arange', (['(1)', '(d1 * d2 + 1)'], {}), '(1, d1 * d2 + 1)\n', (8470, 8486), True, 'import numpy as np\n'), ((8512, 8543), 'numpy.arange', 'np.arange', (['(1)', '(d1 * (d2 - 1) + 1)'], {}), '(1, d1 * (d2 - 1) + 1)\n', (8521, 8543), True, 'import numpy as np\n'), ((8570, 8607), 'numpy.arange', 'np.arange', (['(1)', '((d1 - 1) * (d2 - 1) + 1)'], {}), '(1, (d1 - 1) * (d2 - 1) + 1)\n', (8579, 8607), True, 'import numpy as np\n'), ((5087, 5112), 'numpy.arange', 'np.arange', (['(1)', '(d1 * d2 + 1)'], {}), '(1, d1 * d2 + 1)\n', (5096, 5112), True, 'import numpy as np\n'), ((5140, 5171), 'numpy.arange', 'np.arange', (['(1)', '(d2 * (d1 - 1) + 1)'], {}), '(1, d2 * (d1 - 1) + 1)\n', (5149, 5171), True, 'import numpy as np\n'), ((5199, 5230), 'numpy.arange', 'np.arange', (['(1)', '(d1 * (d2 - 1) + 1)'], {}), '(1, d1 * (d2 - 1) + 1)\n', (5208, 5230), True, 'import numpy as np\n'), ((5259, 5296), 'numpy.arange', 'np.arange', (['(1)', '((d1 - 1) * (d2 - 1) + 1)'], {}), '(1, (d1 - 1) * (d2 - 1) + 1)\n', (5268, 5296), True, 'import numpy as np\n'), ((10647, 10667), 'numpy.nonzero', 'np.nonzero', (['(Mr == ii)'], {}), '(Mr == ii)\n', (10657, 10667), True, 'import numpy as np\n'), ((10687, 10699), 'numpy.isnan', 'np.isnan', (['nn'], {}), '(nn)\n', (10695, 10699), True, 'import numpy as np\n')]
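The extract cell above records a block-matrix assembly: zero blocks built with np.zeros are stacked into column groups (COL1 ... COL9) with np.vstack, and the groups are then joined with np.hstack. A minimal, hedged sketch of that pattern — the block names and sizes below are illustrative, not taken from the original code:

import numpy as np

t, r = 4, 2                     # made-up block dimensions
A = np.zeros((t, t))
B = np.ones((t, r))
X1 = np.zeros((r, t))
X2 = np.eye(r)
COL1 = np.vstack([A, X1])      # stack blocks into a column group
COL2 = np.vstack([B, X2])
M = np.hstack([COL1, COL2])    # join the column groups into the full matrix
print(M.shape)                 # (6, 6)
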
from tqdm import tqdm
import torch
import numpy as np
from torchvision import transforms
import cv2
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index):
b, *_, device = *x.shape, x.device
e_t = self.model.apply_model(x, t, c)
alphas = self.ddim_alphas
alphas_prev = self.ddim_alphas_prev
sqrt_one_minus_alphas = self.ddim_sqrt_one_minus_alphas
sigmas = self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
# direction pointing to x_t
dir_xt = (1. - a_prev).sqrt() * e_t
x_prev = a_prev.sqrt() * pred_x0 + dir_xt
return x_prev, pred_x0
@torch.no_grad()
def sample(self, x_T):
b = x_T.shape[0]
device = x_T.device
cond = None
img = x_T
log_every_t = 10
timesteps = self.ddim_timesteps
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = np.flip(timesteps)
total_steps = timesteps.shape[0]
print(f"Running DDIM Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='DDIM', total=total_steps)
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
outs = p_sample_ddim(self, img, cond, ts, index=index)
img, pred_x0 = outs
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim_rev(self, x, c, t, index):
b, *_, device = *x.shape, x.device
e_t = self.model.apply_model(x, t, c)
alphas = self.ddim_alphas
alphas_next = self.ddim_alphas_next
sqrt_one_minus_alphas = self.ddim_sqrt_one_minus_alphas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_next = torch.full((b, 1, 1, 1), alphas_next[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
# direction pointing to x_t
dir_xt = (1. - a_next).sqrt() * e_t
x_prev = a_next.sqrt() * pred_x0 + dir_xt
return x_prev, pred_x0
@torch.no_grad()
def sample_rev(self, x_T):
b = x_T.shape[0]
device = x_T.device
cond = None
img = x_T
log_every_t = 10
timesteps = self.ddim_timesteps
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = timesteps
total_steps = timesteps.shape[0]
print(f"Running DDIM Reverse Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='DDIM Reverse', total=total_steps)
for i, step in enumerate(iterator):
index = i
ts = torch.full((b,), step, device=device, dtype=torch.long)
outs = p_sample_ddim_rev(self, img, cond, ts, index=index)
img, pred_x0 = outs
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
def to_tensor(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (256, 256), interpolation=cv2.INTER_LANCZOS4)
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
img_tensor = transform(img)
return img_tensor
def to_image(img_tensor):
img = (255*(img_tensor + 1)/2).permute(1, 2, 0).detach().cpu().numpy()
img = img.clip(0, 255).astype(np.uint8)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
def img_to_latents(diffusion, img_tensor):
model = diffusion.model
device = img_tensor.device
orig_lats = model.encode_first_stage(img_tensor.unsqueeze(0).to(device))
lats_rev, _ = sample_rev(diffusion, orig_lats)
return lats_rev
def latents_to_img(diffusion, lats_rev):
model = diffusion.model
orig_lats_, _ = sample(diffusion, lats_rev)
orig_decoded_ = model.decode_first_stage(orig_lats_)
return orig_decoded_ | [
"tqdm.tqdm",
"numpy.flip",
"cv2.cvtColor",
"torch.full",
"torchvision.transforms.Normalize",
"torch.no_grad",
"cv2.resize",
"torchvision.transforms.ToTensor"
] | [((102, 117), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (115, 117), False, 'import torch\n'), ((948, 963), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (961, 963), False, 'import torch\n'), ((1816, 1831), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1829, 1831), False, 'import torch\n'), ((2636, 2651), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2649, 2651), False, 'import torch\n'), ((487, 541), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'alphas[index]'], {'device': 'device'}), '((b, 1, 1, 1), alphas[index], device=device)\n', (497, 541), False, 'import torch\n'), ((555, 614), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'alphas_prev[index]'], {'device': 'device'}), '((b, 1, 1, 1), alphas_prev[index], device=device)\n', (565, 614), False, 'import torch\n'), ((640, 709), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'sqrt_one_minus_alphas[index]'], {'device': 'device'}), '((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n', (650, 709), False, 'import torch\n'), ((1194, 1212), 'numpy.flip', 'np.flip', (['timesteps'], {}), '(timesteps)\n', (1201, 1212), True, 'import numpy as np\n'), ((1331, 1379), 'tqdm.tqdm', 'tqdm', (['time_range'], {'desc': '"""DDIM"""', 'total': 'total_steps'}), "(time_range, desc='DDIM', total=total_steps)\n", (1335, 1379), False, 'from tqdm import tqdm\n'), ((2175, 2229), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'alphas[index]'], {'device': 'device'}), '((b, 1, 1, 1), alphas[index], device=device)\n', (2185, 2229), False, 'import torch\n'), ((2243, 2302), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'alphas_next[index]'], {'device': 'device'}), '((b, 1, 1, 1), alphas_next[index], device=device)\n', (2253, 2302), False, 'import torch\n'), ((2328, 2397), 'torch.full', 'torch.full', (['(b, 1, 1, 1)', 'sqrt_one_minus_alphas[index]'], {'device': 'device'}), '((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n', (2338, 2397), False, 'import torch\n'), ((3023, 3079), 'tqdm.tqdm', 'tqdm', (['time_range'], {'desc': '"""DDIM Reverse"""', 'total': 'total_steps'}), "(time_range, desc='DDIM Reverse', total=total_steps)\n", (3027, 3079), False, 'from tqdm import tqdm\n'), ((3531, 3567), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3543, 3567), False, 'import cv2\n'), ((3578, 3639), 'cv2.resize', 'cv2.resize', (['img', '(256, 256)'], {'interpolation': 'cv2.INTER_LANCZOS4'}), '(img, (256, 256), interpolation=cv2.INTER_LANCZOS4)\n', (3588, 3639), False, 'import cv2\n'), ((3988, 4024), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2BGR'], {}), '(img, cv2.COLOR_RGB2BGR)\n', (4000, 4024), False, 'import cv2\n'), ((1470, 1525), 'torch.full', 'torch.full', (['(b,)', 'step'], {'device': 'device', 'dtype': 'torch.long'}), '((b,), step, device=device, dtype=torch.long)\n', (1480, 1525), False, 'import torch\n'), ((3152, 3207), 'torch.full', 'torch.full', (['(b,)', 'step'], {'device': 'device', 'dtype': 'torch.long'}), '((b,), step, device=device, dtype=torch.long)\n', (3162, 3207), False, 'import torch\n'), ((3685, 3706), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3704, 3706), False, 'from torchvision import transforms\n'), ((3716, 3770), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (3736, 3770), False, 'from torchvision import transforms\n')] |
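Taken together, sample_rev inverts an encoded image into DDIM noise and sample maps that noise back, so img_to_latents and latents_to_img form a round trip through the autoencoder and the diffusion model. A hedged usage sketch — the sampler object, device, and file names here are assumptions, not part of the excerpt:

# `sampler` stands for a DDIMSampler-like object carrying the attributes used above:
# model, ddim_timesteps, ddim_alphas, ddim_alphas_prev, ddim_alphas_next,
# ddim_sqrt_one_minus_alphas and ddim_sigmas.
img_bgr = cv2.imread("input.png")           # hypothetical input file
x0 = to_tensor(img_bgr).to("cuda")        # device choice is an assumption
lats = img_to_latents(sampler, x0)        # DDIM inversion: image -> latent noise
recon = latents_to_img(sampler, lats)     # DDIM sampling: latent noise -> image batch
cv2.imwrite("recon.png", to_image(recon[0]))
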
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import matplotlib.pyplot as plt
import numpy as np
from scipy.linalg import norm, solve
def spline(d, nspline):
"""The inputs are:
* d: the displacement (or velocity) time series.
    * nspline: the number of spline segments (i.e., the number of knots - 1).
The outputs are:
* xs: the sought basis functions.
* kn: the knot locations.
    * dx: an internal by-product; you can usually ignore this output.
"""
ns = len(d)
nk = nspline+1
maxd = 1.01*max(d)
mind = 1.01*min(d)
kn = np.linspace(mind, maxd, nspline+1)
# index of kn before zero
j0 = np.where(kn <= 0)[0][-1]
Q = np.zeros((nk, nk))
for j in range(nk):
if j == j0:
t0 = -kn[j]/(kn[j+1]-kn[j])
Q[0, j] = (t0**3-2*t0**2+t0) * (kn[j+1]-kn[j])
Q[0, j+1] = (t0**3-t0**2) * (kn[j+1]-kn[j])
Q[nk-1, j] = (3*t0**2-4*t0+1) * (kn[j+1]-kn[j])
Q[nk-1, j+1] = (3*t0**2-2*t0) * (kn[j+1]-kn[j])
if j != 0:
Q[j, j-1] = 1/(kn[j]-kn[j-1])
Q[j, j] = 2 * (1/(kn[j]-kn[j-1]) + 1/(kn[j+1]-kn[j]))
Q[j, j+1] = 1/(kn[j+1]-kn[j])
elif j != 0 and j != j0 and j != nk-1:
Q[j, j-1] = 1/(kn[j]-kn[j-1])
Q[j, j] = 2*(1/(kn[j]-kn[j-1]) + 1/(kn[j+1]-kn[j]))
Q[j, j+1] = 1/(kn[j+1]-kn[j])
S = np.zeros((nk, nk))
for j in range(nk):
if j == j0:
t0 = -kn[j]/(kn[j+1]-kn[j])
S[0, j] = -(2*t0**3-3*t0**2+1)
S[0, j+1] = -(-2*t0**3+3*t0**2)
S[nk-1, j] = 6*(t0-t0**2)
S[nk-1, j+1] = -6*(t0-t0**2)
if j != 0:
S[j, j-1] = -3/(kn[j]-kn[j-1])**2
S[j, j] = 3 * (1/(kn[j]-kn[j-1])**2 - 1/(kn[j+1]-kn[j])**2)
S[j, j+1] = 3/(kn[j+1]-kn[j])**2
elif j != 0 and j != j0 and j != nk-1:
S[j, j-1] = -3/(kn[j]-kn[j-1])**2
S[j, j] = 3*(1/(kn[j]-kn[j-1])**2 - 1/(kn[j+1]-kn[j])**2)
S[j, j+1] = 3/(kn[j+1]-kn[j])**2
dx = solve(Q, S)
x = np.zeros((nspline, nk, ns))
for j in range(nspline):
u = np.zeros(ns)
t = np.zeros(ns)
t1 = np.zeros(ns)
if j == 0:
mask = (kn[j] <= d) & (d <= kn[j+1])
else:
mask = (kn[j] < d) & (d <= kn[j+1])
u[mask] = d[mask]
t[mask] = (u[mask]-kn[j]) / (kn[j+1]-kn[j])
t1[mask] = 1
A = 2*t**3 - 3*t**2 + t1
B = -2*t**3 + 3*t**2
C = (t**3-2*t**2+t) * (kn[j+1]-kn[j])
D = (t**3-t**2) * (kn[j+1]-kn[j])
x[j, j, :] += A
x[j, j+1, :] += B
for k in range(nk):
x[j, k, :] += C*dx[j, k] + D*dx[j+1, k]
xs = np.squeeze(np.sum(x, 0))
return xs, kn, dx
# fs = 20
# x = np.arange(1000)/fs
# y = np.sin(x)
# xs, kn, dx = spline(y,5)
| [
"scipy.linalg.solve",
"numpy.sum",
"numpy.zeros",
"numpy.where",
"numpy.linspace"
] | [((559, 595), 'numpy.linspace', 'np.linspace', (['mind', 'maxd', '(nspline + 1)'], {}), '(mind, maxd, nspline + 1)\n', (570, 595), True, 'import numpy as np\n'), ((667, 685), 'numpy.zeros', 'np.zeros', (['(nk, nk)'], {}), '((nk, nk))\n', (675, 685), True, 'import numpy as np\n'), ((1399, 1417), 'numpy.zeros', 'np.zeros', (['(nk, nk)'], {}), '((nk, nk))\n', (1407, 1417), True, 'import numpy as np\n'), ((2088, 2099), 'scipy.linalg.solve', 'solve', (['Q', 'S'], {}), '(Q, S)\n', (2093, 2099), False, 'from scipy.linalg import norm, solve\n'), ((2108, 2135), 'numpy.zeros', 'np.zeros', (['(nspline, nk, ns)'], {}), '((nspline, nk, ns))\n', (2116, 2135), True, 'import numpy as np\n'), ((2179, 2191), 'numpy.zeros', 'np.zeros', (['ns'], {}), '(ns)\n', (2187, 2191), True, 'import numpy as np\n'), ((2204, 2216), 'numpy.zeros', 'np.zeros', (['ns'], {}), '(ns)\n', (2212, 2216), True, 'import numpy as np\n'), ((2230, 2242), 'numpy.zeros', 'np.zeros', (['ns'], {}), '(ns)\n', (2238, 2242), True, 'import numpy as np\n'), ((2778, 2790), 'numpy.sum', 'np.sum', (['x', '(0)'], {}), '(x, 0)\n', (2784, 2790), True, 'import numpy as np\n'), ((634, 651), 'numpy.where', 'np.where', (['(kn <= 0)'], {}), '(kn <= 0)\n', (642, 651), True, 'import numpy as np\n')] |
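The commented-out lines at the end of the sample show the intended call; expanding them slightly into a hedged, runnable sketch (the shape expectations follow from the code above):

import numpy as np

fs = 20
d = np.sin(np.arange(1000) / fs)   # displacement series, as in the commented example
xs, kn, dx = spline(d, 5)         # 5 spline segments -> 6 knots
print(xs.shape)                   # (6, 1000): one basis function per knot, evaluated along d
print(kn)                         # the 6 knot locations spanning [1.01*min(d), 1.01*max(d)]
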
"""Reads the DeepFashion dataset and provides a python interface to the same.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
class DeepFashion:
def __init__(self, dataset_path):
# The constants
img_folder_name = "img"
eval_folder_name = "Eval"
anno_folder_name = "Anno"
list_eval_partition_file = "list_eval_partition.txt"
list_attr_img_file = "list_attr_img.txt"
list_category_img_file = "list_category_img.txt"
# The data structures
# Each element is a tuple of (image path, category, attributes)
self.train_imgs = [] # for all the training images
self.test_imgs = [] # for all the test images
self.val_imgs = [] # for all the validation images
# Construct the paths
self.path = dataset_path
self.img_dir = os.path.join(self.path, img_folder_name)
self.eval_dir = os.path.join(self.path, eval_folder_name)
self.anno_dir = os.path.join(self.path, anno_folder_name)
self.list_eval_partition = os.path.join(self.eval_dir, list_eval_partition_file)
self.list_attr_img = os.path.join(self.anno_dir, list_attr_img_file)
self.list_category_img = os.path.join(self.anno_dir, list_category_img_file)
# Gather the Train, Test and Val image paths
self.read_img_files_list()
def read_img_files_list(self):
fashion_db = "fashion.db"
# If we already have the data structures on filesystem, read it and return
if os.path.exists(fashion_db):
print("Reading data structures from: ", fashion_db)
db = open(fashion_db, "rb")
self.train_imgs, self.val_imgs, self.test_imgs = pickle.load(db)
print("Training images", len(self.train_imgs))
print("Validation images", len(self.val_imgs))
print("Test images", len(self.test_imgs))
return
# Read in the image to category mapping
image_to_category = {}
with open(self.list_category_img) as f:
imgs_count = int(f.readline().strip())
_ = f.readline().strip() # read and throw away the header
for line in f:
words = line.split()
image_to_category[words[0].strip()] = int(words[1].strip())
assert(imgs_count == len(image_to_category))
# Read in the image to attributes mapping
image_to_attributes = {}
with open(self.list_attr_img) as f:
imgs_count = int(f.readline().strip())
_ = f.readline().strip() # read and throw away the header
for line in f:
words = line.split(sep='jpg')
lst = [int(i) for i in words[1].strip().split()]
image_to_attributes[words[0].strip()+"jpg"] = lst
assert(imgs_count == len(image_to_attributes))
# Read in the images
with open(self.list_eval_partition) as f:
imgs_count = int(f.readline().strip())
_ = f.readline().strip() # read and throw away the header
for line in f:
words = line.split()
img = words[0].strip()
category_idx = image_to_category[img]
category = np.zeros(50) # one hot encoded
category[category_idx - 1] = 1
attributes = np.asarray(image_to_attributes[img], dtype=np.int16)
if words[1].strip() == "train":
self.train_imgs.append((img, category, attributes))
if words[1].strip() == "val":
self.val_imgs.append((img, category, attributes))
if words[1].strip() == "test":
self.test_imgs.append((img, category, attributes))
print("Training images", len(self.train_imgs))
print("Validation images", len(self.val_imgs))
print("Test images", len(self.test_imgs))
assert(imgs_count == (len(self.train_imgs)+len(self.test_imgs)+len(self.val_imgs)))
# Store the data structures
db = open(fashion_db, "wb")
pickle.dump((self.train_imgs, self.val_imgs, self.test_imgs), db)
db.close()
print("Data structures stored on filesystem as: ", fashion_db)
#df = DeepFashion("/home/as/datasets/lily/deep-fashion")
#print(df.val_imgs[0])
| [
"pickle.dump",
"numpy.asarray",
"numpy.zeros",
"os.path.exists",
"pickle.load",
"os.path.join"
] | [((878, 918), 'os.path.join', 'os.path.join', (['self.path', 'img_folder_name'], {}), '(self.path, img_folder_name)\n', (890, 918), False, 'import os\n'), ((943, 984), 'os.path.join', 'os.path.join', (['self.path', 'eval_folder_name'], {}), '(self.path, eval_folder_name)\n', (955, 984), False, 'import os\n'), ((1009, 1050), 'os.path.join', 'os.path.join', (['self.path', 'anno_folder_name'], {}), '(self.path, anno_folder_name)\n', (1021, 1050), False, 'import os\n'), ((1087, 1140), 'os.path.join', 'os.path.join', (['self.eval_dir', 'list_eval_partition_file'], {}), '(self.eval_dir, list_eval_partition_file)\n', (1099, 1140), False, 'import os\n'), ((1170, 1217), 'os.path.join', 'os.path.join', (['self.anno_dir', 'list_attr_img_file'], {}), '(self.anno_dir, list_attr_img_file)\n', (1182, 1217), False, 'import os\n'), ((1251, 1302), 'os.path.join', 'os.path.join', (['self.anno_dir', 'list_category_img_file'], {}), '(self.anno_dir, list_category_img_file)\n', (1263, 1302), False, 'import os\n'), ((1557, 1583), 'os.path.exists', 'os.path.exists', (['fashion_db'], {}), '(fashion_db)\n', (1571, 1583), False, 'import os\n'), ((4147, 4212), 'pickle.dump', 'pickle.dump', (['(self.train_imgs, self.val_imgs, self.test_imgs)', 'db'], {}), '((self.train_imgs, self.val_imgs, self.test_imgs), db)\n', (4158, 4212), False, 'import pickle\n'), ((1750, 1765), 'pickle.load', 'pickle.load', (['db'], {}), '(db)\n', (1761, 1765), False, 'import pickle\n'), ((3297, 3309), 'numpy.zeros', 'np.zeros', (['(50)'], {}), '(50)\n', (3305, 3309), True, 'import numpy as np\n'), ((3405, 3457), 'numpy.asarray', 'np.asarray', (['image_to_attributes[img]'], {'dtype': 'np.int16'}), '(image_to_attributes[img], dtype=np.int16)\n', (3415, 3457), True, 'import numpy as np\n')] |
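A hedged usage sketch for the class above; the dataset root is a placeholder, and each list element is the (image path, one-hot category, attribute vector) tuple built in read_img_files_list:

df = DeepFashion("/path/to/deep-fashion")   # placeholder dataset root
path, category, attributes = df.train_imgs[0]
print(path)                    # relative image path within the dataset
print(category.argmax() + 1)   # recover the 1-based category id from the one-hot vector
print(attributes.shape)        # one int16 flag per attribute
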
"""
@file
@brief Manipulate data for training.
"""
import numpy
from onnxruntime import OrtValue
class OrtDataLoader:
"""
Draws consecutive random observations from a dataset
by batch. It iterates over the datasets by drawing
*batch_size* consecutive observations.
:param X: features
:param y: labels
:param batch_size: batch size (consecutive observations)
:param device: `'cpu'` or `'cuda'`
:param device_idx: device index
See example :ref:`l-orttraining-nn-gpu`.
"""
def __init__(self, X, y, batch_size=20, device='cpu', device_idx=0):
if len(y.shape) == 1:
y = y.reshape((-1, 1))
if X.shape[0] != y.shape[0]:
raise ValueError(
"Shape mismatch X.shape=%r, y.shape=%r." % (X.shape, y.shape))
self.X = numpy.ascontiguousarray(X)
self.y = numpy.ascontiguousarray(y)
self.batch_size = batch_size
self.device = device
self.device_idx = device_idx
def __repr__(self):
"usual"
return "%s(..., ..., batch_size=%r, device=%r, device_idx=%r)" % (
self.__class__.__name__, self.batch_size, self.device,
self.device_idx)
def __len__(self):
"Returns the number of observations."
return self.X.shape[0]
def __iter__(self):
"""
Iterates over the datasets by drawing
*batch_size* consecutive observations.
"""
N = 0
b = len(self) - self.batch_size
if b <= 0 or self.batch_size <= 0:
yield (
OrtValue.ortvalue_from_numpy(
self.X, self.device, self.device_idx),
OrtValue.ortvalue_from_numpy(
self.y, self.device, self.device_idx))
else:
while N < len(self):
i = numpy.random.randint(0, b)
N += self.batch_size
yield (
OrtValue.ortvalue_from_numpy(
self.X[i:i + self.batch_size],
self.device, self.device_idx),
OrtValue.ortvalue_from_numpy(
self.y[i:i + self.batch_size],
self.device, self.device_idx))
@property
def data(self):
"Returns a tuple of the datasets."
return self.X, self.y
| [
"onnxruntime.OrtValue.ortvalue_from_numpy",
"numpy.random.randint",
"numpy.ascontiguousarray"
] | [((820, 846), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['X'], {}), '(X)\n', (843, 846), False, 'import numpy\n'), ((864, 890), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['y'], {}), '(y)\n', (887, 890), False, 'import numpy\n'), ((1843, 1869), 'numpy.random.randint', 'numpy.random.randint', (['(0)', 'b'], {}), '(0, b)\n', (1863, 1869), False, 'import numpy\n'), ((1582, 1648), 'onnxruntime.OrtValue.ortvalue_from_numpy', 'OrtValue.ortvalue_from_numpy', (['self.X', 'self.device', 'self.device_idx'], {}), '(self.X, self.device, self.device_idx)\n', (1610, 1648), False, 'from onnxruntime import OrtValue\n'), ((1687, 1753), 'onnxruntime.OrtValue.ortvalue_from_numpy', 'OrtValue.ortvalue_from_numpy', (['self.y', 'self.device', 'self.device_idx'], {}), '(self.y, self.device, self.device_idx)\n', (1715, 1753), False, 'from onnxruntime import OrtValue\n'), ((1951, 2044), 'onnxruntime.OrtValue.ortvalue_from_numpy', 'OrtValue.ortvalue_from_numpy', (['self.X[i:i + self.batch_size]', 'self.device', 'self.device_idx'], {}), '(self.X[i:i + self.batch_size], self.device,\n self.device_idx)\n', (1979, 2044), False, 'from onnxruntime import OrtValue\n'), ((2111, 2204), 'onnxruntime.OrtValue.ortvalue_from_numpy', 'OrtValue.ortvalue_from_numpy', (['self.y[i:i + self.batch_size]', 'self.device', 'self.device_idx'], {}), '(self.y[i:i + self.batch_size], self.device,\n self.device_idx)\n', (2139, 2204), False, 'from onnxruntime import OrtValue\n')] |
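A hedged usage sketch of the loader on synthetic data; OrtValue.numpy() copies a batch back to a NumPy array for inspection:

import numpy
X = numpy.random.randn(100, 10).astype(numpy.float32)
y = numpy.random.randn(100).astype(numpy.float32)   # reshaped to (100, 1) by the constructor
loader = OrtDataLoader(X, y, batch_size=20)
for bx, by in loader:
    print(bx.numpy().shape, by.numpy().shape)         # (20, 10) (20, 1)
    break
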
import sys
import cv2
import numpy as np
import glob
import os
# values to adjust
threshold_margin = 5
erode_kernal_size = 2
erode_iterations = 2
dilute_kernal_size = 5
dilute_interations = 3
root_image_folder = sys.argv[1]
root_output_folder = sys.argv[2]
def max_index(array):
max_value = max(array)
for i in range(len(array)):
if array[i] == max_value:
return i
return 0
def process_image(image_grey):
# High pass filter
sigma = 30
image_grey = image_grey - cv2.GaussianBlur(image_grey, (0,0), sigma) + 127
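    # NOTE: debug preview of the current frame; this relies on the module-level
    # image_path and img_bgr globals assigned in the loop at the bottom of the file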
cv2.imshow(image_path, img_bgr)
cv2.waitKey(0)
# Find global background colour by finding the most frequently used background colour
histogram = cv2.calcHist(image_grey, [0], None, [256], [0,255])
common_grey = max_index(histogram)
# apply threshold at the common grey
result, img_bw = cv2.threshold(image_grey, common_grey - threshold_margin, 255, cv2.THRESH_BINARY_INV)
# Dilate picture slightly to rejoin any loosely disconnected blobs
dilue_kernal = np.ones((dilute_kernal_size, dilute_kernal_size), np.uint8)
erode_kernal = np.ones((erode_kernal_size, erode_kernal_size), np.uint8)
img_erosion = cv2.erode(img_bw, erode_kernal, iterations=erode_iterations)
output_img_bw = cv2.dilate(img_erosion, dilue_kernal, iterations=dilute_interations)
return output_img_bw
def reveal_blob(img_bw):
contours, hierarchy = cv2.findContours(img_bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
biggest_blob = max(contours, key = cv2.contourArea)
return biggest_blob
def save_picture(path, image):
cv2.imwrite(path, image)
# Entry point
# Get a list of all the .tif files to load
root_image_path = os.path.abspath(root_image_folder)
image_paths = glob.glob(root_image_path + "/*.tif")
# Create output folder if it doesn't already exist
os.makedirs(root_output_folder, exist_ok=True)
for image_path in image_paths:
# Load image as greyscale
img_bgr = cv2.imread(image_path)
img_grey = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
# Convert to binary image
img_bw = process_image(img_grey)
# draw blob onto a blank binary image
blob = reveal_blob(img_bw)
output_bw = np.zeros(img_bw.shape, np.uint8)
cv2.drawContours(output_bw, [blob], -1, (255,255,255), -1)
# Save blob to a file
save_picture(root_output_folder + "/" + os.path.basename(image_path), output_bw)
    # Display trace overlay over the original so performance can be measured
cv2.drawContours(img_bgr, [blob], -1, (0,255,0), 2)
cv2.imshow(image_path, img_bgr)
cv2.waitKey(0)
| [
"cv2.GaussianBlur",
"os.path.abspath",
"os.makedirs",
"cv2.dilate",
"cv2.waitKey",
"cv2.calcHist",
"cv2.threshold",
"cv2.imwrite",
"cv2.cvtColor",
"numpy.ones",
"numpy.zeros",
"os.path.basename",
"cv2.imread",
"cv2.drawContours",
"glob.glob",
"cv2.erode",
"cv2.imshow",
"cv2.findCon... | [((1721, 1755), 'os.path.abspath', 'os.path.abspath', (['root_image_folder'], {}), '(root_image_folder)\n', (1736, 1755), False, 'import os\n'), ((1770, 1807), 'glob.glob', 'glob.glob', (["(root_image_path + '/*.tif')"], {}), "(root_image_path + '/*.tif')\n", (1779, 1807), False, 'import glob\n'), ((1860, 1906), 'os.makedirs', 'os.makedirs', (['root_output_folder'], {'exist_ok': '(True)'}), '(root_output_folder, exist_ok=True)\n', (1871, 1906), False, 'import os\n'), ((563, 594), 'cv2.imshow', 'cv2.imshow', (['image_path', 'img_bgr'], {}), '(image_path, img_bgr)\n', (573, 594), False, 'import cv2\n'), ((599, 613), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (610, 613), False, 'import cv2\n'), ((721, 773), 'cv2.calcHist', 'cv2.calcHist', (['image_grey', '[0]', 'None', '[256]', '[0, 255]'], {}), '(image_grey, [0], None, [256], [0, 255])\n', (733, 773), False, 'import cv2\n'), ((879, 969), 'cv2.threshold', 'cv2.threshold', (['image_grey', '(common_grey - threshold_margin)', '(255)', 'cv2.THRESH_BINARY_INV'], {}), '(image_grey, common_grey - threshold_margin, 255, cv2.\n THRESH_BINARY_INV)\n', (892, 969), False, 'import cv2\n'), ((1056, 1115), 'numpy.ones', 'np.ones', (['(dilute_kernal_size, dilute_kernal_size)', 'np.uint8'], {}), '((dilute_kernal_size, dilute_kernal_size), np.uint8)\n', (1063, 1115), True, 'import numpy as np\n'), ((1135, 1192), 'numpy.ones', 'np.ones', (['(erode_kernal_size, erode_kernal_size)', 'np.uint8'], {}), '((erode_kernal_size, erode_kernal_size), np.uint8)\n', (1142, 1192), True, 'import numpy as np\n'), ((1211, 1271), 'cv2.erode', 'cv2.erode', (['img_bw', 'erode_kernal'], {'iterations': 'erode_iterations'}), '(img_bw, erode_kernal, iterations=erode_iterations)\n', (1220, 1271), False, 'import cv2\n'), ((1292, 1360), 'cv2.dilate', 'cv2.dilate', (['img_erosion', 'dilue_kernal'], {'iterations': 'dilute_interations'}), '(img_erosion, dilue_kernal, iterations=dilute_interations)\n', (1302, 1360), False, 'import cv2\n'), ((1439, 1501), 'cv2.findContours', 'cv2.findContours', (['img_bw', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(img_bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (1455, 1501), False, 'import cv2\n'), ((1620, 1644), 'cv2.imwrite', 'cv2.imwrite', (['path', 'image'], {}), '(path, image)\n', (1631, 1644), False, 'import cv2\n'), ((1983, 2005), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1993, 2005), False, 'import cv2\n'), ((2021, 2062), 'cv2.cvtColor', 'cv2.cvtColor', (['img_bgr', 'cv2.COLOR_BGR2GRAY'], {}), '(img_bgr, cv2.COLOR_BGR2GRAY)\n', (2033, 2062), False, 'import cv2\n'), ((2221, 2253), 'numpy.zeros', 'np.zeros', (['img_bw.shape', 'np.uint8'], {}), '(img_bw.shape, np.uint8)\n', (2229, 2253), True, 'import numpy as np\n'), ((2258, 2318), 'cv2.drawContours', 'cv2.drawContours', (['output_bw', '[blob]', '(-1)', '(255, 255, 255)', '(-1)'], {}), '(output_bw, [blob], -1, (255, 255, 255), -1)\n', (2274, 2318), False, 'import cv2\n'), ((2507, 2560), 'cv2.drawContours', 'cv2.drawContours', (['img_bgr', '[blob]', '(-1)', '(0, 255, 0)', '(2)'], {}), '(img_bgr, [blob], -1, (0, 255, 0), 2)\n', (2523, 2560), False, 'import cv2\n'), ((2563, 2594), 'cv2.imshow', 'cv2.imshow', (['image_path', 'img_bgr'], {}), '(image_path, img_bgr)\n', (2573, 2594), False, 'import cv2\n'), ((2599, 2613), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2610, 2613), False, 'import cv2\n'), ((509, 552), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['image_grey', '(0, 0)', 'sigma'], {}), '(image_grey, (0, 0), sigma)\n', (525, 
552), False, 'import cv2\n'), ((2388, 2416), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (2404, 2416), False, 'import os\n')] |
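The core of process_image is the unsharp-style high-pass filter on its first line: subtracting a heavily blurred copy removes slow illumination gradients before the global threshold. A minimal stand-alone sketch of just that step (the file name is a placeholder and, like the original, it relies on uint8 wrap-around arithmetic):

import cv2
img = cv2.imread("sample.tif", cv2.IMREAD_GRAYSCALE)           # placeholder input
sigma = 30
highpass = img - cv2.GaussianBlur(img, (0, 0), sigma) + 127   # re-centre the residual around mid-grey
cv2.imwrite("highpass.png", highpass)
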
# Parts of this file are modified from Tensorflow tutorial, licensed under the
# Apache License, Version 2.0, https://www.apache.org/licenses/LICENSE-2.0
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
"""Dataset download and creation, as well as other data-related functions."""
import collections
import functools
import json
import logging
import os
import pathlib
import shutil
import tarfile
import zipfile
from typing import Callable
import cv2
import numpy as np
import tensorflow as tf
from gan import util
logger = logging.getLogger(__name__)
def _save_images(images, data_dir, class_name):
class_dir = os.path.join(data_dir, class_name)
os.makedirs(class_dir, exist_ok=True)
for i, img in enumerate(images):
cv2.imwrite(f"{class_dir}/{i}.png", img)
# Create a bunch of shapes and save them.
def create_shapes(width, height, num_images, channels=3, data_dir="data"):
"""Creates basic shapes with the given width and height."""
# Add an alpha channel if we are in color. It's not going to matter for
# Tensorflow, but it's nice anyway.
if channels == 3:
channels += 1
size = (width, height, channels)
center = (width // 2, height // 2)
# For now, only create ellipses
logger.info(f"Creating {num_images} images with size {size}")
ellipses = [
cv2.ellipse(
np.full(size, 0, dtype=int),
center,
(
np.random.randint(width // 4, width // 2),
np.random.randint(width // 4, width // 2),
),
np.random.randint(0, 180), # angle
# start angle and end angle should always be 0 and 360 so we get full
# circles
0,
360,
255
if channels == 1
else (
np.random.randint(0, 256),
np.random.randint(0, 256),
np.random.randint(0, 256),
255,
),
thickness=-1, # -1 thickness fills the shape
)
for _ in range(num_images)
]
logger.info("Saving images to disk")
_save_images(ellipses, data_dir, "ellipse")
def prepare_cartoon(data_dir, tar_filename):
"""Prepare the cartoon avatar dataset for training."""
if not os.path.exists(tar_filename):
raise Exception("Tarfile does not exist")
extract_dir = os.path.join(data_dir, "__tmp")
# All images will be stored as "face" class label, no matter what their
# attributes are.
class_dir = os.path.join(data_dir, "face")
os.makedirs(class_dir, exist_ok=True)
logger.info("Extracting cartoon tarfile")
with tarfile.open(tar_filename) as f:
f.extractall(extract_dir)
path = pathlib.Path(extract_dir)
logger.info("Saving images")
for image_path in path.rglob("*.png"):
image_path.rename(os.path.join(class_dir, image_path.name))
logger.info("Saving metadata")
for csv_path in path.rglob("*.csv"):
csv_path.rename(os.path.join(class_dir, csv_path.name))
shutil.rmtree(extract_dir, ignore_errors=True)
def prepare_coco(image_dir, text_dir, zip_dir):
"""Prepare the Coco dataset.
Only uses the validation dataset from 2017 (since it has a reasonable size).
"""
# https://cocodataset.org/#download
    # Everything below is hardcoded for the validation set for 2017 due to its
# limited file size.
images_zip_url = "http://images.cocodataset.org/zips/val2017.zip"
annotations_zip_url = (
"http://images.cocodataset.org/annotations/annotations_trainval2017.zip"
)
instances_json_filename = "instances_val2017.json"
captions_json_filename = "captions_val2017.json"
file_source_dir = "val2017"
images_zip_filename = os.path.join(zip_dir, os.path.basename(images_zip_url))
annotations_zip_filename = os.path.join(
zip_dir, os.path.basename(annotations_zip_url)
)
if not os.path.exists(annotations_zip_filename):
logger.info(f"Annotations not found, downloading to {annotations_zip_filename}")
util.download_file(annotations_zip_url, annotations_zip_filename)
else:
logger.info(f"Annotations zip found at {annotations_zip_filename}")
if not os.path.exists(images_zip_filename):
logger.info(f"Images not found, downloading to {images_zip_filename}")
util.download_file(images_zip_url, images_zip_filename)
else:
logger.info(f"Images zip found at {images_zip_filename}")
extract_dir = os.path.join(zip_dir, "__tmp")
logger.info("Extracting images zipfile")
with zipfile.ZipFile(images_zip_filename) as f:
f.extractall(extract_dir)
logger.info("Extracting annotations zipfile")
with zipfile.ZipFile(annotations_zip_filename) as f:
f.extractall(extract_dir)
# Move images to their appropriate place according to their annotations
# This will cause duplicates if there are multiple images with the same
# class label.
logger.info("Preparing annotations")
with open(os.path.join(extract_dir, "annotations", instances_json_filename)) as f:
instances = json.load(f)
with open(os.path.join(extract_dir, "annotations", captions_json_filename)) as f:
captions = json.load(f)
category_lookup = {
c["id"]: (c["name"], c["supercategory"]) for c in instances["categories"]
}
image_lookup = {i["id"]: i["file_name"] for i in instances["images"]}
logger.info("Copying images")
for annotation in instances["annotations"]:
image_id = annotation["image_id"]
category_id = annotation["category_id"]
category_name, _ = category_lookup[category_id]
file_source = os.path.join(extract_dir, file_source_dir, image_lookup[image_id])
ext = os.path.splitext(file_source)[1]
dir_destination = os.path.join(image_dir, category_name, f"{image_id}{ext}")
os.makedirs(os.path.dirname(dir_destination), exist_ok=True)
shutil.copy(file_source, dir_destination)
grouped_captions = collections.defaultdict(list)
for annotation in captions["annotations"]:
grouped_captions[annotation["image_id"]].append(annotation["caption"])
os.makedirs(text_dir, exist_ok=True)
logger.info("Copying captions")
for image_id, captions in grouped_captions.items():
with open(os.path.join(text_dir, f"{image_id}.txt"), "w") as f:
f.write("\n".join(captions))
shutil.rmtree(extract_dir, ignore_errors=True)
def _get_label(class_names, file_path):
# Convert the path to a list of path components
parts = tf.strings.split(file_path, os.path.sep)
# The second to last is the class-directory. This returns a one-hot
# encoded array with class labels
return parts[-2] == class_names
def _decode_img(width, height, channels, img):
# Convert the compressed string to a uint8 tensor
img = tf.image.decode_png(img)
# If we have 4 channels, ignore the alpha channel for now.
if tf.shape(img)[2] == 4:
img = img[:, :, :3]
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
img = tf.image.convert_image_dtype(img, tf.float32)
# Resize the image to the desired size.
img = tf.image.resize(img, [width, height])
# If we have 1 channel but require 3, convert to rgb!
if channels == 3 and tf.shape(img)[2] == 1:
img = tf.image.grayscale_to_rgb(img)
# Rescale to be between -1 and 1
return (img - 0.5) / 0.5
def _process_path(get_label: Callable, decode_img: Callable, file_path: str):
label = get_label(file_path)
# Load the raw data from the file as a string
img = tf.io.read_file(file_path)
img = decode_img(img)
return img, label
def create_dataset(data_dir: str, width: int, height: int, channels: int):
# Prepare a dataset with the images.
# According to the guide on loading image data, we should probably use tf.data
# since this way of loading data is more efficient than e.g.
# keras.preprocessing
# https://www.tensorflow.org/tutorials/load_data/images
# Find class names based on directory structure
data_dir = pathlib.Path(data_dir)
class_names = np.array(sorted([item.name for item in data_dir.glob("*")]))
logger.info(f"Found class names: {class_names}")
# Create helper functions
get_label = functools.partial(_get_label, class_names)
decode_img = functools.partial(_decode_img, width, height, channels)
process_path = functools.partial(_process_path, get_label, decode_img)
# Make a tf.data.Dataset from the files in the directory.
list_ds = tf.data.Dataset.list_files(
[str(data_dir / "*/*.png"), str(data_dir / "*/*.jpg")]
)
labeled_ds = list_ds.map(
process_path, num_parallel_calls=tf.data.experimental.AUTOTUNE
)
return labeled_ds, class_names
| [
"tensorflow.image.grayscale_to_rgb",
"collections.defaultdict",
"tensorflow.image.decode_png",
"pathlib.Path",
"gan.util.download_file",
"numpy.random.randint",
"shutil.rmtree",
"os.path.join",
"shutil.copy",
"numpy.full",
"cv2.imwrite",
"os.path.dirname",
"os.path.exists",
"tarfile.open",... | [((541, 568), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (558, 568), False, 'import logging\n'), ((635, 669), 'os.path.join', 'os.path.join', (['data_dir', 'class_name'], {}), '(data_dir, class_name)\n', (647, 669), False, 'import os\n'), ((674, 711), 'os.makedirs', 'os.makedirs', (['class_dir'], {'exist_ok': '(True)'}), '(class_dir, exist_ok=True)\n', (685, 711), False, 'import os\n'), ((2394, 2425), 'os.path.join', 'os.path.join', (['data_dir', '"""__tmp"""'], {}), "(data_dir, '__tmp')\n", (2406, 2425), False, 'import os\n'), ((2541, 2571), 'os.path.join', 'os.path.join', (['data_dir', '"""face"""'], {}), "(data_dir, 'face')\n", (2553, 2571), False, 'import os\n'), ((2576, 2613), 'os.makedirs', 'os.makedirs', (['class_dir'], {'exist_ok': '(True)'}), '(class_dir, exist_ok=True)\n', (2587, 2613), False, 'import os\n'), ((2749, 2774), 'pathlib.Path', 'pathlib.Path', (['extract_dir'], {}), '(extract_dir)\n', (2761, 2774), False, 'import pathlib\n'), ((3065, 3111), 'shutil.rmtree', 'shutil.rmtree', (['extract_dir'], {'ignore_errors': '(True)'}), '(extract_dir, ignore_errors=True)\n', (3078, 3111), False, 'import shutil\n'), ((4534, 4564), 'os.path.join', 'os.path.join', (['zip_dir', '"""__tmp"""'], {}), "(zip_dir, '__tmp')\n", (4546, 4564), False, 'import os\n'), ((6072, 6101), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (6095, 6101), False, 'import collections\n'), ((6233, 6269), 'os.makedirs', 'os.makedirs', (['text_dir'], {'exist_ok': '(True)'}), '(text_dir, exist_ok=True)\n', (6244, 6269), False, 'import os\n'), ((6481, 6527), 'shutil.rmtree', 'shutil.rmtree', (['extract_dir'], {'ignore_errors': '(True)'}), '(extract_dir, ignore_errors=True)\n', (6494, 6527), False, 'import shutil\n'), ((6634, 6674), 'tensorflow.strings.split', 'tf.strings.split', (['file_path', 'os.path.sep'], {}), '(file_path, os.path.sep)\n', (6650, 6674), True, 'import tensorflow as tf\n'), ((6935, 6959), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['img'], {}), '(img)\n', (6954, 6959), True, 'import tensorflow as tf\n'), ((7166, 7211), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (7194, 7211), True, 'import tensorflow as tf\n'), ((7267, 7304), 'tensorflow.image.resize', 'tf.image.resize', (['img', '[width, height]'], {}), '(img, [width, height])\n', (7282, 7304), True, 'import tensorflow as tf\n'), ((7697, 7723), 'tensorflow.io.read_file', 'tf.io.read_file', (['file_path'], {}), '(file_path)\n', (7712, 7723), True, 'import tensorflow as tf\n'), ((8192, 8214), 'pathlib.Path', 'pathlib.Path', (['data_dir'], {}), '(data_dir)\n', (8204, 8214), False, 'import pathlib\n'), ((8395, 8437), 'functools.partial', 'functools.partial', (['_get_label', 'class_names'], {}), '(_get_label, class_names)\n', (8412, 8437), False, 'import functools\n'), ((8455, 8510), 'functools.partial', 'functools.partial', (['_decode_img', 'width', 'height', 'channels'], {}), '(_decode_img, width, height, channels)\n', (8472, 8510), False, 'import functools\n'), ((8530, 8585), 'functools.partial', 'functools.partial', (['_process_path', 'get_label', 'decode_img'], {}), '(_process_path, get_label, decode_img)\n', (8547, 8585), False, 'import functools\n'), ((757, 797), 'cv2.imwrite', 'cv2.imwrite', (['f"""{class_dir}/{i}.png"""', 'img'], {}), "(f'{class_dir}/{i}.png', img)\n", (768, 797), False, 'import cv2\n'), ((2295, 2323), 'os.path.exists', 'os.path.exists', 
(['tar_filename'], {}), '(tar_filename)\n', (2309, 2323), False, 'import os\n'), ((2670, 2696), 'tarfile.open', 'tarfile.open', (['tar_filename'], {}), '(tar_filename)\n', (2682, 2696), False, 'import tarfile\n'), ((3804, 3836), 'os.path.basename', 'os.path.basename', (['images_zip_url'], {}), '(images_zip_url)\n', (3820, 3836), False, 'import os\n'), ((3900, 3937), 'os.path.basename', 'os.path.basename', (['annotations_zip_url'], {}), '(annotations_zip_url)\n', (3916, 3937), False, 'import os\n'), ((3956, 3996), 'os.path.exists', 'os.path.exists', (['annotations_zip_filename'], {}), '(annotations_zip_filename)\n', (3970, 3996), False, 'import os\n'), ((4095, 4160), 'gan.util.download_file', 'util.download_file', (['annotations_zip_url', 'annotations_zip_filename'], {}), '(annotations_zip_url, annotations_zip_filename)\n', (4113, 4160), False, 'from gan import util\n'), ((4259, 4294), 'os.path.exists', 'os.path.exists', (['images_zip_filename'], {}), '(images_zip_filename)\n', (4273, 4294), False, 'import os\n'), ((4383, 4438), 'gan.util.download_file', 'util.download_file', (['images_zip_url', 'images_zip_filename'], {}), '(images_zip_url, images_zip_filename)\n', (4401, 4438), False, 'from gan import util\n'), ((4620, 4656), 'zipfile.ZipFile', 'zipfile.ZipFile', (['images_zip_filename'], {}), '(images_zip_filename)\n', (4635, 4656), False, 'import zipfile\n'), ((4757, 4798), 'zipfile.ZipFile', 'zipfile.ZipFile', (['annotations_zip_filename'], {}), '(annotations_zip_filename)\n', (4772, 4798), False, 'import zipfile\n'), ((5159, 5171), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5168, 5171), False, 'import json\n'), ((5278, 5290), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5287, 5290), False, 'import json\n'), ((5730, 5796), 'os.path.join', 'os.path.join', (['extract_dir', 'file_source_dir', 'image_lookup[image_id]'], {}), '(extract_dir, file_source_dir, image_lookup[image_id])\n', (5742, 5796), False, 'import os\n'), ((5870, 5928), 'os.path.join', 'os.path.join', (['image_dir', 'category_name', 'f"""{image_id}{ext}"""'], {}), "(image_dir, category_name, f'{image_id}{ext}')\n", (5882, 5928), False, 'import os\n'), ((6006, 6047), 'shutil.copy', 'shutil.copy', (['file_source', 'dir_destination'], {}), '(file_source, dir_destination)\n', (6017, 6047), False, 'import shutil\n'), ((7426, 7456), 'tensorflow.image.grayscale_to_rgb', 'tf.image.grayscale_to_rgb', (['img'], {}), '(img)\n', (7451, 7456), True, 'import tensorflow as tf\n'), ((1370, 1397), 'numpy.full', 'np.full', (['size', '(0)'], {'dtype': 'int'}), '(size, 0, dtype=int)\n', (1377, 1397), True, 'import numpy as np\n'), ((1578, 1603), 'numpy.random.randint', 'np.random.randint', (['(0)', '(180)'], {}), '(0, 180)\n', (1595, 1603), True, 'import numpy as np\n'), ((2877, 2917), 'os.path.join', 'os.path.join', (['class_dir', 'image_path.name'], {}), '(class_dir, image_path.name)\n', (2889, 2917), False, 'import os\n'), ((3020, 3058), 'os.path.join', 'os.path.join', (['class_dir', 'csv_path.name'], {}), '(class_dir, csv_path.name)\n', (3032, 3058), False, 'import os\n'), ((5066, 5131), 'os.path.join', 'os.path.join', (['extract_dir', '"""annotations"""', 'instances_json_filename'], {}), "(extract_dir, 'annotations', instances_json_filename)\n", (5078, 5131), False, 'import os\n'), ((5187, 5251), 'os.path.join', 'os.path.join', (['extract_dir', '"""annotations"""', 'captions_json_filename'], {}), "(extract_dir, 'annotations', captions_json_filename)\n", (5199, 5251), False, 'import os\n'), ((5811, 5840), 'os.path.splitext', 
'os.path.splitext', (['file_source'], {}), '(file_source)\n', (5827, 5840), False, 'import os\n'), ((5949, 5981), 'os.path.dirname', 'os.path.dirname', (['dir_destination'], {}), '(dir_destination)\n', (5964, 5981), False, 'import os\n'), ((7031, 7044), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (7039, 7044), True, 'import tensorflow as tf\n'), ((1449, 1490), 'numpy.random.randint', 'np.random.randint', (['(width // 4)', '(width // 2)'], {}), '(width // 4, width // 2)\n', (1466, 1490), True, 'import numpy as np\n'), ((1508, 1549), 'numpy.random.randint', 'np.random.randint', (['(width // 4)', '(width // 2)'], {}), '(width // 4, width // 2)\n', (1525, 1549), True, 'import numpy as np\n'), ((6381, 6422), 'os.path.join', 'os.path.join', (['text_dir', 'f"""{image_id}.txt"""'], {}), "(text_dir, f'{image_id}.txt')\n", (6393, 6422), False, 'import os\n'), ((7389, 7402), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (7397, 7402), True, 'import tensorflow as tf\n'), ((1830, 1855), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (1847, 1855), True, 'import numpy as np\n'), ((1873, 1898), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (1890, 1898), True, 'import numpy as np\n'), ((1916, 1941), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {}), '(0, 256)\n', (1933, 1941), True, 'import numpy as np\n')] |
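A hedged usage sketch for create_dataset; it assumes an image directory laid out as one sub-folder per class, which is what create_shapes, prepare_cartoon and prepare_coco produce ("data" is a placeholder path):

ds, class_names = create_dataset("data", width=64, height=64, channels=3)
batched = ds.shuffle(1000).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
for images, labels in batched.take(1):
    print(images.shape)   # (32, 64, 64, 3), pixel values rescaled to [-1, 1]
    print(labels.shape)   # (32, len(class_names)), boolean one-hot labels
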
from __future__ import division
import collections
import numpy as np
from numpy import random
class MarkovException(Exception):
"""Exception raised when there is a problem with a markov chain
"""
pass
class MarkovChain:
"""Markov Chain modeling
Args:
        pi (list) : A vector containing the probability for each state to be the initial state
transition : The matrix holding the transition probabilities
Attributes:
Each attribute starting with an underscore (_)
is semi private (usable but not recommended)
Each attribute starting with a double underscore (__)
are meant to be private and not callable from outside the class.
        pi (list) : A vector containing the probability for each state to be the initial state
transition (np.array) : The matrix holding the transition probabilities
_cannonical (np.array) : Holds the cannonical representation of the transition matrix
_Q (np.array) : Holds the submatrix for transient states (t * t, with t size of transient states)
_R (np.array) : Holds the submatrix for absorbant states (t * r, with r size of absorbant states)
_N (np.array) : Holds the fundamental matrix of the markov chain
_B (np.array) : Holds the probability for each transient state to end up in each absorbant states
__index_mapping (dictionary) : Map every index to their new index for _B.
"""
def __init__(self, pi, transition):
self.pi = np.copy(pi)
self.transition = np.copy(transition)
self.__index_mapping = {}
self._cannonical, self._Q, self._R = self._compute_cannonical()
self._N = self._compute_fondamental()
self._B = np.dot(self._N, self._R)
if( not self.__check_markovian()):
raise MarkovException("The tansition matrix does not correspond to a markov chain")
@staticmethod
def check_markovian(transition):
"""Checks wether a given matrix is a markov transition matrix or not.
It checks if the matrix is square and if the sum of each row equals 1.
Args:
transition (np.array) : The matrix to test
Returns:
(boolean) : True if the matrix is a markov transition matrix, False otherwise.
"""
#If the matrix is not square then it's not a markov chain.
if(transition.shape[0] != transition.shape[1]):
return False
for i, row in enumerate(transition):
total = sum(row)
#Using absolute difference with 10e-05 error to check if total == 1
#Solve float arithmetic error
if(abs(1-total) > 0.00001):
return False
return True
def __check_markovian(self):
"""Check if the matrix of this markov chain is correct.
Args:
None
Returns:
(boolean) : True if the matrix is a markov transition matrix, False otherwise.
"""
return MarkovChain.check_markovian(self.transition)
def find_absorbant_state(self):
"""Return a list containing the index of every absorbant states
Args:
None
Returns:
            indexes (list) : list containing the indexes of all absorbant states
"""
diag = np.diagonal(self.transition)
indexes = np.where(diag == 1)
return indexes[0]
    def _compute_canonical(self):
        """Computes the canonical form of the transition matrix
        and extracts useful submatrices of this canonical form
        Args:
            None
        Returns:
            canonical (np.array) : The canonical form of the transition matrix
            Q (np.array) : The t*t upper left submatrix.
            R (np.array) : The t*r upper right submatrix
        """
        #Get absorbing state indexes
        absorbants_indexes = self.find_absorbant_state()
        #Deduce transient indexes by taking every other index
        transient_indexes = [x for x in range(len(self.transition)) if x not in absorbants_indexes]
        r = len(absorbants_indexes)
        t = len(transient_indexes)
        #Create a mapping for indexes (since columns and rows will be interchanged)
        for i, item in enumerate(absorbants_indexes):
            self.__index_mapping[item] = i
        for i, item in enumerate(transient_indexes):
            self.__index_mapping[item] = i
        #If the absorbing states are the last indexes, then the matrix is already canonical
        last = range(len(self.transition))[-len(absorbants_indexes):]
        if(sorted(last) == sorted(absorbants_indexes)):
            canonical = np.copy(self.transition)
        else:
            #Copy columns for readability
            absorbants_col = self.transition[:, absorbants_indexes]
            transient_col = self.transition[:, transient_indexes]
            #Reconcatenate (execute the swap)
            canonical = np.concatenate((transient_col, absorbants_col), axis=1)
            #Without temp vars :
            #canonical = np.concatenate((self.transition[:, transient_indexes], self.transition[:, absorbants_indexes]), axis=1)
            #Same for rows
            absorbants_row = canonical[absorbants_indexes]
            transient_row = canonical[transient_indexes]
            canonical = np.concatenate((transient_row, absorbants_row), axis=0)
            #Without temp vars :
            #canonical = np.concatenate((canonical[transient_indexes], canonical[absorbants_indexes]), axis=0)
        Q = canonical[np.ix_(range(t), range(t))]
        new_absor_index = [x for x in range(t+r) if x not in range(t)]
        R = canonical[np.ix_(range(t), new_absor_index)]
        return canonical, Q, R
    def _compute_fundamental(self):
        """Computes the fundamental matrix corresponding to the canonical form
        Requires the canonical form to be computed beforehand (see self._compute_canonical())
        It is computed by taking the submatrix Q and performing:
        N = (I-Q)^-1
        Args:
            None
        Returns:
            N (np.array) : The fundamental matrix
        """
        I = np.identity(self._Q.shape[1])
        N = np.linalg.inv(I - self._Q)
        return N
    def absorbing_probability(self, current_state, reaching_state):
        """Computes the probability to end in a given absorbing state based on the current state.
        Args:
            current_state (int) : The index of the current state
            reaching_state (int) : The index of the absorbing state where we want to end up
        Returns:
            (float) : probability to reach 'reaching_state' from 'current_state'
        """
        if(current_state in self.find_absorbant_state()):
            #An absorbing state stays where it is forever
            return 1.0 if current_state == reaching_state else 0.0
        i = self.__index_mapping[current_state]
        j = self.__index_mapping[reaching_state]
        return self._B[i][j]
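

# Illustrative usage sketch (added example, not part of the original
# module): a tiny gambler's-ruin chain where states 0 and 2 absorb and
# state 1 is transient, so absorption from state 1 into state 0 has
# probability 0.5.
def _demo_markov_chain():
    transition = np.array([[1.0, 0.0, 0.0],
                           [0.5, 0.0, 0.5],
                           [0.0, 0.0, 1.0]])
    chain = MarkovChain(pi=[0.0, 1.0, 0.0], transition=transition)
    print(chain.absorbing_probability(1, 0))  # expected: 0.5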
class ExpectationMaximisation:
"""EM Algorithm applied to Markov Chain.
Args:
sequences (list): The sequences to clusterize.
states (list): The exhaustive list of states.
        nb_clusters (int): The number of clusters to create.
Attributes:
Each attribute starting with an underscore (_)
is semi private (usable but not recommended)
Each attribute starting with a double underscore (__)
are meant to be private and not callable from outside the class.
clusters (list): A list holding the different clusters, with their associated
markov chain.
weighted_compatibility (np.array): A matrix (size m*k) holding the probability
for each sequence to be in each cluster.
_sequences (list): The sequences to clusterize.
_states (list): The exhaustive list of states.
        _weights (np.array): The probability to be in each cluster, P(c) for 0 <= c < k.
        __m (int): The number of sequences used.
        __k (int): The number of clusters to create.
"""
def __init__(self, sequences, states, nb_clusters):
self._sequences = sequences
self.__m = len(sequences)
self._states = states
self.__k = nb_clusters
self._weights = np.zeros(self.__k)
self.weighted_compatibility = np.zeros(shape=(self.__m, self.__k))
self.clusters = []
self.__initialize()
def __initialize(self):
"""Randomly associate sequence to a cluster.
Args:
None
Retuns:
None
"""
#Variable aliases to improve readability
m = self.__m
k = self.__k
for i, s in enumerate(self._sequences):
#Random association to a cluster
c = random.randint(0, k)
#The probability is certain so its 1
self.weighted_compatibility[i][c] = 1
for c in range(k):
#@todo fix empty cluster exception
if(sum(self.weighted_compatibility[:,c]) == 0):
raise MarkovException("Empty cluster")
#pi is the probability for each state to start on this markov chain
#a is the matrix holding the probabilities for each state to go to each other state
markov = self._compute_markov_chain(c)
#pi, a = compute_markov_chain(sequences, weighted_compatibility, states, c)
#Weights is the probability to be in cluster c, P(c)
self._weights[c] = sum(self.weighted_compatibility[:,c])/m
self.clusters.append(markov)
#return weighted_compatibility, clusters, weights
def _compute_markov_chain(self, c):
"""Compute a markov chain for a specific cluster
Args:
c (int): The index of the cluster for which the markov chain
will be computed.
        Returns:
            (MarkovChain): The Markov chain fitted to cluster c (holding the initial distribution pi and the transition matrix a).
"""
#Initialize values
pi = []
n = len(self._states)
a = np.zeros(shape=(n, n))
        #We first calculate pi_c(state), with c being the cluster and state the state index.
        #For this, we compute two terms.
        #The first one is the sum of the probabilities of sequence Si belonging to cluster c, knowing Si and phi :
        #sum( P(ci = c | Si, phi) )
        #with ci being the cluster of sequence i, Si the sequence i, and phi the characteristics of the cluster
        #(in this case phi is the Markov chain).
        #P(ci = c | Si, phi) is computed in another step, and is held in weighted_compatibility :
        #P(ci = c | Si, phi) is actually weighted_compatibility[i][c]
        #The second term is the same sum, but we only keep terms where the sequence starts with the given state :
        #sum( P(ci = c | Si, phi) * delta(state, initial_state) )
        #where P(ci = c | Si, phi) is the same as above and
        #delta(state, initial_state) = 1 if state == initial_state else 0
        #(Kronecker delta)
        #With these two terms, pi_c(state) = term2 / term1
for t, state in enumerate(self._states):
total_ponderated = 0
            #r_state holds, for each sequence, the number of transitions out of the current state
r_state = []
for i, s in enumerate(self._sequences):
if(s[0] == state):
total_ponderated += self.weighted_compatibility[i][c]
#We don't count the last element for transitions
nb_transitions = s[:-1].count(state)
r_state.append(nb_transitions)
total = sum(self.weighted_compatibility[:,c])
#We add the newly calculated pi to the array
pi.append(total_ponderated/total)
for _, state_prime in enumerate(self._states):
                #r_state_prime holds, for each sequence, the number of transitions from state to state_prime
r_state_prime = []
for i, s in enumerate(self._sequences):
nb_transitions = 0
for j in range(len(s)-1):
if(s[j] == state and s[j+1] == state_prime):
nb_transitions += 1
r_state_prime.append(nb_transitions)
                #To calculate the transition probability from one state to another, we first compute two terms.
                #The first term is the sum of the probabilities of sequence i being in cluster c, knowing the sequence and phi, weighted by the number of matching transitions :
                #sum( P(ci = c | Si, phi) * r_state_prime )
                #The second one is the same sum but with a different weighting :
                #sum( P(ci = c | Si, phi) * r_state )
dividend = sum(self.weighted_compatibility[:, c]*r_state_prime)
divisor = sum(self.weighted_compatibility[:, c]*r_state)
                #Handle the end-point case where the state is never left
                if(divisor == 0):
                    #Make the state absorbing by setting the diagonal to 1
                    a[state][state] = 1
else:
a[state][state_prime] = dividend/divisor
return MarkovChain(pi, a)
def _compute_compatibilities(self, sequence):
"""Calculate the compatibility between a given sequence and each cluster.
Args:
sequence (list): A list of different states (each state is an int).
Returns:
compatibility (np.array): An array holding the compatibility for each cluster.
"""
#Aliases for readability
k = self.__k
initial_state = 0
compatibilities = np.zeros(k)
for c, cluster in enumerate(self.clusters):
proba = 1
for state in range(len(sequence)-1):
proba *= cluster.transition[sequence[state]][sequence[state+1]]
            #proba now holds the probability of sequence i having been generated by the Markov chain of cluster c
            compatibilities[c] = cluster.pi[sequence[initial_state]]*proba
        #compatibility = P(Si | ci = c, Phi) = P(Si | Phi) = pi(ei,1) * Product(ac(ei,t-1; ei,t))
        #with ac(ei,t-1; ei,t) being the transition taken in the Markov chain.
#We multiply by P(c) for each compatibility.
compatibilities = compatibilities * self._weights
return compatibilities
def _compute_sequence_in_cluster_probabilities(self, sequence):
"""Give the probabilities for a given sequence to belong to each cluster.
        Args:
            sequence (list): The sequence to clusterize
Returns:
(list) : List containing the probabilities for the sequence to be in each cluster
"""
compatibilities = self._compute_compatibilities(sequence)
#print(compatibilities)
compatibilities /= sum(compatibilities)
return compatibilities
def _expectation(self):
"""The expectation part of EM algorithm.
Args:
None
Returns:
            weighted_compatibility (np.array): The new matrix holding the probabilities
for each sequence to belong in each cluster.
"""
#Aliases for readability
m = self.__m
k = self.__k
        #Temporary matrix: the old one is still needed to compute the delta
weighted_compatibility = np.zeros(shape=(m, k))
for i, s in enumerate(self._sequences):
weighted_compatibility[i] = self._compute_sequence_in_cluster_probabilities(s)
return weighted_compatibility
def _maximisation(self):
"""The maximisation part of EM algorithm.
Args:
None
Returns:
None
"""
#Aliases for readability
m = self.__m
#Temp variable
clusters = []
for c, cluster in enumerate(self.clusters):
#We recompute P(c)
self._weights[c] = (1/m)*sum(self.weighted_compatibility[:,c])
#Recompute markov chain and add to the list
clusters.append(self._compute_markov_chain(c))
#@todo change current clusters instead of creating new one
self.clusters = clusters
def fit(self):
"""Clusterize the sequence passed at init with the number of cluster specified.
Args:
None
Returns:
None
"""
#weighted_compatibility, clusters, weights = initialize_EM(sequences, states, k)
delta = 1
        #Loop until the probabilities barely change between iterations (1e-4 tolerance)
        while delta > 0.0001:
            new_weighted_compatibility = self._expectation()
            #Update the delta
            delta = np.mean(abs(new_weighted_compatibility - self.weighted_compatibility))
            print(delta)
self.weighted_compatibility = new_weighted_compatibility
self._maximisation()
#Return not necessary, to see
#return self.clusters, self.weighted_compatibility
    def predict_proba_hard(self, sequence, final_state):
        """Give the probability for a sequence to end in final_state.
        The sequence is assigned to the cluster with the highest membership probability.
        Args:
            sequence (list) : A list of states to clusterize and predict
            final_state (int) : The reaching state wanted
        Returns:
            (float) : The probability for the sequence to end in final_state
        """
        probas = self._compute_sequence_in_cluster_probabilities(sequence)
        #np.argmax returns a plain int, which the list indexing below requires
        c = int(np.argmax(probas))
        #@todo add threshold for selecting probas
        cluster = self.clusters[c]
        return cluster.absorbing_probability(sequence[-1], final_state)
def predict_proba_soft(self, sequence, final_state):
"""Give the proba for a sequence to end in final state
The probas is ponderated by the probability of belonging to each cluster
Args:
sequence (list) : A list of states to clusterize and predict
final_state (int) : The reaching state wanted
Returns:
(float) : The probability for sequence to end in final state)
"""
probas = self._compute_sequence_in_cluster_probabilities(sequence)
abs_proba = 0
for c, cluster in enumerate(self.clusters):
abs_proba += (probas[c]*cluster.absorbing_probability(sequence[-1], final_state))
return abs_proba
def __str__(self):
"""Overiding the __str__ magic method to display the clusters and their distribution properly
Args:
None
Returns:
A string representation of the EM object
"""
value = "============================\nWeighted probability matrix : \n"
temp = ""
for i, row in enumerate(self.weighted_compatibility):
temp = "\t[ "
for j, col in enumerate(row):
temp = "".join([temp, "{:.2f} ".format(col)])
temp = "".join([temp, "]\t"])
temp = "".join([temp, "\t{0}".format(self._sequences[i])])
temp = "".join([temp, "\n"])
value = "".join([value, temp])
value = "".join([value, "============================\n"])
value = "".join([value, "\n============================\nClusters : \n"])
for c, cluster in enumerate(self.clusters):
temp = "\tCluster {0} : \n".format(c)
temp = "".join([temp, "\t\t Transition Matrix : \n"])
for i, row in enumerate(cluster.transition):
temp = "".join([temp, "\t\t[ "])
for j, col in enumerate(row):
temp = "".join([temp, "{:.2f} ".format(col)])
temp = "".join([temp, "]\n"])
temp = "".join([temp, "\n\t\t Absorbing Probability : \n"])
for i, row in enumerate(cluster._B):
temp = "".join([temp, "\t\t[ "])
for j, col in enumerate(row):
#temp = "".join([temp, "\t\t[ "])
temp = "".join([temp, "{:.2f} ".format(col)])
temp = "".join([temp, "]\n"])
value = "".join([value, temp])
value = "".join([value, "============================\n"])
return value
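

# Illustrative usage sketch (added example): cluster four toy sequences
# whose values double as state indices, as _compute_markov_chain assumes.
# An unlucky random initialisation can raise MarkovException("Empty
# cluster"); seeding makes this toy run reproducible.
def _demo_expectation_maximisation():
    np.random.seed(0)
    sequences = [[0, 1, 2], [0, 0, 1], [2, 2, 2], [1, 2, 2]]
    em = ExpectationMaximisation(sequences, states=[0, 1, 2], nb_clusters=2)
    em.fit()
    print(em.weighted_compatibility)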
| [
"numpy.copy",
"numpy.zeros",
"numpy.identity",
"numpy.where",
"numpy.linalg.inv",
"numpy.random.randint",
"numpy.dot",
"numpy.concatenate",
"numpy.diagonal"
] | [((1558, 1569), 'numpy.copy', 'np.copy', (['pi'], {}), '(pi)\n', (1565, 1569), True, 'import numpy as np\n'), ((1596, 1615), 'numpy.copy', 'np.copy', (['transition'], {}), '(transition)\n', (1603, 1615), True, 'import numpy as np\n'), ((1786, 1810), 'numpy.dot', 'np.dot', (['self._N', 'self._R'], {}), '(self._N, self._R)\n', (1792, 1810), True, 'import numpy as np\n'), ((3369, 3397), 'numpy.diagonal', 'np.diagonal', (['self.transition'], {}), '(self.transition)\n', (3380, 3397), True, 'import numpy as np\n'), ((3416, 3435), 'numpy.where', 'np.where', (['(diag == 1)'], {}), '(diag == 1)\n', (3424, 3435), True, 'import numpy as np\n'), ((6295, 6324), 'numpy.identity', 'np.identity', (['self._Q.shape[1]'], {}), '(self._Q.shape[1])\n', (6306, 6324), True, 'import numpy as np\n'), ((6337, 6363), 'numpy.linalg.inv', 'np.linalg.inv', (['(I - self._Q)'], {}), '(I - self._Q)\n', (6350, 6363), True, 'import numpy as np\n'), ((8349, 8367), 'numpy.zeros', 'np.zeros', (['self.__k'], {}), '(self.__k)\n', (8357, 8367), True, 'import numpy as np\n'), ((8406, 8442), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.__m, self.__k)'}), '(shape=(self.__m, self.__k))\n', (8414, 8442), True, 'import numpy as np\n'), ((10146, 10168), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)'}), '(shape=(n, n))\n', (10154, 10168), True, 'import numpy as np\n'), ((13713, 13724), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (13721, 13724), True, 'import numpy as np\n'), ((15445, 15467), 'numpy.zeros', 'np.zeros', ([], {'shape': '(m, k)'}), '(shape=(m, k))\n', (15453, 15467), True, 'import numpy as np\n'), ((4739, 4763), 'numpy.copy', 'np.copy', (['self.transition'], {}), '(self.transition)\n', (4746, 4763), True, 'import numpy as np\n'), ((5025, 5080), 'numpy.concatenate', 'np.concatenate', (['(transient_col, absorbants_col)'], {'axis': '(1)'}), '((transient_col, absorbants_col), axis=1)\n', (5039, 5080), True, 'import numpy as np\n'), ((5419, 5474), 'numpy.concatenate', 'np.concatenate', (['(transient_row, absorbants_row)'], {'axis': '(0)'}), '((transient_row, absorbants_row), axis=0)\n', (5433, 5474), True, 'import numpy as np\n'), ((8858, 8878), 'numpy.random.randint', 'random.randint', (['(0)', 'k'], {}), '(0, k)\n', (8872, 8878), False, 'from numpy import random\n')] |
import numpy as np
from ex1.data_perturb import DataPerturb
class DataPerturbUniform(DataPerturb):
def __init__(self, k=1.0):
self.k = k
@property
def k(self):
return self._k
@k.setter
def k(self, value):
self._k = float(value)
def perturb_data(self, x):
noise = np.random.random_sample(size=x.shape)
scaled_noise = 2 * self._k * noise - self._k
x_perturbed = x + scaled_noise
# clipping values outside of [0,1]
x_perturbed[x_perturbed >= 1] = 1
x_perturbed[x_perturbed <= 0] = 0
return x_perturbed
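

# Illustrative usage sketch (added example): perturb a random matrix and
# check that the clipped output stays inside [0, 1].
def _demo_perturb():
    x = np.random.random_sample(size=(4, 4))
    x_perturbed = DataPerturbUniform(k=0.5).perturb_data(x)
    assert 0.0 <= x_perturbed.min() and x_perturbed.max() <= 1.0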
| [
"numpy.random.random_sample"
] | [((326, 363), 'numpy.random.random_sample', 'np.random.random_sample', ([], {'size': 'x.shape'}), '(size=x.shape)\n', (349, 363), True, 'import numpy as np\n')] |
import json
import numpy as np
import tensorflow as tf
import torch
def post_processing(reg_list, cls_list, num_classes, image_size, feature_map_wh_list, min_boxes,
center_variance, size_variance,
conf_threshold=0.6, nms_max_output_size=100, nms_iou_threshold=0.3, top_k=100):
reg_list = [tf.keras.layers.Reshape([-1, 4])(reg) for reg in reg_list]
cls_list = [tf.keras.layers.Reshape([-1, num_classes])(cls) for cls in cls_list]
reg = tf.keras.layers.Concatenate(axis=1)(reg_list)
cls = tf.keras.layers.Concatenate(axis=1)(cls_list)
# post process
cls = tf.keras.layers.Softmax(axis=-1)(cls)
loc = decode_regression(reg, image_size, feature_map_wh_list, min_boxes,
center_variance, size_variance)
result = tf.keras.layers.Concatenate(axis=-1)([cls, loc])
# confidence thresholding
mask = conf_threshold < cls[..., 1]
result = tf.boolean_mask(tensor=result, mask=mask)
# non-maximum suppression
mask = tf.image.non_max_suppression(boxes=result[..., -4:],
scores=result[..., 1],
max_output_size=nms_max_output_size,
iou_threshold=nms_iou_threshold,
                                         name='non_maximum_suppression')
result = tf.gather(params=result, indices=mask, axis=0)
# top-k filtering
top_k_value = tf.math.minimum(tf.constant(top_k), tf.shape(result)[0])
mask = tf.nn.top_k(result[..., 1], k=top_k_value, sorted=True).indices
result = tf.gather(params=result, indices=mask, axis=0)
return result
def decode_regression(reg, image_size, feature_map_w_h_list, min_boxes,
center_variance, size_variance):
priors = []
for feature_map_w_h, min_box in zip(feature_map_w_h_list, min_boxes):
xy_grid = np.meshgrid(range(feature_map_w_h[0]), range(feature_map_w_h[1]))
xy_grid = np.add(xy_grid, 0.5)
xy_grid[0, :, :] /= feature_map_w_h[0]
xy_grid[1, :, :] /= feature_map_w_h[1]
xy_grid = np.stack(xy_grid, axis=-1)
xy_grid = np.tile(xy_grid, [1, 1, len(min_box)])
xy_grid = np.reshape(xy_grid, (-1, 2))
wh_grid = np.array(min_box) / np.array(image_size)[:, np.newaxis]
wh_grid = np.tile(np.transpose(wh_grid), [np.product(feature_map_w_h), 1])
prior = np.concatenate((xy_grid, wh_grid), axis=-1)
priors.append(prior)
priors = np.concatenate(priors, axis=0)
    print(f'number of priors: {priors.shape[0]}')
priors = tf.constant(priors, dtype=tf.float32, shape=priors.shape, name='priors')
center_xy = reg[..., :2] * center_variance * priors[..., 2:] + priors[..., :2]
center_wh = tf.exp(reg[..., 2:] * size_variance) * priors[..., 2:]
# center to corner
start_xy = center_xy - center_wh / 2
end_xy = center_xy + center_wh / 2
loc = tf.concat([start_xy, end_xy], axis=-1)
loc = tf.clip_by_value(loc, clip_value_min=0.0, clip_value_max=1.0)
return loc
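

# Illustrative smoke test (added example with made-up shapes and the usual
# SSD variances of 0.1/0.2 assumed, since no defaults exist here): one 4x3
# feature map with two anchor sizes and two classes gives 4*3*2 = 24
# candidate boxes; all-zero logits mean every candidate fails the 0.6
# confidence threshold, so an empty detection tensor comes back.
def _demo_post_processing():
    reg_list = [tf.zeros([1, 3, 4, 2 * 4])]  # 4 regression values per anchor
    cls_list = [tf.zeros([1, 3, 4, 2 * 2])]  # 2 class scores per anchor
    result = post_processing(reg_list, cls_list, num_classes=2,
                             image_size=(320, 240),
                             feature_map_wh_list=[[4, 3]], min_boxes=[[32, 64]],
                             center_variance=0.1, size_variance=0.2)
    print(result.shape)  # (n_detections, 2 class scores + 4 corner coords)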
def load_weight(model, torch_path, mapping_table_path):
torch_weights = torch.load(torch_path, map_location=torch.device('cpu'))
with open(mapping_table_path, 'r') as f:
mapping_table = json.load(f)
mapping_table = {layer['name']: layer['weight'] for layer in mapping_table}
for layer in model.layers:
if layer.name in mapping_table:
print(f'Set layer: {layer.name}')
layer_type = layer.name.split('_')[-1]
torch_layer_names = mapping_table[layer.name]
if layer_type == 'conv':
weight = np.array(torch_weights[torch_layer_names[0]])
weight = np.transpose(weight, [2, 3, 1, 0])
layer.set_weights([weight])
elif layer_type == 'dconv':
weight = np.array(torch_weights[torch_layer_names[0]])
weight = np.transpose(weight, [2, 3, 0, 1])
layer.set_weights([weight])
elif layer_type == 'bn':
gamma = np.array(torch_weights[torch_layer_names[0]])
beta = np.array(torch_weights[torch_layer_names[1]])
running_mean = np.array(torch_weights[torch_layer_names[2]])
running_var = np.array(torch_weights[torch_layer_names[3]])
layer.set_weights([gamma, beta, running_mean, running_var])
elif layer_type == 'convbias':
weight = np.array(torch_weights[torch_layer_names[0]])
bias = np.array(torch_weights[torch_layer_names[1]])
weight = np.transpose(weight, [2, 3, 1, 0])
layer.set_weights([weight, bias])
elif layer_type == 'dconvbias':
weight = np.array(torch_weights[torch_layer_names[0]])
bias = np.array(torch_weights[torch_layer_names[1]])
weight = np.transpose(weight, [2, 3, 0, 1])
layer.set_weights([weight, bias])
else:
raise RuntimeError(f'Unknown Layer type \'{layer_type}\'.')
else:
print(f'Ignore layer: {layer.name}')
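

# Illustrative call sketch (added example; the file names below are
# placeholders, and the JSON mapping table is assumed to look like
# [{"name": "stem_conv", "weight": ["backbone.0.weight"]}, ...], matching
# the dict comprehension in load_weight above):
def _demo_load_weight(keras_model):
    load_weight(keras_model, 'model.pth', 'mapping_table.json')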
| [
"tensorflow.keras.layers.Reshape",
"tensorflow.clip_by_value",
"numpy.product",
"torch.device",
"tensorflow.keras.layers.Concatenate",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.keras.layers.Softmax",
"tensorflow.concat",
"numpy.transpose",
"tensorflow.exp",
"numpy.reshape",
"nu... | [((945, 986), 'tensorflow.boolean_mask', 'tf.boolean_mask', ([], {'tensor': 'result', 'mask': 'mask'}), '(tensor=result, mask=mask)\n', (960, 986), True, 'import tensorflow as tf\n'), ((1029, 1213), 'tensorflow.image.non_max_suppression', 'tf.image.non_max_suppression', ([], {'boxes': 'result[..., -4:]', 'scores': 'result[..., 1]', 'max_output_size': 'nms_max_output_size', 'iou_threshold': 'nms_iou_threshold', 'name': '"""non_maximum_suppresion"""'}), "(boxes=result[..., -4:], scores=result[..., 1],\n max_output_size=nms_max_output_size, iou_threshold=nms_iou_threshold,\n name='non_maximum_suppresion')\n", (1057, 1213), True, 'import tensorflow as tf\n'), ((1379, 1425), 'tensorflow.gather', 'tf.gather', ([], {'params': 'result', 'indices': 'mask', 'axis': '(0)'}), '(params=result, indices=mask, axis=0)\n', (1388, 1425), True, 'import tensorflow as tf\n'), ((1612, 1658), 'tensorflow.gather', 'tf.gather', ([], {'params': 'result', 'indices': 'mask', 'axis': '(0)'}), '(params=result, indices=mask, axis=0)\n', (1621, 1658), True, 'import tensorflow as tf\n'), ((2525, 2555), 'numpy.concatenate', 'np.concatenate', (['priors'], {'axis': '(0)'}), '(priors, axis=0)\n', (2539, 2555), True, 'import numpy as np\n'), ((2614, 2686), 'tensorflow.constant', 'tf.constant', (['priors'], {'dtype': 'tf.float32', 'shape': 'priors.shape', 'name': '"""priors"""'}), "(priors, dtype=tf.float32, shape=priors.shape, name='priors')\n", (2625, 2686), True, 'import tensorflow as tf\n'), ((2957, 2995), 'tensorflow.concat', 'tf.concat', (['[start_xy, end_xy]'], {'axis': '(-1)'}), '([start_xy, end_xy], axis=-1)\n', (2966, 2995), True, 'import tensorflow as tf\n'), ((3006, 3067), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['loc'], {'clip_value_min': '(0.0)', 'clip_value_max': '(1.0)'}), '(loc, clip_value_min=0.0, clip_value_max=1.0)\n', (3022, 3067), True, 'import tensorflow as tf\n'), ((491, 526), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (518, 526), True, 'import tensorflow as tf\n'), ((547, 582), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (574, 582), True, 'import tensorflow as tf\n'), ((623, 655), 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {'axis': '(-1)'}), '(axis=-1)\n', (646, 655), True, 'import tensorflow as tf\n'), ((812, 848), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(-1)'}), '(axis=-1)\n', (839, 848), True, 'import tensorflow as tf\n'), ((1483, 1501), 'tensorflow.constant', 'tf.constant', (['top_k'], {}), '(top_k)\n', (1494, 1501), True, 'import tensorflow as tf\n'), ((1535, 1590), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['result[..., 1]'], {'k': 'top_k_value', 'sorted': '(True)'}), '(result[..., 1], k=top_k_value, sorted=True)\n', (1546, 1590), True, 'import tensorflow as tf\n'), ((1999, 2019), 'numpy.add', 'np.add', (['xy_grid', '(0.5)'], {}), '(xy_grid, 0.5)\n', (2005, 2019), True, 'import numpy as np\n'), ((2132, 2158), 'numpy.stack', 'np.stack', (['xy_grid'], {'axis': '(-1)'}), '(xy_grid, axis=-1)\n', (2140, 2158), True, 'import numpy as np\n'), ((2234, 2262), 'numpy.reshape', 'np.reshape', (['xy_grid', '(-1, 2)'], {}), '(xy_grid, (-1, 2))\n', (2244, 2262), True, 'import numpy as np\n'), ((2438, 2481), 'numpy.concatenate', 'np.concatenate', (['(xy_grid, wh_grid)'], {'axis': '(-1)'}), '((xy_grid, wh_grid), axis=-1)\n', (2452, 2481), True, 'import numpy as np\n'), ((2787, 2823), 'tensorflow.exp', 
'tf.exp', (['(reg[..., 2:] * size_variance)'], {}), '(reg[..., 2:] * size_variance)\n', (2793, 2823), True, 'import tensorflow as tf\n'), ((3289, 3301), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3298, 3301), False, 'import json\n'), ((336, 368), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[-1, 4]'], {}), '([-1, 4])\n', (359, 368), True, 'import tensorflow as tf\n'), ((411, 453), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[-1, num_classes]'], {}), '([-1, num_classes])\n', (434, 453), True, 'import tensorflow as tf\n'), ((1503, 1519), 'tensorflow.shape', 'tf.shape', (['result'], {}), '(result)\n', (1511, 1519), True, 'import tensorflow as tf\n'), ((2282, 2299), 'numpy.array', 'np.array', (['min_box'], {}), '(min_box)\n', (2290, 2299), True, 'import numpy as np\n'), ((2364, 2385), 'numpy.transpose', 'np.transpose', (['wh_grid'], {}), '(wh_grid)\n', (2376, 2385), True, 'import numpy as np\n'), ((3198, 3217), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3210, 3217), False, 'import torch\n'), ((2302, 2322), 'numpy.array', 'np.array', (['image_size'], {}), '(image_size)\n', (2310, 2322), True, 'import numpy as np\n'), ((2388, 2415), 'numpy.product', 'np.product', (['feature_map_w_h'], {}), '(feature_map_w_h)\n', (2398, 2415), True, 'import numpy as np\n'), ((3676, 3721), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[0]]'], {}), '(torch_weights[torch_layer_names[0]])\n', (3684, 3721), True, 'import numpy as np\n'), ((3747, 3781), 'numpy.transpose', 'np.transpose', (['weight', '[2, 3, 1, 0]'], {}), '(weight, [2, 3, 1, 0])\n', (3759, 3781), True, 'import numpy as np\n'), ((3891, 3936), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[0]]'], {}), '(torch_weights[torch_layer_names[0]])\n', (3899, 3936), True, 'import numpy as np\n'), ((3962, 3996), 'numpy.transpose', 'np.transpose', (['weight', '[2, 3, 0, 1]'], {}), '(weight, [2, 3, 0, 1])\n', (3974, 3996), True, 'import numpy as np\n'), ((4102, 4147), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[0]]'], {}), '(torch_weights[torch_layer_names[0]])\n', (4110, 4147), True, 'import numpy as np\n'), ((4171, 4216), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[1]]'], {}), '(torch_weights[torch_layer_names[1]])\n', (4179, 4216), True, 'import numpy as np\n'), ((4248, 4293), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[2]]'], {}), '(torch_weights[torch_layer_names[2]])\n', (4256, 4293), True, 'import numpy as np\n'), ((4324, 4369), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[3]]'], {}), '(torch_weights[torch_layer_names[3]])\n', (4332, 4369), True, 'import numpy as np\n'), ((4514, 4559), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[0]]'], {}), '(torch_weights[torch_layer_names[0]])\n', (4522, 4559), True, 'import numpy as np\n'), ((4583, 4628), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[1]]'], {}), '(torch_weights[torch_layer_names[1]])\n', (4591, 4628), True, 'import numpy as np\n'), ((4654, 4688), 'numpy.transpose', 'np.transpose', (['weight', '[2, 3, 1, 0]'], {}), '(weight, [2, 3, 1, 0])\n', (4666, 4688), True, 'import numpy as np\n'), ((4808, 4853), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[0]]'], {}), '(torch_weights[torch_layer_names[0]])\n', (4816, 4853), True, 'import numpy as np\n'), ((4877, 4922), 'numpy.array', 'np.array', (['torch_weights[torch_layer_names[1]]'], {}), '(torch_weights[torch_layer_names[1]])\n', (4885, 4922), 
True, 'import numpy as np\n'), ((4948, 4982), 'numpy.transpose', 'np.transpose', (['weight', '[2, 3, 0, 1]'], {}), '(weight, [2, 3, 0, 1])\n', (4960, 4982), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from PIL import Image
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import os
class staticClass:
selc_im_path=""
grnd_path=""
save_path=""
root = Tk() #main
root.geometry("570x500")
var = IntVar()
save_path = StringVar()
def selc_im():
sel_im['bg'] = 'blue'
selc = filedialog.askopenfilename()
ext_sel = selc.split(".")
check_sel = ext_sel[-1]
staticClass.selc_im_path=selc
if check_sel != "png":
messagebox.showerror("Error", "Wrong Target File Extension")
sel_im['bg'] = 'black'
staticClass.selc_im_path =""
def selc_grnd_im():
sel_grd_im['bg'] = 'blue'
grnd_path = filedialog.askopenfilename()
ext_grd = grnd_path.split(".")
check_grd = ext_grd[-1]
staticClass.grnd_path=grnd_path
if check_grd != "png":
messagebox.showerror("Error", "Wrong Ground thruth File Extension")
sel_grd_im['bg'] = 'black'
staticClass.grnd_path=""
def savePathFun (event):
staticClass.save_path = filedialog.askdirectory()
########### Main process will goes here ##############
def processImage(ImagePath,grd_path, savePath):
color_img = cv2.imread(grd_path,1)
original_img = cv2.imread(ImagePath,0)
path = savePath
rows_color,cols_color,channels_c = color_img.shape
rows_original,cols_original,channels_o = color_img.shape
#output_image = np.zeros([rows_color, cols_color, 3], dtype=np.uint8)
output_image = cv2.imread(ImagePath,1)
buf = []
A = []
number_of_segments = 0
count_red = 0
count_green = 0
count_blue = 0
id=0
for x in range(0, 250, 10):
for y in range(0, 250, 10):
for z in range(0, 250, 10):
sought = [x, y, z]
# Find all pixels where the 3 RGB values match "sought", and count
result = np.count_nonzero(np.all(color_img == sought, axis=2))
if result != 0:
co_i = []
co_j = []
print('\n*******************************')
print('\n\nsegment found')
print("\nNumber of pixels in this segment")
print(result)
#print("\n")
number_of_segments = number_of_segments + 1
for i in range(rows_color):
for j in range(cols_color):
if color_img[i,j][0] == sought[0] and color_img[i,j][1] == sought[1] and color_img[i,j][2] == sought[2]:
buf.append(original_img[i][j])
co_i.append(i)
co_j.append(j)
print("\nLength of buffer")
print(len(buf))
print("\nLength of co-ordinate array")
print(len(co_i))
A.append(np.mean(buf))
print("\nMean buffer value of the segment")
print(np.mean(buf))
if np.mean(buf) < 130.0:
for a in range(0, len(co_i), 1):
#print("Test1")
#print("pixel co-ordinates are :" )
#print(co_i[a],co_j[a])
#output_image.putpixel(((co_i[a]), (co_j[a])), (255, 0, 0, 255))
output_image[(co_i[a]), (co_j[a])][0] = 255
output_image[(co_i[a]), (co_j[a])][1] = 0
output_image[(co_i[a]), (co_j[a])][2] = 0
count_red = count_red+1
print("\nSegment colored red")
print("\nNumber of pixels colored")
print(count_red)
elif np.mean(buf) > 130.0 and np.mean(buf) < 180.0:
for a in range(0, len(co_i), 1):
#print("Test2")
#print("pixel co-ordinates are :")
#print(co_i[a], co_j[a])
#output_image.putpixel(((co_i[a]), (co_j[a])), (0, 255, 0, 255))
output_image[(co_i[a]), (co_j[a])][0] = 0
output_image[(co_i[a]), (co_j[a])][1] = 255
output_image[(co_i[a]), (co_j[a])][2] = 0
count_green=count_green+1
print("\nSegment colored green")
print("\nNumber of pixels colored")
print(count_green)
elif np.mean(buf) > 180.0:
for a in range(0, len(co_i), 1):
#print("Test3")
#print("pixel co-ordinates are :")
#print(co_i[a], co_j[a])
#output_image.putpixel(((co_i[a]), (co_j[a])), (0, 0, 255, 255))
output_image[(co_i[a]), (co_j[a])][0] = 0
output_image[(co_i[a]), (co_j[a])][1] = 0
output_image[(co_i[a]), (co_j[a])][2] = 255
count_blue=count_blue+1
print("\nSegment colored blue")
print("\nNumber of pixels colored")
print(count_blue)
else:
print("\nNothing done")
#A = np.asarray(buf)
#buf.clear()
del buf[:]
del co_i[:]
del co_j[:]
count_red = 0
count_green = 0
count_blue = 0
print(A)
print("\nNumber of segments")
print(number_of_segments)
cv2.imshow('color_img',color_img)
cv2.imshow('original_img',original_img)
cv2.imshow('output_img',output_image)
print(path)
cv2.imwrite(path+"/finalResult.jpg",output_image)
#cv2.imwrite(path+'finalResult.jpg',output_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
####################### END of Main Operation ###############
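
# Illustrative direct call (added example; "target.png" and "ground.png"
# are placeholder file names, not files shipped with this script): run the
# segment-colouring pipeline without going through the Tk buttons.
def _demo_process_image():
    processImage("target.png", "ground.png", ".")
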
def hide(x):
#print("Hide")
x.pack_forget()
separator = Frame(root,height=2, bd=2, relief=SUNKEN)
separator.pack(fill=Y, padx=5, pady=50)
title = Label(separator, text = "Micro Structure Segmentation Tool", fg = "black" , font=("Courier", 20))
title.pack(fill= Y, padx = 10)
body_frame = Frame(root)
body_frame.pack(fill = X, padx = 100, pady=5)
meth_win = Frame(root)
meth_win.pack(fill = X, padx = 100, pady=5)
#### select button function
def next_fun():
target = staticClass.selc_im_path
#print(target)
ground = staticClass.grnd_path
#print(ground)
if target != "":
if ground != "":
hide(sel_im)
hide(quit_btn)
hide(sel_grd_im)
hide(next_win_btn)
Radiobutton(body_frame, text="Image Processing Technique",padx = 5, variable = var, value = 1).pack()
Radiobutton(body_frame, text="Neural Netwrok Technique(U-NET)",padx = 20, variable = var, value = 2, state = DISABLED).pack()
save_path_btn = Button(meth_win, text = "Select Folder to Save", height = 2, width = 15, bg = "black", fg = "white")
save_path_btn.bind("<Button-1>",savePathFun)
save_path_btn.pack(fill = X, padx = 100, pady=5)
proc = Button(meth_win, text = "Proceed", height = 2, width = 15, bg = "black", fg = "white", command=lambda:processImage(target,ground,staticClass.save_path))
proc.pack(fill = X, padx = 100, pady=5)
quit_btn_n = Button(meth_win, text = "Quit", height = 2, width = 15, bg = "black", fg = "white", command = root.quit)
quit_btn_n.pack(fill = X, padx = 100, pady=5)
else:
messagebox.showerror("Error", "No Ground File Selected")
else:
messagebox.showerror("Error", "No Target File Selected")
sel_grd_im = Button(body_frame, text = "Select Ground Truth Image", activebackground = "green", height = 2, width = 15, bg = "black", fg = "white" , command=lambda:selc_grnd_im())
sel_grd_im.pack(fill = X, padx = 100, pady=5)
#sel_grd_im.bind("<Button-1>",selc_grnd_im)
sel_im = Button(body_frame, text = "Select Image", activebackground = "green", height = 2, width = 15, bg = "black", fg = "white",command=lambda:selc_im())
sel_im.pack(fill = X, padx = 100, pady=5)
#sel_im.bind("<Button-1>",selc_im)
next_win_btn = Button(body_frame, text = "Next", activebackground = "green", height = 2, width = 15, bg = "black", fg = "white", command=lambda:next_fun())
next_win_btn.pack(fill = X, padx = 100, pady=5)
quit_btn = Button(body_frame, text = "Quit", height = 2, width = 15, bg = "black", fg = "white", command = root.quit)
quit_btn.pack(fill = X, padx = 100, pady=5)
mainloop() | [
"cv2.waitKey",
"cv2.imwrite",
"cv2.destroyAllWindows",
"tkinter.filedialog.askopenfilename",
"tkinter.filedialog.askdirectory",
"cv2.imread",
"matplotlib.use",
"numpy.mean",
"cv2.imshow",
"tkinter.messagebox.showerror",
"numpy.all"
] | [((70, 91), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (84, 91), False, 'import matplotlib\n'), ((434, 462), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (460, 462), False, 'from tkinter import filedialog\n'), ((785, 813), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (811, 813), False, 'from tkinter import filedialog\n'), ((1137, 1162), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (1160, 1162), False, 'from tkinter import filedialog\n'), ((1289, 1312), 'cv2.imread', 'cv2.imread', (['grd_path', '(1)'], {}), '(grd_path, 1)\n', (1299, 1312), False, 'import cv2\n'), ((1331, 1355), 'cv2.imread', 'cv2.imread', (['ImagePath', '(0)'], {}), '(ImagePath, 0)\n', (1341, 1355), False, 'import cv2\n'), ((1585, 1609), 'cv2.imread', 'cv2.imread', (['ImagePath', '(1)'], {}), '(ImagePath, 1)\n', (1595, 1609), False, 'import cv2\n'), ((5256, 5290), 'cv2.imshow', 'cv2.imshow', (['"""color_img"""', 'color_img'], {}), "('color_img', color_img)\n", (5266, 5290), False, 'import cv2\n'), ((5294, 5334), 'cv2.imshow', 'cv2.imshow', (['"""original_img"""', 'original_img'], {}), "('original_img', original_img)\n", (5304, 5334), False, 'import cv2\n'), ((5338, 5376), 'cv2.imshow', 'cv2.imshow', (['"""output_img"""', 'output_image'], {}), "('output_img', output_image)\n", (5348, 5376), False, 'import cv2\n'), ((5396, 5448), 'cv2.imwrite', 'cv2.imwrite', (["(path + '/finalResult.jpg')", 'output_image'], {}), "(path + '/finalResult.jpg', output_image)\n", (5407, 5448), False, 'import cv2\n'), ((5503, 5517), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5514, 5517), False, 'import cv2\n'), ((5522, 5545), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5543, 5545), False, 'import cv2\n'), ((590, 650), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Wrong Target File Extension"""'], {}), "('Error', 'Wrong Target File Extension')\n", (610, 650), False, 'from tkinter import messagebox\n'), ((948, 1015), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Wrong Ground thruth File Extension"""'], {}), "('Error', 'Wrong Ground thruth File Extension')\n", (968, 1015), False, 'from tkinter import messagebox\n'), ((7471, 7528), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""No Target File Selected"""'], {}), "('Error', 'No Target File Selected')\n", (7491, 7528), False, 'from tkinter import messagebox\n'), ((7396, 7452), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""No Ground File Selected"""'], {}), "('Error', 'No Ground File Selected')\n", (7416, 7452), False, 'from tkinter import messagebox\n'), ((1970, 2005), 'numpy.all', 'np.all', (['(color_img == sought)'], {'axis': '(2)'}), '(color_img == sought, axis=2)\n', (1976, 2005), True, 'import numpy as np\n'), ((2828, 2840), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (2835, 2840), True, 'import numpy as np\n'), ((2916, 2928), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (2923, 2928), True, 'import numpy as np\n'), ((2946, 2958), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (2953, 2958), True, 'import numpy as np\n'), ((3604, 3616), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (3611, 3616), True, 'import numpy as np\n'), ((3630, 3642), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (3637, 3642), True, 'import numpy as np\n'), ((4288, 4300), 'numpy.mean', 'np.mean', 
(['buf'], {}), '(buf)\n', (4295, 4300), True, 'import numpy as np\n')] |
import torch
import numpy as np
from torch.distributions import constraints, Distribution
from relie.utils.so3_tools import so3_uniform_random
class SO3Prior(Distribution):
domain = constraints.real
codomain = constraints.real
event_dim = 2
def __init__(self, device=None, dtype=None):
super().__init__(event_shape=(3, 3))
self.device = device
self.dtype = dtype
def sample(self, shape=torch.Size()):
n = np.prod(shape)
return so3_uniform_random(n, device=self.device, dtype=self.dtype).view(
*shape, 3, 3
)
def log_prob(self, value):
return value.new_full(value.shape[:-2], np.log(1 / (8 * np.pi ** 2)))
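

# Illustrative usage sketch (added example): draw a batch of uniform
# rotations and check the constant Haar log-density log(1 / (8 * pi^2)),
# roughly -4.37 per sample.
def _demo_so3_prior():
    prior = SO3Prior(dtype=torch.float64)
    samples = prior.sample(torch.Size([5]))  # shape (5, 3, 3)
    print(prior.log_prob(samples))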
| [
"relie.utils.so3_tools.so3_uniform_random",
"numpy.log",
"torch.Size",
"numpy.prod"
] | [((435, 447), 'torch.Size', 'torch.Size', ([], {}), '()\n', (445, 447), False, 'import torch\n'), ((462, 476), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (469, 476), True, 'import numpy as np\n'), ((673, 701), 'numpy.log', 'np.log', (['(1 / (8 * np.pi ** 2))'], {}), '(1 / (8 * np.pi ** 2))\n', (679, 701), True, 'import numpy as np\n'), ((492, 551), 'relie.utils.so3_tools.so3_uniform_random', 'so3_uniform_random', (['n'], {'device': 'self.device', 'dtype': 'self.dtype'}), '(n, device=self.device, dtype=self.dtype)\n', (510, 551), False, 'from relie.utils.so3_tools import so3_uniform_random\n')] |
'''Black box false discovery rate (FDR) control for treatment effects. We use
the two-groups empirical Bayes approach for the treatment effects and fit a
collection of deep networks as the empirical prior.'''
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.optim as optim
from scipy.stats import norm, beta
from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds,\
batches, calc_fdr, p_value_2sided
from normix import predictive_recursion, empirical_null, GridDistribution
class LinearAdaptiveFDRModeler(nn.Module):
def __init__(self, nfeatures):
super(LinearAdaptiveFDRModeler, self).__init__()
self.fc_in = nn.Sequential(nn.Linear(nfeatures, 2), nn.Softplus())
def forward(self, x):
return self.fc_in(x) + 1.
class DeepAdaptiveFDRModeler(nn.Module):
def __init__(self, nfeatures):
super(DeepAdaptiveFDRModeler, self).__init__()
self.fc_in = nn.Sequential(
nn.Linear(nfeatures, 200),
nn.ReLU(),
nn.Dropout(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Dropout(),
nn.Linear(200, 2),
nn.Softplus())
def forward(self, x):
return self.fc_in(x) + 1.
class BlackBoxTwoGroupsModel:
def __init__(self, X, y, fdr=0.1, num_alt_bins=220,
num_pr_sweeps=5, estimate_null=False,
pvalues=False, beta_gridsize=1001):
if pvalues:
# Convert the p-values to z-scores
print('\tConverting to z-scores under a one-sided test assumption')
sys.stdout.flush()
p_values = np.copy(y)
y = p2z(p_values)
else:
p_values = p_value_2sided(y)
self.X = X
self.y = y
self.nsamples = X.shape[0]
self.nfeatures = X.shape[1]
self.fdr = fdr
self.X_means = X.mean(axis=0)[np.newaxis,:]
self.X_std = X.std(axis=0)[np.newaxis,:]
# Empirical null estimation
if estimate_null:
print('\tEstimating empirical null distribution')
sys.stdout.flush()
mu0, sigma0 = empirical_null(y)
p_values = p_value_2sided(y, mu0, sigma0)
else:
mu0, sigma0 = 0., 1.
self.null_dist = (mu0, sigma0)
# print('\tNull mean: {} std: {}'.format(mu0, sigma0))
# sys.stdout.flush()
# Predictive recursion estimate of alternative distribution
min_alt_z, max_alt_z = min(-10, y.min() - 1), max(y.max() + 1, 10)
# print('\tEstimating alternative distribution via predictive recursion over range [{},{}] with {} bins'.format(min_alt_z, max_alt_z, num_alt_bins))
sys.stdout.flush()
grid_x = np.linspace(min_alt_z, max_alt_z, num_alt_bins)
pr_results = predictive_recursion(y, num_pr_sweeps, grid_x, mu0=mu0, sig0=sigma0)
self.pi0 = pr_results['pi0']
self.alt_dist = GridDistribution(pr_results['grid_x'], pr_results['y_signal'])
# Create a discrete grid approximation to the Beta support
self.beta_grid = np.linspace(0.001, 0.999, beta_gridsize)[np.newaxis,:]
# Cache the likelihoods
# print('\tCaching likelihoods')
sys.stdout.flush()
self.P0 = norm.pdf(y, mu0, sigma0)[:,np.newaxis]
self.P1 = self.alt_dist.pdf(y)[:,np.newaxis]
# Create the torch variables
self.tP0 = autograd.Variable(torch.FloatTensor(self.P0), requires_grad=False)
self.tP1 = autograd.Variable(torch.FloatTensor(self.P1), requires_grad=False)
self.tX = autograd.Variable(torch.FloatTensor((self.X - self.X_means) / self.X_std), requires_grad=False)
def train(self, model_fn=None, lasso=0., l2=1e-4, lr=3e-4, num_epochs=250,
batch_size=None, num_folds=3, val_pct=0.1, verbose=False, folds=None,
weight_decay=0.01, random_restarts=1, save_dir='/tmp/',
momentum=0.9, patience=3, clip_gradients=None):
# Make sure we have a model of the prior
if model_fn is None:
model_fn = lambda nfeatures: DeepAdaptiveFDRModeler(nfeatures)
# Lasso penalty (if any)
lasso = autograd.Variable(torch.FloatTensor([lasso]), requires_grad=False)
l2 = autograd.Variable(torch.FloatTensor([l2]), requires_grad=False)
if batch_size is None:
batch_size = int(max(10,min(100,np.round(self.X.shape[0] / 100.))))
print('Batch size: {}'.format(batch_size))
# Discrete approximation of a beta PDF support
tbeta_grid = autograd.Variable(torch.FloatTensor(self.beta_grid), requires_grad=False)
sys.stdout.flush()
# Split the data into a bunch of cross-validation folds
if folds is None:
if verbose:
print('\tCreating {} folds'.format(num_folds))
sys.stdout.flush()
folds = create_folds(self.X, k=num_folds)
self.priors = np.zeros((self.nsamples,2), dtype=float)
self.models = []
train_losses, val_losses = np.zeros((len(folds),random_restarts,num_epochs)), np.zeros((len(folds),random_restarts,num_epochs))
epochs_per_fold = np.zeros(len(folds))
for fold_idx, test_indices in enumerate(folds):
# Create train/validate splits
mask = np.ones(self.nsamples, dtype=bool)
mask[test_indices] = False
indices = np.arange(self.nsamples, dtype=int)[mask]
np.random.shuffle(indices)
train_cutoff = int(np.round(len(indices)*(1-val_pct)))
train_indices = indices[:train_cutoff]
validate_indices = indices[train_cutoff:]
torch_test_indices = autograd.Variable(torch.LongTensor(test_indices), requires_grad=False)
best_loss = None
# Try re-initializing a few times
for restart in range(random_restarts):
model = model_fn(self.nfeatures)
# Setup the optimizers
# optimizer = optim.SGD(model.parameters(), lr=lr, weight_decay=weight_decay, momentum=momentum)
# scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=patience)
optimizer = optim.RMSprop(model.parameters(), lr=lr, weight_decay=weight_decay)
# optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
# Train the model
for epoch in range(num_epochs):
if verbose:
print('\t\tRestart {} Fold {} Epoch {}'.format(restart+1, fold_idx+1,epoch+1))
sys.stdout.flush()
train_loss = torch.Tensor([0])
for batch_idx, batch in enumerate(batches(train_indices, batch_size, shuffle=False)):
if verbose and (batch_idx % 100 == 0):
print('\t\t\tBatch {}'.format(batch_idx))
tidx = autograd.Variable(torch.LongTensor(batch), requires_grad=False)
# Set the model to training mode
model.train()
# Reset the gradient
model.zero_grad()
# Run the model and get the prior predictions
concentrations = model(self.tX[tidx])
# Calculate the loss as the negative log-likelihood of the data
# Use a beta prior for the treatment effect
prior_dist = torch.distributions.Beta(concentrations[:,0:1], concentrations[:,1:2])
# Discretize the (0,1) interval to approximate the beta PDF
prior_probs = prior_dist.log_prob(tbeta_grid).exp()
prior_probs = prior_probs / prior_probs.sum(dim=1, keepdim=True)
# Calculate the loss
posterior_probs = (((1-tbeta_grid) * self.tP0[tidx]
+ tbeta_grid * self.tP1[tidx]) * prior_probs).sum(dim=1)
loss = -posterior_probs.log().mean()
# L1 penalty to shrink c and be more conservative
regularized_loss = loss + lasso * concentrations.mean() + l2 * (concentrations**2).mean()
# Update the model with gradient clipping for stability
regularized_loss.backward()
# Clip the gradients if need-be
if clip_gradients is not None:
                            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_gradients)
                        # Apply the update
                        optimizer.step()
# Track the loss
train_loss += loss.data
validate_loss = torch.Tensor([0])
for batch_idx, batch in enumerate(batches(validate_indices, batch_size)):
if verbose and (batch_idx % 100 == 0):
print('\t\t\tValidation Batch {}'.format(batch_idx))
tidx = autograd.Variable(torch.LongTensor(batch), requires_grad=False)
# Set the model to test mode
model.eval()
# Reset the gradient
model.zero_grad()
# Run the model and get the prior predictions
concentrations = model(self.tX[tidx])
# Calculate the loss as the negative log-likelihood of the data
# Use a beta prior for the treatment effect
prior_dist = torch.distributions.Beta(concentrations[:,0:1], concentrations[:,1:2])
# Discretize the (0,1) interval to approximate the beta PDF
prior_probs = prior_dist.log_prob(tbeta_grid).exp()
prior_probs = (prior_probs / prior_probs.sum(dim=1, keepdim=True)).clamp(1e-8, 1-1e-8)
# Calculate the loss
posterior_probs = (((1-tbeta_grid) * self.tP0[tidx]
+ tbeta_grid * self.tP1[tidx]) * prior_probs).sum(dim=1).clamp(1e-8, 1-1e-8)
loss = -posterior_probs.log().sum()
# Track the loss
validate_loss += loss.data
train_losses[fold_idx, restart, epoch] = train_loss.numpy() / float(len(train_indices))
val_losses[fold_idx, restart, epoch] = validate_loss.numpy() / float(len(validate_indices))
# # Adjust the learning rate down if the validation performance is bad
# scheduler.step(val_losses[fold_idx, epoch])
# Check if we are currently have the best held-out log-likelihood
if verbose:
print('Validation loss: {} Best: {}'.format(val_losses[fold_idx, restart, epoch], best_loss))
if (restart == 0 and epoch == 0) or val_losses[fold_idx, restart, epoch] <= best_loss:
if verbose:
print('\t\t\tSaving test set results. <----- New high water mark for fold {} on epoch {}'.format(fold_idx+1, epoch+1))
# If so, use the current model on the test set
best_loss = val_losses[fold_idx, restart, epoch]
epochs_per_fold[fold_idx] = epoch + 1
self.priors[test_indices] = model(self.tX[torch_test_indices]).data.numpy()
torch.save(model, save_dir + '_fold{}.pt'.format(fold_idx))
if verbose:
means = self.priors[test_indices,0] / self.priors[test_indices].sum(axis=1)
print('Prior range: [{},{}]'.format(means.min(), means.max()))
print('First 3:')
print(self.priors[test_indices][:3])
# Reload the best model
self.models.append(torch.load(save_dir + '_fold{}.pt'.format(fold_idx)))
# Calculate the posterior probabilities
if verbose:
print('Calculating posteriors.')
sys.stdout.flush()
prior_grid = beta.pdf(self.beta_grid, self.priors[:,0:1], self.priors[:,1:2])
prior_grid /= prior_grid.sum(axis=1, keepdims=True)
post0 = self.P0 * (1-self.beta_grid)
post1 = self.P1 * self.beta_grid
self.posteriors = ((post1 / (post0 + post1)) * prior_grid).sum(axis=1)
self.posteriors = self.posteriors.clip(1e-8,1-1e-8)
if verbose:
print('Calculating predictions at a {:.2f}% FDR threshold'.format(self.fdr*100))
sys.stdout.flush()
self.predictions = calc_fdr(self.posteriors, self.fdr)
if verbose:
print('Finished training.')
sys.stdout.flush()
self.folds = folds
return {'train_losses': train_losses,
'validation_losses': val_losses,
'priors': self.priors,
'posteriors': self.posteriors,
'predictions': self.predictions,
'models': self.models,
'folds': folds}
def predict(self, X, y=None, models=None, batch_size=100):
# Potentially use a subset of the trained models
# (useful when X may have been used to train some of the
# models)
if models is None:
models = self.models
else:
models = [self.models[i] for i in models]
priors = np.zeros((X.shape[0], 2))
for model in models:
model.eval()
priors += model(autograd.Variable(torch.FloatTensor((X - self.X_means) / self.X_std), requires_grad=False)).data.numpy()
priors /= float(len(models))
if y is None:
return priors
# Get the posterior estimates
mu0, sigma0 = self.null_dist
P0 = norm.pdf(y, mu0, sigma0)[:,np.newaxis]
P1 = self.alt_dist.pdf(y)[:,np.newaxis]
prior_grid = beta.pdf(self.beta_grid, priors[:,0:1], priors[:,1:2])
prior_grid /= prior_grid.sum(axis=1, keepdims=True)
post0 = P0 * (1-self.beta_grid)
post1 = P1 * self.beta_grid
posteriors = ((post1 / (post0 + post1)) * prior_grid).sum(axis=1)
posteriors = posteriors.clip(1e-8, 1-1e-8)
return priors, posteriors
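

# Illustrative usage sketch (added example on synthetic data; assumes the
# utils/normix helpers imported above are available): 500 samples with a
# +3 shift on the z-scores whenever the first covariate exceeds 1.
def _demo_two_groups():
    np.random.seed(42)
    X = np.random.normal(size=(500, 5))
    y = np.random.normal(loc=3. * (X[:, 0] > 1))
    model = BlackBoxTwoGroupsModel(X, y, fdr=0.1)
    results = model.train(num_epochs=5, verbose=False)
    print(results['predictions'].sum(), 'discoveries at a 10% FDR')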
| [
"torch.nn.Dropout",
"numpy.ones",
"sys.stdout.flush",
"numpy.arange",
"numpy.round",
"utils.create_folds",
"normix.predictive_recursion",
"numpy.copy",
"torch.FloatTensor",
"normix.empirical_null",
"normix.GridDistribution",
"torch.Tensor",
"numpy.linspace",
"torch.nn.Linear",
"scipy.sta... | [((2866, 2884), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2882, 2884), False, 'import sys\n'), ((2902, 2949), 'numpy.linspace', 'np.linspace', (['min_alt_z', 'max_alt_z', 'num_alt_bins'], {}), '(min_alt_z, max_alt_z, num_alt_bins)\n', (2913, 2949), True, 'import numpy as np\n'), ((2971, 3039), 'normix.predictive_recursion', 'predictive_recursion', (['y', 'num_pr_sweeps', 'grid_x'], {'mu0': 'mu0', 'sig0': 'sigma0'}), '(y, num_pr_sweeps, grid_x, mu0=mu0, sig0=sigma0)\n', (2991, 3039), False, 'from normix import predictive_recursion, empirical_null, GridDistribution\n'), ((3101, 3163), 'normix.GridDistribution', 'GridDistribution', (["pr_results['grid_x']", "pr_results['y_signal']"], {}), "(pr_results['grid_x'], pr_results['y_signal'])\n", (3117, 3163), False, 'from normix import predictive_recursion, empirical_null, GridDistribution\n'), ((3394, 3412), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3410, 3412), False, 'import sys\n'), ((4835, 4853), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4851, 4853), False, 'import sys\n'), ((5142, 5183), 'numpy.zeros', 'np.zeros', (['(self.nsamples, 2)'], {'dtype': 'float'}), '((self.nsamples, 2), dtype=float)\n', (5150, 5183), True, 'import numpy as np\n'), ((12734, 12800), 'scipy.stats.beta.pdf', 'beta.pdf', (['self.beta_grid', 'self.priors[:, 0:1]', 'self.priors[:, 1:2]'], {}), '(self.beta_grid, self.priors[:, 0:1], self.priors[:, 1:2])\n', (12742, 12800), False, 'from scipy.stats import norm, beta\n'), ((13256, 13291), 'utils.calc_fdr', 'calc_fdr', (['self.posteriors', 'self.fdr'], {}), '(self.posteriors, self.fdr)\n', (13264, 13291), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((14064, 14089), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 2)'], {}), '((X.shape[0], 2))\n', (14072, 14089), True, 'import numpy as np\n'), ((14559, 14615), 'scipy.stats.beta.pdf', 'beta.pdf', (['self.beta_grid', 'priors[:, 0:1]', 'priors[:, 1:2]'], {}), '(self.beta_grid, priors[:, 0:1], priors[:, 1:2])\n', (14567, 14615), False, 'from scipy.stats import norm, beta\n'), ((787, 810), 'torch.nn.Linear', 'nn.Linear', (['nfeatures', '(2)'], {}), '(nfeatures, 2)\n', (796, 810), True, 'import torch.nn as nn\n'), ((812, 825), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (823, 825), True, 'import torch.nn as nn\n'), ((1077, 1102), 'torch.nn.Linear', 'nn.Linear', (['nfeatures', '(200)'], {}), '(nfeatures, 200)\n', (1086, 1102), True, 'import torch.nn as nn\n'), ((1120, 1129), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1127, 1129), True, 'import torch.nn as nn\n'), ((1147, 1159), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1157, 1159), True, 'import torch.nn as nn\n'), ((1177, 1196), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(200)'], {}), '(200, 200)\n', (1186, 1196), True, 'import torch.nn as nn\n'), ((1214, 1223), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1221, 1223), True, 'import torch.nn as nn\n'), ((1241, 1253), 'torch.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1251, 1253), True, 'import torch.nn as nn\n'), ((1271, 1288), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(2)'], {}), '(200, 2)\n', (1280, 1288), True, 'import torch.nn as nn\n'), ((1306, 1319), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (1317, 1319), True, 'import torch.nn as nn\n'), ((1752, 1770), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1768, 1770), False, 'import sys\n'), ((1794, 1804), 'numpy.copy', 'np.copy', 
(['y'], {}), '(y)\n', (1801, 1804), True, 'import numpy as np\n'), ((1821, 1834), 'utils.p2z', 'p2z', (['p_values'], {}), '(p_values)\n', (1824, 1834), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((1872, 1889), 'utils.p_value_2sided', 'p_value_2sided', (['y'], {}), '(y)\n', (1886, 1889), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((2262, 2280), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2278, 2280), False, 'import sys\n'), ((2307, 2324), 'normix.empirical_null', 'empirical_null', (['y'], {}), '(y)\n', (2321, 2324), False, 'from normix import predictive_recursion, empirical_null, GridDistribution\n'), ((2348, 2378), 'utils.p_value_2sided', 'p_value_2sided', (['y', 'mu0', 'sigma0'], {}), '(y, mu0, sigma0)\n', (2362, 2378), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((3257, 3297), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.999)', 'beta_gridsize'], {}), '(0.001, 0.999, beta_gridsize)\n', (3268, 3297), True, 'import numpy as np\n'), ((3431, 3455), 'scipy.stats.norm.pdf', 'norm.pdf', (['y', 'mu0', 'sigma0'], {}), '(y, mu0, sigma0)\n', (3439, 3455), False, 'from scipy.stats import norm, beta\n'), ((3598, 3624), 'torch.FloatTensor', 'torch.FloatTensor', (['self.P0'], {}), '(self.P0)\n', (3615, 3624), False, 'import torch\n'), ((3684, 3710), 'torch.FloatTensor', 'torch.FloatTensor', (['self.P1'], {}), '(self.P1)\n', (3701, 3710), False, 'import torch\n'), ((3769, 3824), 'torch.FloatTensor', 'torch.FloatTensor', (['((self.X - self.X_means) / self.X_std)'], {}), '((self.X - self.X_means) / self.X_std)\n', (3786, 3824), False, 'import torch\n'), ((4383, 4409), 'torch.FloatTensor', 'torch.FloatTensor', (['[lasso]'], {}), '([lasso])\n', (4400, 4409), False, 'import torch\n'), ((4463, 4486), 'torch.FloatTensor', 'torch.FloatTensor', (['[l2]'], {}), '([l2])\n', (4480, 4486), False, 'import torch\n'), ((4771, 4804), 'torch.FloatTensor', 'torch.FloatTensor', (['self.beta_grid'], {}), '(self.beta_grid)\n', (4788, 4804), False, 'import torch\n'), ((5086, 5119), 'utils.create_folds', 'create_folds', (['self.X'], {'k': 'num_folds'}), '(self.X, k=num_folds)\n', (5098, 5119), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((5509, 5543), 'numpy.ones', 'np.ones', (['self.nsamples'], {'dtype': 'bool'}), '(self.nsamples, dtype=bool)\n', (5516, 5543), True, 'import numpy as np\n'), ((5659, 5685), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (5676, 5685), True, 'import numpy as np\n'), ((12694, 12712), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12710, 12712), False, 'import sys\n'), ((13210, 13228), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13226, 13228), False, 'import sys\n'), ((13365, 13383), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13381, 13383), False, 'import sys\n'), ((14451, 14475), 'scipy.stats.norm.pdf', 'norm.pdf', (['y', 'mu0', 'sigma0'], {}), '(y, mu0, sigma0)\n', (14459, 14475), False, 'from scipy.stats import norm, beta\n'), ((5047, 5065), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5063, 5065), False, 'import sys\n'), ((5605, 5640), 'numpy.arange', 'np.arange', (['self.nsamples'], {'dtype': 'int'}), '(self.nsamples, dtype=int)\n', (5614, 5640), True, 'import 
numpy as np\n'), ((5909, 5939), 'torch.LongTensor', 'torch.LongTensor', (['test_indices'], {}), '(test_indices)\n', (5925, 5939), False, 'import torch\n'), ((6901, 6918), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (6913, 6918), False, 'import torch\n'), ((9185, 9202), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (9197, 9202), False, 'import torch\n'), ((4585, 4618), 'numpy.round', 'np.round', (['(self.X.shape[0] / 100.0)'], {}), '(self.X.shape[0] / 100.0)\n', (4593, 4618), True, 'import numpy as np\n'), ((6848, 6866), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6864, 6866), False, 'import sys\n'), ((6973, 7022), 'utils.batches', 'batches', (['train_indices', 'batch_size'], {'shuffle': '(False)'}), '(train_indices, batch_size, shuffle=False)\n', (6980, 7022), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((7764, 7836), 'torch.distributions.Beta', 'torch.distributions.Beta', (['concentrations[:, 0:1]', 'concentrations[:, 1:2]'], {}), '(concentrations[:, 0:1], concentrations[:, 1:2])\n', (7788, 7836), False, 'import torch\n'), ((9257, 9294), 'utils.batches', 'batches', (['validate_indices', 'batch_size'], {}), '(validate_indices, batch_size)\n', (9264, 9294), False, 'from utils import p2z, tpr, fdr, true_positives, false_positives, create_folds, batches, calc_fdr, p_value_2sided\n'), ((10042, 10114), 'torch.distributions.Beta', 'torch.distributions.Beta', (['concentrations[:, 0:1]', 'concentrations[:, 1:2]'], {}), '(concentrations[:, 0:1], concentrations[:, 1:2])\n', (10066, 10114), False, 'import torch\n'), ((7207, 7230), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (7223, 7230), False, 'import torch\n'), ((9490, 9513), 'torch.LongTensor', 'torch.LongTensor', (['batch'], {}), '(batch)\n', (9506, 9513), False, 'import torch\n'), ((14190, 14240), 'torch.FloatTensor', 'torch.FloatTensor', (['((X - self.X_means) / self.X_std)'], {}), '((X - self.X_means) / self.X_std)\n', (14207, 14240), False, 'import torch\n')] |
from GAN.utils import read_and_save
import numpy as np
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.initializers import RandomNormal
from keras import Input, Model
from keras.layers import Concatenate, Conv2D, BatchNormalization
from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose
from keras.optimizers import Adam
from keras.models import load_model
import cv2 as cv
class Pix2PixModel:
@staticmethod
def name():
return 'Pix2PixModel'
def __init__(self, rn, image_shape=(512, 1024, 3)):
self.image_shape = image_shape
self.d_model = None
self.g_model = None
self.gan_model = None
self.run_number = rn
# define the discriminator model
def create_discriminator(self):
        # weight initializer: small random normal values
init = RandomNormal(stddev=0.02)
# tensor for the pose image
in_src_image = Input(shape=self.image_shape)
# tensor for the target image, the real frame
in_target_image = Input(shape=self.image_shape)
# concatenate images channel-wise
merged = Concatenate()([in_src_image, in_target_image])
# C64
layers = Conv2D(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(merged)
layers = LeakyReLU(alpha=0.2)(layers)
# C128
layers = Conv2D(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(layers)
layers = BatchNormalization()(layers)
layers = LeakyReLU(alpha=0.2)(layers)
# C256
layers = Conv2D(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(layers)
layers = BatchNormalization()(layers)
layers = LeakyReLU(alpha=0.2)(layers)
# C512
layers = Conv2D(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(layers)
layers = BatchNormalization()(layers)
layers = LeakyReLU(alpha=0.2)(layers)
# second last output layer
layers = Conv2D(512, (4, 4), padding='same', kernel_initializer=init)(layers)
layers = BatchNormalization()(layers)
layers = LeakyReLU(alpha=0.2)(layers)
# patch output
layers = Conv2D(1, (4, 4), padding='same', kernel_initializer=init)(layers)
patch_out = Activation('sigmoid')(layers)
# define model
self.d_model = Model([in_src_image, in_target_image], patch_out)
# compile model
opt = Adam(lr=0.0002, beta_1=0.5)
self.d_model.compile(loss='binary_crossentropy', optimizer=opt, loss_weights=[0.5])
@staticmethod
def encoder_block(in_layer, filters_num, batch_norm=True):
init = RandomNormal(stddev=0.02)
# downsampling layer
enc = Conv2D(filters_num, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(in_layer)
if batch_norm:
enc = BatchNormalization()(enc, training=True)
enc = LeakyReLU(alpha=0.2)(enc)
return enc
@staticmethod
def decoder_block(in_layer, skip_conn, filters_num, dropout=True):
init = RandomNormal(stddev=0.02)
# add upsampling layer
dec = Conv2DTranspose(filters_num, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(in_layer)
dec = BatchNormalization()(dec, training=True)
if dropout:
dec = Dropout(0.5)(dec, training=True)
# merge with skip connection
dec = Concatenate()([dec, skip_conn])
dec = Activation('relu')(dec)
return dec
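    # Shape bookkeeping for the default 512x1024x3 input (added annotation,
    # assuming 'same' padding throughout): every encoder block halves H and W,
    # so e1..e7 run 256x512 down to 4x8 and the bottleneck sits at 2x4; every
    # decoder block doubles H and W and concatenates the matching encoder
    # output, giving the classic pix2pix U-Net skip-connection layout.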
# define the standalone generator model
def create_generator(self):
init = RandomNormal(stddev=0.02)
inp_image = Input(shape=self.image_shape)
# encoder model
e1 = self.encoder_block(inp_image, 64, batch_norm=False)
e2 = self.encoder_block(e1, 128)
e3 = self.encoder_block(e2, 256)
e4 = self.encoder_block(e3, 512)
e5 = self.encoder_block(e4, 512)
e6 = self.encoder_block(e5, 512)
e7 = self.encoder_block(e6, 512)
# bottleneck, no batch norm and relu
b = Conv2D(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(e7)
b = Activation('relu')(b)
# decoder model
d1 = self.decoder_block(b, e7, 512)
d2 = self.decoder_block(d1, e6, 512)
d3 = self.decoder_block(d2, e5, 512)
d4 = self.decoder_block(d3, e4, 512, dropout=False)
d5 = self.decoder_block(d4, e3, 256, dropout=False)
d6 = self.decoder_block(d5, e2, 128, dropout=False)
d7 = self.decoder_block(d6, e1, 64, dropout=False)
# output
g = Conv2DTranspose(3, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)(d7)
out_image = Activation('tanh')(g)
# define model
self.g_model = Model(inp_image, out_image)
# define the combined generator and discriminator model, for updating the generator
def create_gan(self):
# make weights in the discriminator not trainable
for layer in self.d_model.layers:
if not isinstance(layer, BatchNormalization):
layer.trainable = False
src_input = Input(shape=self.image_shape)
# connect the source image to the generator input
gen_out = self.g_model(src_input)
# connect the source input and generator output to the discriminator input
dis_out = self.d_model([src_input, gen_out])
# src image as input, generated image and classification output
self.gan_model = Model(src_input, [dis_out, gen_out])
# compile model
opt = Adam(lr=0.0002, beta_1=0.5)
self.gan_model.compile(loss=['binary_crossentropy', 'mae'], optimizer=opt, loss_weights=[1, 100])
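    # Note on the composite loss above: the discriminator output is judged
    # with plain binary cross-entropy, while the generated image is pulled
    # toward the target with an L1 ('mae') term weighted 100x, matching the
    # lambda=100 setting of the original pix2pix paper (Isola et al., 2017).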
# select a batch of random samples, returns images and target for the discriminator
@staticmethod
def generate_real_samples(dataset, n_samples, patch_shape):
train_poses, train_images = dataset
# choose random instances
ix = np.random.randint(0, train_poses.shape[0], n_samples)
# retrieve selected images
X1, X2 = train_poses[ix], train_images[ix]
# generate 'real' class labels (1)
y = np.ones((n_samples, patch_shape, patch_shape * 2, 1))
return [X1, X2], y
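    # The label tensor above is (n, patch, patch*2, 1) rather than square
    # because the 512x1024 inputs make the PatchGAN output twice as wide as
    # it is tall; each entry marks one receptive-field patch as real (1).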
# generate a batch of images, returns images and targets
def generate_fake_samples(self, samples, patch_shape):
# generate fake instance
X = self.g_model.predict(samples)
# create 'fake' class labels (0)
y = np.zeros((len(samples), patch_shape, patch_shape * 2, 1))
return X, y
def load_models(self, d_model_name, g_model_name, gan_model_name):
self.d_model = load_model(d_model_name)
self.g_model = load_model(g_model_name)
self.gan_model = load_model(gan_model_name)
# generate samples and save as a plot and save the model
def summarize_performance(self, step, dataset, n_samples=2):
# select a sample of input images
[X_realA, X_realB], _ = self.generate_real_samples(dataset, n_samples, 1)
        # generate a batch of fake images
        X_fakeB, _ = self.generate_fake_samples(X_realA, 1)
        # scale all pixels from [-1,1] to [0,255] for display; casting the
        # [0,1] floats straight to uint8 truncated everything to 0 or 1,
        # so scale by 255 first (as save_samples below already does)
        X_realA = ((X_realA + 1) / 2.0 * 255).astype(np.uint8)
        X_realB = ((X_realB + 1) / 2.0 * 255).astype(np.uint8)
        X_fakeB = ((X_fakeB + 1) / 2.0 * 255).astype(np.uint8)
# plot real pose images
for i in range(n_samples):
plt.subplot(3, n_samples, 1 + i)
plt.axis('off')
plt.imshow(X_realA[i])
# plot generated target image
for i in range(n_samples):
plt.subplot(3, n_samples, 1 + n_samples + i)
plt.axis('off')
            plt.imshow(X_fakeB[i])
# plot real target image
for i in range(n_samples):
plt.subplot(3, n_samples, 1 + n_samples * 2 + i)
plt.axis('off')
plt.imshow(X_realB[i])
# save plot to file
path = 'plots/' + str(self.run_number)
if not os.path.exists(path):
os.makedirs(path)
filename1 = path + '/plot_%06d.png' % (step + 1)
plt.savefig(filename1)
plt.close()
# save the generator model
path = 'models/' + str(self.run_number)
if not os.path.exists(path):
os.makedirs(path)
filename2_g_model = path + '/model2_g_%06d.h5' % (step + 1)
filename2_d_model = path + '/model2_d_%06d.h5' % (step + 1)
filename2_gan_model = path + '/model2_gan_%06d.h5' % (step + 1)
global last_saved_model
last_saved_model = filename2_g_model
self.g_model.save(filename2_g_model)
self.d_model.save(filename2_d_model)
self.gan_model.save(filename2_gan_model)
print('>Saved: %s and %s and %s and %s' % (filename1, filename2_g_model, filename2_d_model, filename2_gan_model))
# train pix2pix model
def train(self, dataset, n_epochs=100, n_batch=1):
# determine the output square shape of the discriminator
n_patch = self.d_model.output_shape[1]
train_poses, train_images = dataset
# batch_per_epoch = number of batches per training epoch
batch_per_epoch = int(len(train_poses) / n_batch)
# n_steps = number of training iterations
n_steps = batch_per_epoch * n_epochs
# manually enumerate epochs
for i in range(n_steps):
# select a batch of real samples
[X_realA, X_realB], y_real = self.generate_real_samples(dataset, n_batch, n_patch)
# generate a batch of fake samples
X_fakeB, y_fake = self.generate_fake_samples(X_realA, n_patch)
# update discriminator for real samples
d_loss1 = self.d_model.train_on_batch([X_realA, X_realB], y_real)
# update discriminator for generated samples
d_loss2 = self.d_model.train_on_batch([X_realA, X_fakeB], y_fake)
# update the generator
g_loss, _, _ = self.gan_model.train_on_batch(X_realA, [y_real, X_realB])
# summarize performance
print('>%d, d1[%.3f] d2[%.3f] g[%.3f]' % (i + 1, d_loss1, d_loss2, g_loss))
# summarize model performance
if (i + 1) % (batch_per_epoch * 10) == 0:
self.summarize_performance(i, dataset)
def load_compressed_dataset(filename):
data = np.load(filename)
# unpack arrays
X1 = data['arr_0']
# scale from [0, 255] to [-1,1]
X1 = (X1 - 127.5) / 127.5
return X1
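# Minimal sketch of the expected .npz layout (an assumption inferred from
# the 'arr_0' key above, i.e. archives written with a bare np.savez):
#
#   imgs = np.zeros((4, 512, 1024, 3), dtype=np.uint8)   # dummy frames
#   np.savez('compressed_data/demo.npz', imgs)           # hypothetical path
#   X = load_compressed_dataset('compressed_data/demo.npz')
#   assert X.min() >= -1.0 and X.max() <= 1.0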
def save_samples(rn, src_imgs, gen_imgs, tar_imgs):
path = 'testing_samples/' + str(rn)
if not os.path.exists(path):
os.makedirs(path)
n = len(src_imgs)
for i in range(n):
img = (src_imgs[i] + 1) / 2.0 * 255
img = img.astype(np.uint8)
cv.imwrite(path + '/' + str(i) + 'src_image.jpg', img)
img = (gen_imgs[i] + 1) / 2.0 * 255
img = img.astype(np.uint8)
cv.imwrite(path + '/' + str(i) + 'gen_image.jpg', img)
img = (tar_imgs[i] + 1) / 2.0 * 255
img = img.astype(np.uint8)
cv.imwrite(path + '/' + str(i) + 'tar_image.jpg', img)
def callGAN(data_ready, subject_name, run_number, train, test, first_run, st1, sz1, st2, sz2, n_epochs=200,
d_model_name=None, g_model_name=None, gan_model_name=None, last_saved_model_name=None):
    '''
    Function to call the GAN model with many options.
    :param data_ready: Boolean: whether the data is already prepared as npz files
    :param subject_name: String
    :param run_number: Integer
    :param train: Boolean: whether to train
    :param test: Boolean: whether to test
    :param first_run: Boolean: True if no pre-saved models exist for the subject
    :param st1: start index of training data
    :param sz1: size of training data
    :param st2: start index of testing data
    :param sz2: size of testing data
    :param n_epochs: number of epochs
    :param d_model_name: Discriminator model name, if pre-saved models are used
    :param g_model_name: Generator model name, if pre-saved models are used
    :param gan_model_name: GAN model name, if pre-saved models are used
    :param last_saved_model_name: Generator model name, used when only testing
    :return: None
    '''
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
en1 = sz1 + st1
en2 = sz2 + st2
if data_ready:
train_filename1 = 'compressed_data/' + subject_name + '_train_images' + str(st1) + '-' + str(en1) + '.npz'
train_filename2 = 'compressed_data/' + subject_name + '_train_poses' + str(st1) + '-' + str(en1) + '.npz'
val_filename1 = 'compressed_data/' + subject_name + '_val_images' + str(st2) + '-' + str(en2) + '.npz'
val_filename2 = 'compressed_data/' + subject_name + '_val_poses' + str(st2) + '-' + str(en2) + '.npz'
else:
print('\nReading data...\n')
train_filename1, train_filename2, val_filename1, val_filename2 = read_and_save(subject_name, st1, en1, st2, en2)
global last_saved_model
last_saved_model = None
if not first_run:
d_model_name = 'models/' + d_model_name
last_saved_model = g_model_name = 'models/' + g_model_name
gan_model_name = 'models/' + gan_model_name
if train:
X1_images = load_compressed_dataset(train_filename1)
X2_poses = load_compressed_dataset(train_filename2)
loaded_data = [X2_poses, X1_images]
print('Loaded', loaded_data[0].shape, loaded_data[1].shape)
shape = loaded_data[0].shape[1:]
print('Image shape is', shape)
GANModel = Pix2PixModel(run_number, shape)
print(GANModel.name(), '\n\n')
if first_run:
GANModel.create_discriminator()
GANModel.create_generator()
GANModel.create_gan()
else:
GANModel.load_models(d_model_name, g_model_name, gan_model_name)
        GANModel.train(loaded_data, n_epochs=n_epochs)
        # free the training arrays before the testing phase
        del loaded_data
if test:
if last_saved_model is None:
last_saved_model = 'models/' + last_saved_model_name
gen_model = load_model(last_saved_model)
print("Model is loaded.\n")
XX2_images = load_compressed_dataset(val_filename1)
XX1_poses = load_compressed_dataset(val_filename2)
gen_model.summary()
print("\nStart generating data...")
gen_images = gen_model.predict(XX1_poses, batch_size=16)
save_samples(run_number, XX1_poses, gen_images, XX2_images)
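# Hypothetical driver call (not part of the original file) showing how the
# options fit together; the subject name, run number and slice bounds are
# made-up values:
#
#   callGAN(data_ready=False, subject_name='subject01', run_number=1,
#           train=True, test=True, first_run=True,
#           st1=0, sz1=1000, st2=1000, sz2=200, n_epochs=200)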
| [
"keras.models.load_model",
"numpy.load",
"numpy.ones",
"numpy.random.randint",
"keras.initializers.RandomNormal",
"GAN.utils.read_and_save",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"os.path.exists",
"keras.layers.LeakyReLU",
"keras.Model",
"keras.layers.Dropout",
"keras.optimi... | [((10497, 10514), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (10504, 10514), True, 'import numpy as np\n'), ((837, 862), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (849, 862), False, 'from keras.initializers import RandomNormal\n'), ((922, 951), 'keras.Input', 'Input', ([], {'shape': 'self.image_shape'}), '(shape=self.image_shape)\n', (927, 951), False, 'from keras import Input, Model\n'), ((1032, 1061), 'keras.Input', 'Input', ([], {'shape': 'self.image_shape'}), '(shape=self.image_shape)\n', (1037, 1061), False, 'from keras import Input, Model\n'), ((2372, 2421), 'keras.Model', 'Model', (['[in_src_image, in_target_image]', 'patch_out'], {}), '([in_src_image, in_target_image], patch_out)\n', (2377, 2421), False, 'from keras import Input, Model\n'), ((2460, 2487), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (2464, 2487), False, 'from keras.optimizers import Adam\n'), ((2677, 2702), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (2689, 2702), False, 'from keras.initializers import RandomNormal\n'), ((3087, 3112), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (3099, 3112), False, 'from keras.initializers import RandomNormal\n'), ((3620, 3645), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (3632, 3645), False, 'from keras.initializers import RandomNormal\n'), ((3666, 3695), 'keras.Input', 'Input', ([], {'shape': 'self.image_shape'}), '(shape=self.image_shape)\n', (3671, 3695), False, 'from keras import Input, Model\n'), ((4805, 4832), 'keras.Model', 'Model', (['inp_image', 'out_image'], {}), '(inp_image, out_image)\n', (4810, 4832), False, 'from keras import Input, Model\n'), ((5166, 5195), 'keras.Input', 'Input', ([], {'shape': 'self.image_shape'}), '(shape=self.image_shape)\n', (5171, 5195), False, 'from keras import Input, Model\n'), ((5529, 5565), 'keras.Model', 'Model', (['src_input', '[dis_out, gen_out]'], {}), '(src_input, [dis_out, gen_out])\n', (5534, 5565), False, 'from keras import Input, Model\n'), ((5604, 5631), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (5608, 5631), False, 'from keras.optimizers import Adam\n'), ((6000, 6053), 'numpy.random.randint', 'np.random.randint', (['(0)', 'train_poses.shape[0]', 'n_samples'], {}), '(0, train_poses.shape[0], n_samples)\n', (6017, 6053), True, 'import numpy as np\n'), ((6195, 6248), 'numpy.ones', 'np.ones', (['(n_samples, patch_shape, patch_shape * 2, 1)'], {}), '((n_samples, patch_shape, patch_shape * 2, 1))\n', (6202, 6248), True, 'import numpy as np\n'), ((6698, 6722), 'keras.models.load_model', 'load_model', (['d_model_name'], {}), '(d_model_name)\n', (6708, 6722), False, 'from keras.models import load_model\n'), ((6746, 6770), 'keras.models.load_model', 'load_model', (['g_model_name'], {}), '(g_model_name)\n', (6756, 6770), False, 'from keras.models import load_model\n'), ((6796, 6822), 'keras.models.load_model', 'load_model', (['gan_model_name'], {}), '(gan_model_name)\n', (6806, 6822), False, 'from keras.models import load_model\n'), ((8259, 8281), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename1'], {}), '(filename1)\n', (8270, 8281), True, 'import matplotlib.pyplot as plt\n'), ((8290, 8301), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8299, 
8301), True, 'import matplotlib.pyplot as plt\n'), ((10743, 10763), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10757, 10763), False, 'import os\n'), ((10773, 10790), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (10784, 10790), False, 'import os\n'), ((13062, 13109), 'GAN.utils.read_and_save', 'read_and_save', (['subject_name', 'st1', 'en1', 'st2', 'en2'], {}), '(subject_name, st1, en1, st2, en2)\n', (13075, 13109), False, 'from GAN.utils import read_and_save\n'), ((14304, 14332), 'keras.models.load_model', 'load_model', (['last_saved_model'], {}), '(last_saved_model)\n', (14314, 14332), False, 'from keras.models import load_model\n'), ((1121, 1134), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (1132, 1134), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1199, 1274), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(64, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (1205, 1274), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1300, 1320), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1309, 1320), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((1361, 1437), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(128, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (1367, 1437), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1463, 1483), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1481, 1483), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1509, 1529), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1518, 1529), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((1570, 1646), 'keras.layers.Conv2D', 'Conv2D', (['(256)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(256, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (1576, 1646), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1672, 1692), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1690, 1692), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1718, 1738), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1727, 1738), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((1779, 1855), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (1785, 1855), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1881, 1901), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1899, 1901), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((1927, 1947), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (1936, 1947), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((2008, 2068), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'padding': '"""same"""', 
'kernel_initializer': 'init'}), "(512, (4, 4), padding='same', kernel_initializer=init)\n", (2014, 2068), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((2094, 2114), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2112, 2114), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((2140, 2160), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2149, 2160), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((2209, 2267), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(4, 4)'], {'padding': '"""same"""', 'kernel_initializer': 'init'}), "(1, (4, 4), padding='same', kernel_initializer=init)\n", (2215, 2267), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((2296, 2317), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2306, 2317), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((2746, 2834), 'keras.layers.Conv2D', 'Conv2D', (['filters_num', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(filters_num, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (2752, 2834), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((2937, 2957), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2946, 2957), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((3158, 3255), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['filters_num', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(filters_num, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (3173, 3255), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((3276, 3296), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3294, 3296), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((3439, 3452), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (3450, 3452), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((3485, 3503), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3495, 3503), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((4088, 4164), 'keras.layers.Conv2D', 'Conv2D', (['(512)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(512, (4, 4), strides=(2, 2), padding='same', kernel_initializer=init)\n", (4094, 4164), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((4181, 4199), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4191, 4199), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((4629, 4716), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['(3)', '(4, 4)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'kernel_initializer': 'init'}), "(3, (4, 4), strides=(2, 2), padding='same',\n kernel_initializer=init)\n", (4644, 4716), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((4737, 4755), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (4747, 4755), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n'), ((7567, 7599), 'matplotlib.pyplot.subplot', 
'plt.subplot', (['(3)', 'n_samples', '(1 + i)'], {}), '(3, n_samples, 1 + i)\n', (7578, 7599), True, 'import matplotlib.pyplot as plt\n'), ((7612, 7627), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7620, 7627), True, 'import matplotlib.pyplot as plt\n'), ((7640, 7662), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_realA[i]'], {}), '(X_realA[i])\n', (7650, 7662), True, 'import matplotlib.pyplot as plt\n'), ((7748, 7792), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n_samples', '(1 + n_samples + i)'], {}), '(3, n_samples, 1 + n_samples + i)\n', (7759, 7792), True, 'import matplotlib.pyplot as plt\n'), ((7805, 7820), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7813, 7820), True, 'import matplotlib.pyplot as plt\n'), ((7833, 7859), 'matplotlib.pyplot.imshow', 'plt.imshow', (['fake_labels[i]'], {}), '(fake_labels[i])\n', (7843, 7859), True, 'import matplotlib.pyplot as plt\n'), ((7940, 7988), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', 'n_samples', '(1 + n_samples * 2 + i)'], {}), '(3, n_samples, 1 + n_samples * 2 + i)\n', (7951, 7988), True, 'import matplotlib.pyplot as plt\n'), ((8001, 8016), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (8009, 8016), True, 'import matplotlib.pyplot as plt\n'), ((8029, 8051), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X_realB[i]'], {}), '(X_realB[i])\n', (8039, 8051), True, 'import matplotlib.pyplot as plt\n'), ((8142, 8162), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8156, 8162), False, 'import os\n'), ((8176, 8193), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (8187, 8193), False, 'import os\n'), ((8400, 8420), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (8414, 8420), False, 'import os\n'), ((8434, 8451), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (8445, 8451), False, 'import os\n'), ((12377, 12428), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (12421, 12428), True, 'import tensorflow as tf\n'), ((2882, 2902), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (2900, 2902), False, 'from keras.layers import Concatenate, Conv2D, BatchNormalization\n'), ((3355, 3367), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3362, 3367), False, 'from keras.layers import LeakyReLU, Activation, Dropout, Conv2DTranspose\n')] |
# In this program I try to show the efficacy of hand detection using gamma correction.
# https://www.youtube.com/watch?v=Khy8U_zXDC4
# Usage: python demo_realsense_gamma_analysis.py
# (the cfg and weights paths are hardcoded in the __main__ block below, so
# the script takes no command-line arguments)
# from utils_orgyolo import *
import utils_orgyolo as uyolo
import numpy as np
from darknet import Darknet
import cv2
import pyrealsense2 as rs
import collections
def adjust_gamma(image, gamma=1.0):
# build a lookup table mapping the pixel values [0, 255] to
# their adjusted gamma values
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv2.LUT(image, table)
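# Quick sanity check of the LUT mapping (illustrative, not from the original
# demo): gamma > 1 brightens mid-tones, e.g. with gamma=2.0
#   pixel 64  -> 255 * (64 / 255) ** (1 / 2.0)  ~ 128
#   pixel 128 -> 255 * (128 / 255) ** (1 / 2.0) ~ 181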
def demo(cfgfile, weightfile):
model_hand = Darknet(cfgfile)
model_hand.print_network()
model_hand.load_weights(weightfile)
print('Loading weights from %s... Done!' % (weightfile))
namesfile = 'data/hands.names'
class_names = uyolo.load_class_names(namesfile)
use_cuda = 1
if use_cuda:
model_hand.cuda()
# RealSense Start
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
profile = pipeline.start(config)
# Setting exposure
s = profile.get_device().query_sensors()[1]
s.set_option(rs.option.exposure, exposure_val)
# Setting counter for evaluation
movingList = collections.deque(maxlen=100)
while True:
# Reading image from camera
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
if not color_frame:
continue
img = np.asanyarray(color_frame.get_data())
if gamma_correction:
img = adjust_gamma(img, gamma=gamma_val)
# yolo stuff
sized = cv2.resize(img, (model_hand.width, model_hand.height))
bboxes = uyolo.do_detect(model_hand, sized, 0.5, 0.4, use_cuda)
print('------')
draw_img = uyolo.plot_boxes_cv2(img, bboxes, None, class_names)
# Evaluation
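        # 'Continuity' below is the fraction of the last 100 frames in which
        # at least one hand box was detected (the deque stores booleans).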
movingList.append(any(bboxes))
print('Continuity : {}'.format(np.mean(movingList)))
cv2.imshow(cfgfile, draw_img)
cv2.waitKey(1)
############################################
if __name__ == '__main__':
exposure_val = 166
gamma_val = 2
gamma_correction = False
demo('cfg/yolo-hands.cfg', 'backup/hands/000200.weights')
| [
"cv2.resize",
"darknet.Darknet",
"pyrealsense2.pipeline",
"cv2.waitKey",
"collections.deque",
"pyrealsense2.config",
"cv2.LUT",
"utils_orgyolo.plot_boxes_cv2",
"numpy.mean",
"numpy.arange",
"cv2.imshow",
"utils_orgyolo.do_detect",
"utils_orgyolo.load_class_names"
] | [((730, 751), 'cv2.LUT', 'cv2.LUT', (['image', 'table'], {}), '(image, table)\n', (737, 751), False, 'import cv2\n'), ((801, 817), 'darknet.Darknet', 'Darknet', (['cfgfile'], {}), '(cfgfile)\n', (808, 817), False, 'from darknet import Darknet\n'), ((1005, 1038), 'utils_orgyolo.load_class_names', 'uyolo.load_class_names', (['namesfile'], {}), '(namesfile)\n', (1027, 1038), True, 'import utils_orgyolo as uyolo\n'), ((1138, 1151), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (1149, 1151), True, 'import pyrealsense2 as rs\n'), ((1165, 1176), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (1174, 1176), True, 'import pyrealsense2 as rs\n'), ((1463, 1492), 'collections.deque', 'collections.deque', ([], {'maxlen': '(100)'}), '(maxlen=100)\n', (1480, 1492), False, 'import collections\n'), ((1862, 1916), 'cv2.resize', 'cv2.resize', (['img', '(model_hand.width, model_hand.height)'], {}), '(img, (model_hand.width, model_hand.height))\n', (1872, 1916), False, 'import cv2\n'), ((1934, 1988), 'utils_orgyolo.do_detect', 'uyolo.do_detect', (['model_hand', 'sized', '(0.5)', '(0.4)', 'use_cuda'], {}), '(model_hand, sized, 0.5, 0.4, use_cuda)\n', (1949, 1988), True, 'import utils_orgyolo as uyolo\n'), ((2032, 2084), 'utils_orgyolo.plot_boxes_cv2', 'uyolo.plot_boxes_cv2', (['img', 'bboxes', 'None', 'class_names'], {}), '(img, bboxes, None, class_names)\n', (2052, 2084), True, 'import utils_orgyolo as uyolo\n'), ((2216, 2245), 'cv2.imshow', 'cv2.imshow', (['cfgfile', 'draw_img'], {}), '(cfgfile, draw_img)\n', (2226, 2245), False, 'import cv2\n'), ((2254, 2268), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2265, 2268), False, 'import cv2\n'), ((2185, 2204), 'numpy.mean', 'np.mean', (['movingList'], {}), '(movingList)\n', (2192, 2204), True, 'import numpy as np\n'), ((630, 647), 'numpy.arange', 'np.arange', (['(0)', '(256)'], {}), '(0, 256)\n', (639, 647), True, 'import numpy as np\n')] |
from PIL import Image
from PIL import ImageTk
from scipy.spatial import distance as dist
import tkinter as tki
import threading
from imutils import perspective
from imutils import contours
import imutils
import numpy as np
import cv2
class TallyhoApp:
def __init__(self, videoStream):
self.videoStream = videoStream
self.frame = None
self.thread = None
self.stopEvent = None
self.updatePPM = False
self.calibrationWidth = None
self.pixelsPerMetric = 40 # Initialize to anything, really...
self.root = tki.Tk()
self.panel = None
bottomPanel = tki.Frame(self.root)
bottomPanel.pack(side="bottom", fill="both", expand="yes", padx=0, pady=10)
lbl = tki.Label(bottomPanel, text="Calibration width (in inches)")
lbl.pack(side="left", padx=10, pady=0)
self.calibrationWidthEntry = tki.Entry(bottomPanel, justify="right", width=5)
self.calibrationWidthEntry.pack(side="left", padx=0, pady=0)
btn = tki.Button(bottomPanel, text="Set Calibration", command=self.calibrate)
btn.pack(side="left", padx=10, pady=0)
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
self.root.wm_title("Tallyho!")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
def videoLoop(self):
try:
while not self.stopEvent.is_set():
self.frame = self.videoStream.read()
self.frame = imutils.resize(self.frame, width=800)
self.drawOverlay()
image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = ImageTk.PhotoImage(image)
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
else:
self.panel.configure(image=image)
self.panel.image = image
except RuntimeError:
print("[INFO] Caught a RuntimeError")
def midpoint(self, ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
def drawOverlay(self):
# Get a single frame, do all calculations, and draw the overlays of measurements
self.overlay = self.frame.copy()
opacity = 0.5
# Our operations on the frame come here
gray = cv2.cvtColor(self.overlay, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # grab_contours copes with the differing return signatures of
        # cv2.findContours across OpenCV 2/3/4
        cnts = imutils.grab_contours(cnts)
# sort the contours from left-to-right and initialize the
# 'pixels per metric' calibration variable if necessary
if len(cnts) > 1:
(cnts, _) = contours.sort_contours(cnts)
if self.updatePPM:
self.pixelsPerMetric = None
self.updatePPM = False
for c in cnts:
# if the contour is not sufficiently large, ignore it
if cv2.contourArea(c) < 100:
continue
# compute the rotated bounding box of the contour
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear
# in top-left, top-right, bottom-right, and bottom-left
# order, then draw the outline of the rotated bounding
# box
box = perspective.order_points(box)
cv2.drawContours(self.overlay, [box.astype("int")], -1, (0, 255, 0), 2)
# loop over the original points and draw them
for (x, y) in box:
cv2.circle(self.overlay, (int(x), int(y)), 5, (0, 0, 255), -1)
# unpack the ordered bounding box, then compute the midpoint
# between the top-left and top-right coordinates, followed by
# the midpoint between bottom-left and bottom-right coordinates
(tl, tr, br, bl) = box
(tltrX, tltrY) = self.midpoint(tl, tr)
(blbrX, blbrY) = self.midpoint(bl, br)
            # compute the midpoint between the top-left and bottom-left points,
            # followed by the midpoint between the top-right and bottom-right
(tlblX, tlblY) = self.midpoint(tl, bl)
(trbrX, trbrY) = self.midpoint(tr, br)
# draw the midpoints on the image
cv2.circle(self.overlay, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(self.overlay, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(self.overlay, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(self.overlay, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(self.overlay, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
cv2.line(self.overlay, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)
# compute the Euclidean distance between the midpoints
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
# if the pixels per metric has not been initialized, then
# compute it as the ratio of pixels to supplied metric
# (in this case, inches)
if self.pixelsPerMetric is None:
self.pixelsPerMetric = dB / self.calibrationWidth
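            # Calibration intuition: pixelsPerMetric is pixels-per-inch; for
            # example, a reference object 2.0 in wide that spans 80 px gives
            # 40 px/in, and every later pixel distance is divided by that.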
# compute the size of the object
dimA = dA / self.pixelsPerMetric
dimB = dB / self.pixelsPerMetric
# draw the object sizes on the image
#cv2.putText(self.overlay, "{:.3f}in".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.putText(self.overlay, "{:.3f}in".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
#cv2.putText(self.overlay, "{:.4f}ft".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
# source1 = overlay, source2 = frame, destination = frame
cv2.addWeighted(self.overlay, opacity, self.frame, 1 - opacity, 0, self.frame)
def onClose(self):
print("[INFO] Closing...")
self.stopEvent.set()
self.videoStream.stop()
self.root.quit()
def calibrate(self):
enteredValue = self.calibrationWidthEntry.get()
print("Entered: " + enteredValue)
if enteredValue:
self.calibrationWidth = float(enteredValue)
self.updatePPM = True
print("Calibration width set: " + str(self.calibrationWidth))
else:
print("Nothing entered.")
| [
"cv2.GaussianBlur",
"cv2.cv.BoxPoints",
"imutils.contours.sort_contours",
"cv2.boxPoints",
"cv2.minAreaRect",
"tkinter.Frame",
"imutils.is_cv2",
"cv2.erode",
"imutils.resize",
"tkinter.Label",
"cv2.contourArea",
"scipy.spatial.distance.euclidean",
"cv2.dilate",
"tkinter.Button",
"cv2.cvt... | [((571, 579), 'tkinter.Tk', 'tki.Tk', ([], {}), '()\n', (577, 579), True, 'import tkinter as tki\n'), ((629, 649), 'tkinter.Frame', 'tki.Frame', (['self.root'], {}), '(self.root)\n', (638, 649), True, 'import tkinter as tki\n'), ((749, 809), 'tkinter.Label', 'tki.Label', (['bottomPanel'], {'text': '"""Calibration width (in inches)"""'}), "(bottomPanel, text='Calibration width (in inches)')\n", (758, 809), True, 'import tkinter as tki\n'), ((903, 951), 'tkinter.Entry', 'tki.Entry', (['bottomPanel'], {'justify': '"""right"""', 'width': '(5)'}), "(bottomPanel, justify='right', width=5)\n", (912, 951), True, 'import tkinter as tki\n'), ((1044, 1115), 'tkinter.Button', 'tki.Button', (['bottomPanel'], {'text': '"""Set Calibration"""', 'command': 'self.calibrate'}), "(bottomPanel, text='Set Calibration', command=self.calibrate)\n", (1054, 1115), True, 'import tkinter as tki\n'), ((1189, 1206), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1204, 1206), False, 'import threading\n'), ((1229, 1277), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.videoLoop', 'args': '()'}), '(target=self.videoLoop, args=())\n', (1245, 1277), False, 'import threading\n'), ((2580, 2626), 'cv2.cvtColor', 'cv2.cvtColor', (['self.overlay', 'cv2.COLOR_BGR2GRAY'], {}), '(self.overlay, cv2.COLOR_BGR2GRAY)\n', (2592, 2626), False, 'import cv2\n'), ((2642, 2675), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(7, 7)', '(0)'], {}), '(gray, (7, 7), 0)\n', (2658, 2675), False, 'import cv2\n'), ((2692, 2716), 'cv2.Canny', 'cv2.Canny', (['gray', '(50)', '(100)'], {}), '(gray, 50, 100)\n', (2701, 2716), False, 'import cv2\n'), ((2733, 2770), 'cv2.dilate', 'cv2.dilate', (['edged', 'None'], {'iterations': '(1)'}), '(edged, None, iterations=1)\n', (2743, 2770), False, 'import cv2\n'), ((2787, 2823), 'cv2.erode', 'cv2.erode', (['edged', 'None'], {'iterations': '(1)'}), '(edged, None, iterations=1)\n', (2796, 2823), False, 'import cv2\n'), ((6647, 6725), 'cv2.addWeighted', 'cv2.addWeighted', (['self.overlay', 'opacity', 'self.frame', '(1 - opacity)', '(0)', 'self.frame'], {}), '(self.overlay, opacity, self.frame, 1 - opacity, 0, self.frame)\n', (6662, 6725), False, 'import cv2\n'), ((2941, 2957), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (2955, 2957), False, 'import imutils\n'), ((3152, 3180), 'imutils.contours.sort_contours', 'contours.sort_contours', (['cnts'], {}), '(cnts)\n', (3174, 3180), False, 'from imutils import contours\n'), ((3532, 3550), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (3547, 3550), False, 'import cv2\n'), ((3653, 3679), 'numpy.array', 'np.array', (['box'], {'dtype': '"""int"""'}), "(box, dtype='int')\n", (3661, 3679), True, 'import numpy as np\n'), ((3920, 3949), 'imutils.perspective.order_points', 'perspective.order_points', (['box'], {}), '(box)\n', (3944, 3949), False, 'from imutils import perspective\n'), ((5544, 5590), 'scipy.spatial.distance.euclidean', 'dist.euclidean', (['(tltrX, tltrY)', '(blbrX, blbrY)'], {}), '((tltrX, tltrY), (blbrX, blbrY))\n', (5558, 5590), True, 'from scipy.spatial import distance as dist\n'), ((5608, 5654), 'scipy.spatial.distance.euclidean', 'dist.euclidean', (['(tlblX, tlblY)', '(trbrX, trbrY)'], {}), '((tlblX, tlblY), (trbrX, trbrY))\n', (5622, 5654), True, 'from scipy.spatial import distance as dist\n'), ((1578, 1615), 'imutils.resize', 'imutils.resize', (['self.frame'], {'width': '(800)'}), '(self.frame, width=800)\n', (1592, 1615), False, 'import imutils\n'), ((1677, 1720), 'cv2.cvtColor', 'cv2.cvtColor', 
(['self.frame', 'cv2.COLOR_BGR2RGB'], {}), '(self.frame, cv2.COLOR_BGR2RGB)\n', (1689, 1720), False, 'import cv2\n'), ((1745, 1767), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), '(image)\n', (1760, 1767), False, 'from PIL import Image\n'), ((1792, 1817), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['image'], {}), '(image)\n', (1810, 1817), False, 'from PIL import ImageTk\n'), ((3400, 3418), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (3415, 3418), False, 'import cv2\n'), ((3594, 3610), 'imutils.is_cv2', 'imutils.is_cv2', ([], {}), '()\n', (3608, 3610), False, 'import imutils\n'), ((3569, 3590), 'cv2.cv.BoxPoints', 'cv2.cv.BoxPoints', (['box'], {}), '(box)\n', (3585, 3590), False, 'import cv2\n'), ((3616, 3634), 'cv2.boxPoints', 'cv2.boxPoints', (['box'], {}), '(box)\n', (3629, 3634), False, 'import cv2\n'), ((1891, 1913), 'tkinter.Label', 'tki.Label', ([], {'image': 'image'}), '(image=image)\n', (1900, 1913), True, 'import tkinter as tki\n')] |
#!/usr/bin/env python
"""
vreckon and vdist are iterative algorithms.
How much does PyPy help over CPython?
Hmm, PyPy is slower than CPython...
$ pypy3 tests/benchmark_vincenty.py 10000
2.1160879135131836
0.06056046485900879
$ python tests/benchmark_vincenty.py 10000
0.3325080871582031
0.02107095718383789
"""
from time import time
from pymap3d.vincenty import vreckon, vdist
import numpy as np
from argparse import ArgumentParser
ll0 = (42., 82.)
def bench_vreckon(N: int) -> float:
sr = np.random.random(N)
az = np.random.random(N)
tic = time()
a, b, c = vreckon(*ll0, sr, az)
return time() - tic
def bench_vdist(N: int) -> float:
lat = np.random.random(N)
lon = np.random.random(N)
tic = time()
asr, aaz, aa21 = vdist(*ll0, lat, lon)
return time() - tic
if __name__ == '__main__':
p = ArgumentParser()
p.add_argument('N', type=int)
p = p.parse_args()
print(bench_vreckon(p.N))
print(bench_vdist(p.N))
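# Example run (figures quoted in the module docstring above):
#   $ python tests/benchmark_vincenty.py 10000
# prints two floats: the seconds spent in vreckon and then in vdist over
# 10000 random (range, azimuth) / (lat, lon) samples from the fixed ll0.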
| [
"argparse.ArgumentParser",
"time.time",
"pymap3d.vincenty.vreckon",
"numpy.random.random",
"pymap3d.vincenty.vdist"
] | [((500, 519), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (516, 519), True, 'import numpy as np\n'), ((529, 548), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (545, 548), True, 'import numpy as np\n'), ((560, 566), 'time.time', 'time', ([], {}), '()\n', (564, 566), False, 'from time import time\n'), ((581, 602), 'pymap3d.vincenty.vreckon', 'vreckon', (['*ll0', 'sr', 'az'], {}), '(*ll0, sr, az)\n', (588, 602), False, 'from pymap3d.vincenty import vreckon, vdist\n'), ((674, 693), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (690, 693), True, 'import numpy as np\n'), ((704, 723), 'numpy.random.random', 'np.random.random', (['N'], {}), '(N)\n', (720, 723), True, 'import numpy as np\n'), ((735, 741), 'time.time', 'time', ([], {}), '()\n', (739, 741), False, 'from time import time\n'), ((763, 784), 'pymap3d.vincenty.vdist', 'vdist', (['*ll0', 'lat', 'lon'], {}), '(*ll0, lat, lon)\n', (768, 784), False, 'from pymap3d.vincenty import vreckon, vdist\n'), ((847, 863), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (861, 863), False, 'from argparse import ArgumentParser\n'), ((615, 621), 'time.time', 'time', ([], {}), '()\n', (619, 621), False, 'from time import time\n'), ((797, 803), 'time.time', 'time', ([], {}), '()\n', (801, 803), False, 'from time import time\n')] |
#!/usr/bin/env python3
try:
import numpy as np
except ImportError:
pass
import re
import sys
import inspect
import os
def printerr(m):
sys.stderr.write(str(m) + "\n")
def ReadVtkPoly(f, verbose=False):
"""
Reads vtk points, polygons and fields from legacy VTK file.
f: `str` or file-like
Path to legacy VTK file or file-like object.
Returns:
points: `numpy.ndarray`, (num_points, 3)
Points (vertices).
poly: `list` [`list` [ `int` ]], (num_cells, ...)
Polygons as lists of indices in `points`.
    cell_fields: `dict` [`str`, `numpy.ndarray`], (num_cells,)
        Cell fields indexed by name. Each field has shape (num_cells,).
"""
def Assert(cond, msg=""):
if not cond:
caller = inspect.getframeinfo(inspect.stack()[1][0])
lines = "\n".join(caller[3]).strip()
filename = os.path.basename(caller.filename)
lineno = caller.lineno
printerr("\n{:}:{:} {:}".format(filename, lineno, lines))
printerr("Failing at iteration {:} in state s={:}".format(
lnum + 1, s))
if msg: printerr(str(msg))
printerr("Current input line:\n{:}".format(l.strip()))
printerr("Next line would be:\n{:}".format(f.readline().strip()))
exit(1)
class S:
header, comment, binary, dataset, points, \
polygons, cell_data, cell_scalars, cell_field = range(9)
points = None
poly = None
dim = 3
num_points = None
num_poly = None
cell_fields = dict()
cell_field_name = None
binary = False
path = None
if type(f) is str:
path = f
f = open(path, 'rb')
else:
pass # expect file-like
s = S.header
if f:
for lnum, l in enumerate(f):
l = str(l)
if not l.strip():
continue
if s == S.header: # check header
Assert("# vtk" in l)
s = S.comment
elif s == S.comment: # skip comment
s = S.binary
elif s == S.binary:
Assert("ASCII" in l or "BINARY" in l)
binary = "BINARY" in l
s = S.dataset
elif s == S.dataset:
Assert("DATASET POLYDATA" in l)
s = S.points
elif s == S.points:
Assert("POINTS" in l)
dtype = np.float64 if "double" in l else np.float32
                num_points = int(re.findall(r"\D*(\d*)\D*", l)[0])
points = np.empty((num_points, dim))
if binary:
dt = np.dtype('>f4')
bytes = f.read(3 * num_points * dt.itemsize)
points = np.frombuffer(bytes, dtype=dt)
                    points = points.astype(float)
f.readline()
else:
                    points = np.fromfile(f,
                                         dtype=float,
                                         count=num_points * 3,
                                         sep=' ')
points = points.reshape((num_points, 3))
Assert(points.shape[0] == num_points)
Assert(points.shape[1] == 3)
if verbose: printerr("Read {:} points".format(points.shape[0]))
s = S.polygons
elif s == S.polygons:
Assert("POLYGONS" in l)
                m = re.findall(r"\D*(\d*)\s*(\d*)", l)[0]
num_poly = int(m[0])
num_ints = int(m[1])
if binary:
dt = np.dtype('>i')
bytes = f.read(num_ints * dt.itemsize)
ints = np.frombuffer(bytes, dtype=dt)
                    ints = ints.astype(int)
f.readline()
else:
                    ints = np.fromfile(f,
                                       dtype=int,
                                       count=num_ints,
                                       sep=' ')
i = 0
poly = []
for ip in range(num_poly):
n = ints[i]
i += 1
poly.append(ints[i:i + n])
i += n
Assert(i == num_ints)
Assert(len(poly) == num_poly)
if verbose: printerr("Read {:} polygons".format(len(poly)))
s = S.cell_data
elif s == S.cell_data:
if "CELL_DATA" in l:
                    n = int(re.findall(r"\D*(\d*)", l)[0])
Assert(n == num_poly)
s = S.cell_scalars
elif "POINT_DATA" in l:
pass
elif s == S.cell_scalars: # read cell field
if "SCALARS" in l:
                    cell_field_name = re.findall(r"SCALARS\s*(\S+)", l)[0]
s = S.cell_field
else:
s = S.cell_data
elif s == S.cell_field:
Assert("LOOKUP_TABLE" in l)
if binary:
dt = np.dtype('>f4')
bytes = f.read(num_poly * dt.itemsize)
u = np.frombuffer(bytes, dtype=dt)
                    u = u.astype(float)
f.readline()
else:
                    u = np.fromfile(f, dtype=float, count=num_poly, sep=' ')
Assert(u.shape[0] == num_poly, ["u.shape=", u.shape])
if verbose:
printerr("Read cell field '{:}'".format(cell_field_name))
cell_fields[cell_field_name] = u
s = S.cell_scalars
if path:
f.close()
return points, poly, cell_fields
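# Hedged usage sketch (not part of the original module); 'mesh.vtk' is a
# placeholder path:
#
#   points, poly, cell_fields = ReadVtkPoly('mesh.vtk', verbose=True)
#   printerr("{:} points, {:} polygons, fields: {:}".format(
#       len(points), len(poly), sorted(cell_fields)))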
| [
"os.path.basename",
"numpy.fromfile",
"numpy.empty",
"numpy.frombuffer",
"numpy.dtype",
"re.findall",
"inspect.stack"
] | [((889, 922), 'os.path.basename', 'os.path.basename', (['caller.filename'], {}), '(caller.filename)\n', (905, 922), False, 'import os\n'), ((794, 809), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (807, 809), False, 'import inspect\n'), ((2580, 2607), 'numpy.empty', 'np.empty', (['(num_points, dim)'], {}), '((num_points, dim))\n', (2588, 2607), True, 'import numpy as np\n'), ((2660, 2675), 'numpy.dtype', 'np.dtype', (['""">f4"""'], {}), "('>f4')\n", (2668, 2675), True, 'import numpy as np\n'), ((2770, 2800), 'numpy.frombuffer', 'np.frombuffer', (['bytes'], {'dtype': 'dt'}), '(bytes, dtype=dt)\n', (2783, 2800), True, 'import numpy as np\n'), ((2938, 2999), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float', 'count': '(num_points * 3)', 'sep': '""" """'}), "(f, dtype=np.float, count=num_points * 3, sep=' ')\n", (2949, 2999), True, 'import numpy as np\n'), ((2522, 2553), 're.findall', 're.findall', (['"""\\\\D*(\\\\d*)\\\\D*"""', 'l'], {}), "('\\\\D*(\\\\d*)\\\\D*', l)\n", (2532, 2553), False, 'import re\n'), ((3484, 3521), 're.findall', 're.findall', (['"""\\\\D*(\\\\d*)\\\\s*(\\\\d*)"""', 'l'], {}), "('\\\\D*(\\\\d*)\\\\s*(\\\\d*)', l)\n", (3494, 3521), False, 'import re\n'), ((3647, 3661), 'numpy.dtype', 'np.dtype', (['""">i"""'], {}), "('>i')\n", (3655, 3661), True, 'import numpy as np\n'), ((3748, 3778), 'numpy.frombuffer', 'np.frombuffer', (['bytes'], {'dtype': 'dt'}), '(bytes, dtype=dt)\n', (3761, 3778), True, 'import numpy as np\n'), ((3908, 3961), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int', 'count': 'num_ints', 'sep': '""" """'}), "(f, dtype=np.int, count=num_ints, sep=' ')\n", (3919, 3961), True, 'import numpy as np\n'), ((4595, 4622), 're.findall', 're.findall', (['"""\\\\D*(\\\\d*)"""', 'l'], {}), "('\\\\D*(\\\\d*)', l)\n", (4605, 4622), False, 'import re\n'), ((4901, 4935), 're.findall', 're.findall', (['"""SCALARS\\\\s*(\\\\S+)"""', 'l'], {}), "('SCALARS\\\\s*(\\\\S+)', l)\n", (4911, 4935), False, 'import re\n'), ((5164, 5179), 'numpy.dtype', 'np.dtype', (['""">f4"""'], {}), "('>f4')\n", (5172, 5179), True, 'import numpy as np\n'), ((5263, 5293), 'numpy.frombuffer', 'np.frombuffer', (['bytes'], {'dtype': 'dt'}), '(bytes, dtype=dt)\n', (5276, 5293), True, 'import numpy as np\n'), ((5416, 5471), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float', 'count': 'num_poly', 'sep': '""" """'}), "(f, dtype=np.float, count=num_poly, sep=' ')\n", (5427, 5471), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
import os
from multi_isotope_calculator import Multi_isotope
DATA_PATH = '../data/'
CORE_MASS = 110820 # in kg
REFUELLING_TIME = 6 # in days
SEPARATION_EFFICIENCY = 0.97
SIM_DUR = 729 # in days
def main():
spent_natU_fname = os.path.join(DATA_PATH,
"SERPENT_outputs_NatU_percentages.npy")
get_expectations(spent_natU_fname, 22, '0.5MWd', True)
get_expectations(spent_natU_fname, 88, '2MWd', True)
return
def get_expectations(fname, irradiation_time, burnup, verbose=True):
"""Run all the calculations for one simulation"""
print(f"Expected values for a cycle with burnup {burnup} and "
+ f"irradiation time {irradiation_time}:\n")
reactor_cycles(irradiation_time, verbose=True)
natU_to_repU_cycles(fname, irradiation_time, burnup, verbose)
expected_plutonium(burnup, irradiation_time)
expected_heu(fname, irradiation_time, burnup)
spent_reprocessed_uranium(fname, burnup, irradiation_time, verbose)
print("\n\n")
return
def expected_heu(fname, irradiation_time, burnup):
"""Get the expected amount of weapongrade U produced"""
# Get fraction of times where natU is used as reactor fuel
n_natU, n_repU = natU_to_repU_cycles(fname, irradiation_time, burnup,
verbose=False)
# + 1 because of the stored fuel assembly and + 1 for the incomplete
# reactor cycle at the end of the simulation.
time_reactor_enrichment = (n_natU + 2) * enrichment_reactorgrade()
time_heu_enrichment = SIM_DUR - time_reactor_enrichment
m = Multi_isotope({'234': 0.0054, '235': (0.7204, 90., 0.3)},
feed=10000, alpha235=1.35, process='centrifuge',
downblend=True)
m.calculate_staging()
heu_per_cycle = m.p
total_heu = heu_per_cycle * time_heu_enrichment
    print((f'Total weapon-grade U: {total_heu:.1f} kg, using an irradiation'
+ f' time of {irradiation_time}'))
return total_heu
def enrichment_reactorgrade(verbose=False):
"""Get the (non-int) timesteps needed to produce one SRS core
This function returns the number of enrichment cycles needed to enrich
NatU to 1.1% making it usable in the Savannah River Site reaction. One
full reactor core contains 110820 kg SEU and we assume that in one step
10'000 kg of uranium are used as feed.
While timesteps typically are integers, this is not the case here.
Using floats has the advantage that the enrichment in the last step is
reflected better, as the facility retains some capacity that can
subsequently be used to enrich natural uranium to HEU.
"""
m = Multi_isotope({'234': 0.0054, '235': (0.7204, 1.1, 0.3)},
feed=10000, alpha235=1.35, process='centrifuge',
downblend=True)
m.calculate_staging()
product = m.p
n_steps = CORE_MASS / product
if verbose:
print(f'{n_steps} enrichment cycles needed.')
return n_steps
def spent_reprocessed_uranium(fname, burnup, irradiation_time,
verbose=False):
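    # Note: the incoming fname argument is effectively ignored; the path is
    # re-derived from 'burnup' below so that the matching RepU composition
    # file is loaded.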
if burnup=='0.5MWd':
fname = os.path.join(DATA_PATH, 'SERPENT_outputs_RepU_05MWd_percentages.npy')
elif burnup=='2MWd':
fname = os.path.join(DATA_PATH, 'SERPENT_outputs_RepU_2MWd_percentages.npy')
else:
raise ValueError("'burnup' has to be either '0.5MWd' or '2MWd'")
data = np.load(fname, allow_pickle=True).item()
data = data[burnup]
uranium_content = 0
for key, val in data.items():
if key in [f'U{iso}' for iso in range(232, 239)]:
uranium_content += val
spent_batch = CORE_MASS * SEPARATION_EFFICIENCY * uranium_content
n_reactor_cycles = natU_to_repU_cycles(fname, irradiation_time,
burnup)[1]
spent_reprocessed = n_reactor_cycles * spent_batch
if verbose:
print(f'{spent_reprocessed/1000:.1f} t of spent uranium in storage')
return spent_reprocessed
def reactor_cycles(irradiation_time, verbose=False):
"""Get the (int) number of expected reactor cycles in one simulation"""
# Note the integer division!
n_cycles = ((SIM_DUR - enrichment_reactorgrade())
// (irradiation_time + REFUELLING_TIME))
if verbose:
print(f'Irradiation time of {irradiation_time} yields {n_cycles} '
+ 'cycles')
return n_cycles
def natU_to_repU_cycles(fname, irradiation_time, burnup, verbose=False):
"""Get the number of reactor cyclues using repU and natU"""
data = np.load(fname, allow_pickle=True).item()
data = data[burnup]
spentU_composition = {}
uranium_content = 0
# Get uranium content and isotopic composition of uranium
for key, val in data.items():
if key in [f'U{iso}' for iso in range(232, 239) if iso!=237]:
spentU_composition[key[1:]] = val # remove the 'U'
uranium_content += val
# Normalise spent uranium's isotopic composition to 100 (percent)
for key, val in spentU_composition.items():
spentU_composition[key] = 100 * val / uranium_content
total_cycles = reactor_cycles(irradiation_time, False)
spent_batch = CORE_MASS * SEPARATION_EFFICIENCY * uranium_content
spentU_composition['235'] = (spentU_composition['235'], 1.1, 0.3)
del spentU_composition['238']
m = Multi_isotope(spentU_composition, feed=spent_batch, alpha235=1.35,
process='centrifuge', downblend=True)
m.calculate_staging()
repU_batch_mass = m.p
    cycle = 0  # timestep in the form of reactor cycles
repU_storage = 0
temp_dict = {}
n_natU = 0
n_repU = 0
while cycle < total_cycles:
        # This dictionary construction reflects that spent fuel from a
        # reactor cannot be fed back in the immediately following cycle,
        # but only in the one after that, because of the in-between
        # stations (reprocessing and enrichment).
try:
repU_storage += temp_dict[cycle-2]
del temp_dict[cycle-2]
except KeyError:
pass
if repU_storage < CORE_MASS:
n_natU += 1
temp_dict[cycle] = repU_batch_mass
else:
n_repU += 1
repU_storage -= CORE_MASS
cycle += 1
if verbose:
print(f"Out of {total_cycles} cycles, {n_natU} used fresh fuel and"
+ f" {n_repU} used reprocessed fuel.")
if repU_storage < CORE_MASS:
print(f"Last, incomplete cycle uses natU")
else:
print(f"Last, incomplete cycle uses repU")
return (n_natU, n_repU)
def expected_plutonium(burnup, irradiation_time):
"""This is ugly coding don't look at it"""
data = []
data.append(get_plutonium(
os.path.join(DATA_PATH, 'SERPENT_outputs_NatU_percentages.npy'),
burnup))
if burnup=='0.5MWd':
pu = get_plutonium(os.path.join(DATA_PATH,
'SERPENT_outputs_RepU_05MWd_percentages.npy'), burnup)
data.append(pu)
elif burnup=='2MWd':
pu = get_plutonium(os.path.join(DATA_PATH,
'SERPENT_outputs_RepU_2MWd_percentages.npy'), burnup)
data.append(pu)
else:
raise ValueError("'burnup' has to be either '0.5MWd' or '2MWd'")
plutonium = np.array(data)
plutonium *= (reactor_cycles(irradiation_time) * CORE_MASS
* SEPARATION_EFFICIENCY)
mean = np.mean(plutonium)
std = np.std(plutonium, ddof=1)
print(f"Pu for {burnup}: {mean:.1f} +- {std:.1f}")
return
def get_plutonium(fname, burnup):
"""Load the composition of spent fuel and filter it (e.g., only U)"""
data = np.load(fname, allow_pickle=True).item()
data = data[burnup]
pu = 0
for isotope, value in data.items():
if isotope in ('Pu239', 'Pu240', 'Pu241'):
pu += value
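    # The next term presumably accounts for the fraction of Np-239
    # (half-life ~2.356 d) that decays into Pu-239 after roughly one
    # day of cooling: 1 - 2**(-1/2.356) ~ 0.25.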
pu += (1 - 2.**(-1./2.356)) * data['Np239']
return pu
if __name__=="__main__":
main()
| [
"numpy.load",
"numpy.std",
"numpy.mean",
"numpy.array",
"multi_isotope_calculator.Multi_isotope",
"os.path.join"
] | [((279, 342), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_NatU_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_NatU_percentages.npy')\n", (291, 342), False, 'import os\n'), ((1629, 1756), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (["{'234': 0.0054, '235': (0.7204, 90.0, 0.3)}"], {'feed': '(10000)', 'alpha235': '(1.35)', 'process': '"""centrifuge"""', 'downblend': '(True)'}), "({'234': 0.0054, '235': (0.7204, 90.0, 0.3)}, feed=10000,\n alpha235=1.35, process='centrifuge', downblend=True)\n", (1642, 1756), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((2726, 2852), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (["{'234': 0.0054, '235': (0.7204, 1.1, 0.3)}"], {'feed': '(10000)', 'alpha235': '(1.35)', 'process': '"""centrifuge"""', 'downblend': '(True)'}), "({'234': 0.0054, '235': (0.7204, 1.1, 0.3)}, feed=10000,\n alpha235=1.35, process='centrifuge', downblend=True)\n", (2739, 2852), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((5483, 5592), 'multi_isotope_calculator.Multi_isotope', 'Multi_isotope', (['spentU_composition'], {'feed': 'spent_batch', 'alpha235': '(1.35)', 'process': '"""centrifuge"""', 'downblend': '(True)'}), "(spentU_composition, feed=spent_batch, alpha235=1.35, process=\n 'centrifuge', downblend=True)\n", (5496, 5592), False, 'from multi_isotope_calculator import Multi_isotope\n'), ((7464, 7478), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7472, 7478), True, 'import numpy as np\n'), ((7598, 7616), 'numpy.mean', 'np.mean', (['plutonium'], {}), '(plutonium)\n', (7605, 7616), True, 'import numpy as np\n'), ((7627, 7652), 'numpy.std', 'np.std', (['plutonium'], {'ddof': '(1)'}), '(plutonium, ddof=1)\n', (7633, 7652), True, 'import numpy as np\n'), ((3218, 3287), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_RepU_05MWd_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_RepU_05MWd_percentages.npy')\n", (3230, 3287), False, 'import os\n'), ((3330, 3398), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_RepU_2MWd_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_RepU_2MWd_percentages.npy')\n", (3342, 3398), False, 'import os\n'), ((3498, 3531), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (3505, 3531), True, 'import numpy as np\n'), ((4662, 4695), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (4669, 4695), True, 'import numpy as np\n'), ((6935, 6998), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_NatU_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_NatU_percentages.npy')\n", (6947, 6998), False, 'import os\n'), ((7069, 7138), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_RepU_05MWd_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_RepU_05MWd_percentages.npy')\n", (7081, 7138), False, 'import os\n'), ((7840, 7873), 'numpy.load', 'np.load', (['fname'], {'allow_pickle': '(True)'}), '(fname, allow_pickle=True)\n', (7847, 7873), True, 'import numpy as np\n'), ((7241, 7309), 'os.path.join', 'os.path.join', (['DATA_PATH', '"""SERPENT_outputs_RepU_2MWd_percentages.npy"""'], {}), "(DATA_PATH, 'SERPENT_outputs_RepU_2MWd_percentages.npy')\n", (7253, 7309), False, 'import os\n')] |
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.offline as py
import plotly.graph_objs as go
import colorlover as cl
sns.set()
################################################################################
# PROGRESS TRACKER
################################################################################
def update_progress(progress,message=""):
# update_progress() : Displays or updates a console progress bar
## Accepts a float between 0 and 1. Any int will be converted to a float.
## A value under 0 represents a 'halt'.
## A value at 1 or bigger represents 100%
barLength = 20 # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rProgress : [{0}] {1}% {2} {3}".format( "="*block +
" "*(barLength-block),
round(progress*100,2),
status,message)
sys.stdout.write(text)
sys.stdout.flush()
################################################################################
# PLOTS FOR estimate
################################################################################
def visualisation(df,model):
predictors_name = np.array(['ED', 'SOUTH', 'NONWH', 'HISP', 'FE', 'MARR', 'MARRFE', 'EX',
'UNION', 'MANUF', 'CONSTR', 'MANAG', 'SALES', 'CLER',
'SERV', 'PROF'])
trace1 = []
colors = cl.scales[str(len(df.columns))]["qual"]["Dark2"]
for i,name in enumerate(df.columns):
trace0 = go.Scatter(
y = df[name].values,
name = name,
mode = 'markers',
marker = dict(
size = 10,
color = colors[i]
),
opacity=1
)
trace1.append(trace0)
if len(df)>16:
layout = dict(title = 'Result comparison for model {}'.format(model.name),
yaxis = dict(zeroline = True),
xaxis = go.layout.XAxis(
tickmode = 'array',
tickvals = np.arange(0,17),
ticktext = np.insert(predictors_name,0,"noise"),
zeroline = False
)
)
else:
layout = dict(title = 'Result comparison',
yaxis = dict(zeroline = True),
xaxis = go.layout.XAxis(
tickmode = 'array',
tickvals = np.arange(0,16),
ticktext = predictors_name,
zeroline = False
)
)
if model.cond_model.name == "Multilogistic":
layout = dict(title = 'Result comparison for model {}'.format(model.name),
yaxis = dict(zeroline = True),
xaxis = go.layout.XAxis(
tickmode = 'array',
tickvals = np.arange(0,17),
ticktext = np.insert(predictors_name,0,"intercept"),
zeroline = False
)
)
fig = dict(data=trace1, layout=layout)
py.iplot(fig)
################################################################################
# PLOTS FOR METROPOLIS HASTINGS
################################################################################
def big_plot(a = 16.0,b = 8.0):
plt.rcParams['figure.figsize'] = (a, b)
def reset_plot():
plt.rcParams['figure.figsize'] = (8.0, 4.0)
def compare_samples_MH(sample1,sample2):
fig, axs = plt.subplots(4, 4, figsize=(16, 16), sharey=True, sharex = True)
    # plot coordinates 1..16 of both samples on the 4x4 grid of axes
    for row in range(4):
        for col in range(4):
            idx = 4 * row + col + 1
            axs[row, col].plot(sample1[:, idx], alpha = 0.8)
            axs[row, col].plot(sample2[:, idx], alpha = 0.8)
plt.ylim(-1.5,1.5)
plt.tight_layout()
plt.show()
def autocorrelation(time_series, maxRange):
# estimate the autocorrelation
l = len(time_series)
ans = np.zeros(2*maxRange+1)
delta = np.arange(-maxRange,maxRange+1,1)
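    # For each lag, correlate the central window of the series with a
    # shifted copy of itself and normalise by the two window variances.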
for k in range(2*maxRange+1):
v0 = time_series[maxRange : l - maxRange ]
v1 = time_series[maxRange - delta[k] : l - maxRange - delta[k]]
m0 = np.mean(v0)
m1 = np.mean(v1)
cov = np.sum( (v0-m0) * (v1-m1) / len(v0) )
var0 = np.sum( (v0-m0)**2 / len(v0) )
var1 = np.sum( (v1-m1)**2 / len(v0) )
corr = cov / (var0 * var1)**0.5
ans[k] = corr
return delta, ans
def showAutocorrelation(samples, delta = None, col = None):
    if delta is None:
        delta = int( len(samples) / 6 )  # np.int is removed in recent NumPy
    _, trueCorrelation = autocorrelation(samples, delta )
    if col is None:
        plt.plot(np.arange(-delta,delta+1), trueCorrelation)
    else:
        plt.plot(np.arange(-delta,delta+1), trueCorrelation, c = col)
plt.ylim([-1.1,1.1])
def samples_exploration(samples, iterations = True, distribution = True,
correlation = True, size_samples = (24,13), names = None):
"visualisation of the metropolis algorithm"
if len(samples.shape)>1:
size = samples.shape[1]
else:
size = 1
colors = sns.color_palette("hls", size)
if iterations:
print("iterations")
rows = int(size/6)+1
if rows > 30: big_plot(24,12)
else: big_plot(24,6)
for k in range(size):
            if names is None:
                plt.plot(samples[:,k], c = colors[k], label = "coord {}".format(k+1), alpha = 0.5)
            else:
                plt.plot(samples[:,k], c = colors[k], alpha = 0.5, label = names[k])
plt.tight_layout()
plt.legend()
plt.show()
reset_plot()
if distribution:
print("estimation of the distributions")
rows = int(size/6)+1
if rows > 30: big_plot(24,12)
else: big_plot(24,6)
for k in range(size):
plt.subplot(rows,6,k+1)
sns.distplot(samples[:,k], color = colors[k])
plt.tight_layout()
plt.show()
if correlation:
print("autocorrelation")
big_plot(26,8)
for k in range(size):
showAutocorrelation(samples[:,k], col = colors[k])
plt.show()
reset_plot()
| [
"sys.stdout.write",
"matplotlib.pyplot.tight_layout",
"plotly.offline.iplot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"numpy.insert",
"numpy.mean",
"sys... | [((190, 199), 'seaborn.set', 'sns.set', ([], {}), '()\n', (197, 199), True, 'import seaborn as sns\n'), ((1460, 1482), 'sys.stdout.write', 'sys.stdout.write', (['text'], {}), '(text)\n', (1476, 1482), False, 'import sys\n'), ((1487, 1505), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1503, 1505), False, 'import sys\n'), ((1769, 1915), 'numpy.array', 'np.array', (["['ED', 'SOUTH', 'NONWH', 'HISP', 'FE', 'MARR', 'MARRFE', 'EX', 'UNION',\n 'MANUF', 'CONSTR', 'MANAG', 'SALES', 'CLER', 'SERV', 'PROF']"], {}), "(['ED', 'SOUTH', 'NONWH', 'HISP', 'FE', 'MARR', 'MARRFE', 'EX',\n 'UNION', 'MANUF', 'CONSTR', 'MANAG', 'SALES', 'CLER', 'SERV', 'PROF'])\n", (1777, 1915), True, 'import numpy as np\n'), ((3705, 3718), 'plotly.offline.iplot', 'py.iplot', (['fig'], {}), '(fig)\n', (3713, 3718), True, 'import plotly.offline as py\n'), ((4143, 4205), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(4)'], {'figsize': '(16, 16)', 'sharey': '(True)', 'sharex': '(True)'}), '(4, 4, figsize=(16, 16), sharey=True, sharex=True)\n', (4155, 4205), True, 'import matplotlib.pyplot as plt\n'), ((5666, 5685), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (5674, 5685), True, 'import matplotlib.pyplot as plt\n'), ((5689, 5707), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5705, 5707), True, 'import matplotlib.pyplot as plt\n'), ((5712, 5722), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5720, 5722), True, 'import matplotlib.pyplot as plt\n'), ((5839, 5865), 'numpy.zeros', 'np.zeros', (['(2 * maxRange + 1)'], {}), '(2 * maxRange + 1)\n', (5847, 5865), True, 'import numpy as np\n'), ((5874, 5911), 'numpy.arange', 'np.arange', (['(-maxRange)', '(maxRange + 1)', '(1)'], {}), '(-maxRange, maxRange + 1, 1)\n', (5883, 5911), True, 'import numpy as np\n'), ((6720, 6741), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1.1, 1.1]'], {}), '([-1.1, 1.1])\n', (6728, 6741), True, 'import matplotlib.pyplot as plt\n'), ((7046, 7076), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'size'], {}), "('hls', size)\n", (7063, 7076), True, 'import seaborn as sns\n'), ((6102, 6113), 'numpy.mean', 'np.mean', (['v0'], {}), '(v0)\n', (6109, 6113), True, 'import numpy as np\n'), ((6127, 6138), 'numpy.mean', 'np.mean', (['v1'], {}), '(v1)\n', (6134, 6138), True, 'import numpy as np\n'), ((7495, 7513), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7511, 7513), True, 'import matplotlib.pyplot as plt\n'), ((7522, 7534), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7532, 7534), True, 'import matplotlib.pyplot as plt\n'), ((7543, 7553), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7551, 7553), True, 'import matplotlib.pyplot as plt\n'), ((7875, 7893), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7891, 7893), True, 'import matplotlib.pyplot as plt\n'), ((7902, 7912), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7910, 7912), True, 'import matplotlib.pyplot as plt\n'), ((8091, 8101), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8099, 8101), True, 'import matplotlib.pyplot as plt\n'), ((6592, 6620), 'numpy.arange', 'np.arange', (['(-delta)', '(delta + 1)'], {}), '(-delta, delta + 1)\n', (6601, 6620), True, 'import numpy as np\n'), ((6663, 6691), 'numpy.arange', 'np.arange', (['(-delta)', '(delta + 1)'], {}), '(-delta, delta + 1)\n', (6672, 6691), True, 'import numpy as np\n'), ((7785, 7812), 'matplotlib.pyplot.subplot', 'plt.subplot', 
(['rows', '(6)', '(k + 1)'], {}), '(rows, 6, k + 1)\n', (7796, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7865), 'seaborn.distplot', 'sns.distplot', (['samples[:, k]'], {'color': 'colors[k]'}), '(samples[:, k], color=colors[k])\n', (7833, 7865), True, 'import seaborn as sns\n'), ((7417, 7480), 'matplotlib.pyplot.plot', 'plt.plot', (['samples[:, k]'], {'c': 'colors[k]', 'alpha': '(0.5)', 'label': 'names[k]'}), '(samples[:, k], c=colors[k], alpha=0.5, label=names[k])\n', (7425, 7480), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2600), 'numpy.arange', 'np.arange', (['(0)', '(17)'], {}), '(0, 17)\n', (2593, 2600), True, 'import numpy as np\n'), ((2640, 2678), 'numpy.insert', 'np.insert', (['predictors_name', '(0)', '"""noise"""'], {}), "(predictors_name, 0, 'noise')\n", (2649, 2678), True, 'import numpy as np\n'), ((3003, 3019), 'numpy.arange', 'np.arange', (['(0)', '(16)'], {}), '(0, 16)\n', (3012, 3019), True, 'import numpy as np\n'), ((3475, 3491), 'numpy.arange', 'np.arange', (['(0)', '(17)'], {}), '(0, 17)\n', (3484, 3491), True, 'import numpy as np\n'), ((3531, 3573), 'numpy.insert', 'np.insert', (['predictors_name', '(0)', '"""intercept"""'], {}), "(predictors_name, 0, 'intercept')\n", (3540, 3573), True, 'import numpy as np\n')] |
import numpy as np
from numpy.random.mtrand import RandomState
from agents import AbstractFeatureProvider, ViewsFeaturesProvider, Model, ModelBasedAgent
from reco_gym import Configuration
organic_user_count_args = {
'num_products': 10,
'random_seed': np.random.randint(2 ** 31 - 1),
# Select a Product randomly with the highest probability for the most frequently viewed product.
'select_randomly': True,
# Weight History Function: how treat each event back in time.
'weight_history_function': None,
# reverse popularity.
'reverse_pop': False,
    # Epsilon-greedy - if non-zero, this ensures the policy has support over all products
'epsilon': .0
}
#/EXPERIMENTAL#
def fast_choice(n_options, probs, rng):
# Numpy.random.choice is fast when vectorised, but we call it for single choices
# This very simple algorithm is faster for a small number of options (empirically - < 150)
# Generate a random number
pchoice = rng.random_sample()
    # Get the probability for the first option
running_sum = probs[0]
running_choice = 0
# Loop over options
for p in probs[1:]:
# If our random number is bigger than the sum of preceding probabilities
# Pick this one
if pchoice <= running_sum:
break
else:
running_sum += p
running_choice += 1
    # Machine precision issues mean that the probabilities sometimes do not sum exactly to one
# If this becomes an issue, divide the remaining probability mass over all items
if running_choice >= n_options:
return fast_choice(n_options, [1/n_options]*n_options, rng)
return running_choice
from numba import jit
@jit(nopython=True)
def numba_fast_choice(probs, p):
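    # Inverse-CDF sampling: return the index of the first cumulative
    # probability that exceeds the uniform draw p.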
return np.searchsorted(probs.cumsum(),p)
#/EXPERIMENTAL#
class OrganicUserEventCounterModelBuilder(AbstractFeatureProvider):
def __init__(self, config):
super(OrganicUserEventCounterModelBuilder, self).__init__(config)
def build(self):
class OrganicUserEventCounterModel(Model):
"""
Organic Event Count Model (per a User).
"""
def __init__(self, config):
super(OrganicUserEventCounterModel, self).__init__(config)
if config.select_randomly:
self.rng = RandomState(self.config.random_seed)
def act(self, observation, features):
# Preparations for epsilon-greedy
if self.config.epsilon > 0:
# Compute current mass
sum_features = np.sum(features, axis = 0)
# Get non-zero features
mask = features == 0
# Rescale to (1 - eps) % of the mass
features[~mask] = (1.0 - self.config.epsilon) * sum_features
# Uniformly redistribute eps % of the mass
features += self.config.epsilon * sum_features
if not self.config.reverse_pop:
action_proba = features / np.sum(features, axis = 0)
else:
action_proba = 1 - features / np.sum(features, axis = 0)
action_proba = action_proba/sum(action_proba)
if self.config.select_randomly:
#action = self.rng.choice(self.config.num_products, p = action_proba)
#action = fast_choice(self.config.num_products, action_proba, self.rng)
action = numba_fast_choice(action_proba, self.rng.random_sample())
ps = action_proba[action]
ps_all = action_proba
else:
action = np.argmax(action_proba)
ps = 1.0
ps_all = np.zeros(self.config.num_products)
ps_all[action] = 1.0
return {
**super().act(observation, features),
**{
'a': action,
'ps': ps,
'ps-a': ps_all,
},
}
return (
ViewsFeaturesProvider(self.config),
OrganicUserEventCounterModel(self.config)
)
class OrganicUserEventCounterAgent(ModelBasedAgent):
"""
Organic Event Counter Agent
The Agent that counts Organic views of Products (per a User)
and selects an Action for the most frequently shown Product.
"""
def __init__(self, config = Configuration(organic_user_count_args)):
super(OrganicUserEventCounterAgent, self).__init__(
config,
OrganicUserEventCounterModelBuilder(config)
)
| [
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.random.mtrand.RandomState",
"numpy.random.randint",
"numba.jit",
"agents.ViewsFeaturesProvider",
"reco_gym.Configuration"
] | [((1706, 1724), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1709, 1724), False, 'from numba import jit\n'), ((261, 291), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31 - 1)'], {}), '(2 ** 31 - 1)\n', (278, 291), True, 'import numpy as np\n'), ((4566, 4604), 'reco_gym.Configuration', 'Configuration', (['organic_user_count_args'], {}), '(organic_user_count_args)\n', (4579, 4604), False, 'from reco_gym import Configuration\n'), ((4199, 4233), 'agents.ViewsFeaturesProvider', 'ViewsFeaturesProvider', (['self.config'], {}), '(self.config)\n', (4220, 4233), False, 'from agents import AbstractFeatureProvider, ViewsFeaturesProvider, Model, ModelBasedAgent\n'), ((2342, 2378), 'numpy.random.mtrand.RandomState', 'RandomState', (['self.config.random_seed'], {}), '(self.config.random_seed)\n', (2353, 2378), False, 'from numpy.random.mtrand import RandomState\n'), ((2609, 2633), 'numpy.sum', 'np.sum', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (2615, 2633), True, 'import numpy as np\n'), ((3752, 3775), 'numpy.argmax', 'np.argmax', (['action_proba'], {}), '(action_proba)\n', (3761, 3775), True, 'import numpy as np\n'), ((3834, 3868), 'numpy.zeros', 'np.zeros', (['self.config.num_products'], {}), '(self.config.num_products)\n', (3842, 3868), True, 'import numpy as np\n'), ((3084, 3108), 'numpy.sum', 'np.sum', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (3090, 3108), True, 'import numpy as np\n'), ((3183, 3207), 'numpy.sum', 'np.sum', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (3189, 3207), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import pyrealsense2 as rs
from centersnap import CenterSnap
from centersnap.utils import load_img_NOCS, Open3dVisualizer
REALSENSE_MAT_640 = np.array([[428.907 , 0. , 321.383 , 0. ],
[ 0. , 428.611 , 241.602 , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ]])
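# This appears to be the 4x4 homogeneous camera-intrinsics matrix for the
# 640x480 color stream (fx ~ 428.9, fy ~ 428.6, principal point ~ (321.4, 241.6)).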
def initialize_device():
# Create a pipeline
pipeline = rs.pipeline()
config = rs.config()
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
profile = pipeline.start(config)
# Get stream profile and camera intrinsics
color_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))
color_intrinsics = color_profile.get_intrinsics()
# print(color_intrinsics)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
# print("Depth Scale is: " , depth_scale)
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
return pipeline, align, depth_scale
if __name__ == '__main__':
model_path = "models/CenterSnap_sim.onnx"
poincloud_estimator_path = "models/CenterSnapAE_sim.onnx"
max_dist = 2.0
# Initialize pose estimator with autoencoder
poseEstimator = CenterSnap(model_path, poincloud_estimator_path, min_conf=0.6, camera_mat=REALSENSE_MAT_640)
# Create REALSENSE pipeline
pipeline, align, depth_scale = initialize_device()
# out = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (1920*2,1080))
# Initialize the Open3d visualizer
open3dVisualizer = Open3dVisualizer()
cv2.namedWindow('Projected Pose',cv2.WINDOW_NORMAL)
while True:
# Press q key to stop
if cv2.waitKey(1) == ord('q'):
break
# Get frameset of color and depth
frames = pipeline.wait_for_frames()
# Align the depth frame to color frame
aligned_frames = align.process(frames)
# Get aligned frames
aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
color_frame = aligned_frames.get_color_frame()
# Validate that both frames are valid
if not aligned_depth_frame or not color_frame:
continue
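        # Convert the raw depth to millimetres and clip everything beyond
        # max_dist so far-away points do not dominate the input.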
depth_map = np.asanyarray(aligned_depth_frame.get_data())*depth_scale*1000
depth_map[depth_map>max_dist*1000] = max_dist*1000
rgb_img = np.asanyarray(color_frame.get_data())
# Update pose estimator
ret = poseEstimator(rgb_img, depth_map/255.0)
if ret:
# Draw RGB image with 2d data
combined_img = poseEstimator.draw_points_2d(rgb_img)
# Draw 3D data
open3dVisualizer(poseEstimator.points_3d_list, poseEstimator.boxes_3d_list)
else:
combined_img = rgb_img
combined_img = cv2.resize(combined_img, (1920, 1080))
# Convert Open3D map to image
o3d_screenshot_mat = open3dVisualizer.vis.capture_screen_float_buffer()
o3d_screenshot_mat = (255.0 * np.asarray(o3d_screenshot_mat)).astype(np.uint8)
o3d_screenshot_mat = cv2.resize(o3d_screenshot_mat, (1920, 1080))
combined_img = cv2.hconcat([combined_img, o3d_screenshot_mat])
# out.write(combined_img)
cv2.imshow("Projected Pose", combined_img)
# out.release()
pipeline.stop() | [
"cv2.resize",
"pyrealsense2.pipeline_wrapper",
"pyrealsense2.pipeline",
"cv2.waitKey",
"numpy.asarray",
"centersnap.utils.Open3dVisualizer",
"pyrealsense2.align",
"pyrealsense2.config",
"numpy.array",
"cv2.hconcat",
"centersnap.CenterSnap",
"cv2.imshow",
"cv2.namedWindow"
] | [((174, 292), 'numpy.array', 'np.array', (['[[428.907, 0.0, 321.383, 0.0], [0.0, 428.611, 241.602, 0.0], [0.0, 0.0, 1.0,\n 0.0], [0.0, 0.0, 0.0, 1.0]]'], {}), '([[428.907, 0.0, 321.383, 0.0], [0.0, 428.611, 241.602, 0.0], [0.0,\n 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]])\n', (182, 292), True, 'import numpy as np\n'), ((526, 539), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (537, 539), True, 'import pyrealsense2 as rs\n'), ((553, 564), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (562, 564), True, 'import pyrealsense2 as rs\n'), ((589, 618), 'pyrealsense2.pipeline_wrapper', 'rs.pipeline_wrapper', (['pipeline'], {}), '(pipeline)\n', (608, 618), True, 'import pyrealsense2 as rs\n'), ((1608, 1626), 'pyrealsense2.align', 'rs.align', (['align_to'], {}), '(align_to)\n', (1616, 1626), True, 'import pyrealsense2 as rs\n'), ((1898, 1995), 'centersnap.CenterSnap', 'CenterSnap', (['model_path', 'poincloud_estimator_path'], {'min_conf': '(0.6)', 'camera_mat': 'REALSENSE_MAT_640'}), '(model_path, poincloud_estimator_path, min_conf=0.6, camera_mat=\n REALSENSE_MAT_640)\n', (1908, 1995), False, 'from centersnap import CenterSnap\n'), ((2249, 2267), 'centersnap.utils.Open3dVisualizer', 'Open3dVisualizer', ([], {}), '()\n', (2265, 2267), False, 'from centersnap.utils import load_img_NOCS, Open3dVisualizer\n'), ((2273, 2325), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Projected Pose"""', 'cv2.WINDOW_NORMAL'], {}), "('Projected Pose', cv2.WINDOW_NORMAL)\n", (2288, 2325), False, 'import cv2\n'), ((3530, 3568), 'cv2.resize', 'cv2.resize', (['combined_img', '(1920, 1080)'], {}), '(combined_img, (1920, 1080))\n', (3540, 3568), False, 'import cv2\n'), ((3814, 3858), 'cv2.resize', 'cv2.resize', (['o3d_screenshot_mat', '(1920, 1080)'], {}), '(o3d_screenshot_mat, (1920, 1080))\n', (3824, 3858), False, 'import cv2\n'), ((3885, 3932), 'cv2.hconcat', 'cv2.hconcat', (['[combined_img, o3d_screenshot_mat]'], {}), '([combined_img, o3d_screenshot_mat])\n', (3896, 3932), False, 'import cv2\n'), ((3975, 4017), 'cv2.imshow', 'cv2.imshow', (['"""Projected Pose"""', 'combined_img'], {}), "('Projected Pose', combined_img)\n", (3985, 4017), False, 'import cv2\n'), ((2383, 2397), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2394, 2397), False, 'import cv2\n'), ((3736, 3766), 'numpy.asarray', 'np.asarray', (['o3d_screenshot_mat'], {}), '(o3d_screenshot_mat)\n', (3746, 3766), True, 'import numpy as np\n')] |
import abc
import asyncio
import functools
import importlib
import sys
import tokenize
from collections import OrderedDict
from collections.abc import Awaitable, Iterable  # ABCs live in collections.abc on modern Python
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from threading import Thread
import numpy as np
from ._bootstrap import get_current_user, open_resource, save_inputCells
from ._plot import draw
from .db import _schema
from .ui import ApplicationUI, display_source_code
class MetaApplication(type):
'''MetaApplication
    Get the source code of Application so we can record it into the database.
'''
def __new__(cls, name, bases, nmspc):
#nmspc['__source__'] = None
#nmspc['__AppData__'] = None
return super(MetaApplication, cls).__new__(cls, name, bases, nmspc)
def __init__(cls, name, bases, nmspc):
super(MetaApplication, cls).__init__(name, bases, nmspc)
if cls.__module__ != 'builtins':
try:
cls.__source__ = cls._getSourceCode()
except:
cls.__source__ = ''
def _getSourceCode(cls):
'''getSourceCode
'''
module = sys.modules[cls.__module__]
if module.__name__ == '__main__' and hasattr(module, 'In'):
code = module.In[-1]
elif cls.__AppData__ is not None:
code = cls.__AppData__.module.source.text
elif hasattr(module, '__file__'):
with tokenize.open(module.__file__) as f:
code = f.read()
else:
code = ''
return code
class Application(
metaclass=type('Meta', (abc.ABCMeta, MetaApplication), dict())):
__source__ = ''
__AppData__ = None
def __init__(self, parent=None):
self.parent = parent
self.rc = RcMap()
self.settings = {}
self.params = {}
self.tags = []
self.sweep = None
self.status = None
self.ui = None
self.reset_status()
self.level = 1
self.level_limit = 3
if parent is not None:
self.rc.parent = parent.rc
self.settings.update(parent.settings)
self.params.update(parent.params)
self.params.update(parent.status['current_params'])
self.tags.extend(parent.tags)
self.set_sweeps(parent.sweep.sweeps.values())
self.level = parent.level + 1
self.level_limit = parent.level_limit
#parent.status['sub_process_num'] += 1
def __del__(self):
if self.parent is not None:
#self.parent.status['sub_process_num'] -= 1
pass
def reset(self):
self.reset_status()
# self.ui.reset()
def record_title(self):
version = '%s.%d' % (self.__AppData__.version_tag,
self.__AppData__.version)
return 'Record by %s (%s)' % (self.__class__.__name__, version)
def reset_status(self):
self.status = dict(
current_record=None,
current_params={},
last_step_process=0,
sub_process_num=1,
process_changed_by_children=False,
result=None,
process=0.0,
done=False,
)
def getTotalProcess(self):
if self.parent is None:
return 100.0
else:
return self.parent.status['last_step_process'] / max(
self.parent.status['sub_process_num'], 1)
def processToChange(self, delta):
self.status['last_step_process'] = delta
def increaseProcess(self, value=0, by_children=False):
if not self.status['process_changed_by_children']:
value = self.status['last_step_process']
self.status['process'] += value
elif by_children:
self.status['process'] += value
if self.parent is not None:
self.parent.status['process_changed_by_children'] = True
self.parent.increaseProcess(
value * self.getTotalProcess() / 100, by_children=True)
if self.ui is not None:
self.ui.setProcess(self.status['process'])
def with_rc(self, rc={}):
self.rc.update(rc)
return self
def with_tags(self, *tags):
for tag in tags:
if tag not in self.tags:
self.tags.append(tag)
return self
def with_params(self, **kwargs):
params = dict([(name, [v[0], v[1]])
if isinstance(v, (list, tuple)) else (name, [v, ''])
for name, v in kwargs.items()])
self.params.update(params)
return self
def with_settings(self, settings={}):
self.settings.update(settings)
return self
def set_sweeps(self, sweeps=[]):
s = []
for sweep in sweeps:
if isinstance(sweep, tuple):
s.append(Sweep(*sweep))
elif isinstance(sweep, dict):
s.append(Sweep(**sweep))
elif isinstance(sweep, Sweep):
s.append(sweep)
else:
                raise TypeError('Unsupported type %r for sweep.' % type(sweep))
self.sweep = SweepSet(self, s)
return self
def collect(self, data):
        '''For experiments that loop, collect the data produced in each round.'''
if not isinstance(data, tuple):
data = (data, )
if self.status['result'] is None:
self.status['result'] = dict(
rows=1, cols=len(data), data=[[v] for v in data])
else:
for i, v in enumerate(data):
self.status['result']['data'][i].append(v)
self.status['result']['rows'] += 1
def result(self):
        '''Return the collected data in the order it was produced by work().'''
if self.status['result']['rows'] == 1:
data = tuple([v[0] for v in self.status['result']['data']])
else:
data = tuple([np.array(v) for v in self.status['result']['data']])
if self.status['result']['cols'] == 1:
return self.pre_save(data[0])
else:
return self.pre_save(*data)
def newRecord(self):
rc = dict([(name, str(v)) for name, v in self.rc.items()])
record = _schema.Record(
title=self.record_title(),
user=get_current_user(),
tags=self.tags,
params=self.params,
rc=rc,
hidden=False if self.parent is None else True,
app=self.__AppData__,
)
#self.status['current_record'] = record
if self.parent is not None:
self.parent.addSubRecord(record)
return record
def addSubRecord(self, record):
if self.status['current_record'] is not None:
if record.id is None:
record.save(signal_kwargs=dict(finished=True))
self.status['current_record'].children.append(record)
self.status['current_record'].save(
signal_kwargs=dict(finished=True))
def saveRecord(self, data):
if self.status['current_record'] is None:
self.status['current_record'] = self.newRecord()
self.status['current_record'].data = data
self.status['current_record'].save(signal_kwargs=dict(finished=True))
def setDone(self):
self.status['done'] = True
#value = 100 - self.ui.get_process()
# self.increase_process(value)
def run(self):
self.ui = ApplicationUI(self)
self.ui.display()
with ThreadPoolExecutor() as executor:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.set_default_executor(executor)
tasks = [asyncio.ensure_future(self.done())]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
save_inputCells()
async def done(self):
self.reset()
async for data in self.work():
self.collect(data)
result = self.result()
if self.level <= self.level_limit:
self.saveRecord(result)
draw(self.__class__.plot, result, self)
self.setDone()
return self.result()
async def work(self):
        '''Do not yield a single value as a tuple, or it will be unpacked.
        All of the following are allowed:
        yield 0
        yield 0, 1, 2
        yield np.array([1,2,3])
        yield [1,2,3]
        yield 1, (1,2)
        yield 0.5, np.array([1,2,3])
        '''
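        # Subclasses are expected to override work() as an async generator
        # (i.e. to `yield` results); done() consumes it with `async for`.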
def pre_save(self, *args):
return args
@staticmethod
def plot(fig, data):
pass
@classmethod
def save(cls, version=None, moduleName=None):
_schema.saveApplication(cls.__name__, cls.__source__,
get_current_user(), cls.__doc__, moduleName,
version)
@classmethod
def show(cls):
display_source_code(cls.__source__)
class Sweep:
"""Sweep
Sweep channal config.
"""
def __init__(self,
name,
generator,
unit='',
setter=None,
start=None,
total=None):
self.name = name
self.generator = generator
self.unit = unit
self.setter = setter
self.start = start
self.total = total
self._generator = self.generator
def __call__(self, *args, **kwds):
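        # Calling a Sweep forwards the arguments to its generator, so a
        # sweep can be parameterised lazily before iteration starts.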
self._generator = self._generator(*args, **kwds)
return self
def __len__(self):
try:
return len(self._generator)
except TypeError:
return self.total
def __aiter__(self):
return SweepIter(self)
class SweepIter:
def __init__(self, sweep):
self.iter = sweep._generator.__iter__() if isinstance(
sweep._generator, Iterable) else sweep._generator
self.app = sweep.app
self.setter = sweep.setter
self.name = sweep.name
self.unit = sweep.unit
        self.length = len(sweep)
def fetch_data(self):
try:
data = next(self.iter)
except StopIteration:
raise StopAsyncIteration
return data
async def set_data(self, data):
if self.setter is not None:
ret = self.setter(data)
elif self.app is not None and hasattr(self.app, 'set_%s' % self.name):
ret = getattr(self.app, 'set_%s' % self.name).__call__(data)
else:
print(self.name, 'not set', self.app.__class__.__name__)
return
if isinstance(ret, Awaitable):
await ret
async def __anext__(self):
if self.app is not None:
self.app.increaseProcess()
data = self.fetch_data()
await self.set_data(data)
if self.app is not None:
self.app.status['current_params'][self.name] = [
float(data), self.unit
]
            if self.length is not None:
                self.app.processToChange(100.0 / self.length)
return data
class SweepSet:
def __init__(self, app, sweeps):
self.app = app
self.sweeps = {}
for sweep in sweeps:
sweep.app = app
self.sweeps[sweep.name] = sweep
def __getitem__(self, name):
return self.sweeps[name]
class RcMap:
def __init__(self, rc={}, parent=None):
self.rc = {}
self.parent = parent
self.rc.update(rc)
def update(self, rc={}):
self.rc.update(rc)
def items(self):
return [(name, self.__getitem__(name)) for name in self.keys()]
def keys(self):
keys = set(self.rc.keys())
if self.parent is not None:
keys = keys.union(self.parent.keys())
return list(keys)
def get(self, name, default=None):
if name in self.keys():
return self.get_resource(name)
elif default is None:
raise KeyError('key %r not found in RcMap.' % name)
else:
return default
def get_resource(self, name):
name = self.rc.get(name, name)
if not isinstance(name, str):
return name
elif self.parent is not None:
return self.parent.get_resource(name)
else:
return open_resource(name)
def __getitem__(self, name):
return self.get(name)
def getAppClass(name='', version=None, id=None, **kwds):
appdata = _schema.getApplication(name, version, id, **kwds)
if appdata is None:
return None
mod = importlib.import_module(appdata.module.fullname)
app_cls = getattr(mod, name)
app_cls.__AppData__ = appdata
app_cls.__source__ = appdata.source
return app_cls
def make_app(name, version=None, parent=None):
app_cls = getAppClass(name, version)
return app_cls(parent=parent)
| [
"tokenize.open",
"importlib.import_module",
"asyncio.set_event_loop",
"numpy.array",
"asyncio.wait",
"concurrent.futures.ThreadPoolExecutor",
"asyncio.new_event_loop"
] | [((12425, 12473), 'importlib.import_module', 'importlib.import_module', (['appdata.module.fullname'], {}), '(appdata.module.fullname)\n', (12448, 12473), False, 'import importlib\n'), ((7456, 7476), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (7474, 7476), False, 'from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor\n'), ((7509, 7533), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (7531, 7533), False, 'import asyncio\n'), ((7546, 7574), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (7568, 7574), False, 'import asyncio\n'), ((7716, 7735), 'asyncio.wait', 'asyncio.wait', (['tasks'], {}), '(tasks)\n', (7728, 7735), False, 'import asyncio\n'), ((5860, 5871), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (5868, 5871), True, 'import numpy as np\n'), ((1411, 1441), 'tokenize.open', 'tokenize.open', (['module.__file__'], {}), '(module.__file__)\n', (1424, 1441), False, 'import tokenize\n')] |
"""Tools for saving and loading trajectories without requiring the `imitation`
or `tensorflow` packages to be installed."""
import gzip
from pickle import Unpickler
from typing import List, NamedTuple, Optional
import gym
import numpy as np
from magical.benchmarks import ( # comment to stop yapf touching import
DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS, update_magical_env_name)
class MAGICALTrajectory(NamedTuple):
"""Trajectory representation compatible with imitation's trajectory data
class."""
acts: np.ndarray
obs: dict
rews: np.ndarray
infos: Optional[List[dict]]
class _TrajRewriteUnpickler(Unpickler):
"""Custom unpickler that replaces references to `Trajectory` class in
`imitation` with custom trajectory class in this module."""
def find_class(self, module, name):
# print('find_class(%r, %r)' % (module, name))
if (module, name) == ('imitation.util.rollout', 'Trajectory') \
or (module, name) == ('milbench.baselines.saved_trajectories',
'MILBenchTrajectory'):
return MAGICALTrajectory
return super().find_class(module, name)
def load_demos(demo_paths, rewrite_traj_cls=True, verbose=False):
"""Use GzipFile & pickle to generate a sequence of demo dictionaries from a
sequence of file paths."""
n_demos = len(demo_paths)
for d_num, d_path in enumerate(demo_paths, start=1):
if verbose:
print(f"Loading '{d_path}' ({d_num}/{n_demos})")
with gzip.GzipFile(d_path, 'rb') as fp:
if rewrite_traj_cls:
unpickler = _TrajRewriteUnpickler(fp)
else:
unpickler = Unpickler(fp)
this_dict = unpickler.load()
yield this_dict
def splice_in_preproc_name(base_env_name, preproc_name):
"""Splice the name of a preprocessor into a magical benchmark name. e.g.
you might start with "MoveToCorner-Demo-v0" and insert "LoResStack" to end
up with "MoveToCorner-Demo-LoResStack-v0". Will do a sanity check to ensure
that the preprocessor actually exists."""
assert preproc_name in DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS, \
f"no preprocessor named '{preproc_name}', options are " \
f"{', '.join(DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS)}"
return update_magical_env_name(base_env_name, preproc=preproc_name)
class _MockDemoEnv(gym.Wrapper):
"""Mock Gym environment that just returns an observation"""
def __init__(self, orig_env, trajectory):
super().__init__(orig_env)
self._idx = 0
self._traj = trajectory
self._traj_length = len(self._traj.acts)
def reset(self):
self._idx = 0
return self._traj.obs[self._idx]
def step(self, action):
rew = self._traj.rews[self._idx]
info = self._traj.infos[self._idx] or {}
info['_mock_demo_act'] = self._traj.acts[self._idx]
self._idx += 1
# ignore action, return next obs
obs = self._traj.obs[self._idx]
# it's okay if we run one over the end
done = self._idx >= self._traj_length
return obs, rew, done, info
def preprocess_demos_with_wrapper(trajectories,
orig_env_name,
preproc_name=None,
wrapper=None):
"""Preprocess trajectories using one of the built-in environment
preprocessing pipelines.
Args:
trajectories ([Trajectory]): list of trajectories to process.
orig_env_name (str): name of original environment where trajectories
were collected. This function will instantiate a temporary instance
of that environment to get access to an observation space and other
metadata.
preproc_name (str or None): name of preprocessor to apply. Should be
available in
`magical.benchmarks.DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS`.
wrapper (callable or None): wrapper constructor. Should take a
constructed Gym environment and return a wrapped Gym-like
environment. Either `preproc_name` or `wrapper` must be specified,
but both cannot be specified at once.
Returns:
rv_trajectories ([Trajectory]): equivalent list of trajectories that
have each been preprocessed with the given wrapper."""
if preproc_name is not None:
assert wrapper is None
wrapper = DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS[preproc_name]
else:
assert wrapper is not None
orig_env = gym.make(orig_env_name)
wrapped_constructor = wrapper(_MockDemoEnv)
rv_trajectories = []
for traj in trajectories:
mock_env = wrapped_constructor(orig_env=orig_env, trajectory=traj)
obs = mock_env.reset()
values = {
'obs': [],
'acts': [],
'rews': [],
'infos': [],
}
values['obs'].append(obs)
done = False
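        # Replay the trajectory through the wrapper: the action passed to
        # step() is ignored and the recorded action comes back via `info`.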
while not done:
obs, rew, done, info = mock_env.step(None)
acts = info['_mock_demo_act']
del info['_mock_demo_act']
values['obs'].append(obs)
values['acts'].append(acts)
values['rews'].append(rew)
values['infos'].append(info)
# turn obs, acts, and rews into numpy arrays
stack_values = {
k: np.stack(vs, axis=0)
for k, vs in values.items() if k in ['obs', 'acts', 'rews']
}
# keep infos as a list (hard to get at elements otherwise)
stack_values['infos'] = values.get('infos')
# use type(traj) to preserve either MAGICAL trajectory type or custom
# type
new_traj = type(traj)(**stack_values)
rv_trajectories.append(new_traj)
return rv_trajectories
| [
"numpy.stack",
"gym.make",
"magical.benchmarks.update_magical_env_name",
"gzip.GzipFile",
"pickle.Unpickler"
] | [((2315, 2375), 'magical.benchmarks.update_magical_env_name', 'update_magical_env_name', (['base_env_name'], {'preproc': 'preproc_name'}), '(base_env_name, preproc=preproc_name)\n', (2338, 2375), False, 'from magical.benchmarks import DEFAULT_PREPROC_ENTRY_POINT_WRAPPERS, update_magical_env_name\n'), ((4589, 4612), 'gym.make', 'gym.make', (['orig_env_name'], {}), '(orig_env_name)\n', (4597, 4612), False, 'import gym\n'), ((1522, 1549), 'gzip.GzipFile', 'gzip.GzipFile', (['d_path', '"""rb"""'], {}), "(d_path, 'rb')\n", (1535, 1549), False, 'import gzip\n'), ((5413, 5433), 'numpy.stack', 'np.stack', (['vs'], {'axis': '(0)'}), '(vs, axis=0)\n', (5421, 5433), True, 'import numpy as np\n'), ((1690, 1703), 'pickle.Unpickler', 'Unpickler', (['fp'], {}), '(fp)\n', (1699, 1703), False, 'from pickle import Unpickler\n')] |
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import numpy as np
import random
import pandas as pd
from google.cloud import storage
import io
#import tensorflow as tf
class Dataset:
def __init__(self, data_files_path, sampling_size, test_size):
        self.data_files_path = data_files_path # path to dataset (local or GCS)
        self.sampling_size = sampling_size # fraction of the total dataset to use, e.g. 0.1 means 10%
        self.test_size = test_size # fraction of the dataset to hold out as the test set, e.g. 0.1 means 10%
self._get_data()
def read_csv_file(self, file_name):
bucket_name = 'crypto-airlock-199321-ml'
directory_path = 'RawPacketClassifier/input/'
client = storage.Client()
bucket = client.get_bucket(bucket_name)
blob = storage.Blob(directory_path+file_name, bucket) # get blob from GCS bucket
temp = blob.download_as_string().decode("utf-8") # decoding from byte format
        #n = temp.count('\r') # count number of packets
#samples = random.sample(range(n), n - sampling_size)
return pd.read_csv(io.StringIO(temp), delimiter=',', dtype=np.float32, header=None, skiprows=None)
def _get_data(self):
# 1.Read dataset files
# Tor: 11,743,657 nonTor: 9,342,053 Total: 21,085,710 (4.76 GB, 75 files)
print('Reading Tor csv files...')
print('Reading Audio related csv files...')
tor_Audio = np.concatenate((self.read_csv_file(r'AUDIO_spotifygateway.csv'), self.read_csv_file(r'AUDIO_tor_spotify.csv'), self.read_csv_file(r'AUDIO_tor_spotify2.csv')), axis=0)
tor_Audio = shuffle(tor_Audio)
print(tor_Audio.shape)
print('Reading Browsing related csv files...')
tor_Browsing = np.concatenate((self.read_csv_file(r'BROWSING_gate_SSL_Browsing.csv'), self.read_csv_file(r'BROWSING_ssl_browsing_gateway.csv'), self.read_csv_file(r'BROWSING_tor_browsing_ara.csv'), self.read_csv_file(r'BROWSING_tor_browsing_ger.csv'), self.read_csv_file(r'BROWSING_tor_browsing_mam.csv'), self.read_csv_file(r'BROWSING_tor_browsing_mam2.csv')), axis=0)
tor_Browsing = shuffle(tor_Browsing)
print(tor_Browsing.shape)
print('Reading Chat related csv files...')
tor_Chat = np.concatenate((self.read_csv_file(r'CHAT_aimchatgateway.csv'), self.read_csv_file(r'CHAT_facebookchatgateway.csv'), self.read_csv_file(r'CHAT_gate_AIM_chat.csv'), self.read_csv_file(r'CHAT_gate_facebook_chat.csv'), self.read_csv_file(r'CHAT_gate_hangout_chat.csv'), self.read_csv_file(r'CHAT_gate_ICQ_chat.csv'), self.read_csv_file(r'CHAT_gate_skype_chat.csv'), self.read_csv_file(r'CHAT_hangoutschatgateway.csv'), self.read_csv_file(r'CHAT_icqchatgateway.csv'), self.read_csv_file(r'CHAT_skypechatgateway.csv')), axis=0)
tor_Chat = shuffle(tor_Chat)
print(tor_Chat.shape)
print('Reading File transfer related csv files...')
tor_File = np.concatenate((self.read_csv_file(r'FILE-TRANSFER_gate_FTP_transfer.csv'), self.read_csv_file(r'FILE-TRANSFER_gate_SFTP_filetransfer.csv'), self.read_csv_file(r'FILE-TRANSFER_tor_skype_transfer.csv')), axis=0)
tor_File = shuffle(tor_File)
print(tor_File.shape)
print('Reading Mail related csv files...')
tor_Email = np.concatenate((self.read_csv_file(r'MAIL_gate_Email_IMAP_filetransfer.csv'), self.read_csv_file(r'MAIL_gate_POP_filetransfer.csv'), self.read_csv_file(r'MAIL_Gateway_Thunderbird_Imap.csv'), self.read_csv_file(r'MAIL_Gateway_Thunderbird_POP.csv')), axis=0)
tor_Email = shuffle(tor_Email)
print(tor_Email.shape)
print('Reading P2P related csv files...')
tor_P2P = np.concatenate((self.read_csv_file(r'P2P_tor_p2p_multipleSpeed.csv'), self.read_csv_file(r'P2P_tor_p2p_vuze.csv')), axis=0)
tor_P2P = shuffle(tor_P2P)
print(tor_P2P.shape)
print('Reading Video related csv files...')
tor_Video = np.concatenate((self.read_csv_file(r'VIDEO_Vimeo_Gateway.csv'), self.read_csv_file(r'VIDEO_Youtube_Flash_Gateway.csv'), self.read_csv_file(r'VIDEO_Youtube_HTML5_Gateway.csv')), axis=0)
tor_Video = shuffle(tor_Video)
print(tor_Video.shape)
print('Reading VoIP related csv files...')
tor_VoIP = np.concatenate((self.read_csv_file(r'VOIP_Facebook_Voice_Gateway.csv'), self.read_csv_file(r'VOIP_gate_facebook_Audio.csv'), self.read_csv_file(r'VOIP_gate_hangout_audio.csv'), self.read_csv_file(r'VOIP_gate_Skype_Audio.csv'), self.read_csv_file(r'VOIP_Hangouts_voice_Gateway.csv'), self.read_csv_file(r'VOIP_Skype_Voice_Gateway.csv')), axis=0)
tor_VoIP = shuffle(tor_VoIP)
print(tor_VoIP.shape)
print(tor_Audio.shape[0] + tor_Browsing.shape[0] + tor_Chat.shape[0] + tor_File.shape[0] + tor_Email.shape[0] + tor_P2P.shape[0] + tor_Video.shape[0] + tor_VoIP.shape[0])
print('Reading nonTor csv files...')
print('Reading Audio related csv files...')
nonTor_Audio = np.concatenate((self.read_csv_file(r'facebook_Audio.csv'), self.read_csv_file(r'Hangout_Audio.csv'), self.read_csv_file(r'Skype_Audio.csv'), self.read_csv_file(r'spotify.csv'), self.read_csv_file(r'spotify2.csv'), self.read_csv_file(r'spotifyAndrew.csv')), axis=0)
nonTor_Audio = shuffle(nonTor_Audio)
print(nonTor_Audio.shape)
print('Reading Browsing related csv files...')
nonTor_Browsing = np.concatenate((self.read_csv_file(r'browsing.csv'), self.read_csv_file(r'browsing_ara.csv'), self.read_csv_file(r'browsing_ara2.csv'), self.read_csv_file(r'browsing_ger.csv'), self.read_csv_file(r'browsing2.csv'), self.read_csv_file(r'ssl.csv'), self.read_csv_file(r'SSL_Browsing.csv')), axis=0)
nonTor_Browsing = shuffle(nonTor_Browsing)
print(nonTor_Browsing.shape)
print('Reading Chat related csv files...')
nonTor_Chat = np.concatenate((self.read_csv_file(r'AIM_Chat.csv'), self.read_csv_file(r'aimchat.csv'), self.read_csv_file(r'facebook_chat.csv'), self.read_csv_file(r'facebookchat.csv'), self.read_csv_file(r'hangout_chat.csv'), self.read_csv_file(r'hangoutschat.csv'), self.read_csv_file(r'ICQ_Chat.csv'), self.read_csv_file(r'icqchat.csv'), self.read_csv_file(r'skype_chat.csv'), self.read_csv_file(r'skypechat.csv')), axis=0)
nonTor_Chat = shuffle(nonTor_Chat)
print(nonTor_Chat.shape)
print('Reading File transfer related csv files...')
nonTor_File = np.concatenate((self.read_csv_file(r'FTP_filetransfer.csv'), self.read_csv_file(r'SFTP_filetransfer.csv'), self.read_csv_file(r'skype_transfer.csv')), axis=0)
nonTor_File = shuffle(nonTor_File)
print(nonTor_File.shape)
print('Reading Mail related csv files...')
nonTor_Email = np.concatenate((self.read_csv_file(r'Email_IMAP_filetransfer.csv'), self.read_csv_file(r'POP_filetransfer.csv'), self.read_csv_file(r'Workstation_Thunderbird_Imap.csv'), self.read_csv_file(r'Workstation_Thunderbird_POP.csv')), axis=0)
nonTor_Email = shuffle(nonTor_Email)
print(nonTor_Email.shape)
print('Reading P2P related csv files...')
nonTor_P2P = np.concatenate((self.read_csv_file(r'p2p_multipleSpeed.csv'), self.read_csv_file(r'p2p_vuze.csv') ), axis=0)
nonTor_P2P = shuffle(nonTor_P2P)
print(nonTor_P2P.shape)
print('Reading Video related csv files...')
nonTor_Video = np.concatenate((self.read_csv_file(r'Vimeo_Workstation.csv'), self.read_csv_file(r'Youtube_Flash_Workstation.csv'), self.read_csv_file(r'Youtube_HTML5_Workstation.csv')), axis=0)
nonTor_Video = shuffle(nonTor_Video)
print(nonTor_Video.shape)
print('Reading VoIP related csv files...')
nonTor_VoIP = np.concatenate((self.read_csv_file(r'Facebook_Voice_Workstation.csv'), self.read_csv_file(r'Hangouts_voice_Workstation.csv'), self.read_csv_file(r'Skype_Voice_Workstation.csv')), axis=0)
nonTor_VoIP = shuffle(nonTor_VoIP)
print(nonTor_VoIP.shape)
print(nonTor_Audio.shape[0] + nonTor_Browsing.shape[0] + nonTor_Chat.shape[0] + nonTor_File.shape[0] + nonTor_Email.shape[0] + nonTor_P2P.shape[0] + nonTor_Video.shape[0] + nonTor_VoIP.shape[0])
# 2.Split datasets into the training and testing sets (Shuffle is True by default)
print('Processing Tor files... (train/test split + object concatenation)')
'''tor_Audio[:, [-1]] = 1
tor_Browsing[:, [-1]] = 1
tor_Chat[:, [-1]] = 1
tor_File[:, [-1]] = 1
tor_Email[:, [-1]] = 1
tor_P2P[:, [-1]] = 1
tor_Video[:, [-1]] = 1
tor_VoIP[:, [-1]] = 1'''
tor_Audio_train, tor_Audio_test = train_test_split(tor_Audio[0:20000, :], test_size=self.test_size)
tor_Browsing_train, tor_Browsing_test = train_test_split(tor_Browsing[0:20000, :], test_size=self.test_size)
tor_Chat_train, tor_Chat_test = train_test_split(tor_Chat[0:20000, :], test_size=self.test_size)
tor_File_train, tor_File_test = train_test_split(tor_File[0:20000, :], test_size=self.test_size)
tor_Email_train, tor_Email_test = train_test_split(tor_Email[0:20000, :], test_size=self.test_size)
tor_P2P_train, tor_P2P_test = train_test_split(tor_P2P[0:20000, :], test_size=self.test_size)
tor_Video_train, tor_Video_test = train_test_split(tor_Video[0:20000, :], test_size=self.test_size)
tor_VoIP_train, tor_VoIP_test = train_test_split(tor_VoIP[0:20000, :], test_size=self.test_size)
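        # Each class is capped at its first 20,000 rows before splitting,
        # presumably to keep the classes roughly balanced.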
print('Processing nonTor files... (train/test split + object concatenation)')
'''nonTor_Audio[:, [-1]] = 2
nonTor_Browsing[:, [-1]] = 2
nonTor_Chat[:, [-1]] = 2
nonTor_File[:, [-1]] = 2
nonTor_Email[:, [-1]] = 2
nonTor_P2P[:, [-1]] = 2
nonTor_Video[:, [-1]] = 2
nonTor_VoIP[:, [-1]] = 2'''
nonTor_Audio_train, nonTor_Audio_test = train_test_split(nonTor_Audio[0:20000, :], test_size=self.test_size)
nonTor_Browsing_train, nonTor_Browsing_test = train_test_split(nonTor_Browsing[0:20000, :], test_size=self.test_size)
nonTor_Chat_train, nonTor_Chat_test = train_test_split(nonTor_Chat[0:20000, :], test_size=self.test_size)
nonTor_File_train, nonTor_File_test = train_test_split(nonTor_File[0:20000, :], test_size=self.test_size)
nonTor_Email_train, nonTor_Email_test = train_test_split(nonTor_Email[0:20000, :], test_size=self.test_size)
nonTor_P2P_train, nonTor_P2P_test = train_test_split(nonTor_P2P[0:20000, :], test_size=self.test_size)
nonTor_Video_train, nonTor_Video_test = train_test_split(nonTor_Video[0:20000, :], test_size=self.test_size)
nonTor_VoIP_train, nonTor_VoIP_test = train_test_split(nonTor_VoIP[0:20000, :], test_size=self.test_size)
'''nonTor_Audio_train, nonTor_Audio_test = train_test_split(nonTor_Audio[0:721, :], test_size=self.test_size)
nonTor_Browsing_train, nonTor_Browsing_test = train_test_split(nonTor_Browsing[0:1604, :], test_size=self.test_size)
nonTor_Chat_train, nonTor_Chat_test = train_test_split(nonTor_Chat[0:323, :], test_size=self.test_size)
nonTor_File_train, nonTor_File_test = train_test_split(nonTor_File[0:864, :], test_size=self.test_size)
nonTor_Email_train, nonTor_Email_test = train_test_split(nonTor_Email[0:282, :], test_size=self.test_size)
nonTor_P2P_train, nonTor_P2P_test = train_test_split(nonTor_P2P[0:1085, :], test_size=self.test_size)
nonTor_Video_train, nonTor_Video_test = train_test_split(nonTor_Video[0:874, :], test_size=self.test_size)
nonTor_VoIP_train, nonTor_VoIP_test = train_test_split(nonTor_VoIP[0:2291, :], test_size=self.test_size)'''
# 3.Merge training and testing sets respectively.
print('Merge training and testing sets respectively...')
concatenated_train_set = np.concatenate((nonTor_Audio_train, nonTor_Browsing_train, nonTor_Chat_train, nonTor_File_train, nonTor_Email_train, nonTor_P2P_train, nonTor_Video_train, nonTor_VoIP_train, tor_Audio_train, tor_Browsing_train, tor_Chat_train, tor_File_train, tor_Email_train, tor_P2P_train, tor_Video_train, tor_VoIP_train), axis=0)
concatenated_test_set = np.concatenate((nonTor_Audio_test, nonTor_Browsing_test, nonTor_Chat_test, nonTor_File_test, nonTor_Email_test, nonTor_P2P_test, nonTor_Video_test, nonTor_VoIP_test, tor_Audio_test, tor_Browsing_test, tor_Chat_test, tor_File_test, tor_Email_test, tor_P2P_test, tor_Video_test, tor_VoIP_test), axis=0)
'''concatenated_train_set = np.concatenate((nonTor_Audio_train, nonTor_Browsing_train, nonTor_Chat_train, nonTor_File_train, nonTor_Email_train, nonTor_P2P_train, nonTor_Video_train, nonTor_VoIP_train), axis=0)
concatenated_test_set = np.concatenate((nonTor_Audio_test, nonTor_Browsing_test, nonTor_Chat_test, nonTor_File_test, nonTor_Email_test, nonTor_P2P_test, nonTor_Video_test, nonTor_VoIP_test), axis=0)'''
concatenated_train_set = shuffle(concatenated_train_set)
concatenated_test_set = shuffle(concatenated_test_set)
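        # The last column holds the class label; everything before it is
        # the feature vector.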
self.x_train = concatenated_train_set[:, 0:-1]
self.y_train = concatenated_train_set[:, [-1]] # 0 ~ 15
self.train_length = self.x_train.shape[0]
print('Number of data for train: '+repr(self.train_length))
self.x_test = concatenated_test_set[:, 0:-1]
self.y_test = concatenated_test_set[:, [-1]]
self.test_length = self.x_test.shape[0]
print('Number of data for test: '+repr(self.test_length))
self.nonTor_Audio_x_test = nonTor_Audio_test[:, 0:-1]
self.nonTor_Audio_y_test = nonTor_Audio_test[:, [-1]]
self.nonTor_Browsing_x_test = nonTor_Browsing_test[:, 0:-1]
self.nonTor_Browsing_y_test = nonTor_Browsing_test[:, [-1]]
self.nonTor_Chat_x_test = nonTor_Chat_test[:, 0:-1]
self.nonTor_Chat_y_test = nonTor_Chat_test[:, [-1]]
self.nonTor_File_x_test = nonTor_File_test[:, 0:-1]
self.nonTor_File_y_test = nonTor_File_test[:, [-1]]
self.nonTor_Email_x_test = nonTor_Email_test[:, 0:-1]
self.nonTor_Email_y_test = nonTor_Email_test[:, [-1]]
self.nonTor_P2P_x_test = nonTor_P2P_test[:, 0:-1]
self.nonTor_P2P_y_test = nonTor_P2P_test[:, [-1]]
self.nonTor_Video_x_test = nonTor_Video_test[:, 0:-1]
self.nonTor_Video_y_test = nonTor_Video_test[:, [-1]]
self.nonTor_VoIP_x_test = nonTor_VoIP_test[:, 0:-1]
self.nonTor_VoIP_y_test = nonTor_VoIP_test[:, [-1]]
self.tor_Audio_x_test = tor_Audio_test[:, 0:-1]
self.tor_Audio_y_test = tor_Audio_test[:, [-1]]
self.tor_Browsing_x_test = tor_Browsing_test[:, 0:-1]
self.tor_Browsing_y_test = tor_Browsing_test[:, [-1]]
self.tor_Chat_x_test = tor_Chat_test[:, 0:-1]
self.tor_Chat_y_test = tor_Chat_test[:, [-1]]
self.tor_File_x_test = tor_File_test[:, 0:-1]
self.tor_File_y_test = tor_File_test[:, [-1]]
self.tor_Email_x_test = tor_Email_test[:, 0:-1]
self.tor_Email_y_test = tor_Email_test[:, [-1]]
self.tor_P2P_x_test = tor_P2P_test[:, 0:-1]
self.tor_P2P_y_test = tor_P2P_test[:, [-1]]
self.tor_Video_x_test = tor_Video_test[:, 0:-1]
self.tor_Video_y_test = tor_Video_test[:, [-1]]
self.tor_VoIP_x_test = tor_VoIP_test[:, 0:-1]
self.tor_VoIP_y_test = tor_VoIP_test[:, [-1]]
print('Organizing dataset done...')
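        # A minimal sketch of the per-class pipeline above on hypothetical toy
        # arrays (names and shapes are illustrative, not from the real dataset):
        #   import numpy as np
        #   from sklearn.model_selection import train_test_split
        #   from sklearn.utils import shuffle
        #   a = np.random.rand(100, 5)   # one traffic class, label in last column
        #   b = np.random.rand(100, 5)   # another class
        #   a_tr, a_te = train_test_split(a, test_size=0.2)
        #   b_tr, b_te = train_test_split(b, test_size=0.2)
        #   train = shuffle(np.concatenate((a_tr, b_tr), axis=0))
        #   x_train, y_train = train[:, 0:-1], train[:, [-1]]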
| [
"io.StringIO",
"sklearn.model_selection.train_test_split",
"google.cloud.storage.Client",
"sklearn.utils.shuffle",
"numpy.concatenate",
"google.cloud.storage.Blob"
] | [((809, 825), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (823, 825), False, 'from google.cloud import storage\n'), ((889, 937), 'google.cloud.storage.Blob', 'storage.Blob', (['(directory_path + file_name)', 'bucket'], {}), '(directory_path + file_name, bucket)\n', (901, 937), False, 'from google.cloud import storage\n'), ((1734, 1752), 'sklearn.utils.shuffle', 'shuffle', (['tor_Audio'], {}), '(tor_Audio)\n', (1741, 1752), False, 'from sklearn.utils import shuffle\n'), ((2249, 2270), 'sklearn.utils.shuffle', 'shuffle', (['tor_Browsing'], {}), '(tor_Browsing)\n', (2256, 2270), False, 'from sklearn.utils import shuffle\n'), ((2934, 2951), 'sklearn.utils.shuffle', 'shuffle', (['tor_Chat'], {}), '(tor_Chat)\n', (2941, 2951), False, 'from sklearn.utils import shuffle\n'), ((3292, 3309), 'sklearn.utils.shuffle', 'shuffle', (['tor_File'], {}), '(tor_File)\n', (3299, 3309), False, 'from sklearn.utils import shuffle\n'), ((3689, 3707), 'sklearn.utils.shuffle', 'shuffle', (['tor_Email'], {}), '(tor_Email)\n', (3696, 3707), False, 'from sklearn.utils import shuffle\n'), ((3950, 3966), 'sklearn.utils.shuffle', 'shuffle', (['tor_P2P'], {}), '(tor_P2P)\n', (3957, 3966), False, 'from sklearn.utils import shuffle\n'), ((4274, 4292), 'sklearn.utils.shuffle', 'shuffle', (['tor_Video'], {}), '(tor_Video)\n', (4281, 4292), False, 'from sklearn.utils import shuffle\n'), ((4759, 4776), 'sklearn.utils.shuffle', 'shuffle', (['tor_VoIP'], {}), '(tor_VoIP)\n', (4766, 4776), False, 'from sklearn.utils import shuffle\n'), ((5404, 5425), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_Audio'], {}), '(nonTor_Audio)\n', (5411, 5425), False, 'from sklearn.utils import shuffle\n'), ((5865, 5889), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_Browsing'], {}), '(nonTor_Browsing)\n', (5872, 5889), False, 'from sklearn.utils import shuffle\n'), ((6436, 6456), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_Chat'], {}), '(nonTor_Chat)\n', (6443, 6456), False, 'from sklearn.utils import shuffle\n'), ((6754, 6774), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_File'], {}), '(nonTor_File)\n', (6761, 6774), False, 'from sklearn.utils import shuffle\n'), ((7141, 7162), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_Email'], {}), '(nonTor_Email)\n', (7148, 7162), False, 'from sklearn.utils import shuffle\n'), ((7399, 7418), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_P2P'], {}), '(nonTor_P2P)\n', (7406, 7418), False, 'from sklearn.utils import shuffle\n'), ((7729, 7750), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_Video'], {}), '(nonTor_Video)\n', (7736, 7750), False, 'from sklearn.utils import shuffle\n'), ((8068, 8088), 'sklearn.utils.shuffle', 'shuffle', (['nonTor_VoIP'], {}), '(nonTor_VoIP)\n', (8075, 8088), False, 'from sklearn.utils import shuffle\n'), ((8804, 8869), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_Audio[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_Audio[0:20000, :], test_size=self.test_size)\n', (8820, 8869), False, 'from sklearn.model_selection import train_test_split\n'), ((8918, 8986), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_Browsing[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_Browsing[0:20000, :], test_size=self.test_size)\n', (8934, 8986), False, 'from sklearn.model_selection import train_test_split\n'), ((9027, 9091), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_Chat[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_Chat[0:20000, :], test_size=self.test_size)\n', (9043, 9091), False, 'from sklearn.model_selection import train_test_split\n'), ((9132, 9196), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_File[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_File[0:20000, :], test_size=self.test_size)\n', (9148, 9196), False, 'from sklearn.model_selection import train_test_split\n'), ((9239, 9304), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_Email[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_Email[0:20000, :], test_size=self.test_size)\n', (9255, 9304), False, 'from sklearn.model_selection import train_test_split\n'), ((9343, 9406), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_P2P[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_P2P[0:20000, :], test_size=self.test_size)\n', (9359, 9406), False, 'from sklearn.model_selection import train_test_split\n'), ((9449, 9514), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_Video[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_Video[0:20000, :], test_size=self.test_size)\n', (9465, 9514), False, 'from sklearn.model_selection import train_test_split\n'), ((9555, 9619), 'sklearn.model_selection.train_test_split', 'train_test_split', (['tor_VoIP[0:20000, :]'], {'test_size': 'self.test_size'}), '(tor_VoIP[0:20000, :], test_size=self.test_size)\n', (9571, 9619), False, 'from sklearn.model_selection import train_test_split\n'), ((10033, 10101), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_Audio[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_Audio[0:20000, :], test_size=self.test_size)\n', (10049, 10101), False, 'from sklearn.model_selection import train_test_split\n'), ((10156, 10227), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_Browsing[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_Browsing[0:20000, :], test_size=self.test_size)\n', (10172, 10227), False, 'from sklearn.model_selection import train_test_split\n'), ((10274, 10341), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_Chat[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_Chat[0:20000, :], test_size=self.test_size)\n', (10290, 10341), False, 'from sklearn.model_selection import train_test_split\n'), ((10388, 10455), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_File[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_File[0:20000, :], test_size=self.test_size)\n', (10404, 10455), False, 'from sklearn.model_selection import train_test_split\n'), ((10504, 10572), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_Email[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_Email[0:20000, :], test_size=self.test_size)\n', (10520, 10572), False, 'from sklearn.model_selection import train_test_split\n'), ((10617, 10683), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_P2P[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_P2P[0:20000, :], test_size=self.test_size)\n', (10633, 10683), False, 'from sklearn.model_selection import train_test_split\n'), ((10732, 10800), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_Video[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_Video[0:20000, :], test_size=self.test_size)\n', (10748, 10800), False, 'from sklearn.model_selection import train_test_split\n'), ((10847, 10914), 'sklearn.model_selection.train_test_split', 'train_test_split', (['nonTor_VoIP[0:20000, :]'], {'test_size': 'self.test_size'}), '(nonTor_VoIP[0:20000, :], test_size=self.test_size)\n', (10863, 10914), False, 'from sklearn.model_selection import train_test_split\n'), ((11996, 12328), 'numpy.concatenate', 'np.concatenate', (['(nonTor_Audio_train, nonTor_Browsing_train, nonTor_Chat_train,\n    nonTor_File_train, nonTor_Email_train, nonTor_P2P_train,\n    nonTor_Video_train, nonTor_VoIP_train, tor_Audio_train,\n    tor_Browsing_train, tor_Chat_train, tor_File_train, tor_Email_train,\n    tor_P2P_train, tor_Video_train, tor_VoIP_train)'], {'axis': '(0)'}), '((nonTor_Audio_train, nonTor_Browsing_train,\n    nonTor_Chat_train, nonTor_File_train, nonTor_Email_train,\n    nonTor_P2P_train, nonTor_Video_train, nonTor_VoIP_train,\n    tor_Audio_train, tor_Browsing_train, tor_Chat_train, tor_File_train,\n    tor_Email_train, tor_P2P_train, tor_Video_train, tor_VoIP_train), axis=0)\n', (12010, 12328), True, 'import numpy as np\n'), ((12345, 12661), 'numpy.concatenate', 'np.concatenate', (['(nonTor_Audio_test, nonTor_Browsing_test, nonTor_Chat_test,\n    nonTor_File_test, nonTor_Email_test, nonTor_P2P_test, nonTor_Video_test,\n    nonTor_VoIP_test, tor_Audio_test, tor_Browsing_test, tor_Chat_test,\n    tor_File_test, tor_Email_test, tor_P2P_test, tor_Video_test, tor_VoIP_test)'], {'axis': '(0)'}), '((nonTor_Audio_test, nonTor_Browsing_test, nonTor_Chat_test,\n    nonTor_File_test, nonTor_Email_test, nonTor_P2P_test, nonTor_Video_test,\n    nonTor_VoIP_test, tor_Audio_test, tor_Browsing_test, tor_Chat_test,\n    tor_File_test, tor_Email_test, tor_P2P_test, tor_Video_test,\n    tor_VoIP_test), axis=0)\n', (12359, 12661), True, 'import numpy as np\n'), ((13118, 13149), 'sklearn.utils.shuffle', 'shuffle', (['concatenated_train_set'], {}), '(concatenated_train_set)\n', (13125, 13149), False, 'from sklearn.utils import shuffle\n'), ((13182, 13212), 'sklearn.utils.shuffle', 'shuffle', (['concatenated_test_set'], {}), '(concatenated_test_set)\n', (13189, 13212), False, 'from sklearn.utils import shuffle\n'), ((1212, 1229), 'io.StringIO', 'io.StringIO', (['temp'], {}), '(temp)\n', (1223, 1229), False, 'import io\n')]
from numpy.random import seed
from numpy.random import randn
from backend.stat.mean_tests import run_paired_t_test, run_wilcoxon_signed_rank_test
if __name__ == "__main__":
seed(1)
sample1 = 5 * randn(100) + 50
sample2 = 5 * randn(100) + 49
print()
print("Paired T-Test")
stat, p = run_paired_t_test(sample1, sample2, alpha=0.05, print_results=True)
print()
print("Wilcoxon Signed Rank Test")
stat, p = run_wilcoxon_signed_rank_test(sample1, sample2, alpha=0.05, print_results=True)
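    # For reference, the wrappers above presumably delegate to scipy; a
    # hand-rolled equivalent (assuming scipy is installed) would be:
    #   from scipy.stats import ttest_rel, wilcoxon
    #   stat, p = ttest_rel(sample1, sample2)   # paired t-test
    #   stat, p = wilcoxon(sample1, sample2)    # Wilcoxon signed-rank test
    #   print('same distribution' if p > 0.05 else 'different distribution')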
| [
"backend.stat.mean_tests.run_wilcoxon_signed_rank_test",
"numpy.random.seed",
"backend.stat.mean_tests.run_paired_t_test",
"numpy.random.randn"
] | [((178, 185), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (182, 185), False, 'from numpy.random import seed\n'), ((308, 375), 'backend.stat.mean_tests.run_paired_t_test', 'run_paired_t_test', (['sample1', 'sample2'], {'alpha': '(0.05)', 'print_results': '(True)'}), '(sample1, sample2, alpha=0.05, print_results=True)\n', (325, 375), False, 'from backend.stat.mean_tests import run_paired_t_test, run_wilcoxon_signed_rank_test\n'), ((442, 521), 'backend.stat.mean_tests.run_wilcoxon_signed_rank_test', 'run_wilcoxon_signed_rank_test', (['sample1', 'sample2'], {'alpha': '(0.05)', 'print_results': '(True)'}), '(sample1, sample2, alpha=0.05, print_results=True)\n', (471, 521), False, 'from backend.stat.mean_tests import run_paired_t_test, run_wilcoxon_signed_rank_test\n'), ((204, 214), 'numpy.random.randn', 'randn', (['(100)'], {}), '(100)\n', (209, 214), False, 'from numpy.random import randn\n'), ((238, 248), 'numpy.random.randn', 'randn', (['(100)'], {}), '(100)\n', (243, 248), False, 'from numpy.random import randn\n')] |
'''
Copyright (C) 2020 Shandong University
This program is licensed under the GNU General Public License 3.0
(https://www.gnu.org/licenses/gpl-3.0.html).
Any derivative work obtained under this license must be licensed
under the GNU General Public License as published by the Free
Software Foundation, either Version 3 of the License, or (at your option)
any later version, if this derivative work is distributed to a third party.
The copyright for the program is owned by Shandong University.
For commercial projects that require the ability to distribute
the code of this program as part of a program that cannot be
distributed under the GNU General Public License, please contact
<EMAIL>
to purchase a commercial license.
Friendly reminder:
Resist bad code and refuse to misuse code.
Protect yourself and beware of scams.
Programming in moderation benefits the mind; programming addiction harms the body.
Arrange your time sensibly and enjoy a healthy life!
'''
import numpy as np
"""
应该包括
- 计算占用显存模块
- 节省显存的模块(如混合精度计算之类)
- 当前显存占用情况
- 尽可能有效的清除缓存/自动清楚缓存?
- 自动并行?
"""
# Function that monitors a model's GPU memory usage
# model: the model to inspect
# input: a Tensor of the shape the model actually receives
# type_size: bytes per element; defaults to 4, i.e. float32
# Adapted from https://blog.csdn.net/qq_28660035/article/details/80688427
# TODO: verify correctness
import os
import torch.nn as nn  # needed for the isinstance(m, nn.ReLU) check below
def modelsize(model, input, type_size=4):
para = sum([np.prod(list(p.size())) for p in model.parameters()])
print('Model {} : params: {:4f}M'.format(model._get_name(), para * type_size / 1000 / 1000))
input_ = input.clone()
input_.requires_grad_(requires_grad=False)
mods = list(model.modules())
out_sizes = []
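    # Forward the input through each module in registration order, recording
    # the size of every intermediate activation. Note: this walk assumes a
    # strictly sequential architecture; branching models would be mis-measured.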
for i in range(1, len(mods)):
m = mods[i]
if isinstance(m, nn.ReLU):
if m.inplace:
continue
out = m(input_)
out_sizes.append(np.array(out.size()))
input_ = out
total_nums = 0
for i in range(len(out_sizes)):
s = out_sizes[i]
nums = np.prod(np.array(s))
total_nums += nums
    print('Model {} : intermediate variables: {:3f} M (without backward)'
          .format(model._get_name(), total_nums * type_size / 1000 / 1000))
    print('Model {} : intermediate variables: {:3f} M (with backward)'
          .format(model._get_name(), total_nums * type_size * 2 / 1000 / 1000))
def nvidia():
nowtime = os.popen("nvidia-smi")
print(nowtime.read()) | [
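# Hypothetical usage sketch (assumes torch is available and, for nvidia(),
# that the `nvidia-smi` binary exists on the PATH):
#   import torch
#   net = torch.nn.Sequential(torch.nn.Linear(128, 64),
#                             torch.nn.ReLU(),
#                             torch.nn.Linear(64, 10))
#   dummy = torch.randn(1, 128)
#   modelsize(net, dummy)  # rough parameter / activation memory estimate
#   nvidia()               # dump the current `nvidia-smi` output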
"os.popen",
"numpy.array"
] | [((2288, 2310), 'os.popen', 'os.popen', (['"""nvidia-smi"""'], {}), "('nvidia-smi')\n", (2296, 2310), False, 'import os\n'), ((1917, 1928), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (1925, 1928), True, 'import numpy as np\n')] |
import typing
import numpy as np
from xbbo.surrogate.base import SurrogateModel
# from xbbo.surrogate.transfer.rf_with_instances import RandomForestWithInstances
from xbbo.surrogate.prf import RandomForestWithInstances
# from xbbo.utils.util import get_types
class RandomForestEnsemble(SurrogateModel):
    def __init__(self, cs, all_budgets, weight_list, fusion_method, types=None, bounds=None, rng=np.random.RandomState(42), **kwargs):
# if types is None or bounds is None:
# types, bounds = get_types(cs)
super().__init__(types=types, bounds=bounds,**kwargs)
# self.s_max = s_max
# self.eta = eta
self.fusion = fusion_method
self.surrogate_weight = dict()
self.surrogate_container = dict()
self.all_budgets = all_budgets
self.weight_list = weight_list
for i, budget in enumerate(all_budgets):
# r = int(item)
# self.surrogate_r.append(r)
self.surrogate_weight[budget] = self.weight_list[i]
self.surrogate_container[budget] = RandomForestWithInstances(cs, rng=rng)
def train(self, X: np.ndarray, Y: np.ndarray, r) -> 'SurrogateModel':
"""Trains the Model on X and Y.
Parameters
----------
X : np.ndarray [n_samples, n_features (config + instance features)]
Input data points.
Y : np.ndarray [n_samples, n_objectives]
The corresponding target values. n_objectives must match the
number of target names specified in the constructor.
r : int
Determine which surrogate in self.surrogate_container to train.
Returns
-------
self : BaseModel
"""
self.types = self._initial_types.copy()
if len(X.shape) != 2:
raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))
if X.shape[1] != len(self.types):
raise ValueError('Feature mismatch: X should have %d features, but has %d' % (X.shape[1], len(self.types)))
if X.shape[0] != Y.shape[0]:
raise ValueError('X.shape[0] (%s) != y.shape[0] (%s)' % (X.shape[0], Y.shape[0]))
self.n_params = X.shape[1] - self.n_feats
        # reduce dimensionality of features if larger than PCA_DIM
if self.pca and X.shape[0] > self.pca.n_components:
X_feats = X[:, -self.n_feats:]
# scale features
X_feats = self.scaler.fit_transform(X_feats)
            X_feats = np.nan_to_num(X_feats)  # guard against features with max == min (NaNs after scaling)
# PCA
X_feats = self.pca.fit_transform(X_feats)
X = np.hstack((X[:, :self.n_params], X_feats))
if hasattr(self, "types"):
# for RF, adapt types list
# if X_feats.shape[0] < self.pca, X_feats.shape[1] ==
# X_feats.shape[0]
self.types = np.array(
np.hstack((self.types[:self.n_params], np.zeros((X_feats.shape[1])))),
dtype=np.uint,
)
return self._train(X, Y, r)
def _train(self, X: np.ndarray, y: np.ndarray, r):
self.surrogate_container[r].train(X, y)
def _predict(self, X: np.ndarray, cov_return_type='diagonal_cov'):
if len(X.shape) != 2:
raise ValueError(
'Expected 2d array, got %dd array!' % len(X.shape))
if X.shape[1] != self.types.shape[0]:
raise ValueError('Rows in X should have %d entries but have %d!' %
(self.types.shape[0], X.shape[1]))
if self.fusion == 'idp':
means, vars = np.zeros((X.shape[0], 1)), np.zeros((X.shape[0], 1))
for r in self.all_budgets:
mean, var = self.surrogate_container[r].predict(X)
means += self.surrogate_weight[r] * mean
vars += self.surrogate_weight[r] * self.surrogate_weight[r] * var
return means.reshape((-1, 1)), vars.reshape((-1, 1))
else:
raise ValueError('Undefined Fusion Method: %s!' % self.fusion)
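# Note on the 'idp' (independent) fusion above: treating the per-budget
# surrogates as independent Gaussian predictors, the combined prediction is
#   mu      = sum_r w_r * mu_r
#   sigma^2 = sum_r (w_r ** 2) * sigma_r^2
# i.e. the mean and variance of a weighted sum of independent predictions,
# which is exactly what the loop over self.all_budgets accumulates.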
| [
"xbbo.surrogate.prf.RandomForestWithInstances",
"numpy.nan_to_num",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.hstack"
] | [((401, 426), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (422, 426), True, 'import numpy as np\n'), ((1069, 1107), 'xbbo.surrogate.prf.RandomForestWithInstances', 'RandomForestWithInstances', (['cs'], {'rng': 'rng'}), '(cs, rng=rng)\n', (1094, 1107), False, 'from xbbo.surrogate.prf import RandomForestWithInstances\n'), ((2501, 2523), 'numpy.nan_to_num', 'np.nan_to_num', (['X_feats'], {}), '(X_feats)\n', (2514, 2523), True, 'import numpy as np\n'), ((2643, 2685), 'numpy.hstack', 'np.hstack', (['(X[:, :self.n_params], X_feats)'], {}), '((X[:, :self.n_params], X_feats))\n', (2652, 2685), True, 'import numpy as np\n'), ((3645, 3670), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3653, 3670), True, 'import numpy as np\n'), ((3672, 3697), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (3680, 3697), True, 'import numpy as np\n'), ((2971, 2997), 'numpy.zeros', 'np.zeros', (['X_feats.shape[1]'], {}), '(X_feats.shape[1])\n', (2979, 2997), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from fit1d.model import ModelMock
from fit1d.outlier import OutLierMock
from fit1d.fit1d import Fit1DMock, FitResults
class TestFit1D(unittest.TestCase):
def setUp(self):
self.x = np.linspace(-15.0, 15.0, num=50)
self.y = self.x
self.model = ModelMock({"param1": 5.5})
self.outlier = OutLierMock({'param1': 10, 'param2': 5.4, 'param3': 'hello'})
self.fit1d = Fit1DMock(model=self.model, outlier=self.outlier, remove_outliers=False)
def test_fit_results_init(self):
fit_ref = FitResults(model=self.model,
error_vector=np.array([1, 10, 100, 1000]),
rms=5.4,
outlier=self.outlier)
self.assertTrue(isinstance(fit_ref, FitResults))
def test_fit_results_equal(self):
fit_res1 = FitResults(model=self.model,
error_vector=np.array([1, 10, 100, 1000]),
rms=5.4,
outlier=self.outlier)
fit_res2 = FitResults(model=self.model,
error_vector=np.array([1, 10, 100, 1000]),
rms=5.4,
outlier=self.outlier)
self.assertTrue(fit_res1 == fit_res2)
def test_fit_results_nonequal(self):
fit_res1 = FitResults(model=self.model,
error_vector=np.array([1, 100, 100, 1000]),
rms=5.4,
outlier=self.outlier)
fit_res2 = FitResults(model=self.model,
error_vector=np.array([1, 10, 100, 1000]),
rms=5.4,
outlier=self.outlier)
self.assertFalse(fit_res1 == fit_res2)
def test_fit(self):
fit = self.fit1d.fit(self.x, self.y)
fit_ref = FitResults(model=self.model,
error_vector=np.array([1, 10, 100, 1000]),
rms=5.4,
outlier=self.outlier)
self.assertTrue(fit == fit_ref)
def test_evaluate_with_model(self):
e_val = self.fit1d.eval(self.x, self.model)
e_ref = np.array([1, 2, 3, 4])
self.assertTrue(np.array_equal(e_val, e_ref))
def test_eval(self):
e_val = self.fit1d.eval(self.x)
e_ref = np.array([1, 2, 3, 4])
self.assertTrue(np.array_equal(e_val, e_ref))
def test_remove_outliers(self):
o_val = self.fit1d._remove_outlier(self.x, self.y)
o_ref = [1, 2, 3]
self.assertListEqual(o_val, o_ref)
def test_calc_err(self):
x = np.array([1, 10, 100, 1000])
y = np.array([10, -10, 100, 2000])
dif_ref = np.array([-9, 20, 0, -1000])
dif_val = self.fit1d.calc_error(y=x, y_fit=y)
self.assertTrue(np.array_equal(dif_ref, dif_val))
def test_calc_rms(self):
x = np.array([1, 10, 100, 1000])
y = np.array([10, -10, 100, 2000])
residual = self.fit1d.calc_error(y=x, y_fit=y)
rms_ref = (sum(residual ** 2) / len(residual)) ** 0.5
rms_val = self.fit1d.calc_rms(residual=residual)
self.assertTrue(np.array_equal(rms_ref, rms_val))
def test_get_model(self):
model_ref = self.model
self.assertTrue(model_ref == self.fit1d.get_model())
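# Conventional entry point (an assumption about how the suite is invoked)
# so the file can also be run directly with `python <this_file>.py`:
if __name__ == '__main__':
    unittest.main()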
| [
"fit1d.model.ModelMock",
"fit1d.outlier.OutLierMock",
"numpy.array",
"numpy.linspace",
"numpy.array_equal",
"fit1d.fit1d.Fit1DMock"
] | [((229, 261), 'numpy.linspace', 'np.linspace', (['(-15.0)', '(15.0)'], {'num': '(50)'}), '(-15.0, 15.0, num=50)\n', (240, 261), True, 'import numpy as np\n'), ((307, 333), 'fit1d.model.ModelMock', 'ModelMock', (["{'param1': 5.5}"], {}), "({'param1': 5.5})\n", (316, 333), False, 'from fit1d.model import ModelMock\n'), ((357, 418), 'fit1d.outlier.OutLierMock', 'OutLierMock', (["{'param1': 10, 'param2': 5.4, 'param3': 'hello'}"], {}), "({'param1': 10, 'param2': 5.4, 'param3': 'hello'})\n", (368, 418), False, 'from fit1d.outlier import OutLierMock\n'), ((440, 512), 'fit1d.fit1d.Fit1DMock', 'Fit1DMock', ([], {'model': 'self.model', 'outlier': 'self.outlier', 'remove_outliers': '(False)'}), '(model=self.model, outlier=self.outlier, remove_outliers=False)\n', (449, 512), False, 'from fit1d.fit1d import Fit1DMock, FitResults\n'), ((2269, 2291), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2277, 2291), True, 'import numpy as np\n'), ((2428, 2450), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2436, 2450), True, 'import numpy as np\n'), ((2712, 2740), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (2720, 2740), True, 'import numpy as np\n'), ((2753, 2783), 'numpy.array', 'np.array', (['[10, -10, 100, 2000]'], {}), '([10, -10, 100, 2000])\n', (2761, 2783), True, 'import numpy as np\n'), ((2802, 2830), 'numpy.array', 'np.array', (['[-9, 20, 0, -1000]'], {}), '([-9, 20, 0, -1000])\n', (2810, 2830), True, 'import numpy as np\n'), ((2985, 3013), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (2993, 3013), True, 'import numpy as np\n'), ((3026, 3056), 'numpy.array', 'np.array', (['[10, -10, 100, 2000]'], {}), '([10, -10, 100, 2000])\n', (3034, 3056), True, 'import numpy as np\n'), ((2316, 2344), 'numpy.array_equal', 'np.array_equal', (['e_val', 'e_ref'], {}), '(e_val, e_ref)\n', (2330, 2344), True, 'import numpy as np\n'), ((2475, 2503), 'numpy.array_equal', 'np.array_equal', (['e_val', 'e_ref'], {}), '(e_val, e_ref)\n', (2489, 2503), True, 'import numpy as np\n'), ((2909, 2941), 'numpy.array_equal', 'np.array_equal', (['dif_ref', 'dif_val'], {}), '(dif_ref, dif_val)\n', (2923, 2941), True, 'import numpy as np\n'), ((3255, 3287), 'numpy.array_equal', 'np.array_equal', (['rms_ref', 'rms_val'], {}), '(rms_ref, rms_val)\n', (3269, 3287), True, 'import numpy as np\n'), ((640, 668), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (648, 668), True, 'import numpy as np\n'), ((947, 975), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (955, 975), True, 'import numpy as np\n'), ((1159, 1187), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (1167, 1187), True, 'import numpy as np\n'), ((1460, 1489), 'numpy.array', 'np.array', (['[1, 100, 100, 1000]'], {}), '([1, 100, 100, 1000])\n', (1468, 1489), True, 'import numpy as np\n'), ((1673, 1701), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (1681, 1701), True, 'import numpy as np\n'), ((2001, 2029), 'numpy.array', 'np.array', (['[1, 10, 100, 1000]'], {}), '([1, 10, 100, 1000])\n', (2009, 2029), True, 'import numpy as np\n')] |
#%%
import json
import os
import os.path
from funcy import lmap, merge, lcat
from cmdstanpy import CmdStanModel
import numpy as np
import statsmodels.api as sm
#%%
def load_data(path):
with open(path, 'r') as fh:
return json.load(fh)
def load_all(directory: str):
paths = (os.path.join(directory, f) for f in os.listdir(directory) if f.startswith('sona'))
return lmap(load_data, paths)
def get_payoffs(trial):
p = trial['payoffs']
if trial['prediction'] == 0:
return {
'psp': (p[0] + p[2]) / 2,
'pop': (p[1] + p[3]) / 2,
'psn': (p[4] + p[6]) / 2,
'pon': (p[5] + p[7]) / 2
}
else:
return {
'psp': (p[4] + p[6]) / 2,
'pop': (p[5] + p[7]) / 2,
'psn': (p[0] + p[2]) / 2,
'pon': (p[1] + p[3]) / 2
}
def get_lambdas(summary):
return summary.loc[np.char.startswith(summary.index.to_numpy(dtype='str'), 'lambda['), 'Mean'].to_numpy()
#%%
subjects = load_all('data')
subjectss = [
[s for s in subjects if s['client']['lambda'] == l]
for l in [-1, 0, 1]
]
trialss = [
lcat([
[merge({ 'sid': i + 1 }, get_payoffs(t)) for t in s['trials']]
for i, s in enumerate(ss)
])
for ss in subjectss
]
data = [
merge(
{ 'nt': len(ts), 'ns': len(ss) },
{ k: [dic[k] for dic in ts] for k in ts[0] }
)
for ss, ts in zip(subjectss, trialss)
]
#%%
model = CmdStanModel(stan_file='model.stan')
fitn1 = model.sample(data=data[0], parallel_chains=1)
fit0 = model.sample(data=data[1], parallel_chains=1)
fit1 = model.sample(data=data[2], parallel_chains=1)
summaryn1 = fitn1.summary(sig_figs=4)
summary0 = fit0.summary(sig_figs=4)
summary1 = fit1.summary(sig_figs=4)
#%%
print('Posterior of mu for lambda = -1:')
print(' Mean:', summaryn1.loc['lambda_normal_mu', 'Mean'])
print(' SD:', summaryn1.loc['lambda_normal_sigma', 'StdDev'])
print()
print('Posterior of mu for lambda = 0:')
print(' Mean:', summary0.loc['lambda_normal_mu', 'Mean'])
print(' SD:', summary0.loc['lambda_normal_sigma', 'StdDev'])
print()
print('Posterior of mu for lambda = 1:')
print(' Mean:', summary1.loc['lambda_normal_mu', 'Mean'])
print(' SD:', summary1.loc['lambda_normal_sigma', 'StdDev'])
#%%
ln1 = get_lambdas(summaryn1)
l0 = get_lambdas(summary0)
l1 = get_lambdas(summary1)
xn1 = np.full_like(ln1, -1)
x0 = np.full_like(l0, 0)
x1 = np.full_like(l1, 1)
x = sm.add_constant(np.concatenate((xn1, x0, x1)))
y = np.concatenate((ln1, l0, l1))
results = sm.OLS(y, x).fit()
#%%
print('OLS regression on mean of posterior over \\hat\\lambda:\n')  # backslashes escaped to avoid invalid-escape warnings
print(results.summary())
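# Interpretation note: the slope on x estimates how strongly the posterior
# mean of \hat{lambda} tracks the true lambda in {-1, 0, 1}; a slope near 1
# with a small p-value would indicate good parameter recovery.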
| [
"cmdstanpy.CmdStanModel",
"numpy.full_like",
"funcy.lmap",
"json.load",
"statsmodels.api.OLS",
"os.path.join",
"os.listdir",
"numpy.concatenate"
] | [((1463, 1499), 'cmdstanpy.CmdStanModel', 'CmdStanModel', ([], {'stan_file': '"""model.stan"""'}), "(stan_file='model.stan')\n", (1475, 1499), False, 'from cmdstanpy import CmdStanModel\n'), ((2374, 2395), 'numpy.full_like', 'np.full_like', (['ln1', '(-1)'], {}), '(ln1, -1)\n', (2386, 2395), True, 'import numpy as np\n'), ((2401, 2420), 'numpy.full_like', 'np.full_like', (['l0', '(0)'], {}), '(l0, 0)\n', (2413, 2420), True, 'import numpy as np\n'), ((2426, 2445), 'numpy.full_like', 'np.full_like', (['l1', '(1)'], {}), '(l1, 1)\n', (2438, 2445), True, 'import numpy as np\n'), ((2501, 2530), 'numpy.concatenate', 'np.concatenate', (['(ln1, l0, l1)'], {}), '((ln1, l0, l1))\n', (2515, 2530), True, 'import numpy as np\n'), ((386, 408), 'funcy.lmap', 'lmap', (['load_data', 'paths'], {}), '(load_data, paths)\n', (390, 408), False, 'from funcy import lmap, merge, lcat\n'), ((2466, 2495), 'numpy.concatenate', 'np.concatenate', (['(xn1, x0, x1)'], {}), '((xn1, x0, x1))\n', (2480, 2495), True, 'import numpy as np\n'), ((234, 247), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (243, 247), False, 'import json\n'), ((292, 318), 'os.path.join', 'os.path.join', (['directory', 'f'], {}), '(directory, f)\n', (304, 318), False, 'import os\n'), ((2541, 2553), 'statsmodels.api.OLS', 'sm.OLS', (['y', 'x'], {}), '(y, x)\n', (2547, 2553), True, 'import statsmodels.api as sm\n'), ((328, 349), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (338, 349), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
from neupy import init
from neupy.utils import asfloat, as_tuple, tf_utils
from neupy.exceptions import LayerConnectionError, WeightInitializationError
from neupy.core.properties import (
NumberProperty, TypedListProperty,
ParameterProperty, IntProperty,
)
from .base import BaseLayer
__all__ = (
'Linear', 'Sigmoid', 'Tanh', 'Softmax',
'Relu', 'LeakyRelu', 'Elu', 'PRelu',
'Softplus', 'HardSigmoid',
)
class Linear(BaseLayer):
"""
    Layer with a linear activation function. It applies a linear
    transformation when the ``n_units`` parameter is specified and acts
    as an identity when it is not.
Parameters
----------
n_units : int or None
        Number of units in the layer. It also corresponds to the number of
        output features that will be produced per sample after passing it
        through this layer. The ``None`` value means that the layer will
        not have parameters and will only apply the activation function
        to the input, without a linear transformation.
        Defaults to ``None``.
    weight : array-like, Tensorflow variable, scalar or Initializer
Defines layer's weights. Default initialization methods
you can find :ref:`here <init-methods>`.
Defaults to :class:`HeNormal() <neupy.init.HeNormal>`.
    bias : 1D array-like, Tensorflow variable, scalar, Initializer or None
Defines layer's bias. Default initialization methods you can find
:ref:`here <init-methods>`. Defaults to
:class:`Constant(0) <neupy.init.Constant>`.
        The ``None`` value excludes bias from the calculations and
        does not add it to the parameters list.
{BaseLayer.name}
Methods
-------
{BaseLayer.Methods}
activation_function(input)
Applies activation function to the input.
Attributes
----------
{BaseLayer.Attributes}
Examples
--------
Linear Regression
>>> from neupy.layers import *
>>> network = Input(10) >> Linear(5)
"""
n_units = IntProperty(minval=1, allow_none=True)
weight = ParameterProperty()
bias = ParameterProperty(allow_none=True)
def __init__(self, n_units=None, weight=init.HeNormal(), bias=0,
name=None):
super(Linear, self).__init__(name=name)
self.n_units = n_units
self.weight = weight
self.bias = bias
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if self.n_units is None:
return input_shape
if input_shape and input_shape.ndims != 2:
raise LayerConnectionError(
"Input shape expected to have 2 dimensions, got {} instead. "
"Shape: {}".format(input_shape.ndims, input_shape))
n_samples = input_shape[0]
return tf.TensorShape((n_samples, self.n_units))
def create_variables(self, input_shape):
if self.n_units is None:
return
input_shape = tf.TensorShape(input_shape)
self.input_shape = input_shape
_, n_input_features = input_shape
if n_input_features.value is None:
raise WeightInitializationError(
"Cannot create variables for the layer `{}`, because "
"number of input features is unknown. Input shape: {}"
"Layer: {}".format(self.name, input_shape, self))
self.weight = self.variable(
value=self.weight, name='weight',
shape=as_tuple(n_input_features, self.n_units))
if self.bias is not None:
self.bias = self.variable(
value=self.bias, name='bias',
shape=as_tuple(self.n_units))
def output(self, input, **kwargs):
input = tf.convert_to_tensor(input, dtype=tf.float32)
if self.n_units is None:
return self.activation_function(input)
if self.bias is None:
output = tf.matmul(input, self.weight)
return self.activation_function(output)
output = tf.matmul(input, self.weight) + self.bias
return self.activation_function(output)
def activation_function(self, input_value):
return input_value
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(name=self.name)
return self._repr_arguments(
self.n_units,
name=self.name,
weight=self.weight,
bias=self.bias,
)
class Sigmoid(Linear):
"""
Layer with the sigmoid used as an activation function. It applies
    a linear transformation when the ``n_units`` parameter is specified and
    the sigmoid function after the transformation. When ``n_units`` is not
    specified, only the sigmoid function is applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Logistic Regression (LR)
>>> from neupy.layers import *
>>> network = Input(10) >> Sigmoid(1)
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Sigmoid(5) >> Sigmoid(1)
Convolutional Neural Networks (CNN) for Semantic Segmentation
Sigmoid layer can be used in order to normalize probabilities
per pixel in semantic classification task with two classes.
In the example below, we have as input 32x32 image that predicts
one of the two classes. Sigmoid normalizes raw predictions per pixel
to the valid probabilities.
>>> from neupy.layers import *
>>> network = Input((32, 32, 1)) >> Sigmoid()
"""
def activation_function(self, input_value):
return tf.nn.sigmoid(input_value)
class HardSigmoid(Linear):
"""
    Layer with the hard sigmoid used as an activation function. It applies
    a linear transformation when the ``n_units`` parameter is specified and
    the hard sigmoid function after the transformation. When ``n_units`` is
    not specified, only the hard sigmoid function is applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> HardSigmoid(5)
"""
def activation_function(self, input_value):
input_value = (0.2 * input_value) + 0.5
return tf.clip_by_value(input_value, 0., 1.)
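        # The two lines above implement the piecewise-linear approximation
        # max(0, min(1, 0.2 * x + 0.5)), a cheap surrogate for the sigmoid.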
class Tanh(Linear):
"""
Layer with the hyperbolic tangent used as an activation function.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the ``tanh`` function after the transformation. When
    ``n_units`` is not specified, only the ``tanh`` function is applied
    to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Tanh(5)
"""
def activation_function(self, input_value):
return tf.nn.tanh(input_value)
class Relu(Linear):
"""
Layer with the rectifier (ReLu) used as an activation function.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the ``relu`` function after the transformation. When
    ``n_units`` is not specified, only the ``relu`` function is applied
    to the input.
Parameters
----------
{Linear.n_units}
alpha : float
Alpha parameter defines the decreasing rate
for the negative values. If ``alpha``
        is a non-zero value then the layer behaves like a
        leaky ReLu. Defaults to ``0``.
    weight : array-like, Tensorflow variable, scalar or Initializer
Defines layer's weights. Default initialization methods
you can find :ref:`here <init-methods>`.
Defaults to :class:`HeNormal(gain=2) <neupy.init.HeNormal>`.
{Linear.bias}
{BaseLayer.name}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Relu(20) >> Relu(1)
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((32, 32, 3)),
... Convolution((3, 3, 16)) >> Relu(),
... Convolution((3, 3, 32)) >> Relu(),
... Reshape(),
... Softmax(10),
... )
"""
alpha = NumberProperty(minval=0)
def __init__(self, n_units=None, alpha=0, weight=init.HeNormal(gain=2),
bias=init.Constant(value=0), name=None):
self.alpha = alpha
super(Relu, self).__init__(
n_units=n_units, weight=weight, bias=bias, name=name)
def activation_function(self, input_value):
if self.alpha == 0:
return tf.nn.relu(input_value)
return tf.nn.leaky_relu(input_value, asfloat(self.alpha))
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(name=self.name, alpha=self.alpha)
return self._repr_arguments(
self.n_units,
name=self.name,
alpha=self.alpha,
weight=self.weight,
bias=self.bias,
)
class LeakyRelu(Linear):
"""
Layer with the leaky rectifier (Leaky ReLu) used as an activation
    function. It applies a linear transformation when the ``n_units``
    parameter is specified and the leaky relu function after the
    transformation. When ``n_units`` is not specified, only the leaky
    relu function is applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Notes
-----
    Does the same as ``Relu(input_size, alpha=0.01)``.
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> LeakyRelu(20) >> LeakyRelu(1)
"""
def activation_function(self, input_value):
return tf.nn.leaky_relu(input_value, alpha=asfloat(0.01))
class Softplus(Linear):
"""
    Layer with the softplus used as an activation function. It applies a
    linear transformation when the ``n_units`` parameter is specified and
    the softplus function after the transformation. When ``n_units`` is
    not specified, only the softplus function is applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Softplus(4)
"""
def activation_function(self, input_value):
return tf.nn.softplus(input_value)
class Softmax(Linear):
"""
    Layer with the softmax activation function. It applies a linear
    transformation when the ``n_units`` parameter is specified and the
    softmax function after the transformation. When ``n_units`` is not
    specified, only the softmax function is applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Relu(20) >> Softmax(10)
Convolutional Neural Networks (CNN) for Semantic Segmentation
Softmax layer can be used in order to normalize probabilities
per pixel. In the example below, we have as input 32x32 image
with raw prediction per each pixel for 10 different classes.
Softmax normalizes raw predictions per pixel to the probability
distribution.
>>> from neupy.layers import *
>>> network = Input((32, 32, 10)) >> Softmax()
"""
def activation_function(self, input_value):
return tf.nn.softmax(input_value)
class Elu(Linear):
"""
Layer with the exponential linear unit (ELU) used as an activation
    function. It applies a linear transformation when the ``n_units``
    parameter is specified and the elu function after the transformation.
    When ``n_units`` is not specified, only the elu function is
    applied to the input.
Parameters
----------
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> Elu(5) >> Elu(1)
References
----------
.. [1] http://arxiv.org/pdf/1511.07289v3.pdf
"""
def activation_function(self, input_value):
return tf.nn.elu(input_value)
class PRelu(Linear):
"""
Layer with the parametrized ReLu used as an activation function.
    The layer learns an additional parameter ``alpha`` during training.
    It applies a linear transformation when the ``n_units`` parameter is
    specified and the parametrized relu function after the transformation.
    When ``n_units`` is not specified, only the parametrized relu function
    is applied to the input.
Parameters
----------
alpha_axes : int or tuple
Axes that will not include unique alpha parameter.
Single integer value defines the same as a tuple with one value.
Defaults to ``-1``.
    alpha : array-like, Tensorflow variable, scalar or Initializer
Separate alpha parameter per each non-shared axis for the ReLu.
Scalar value means that each element in the tensor will be
equal to the specified value. Default initialization methods you
can find :ref:`here <init-methods>`.
Defaults to ``Constant(value=0.25)``.
{Linear.Parameters}
Methods
-------
{Linear.Methods}
Attributes
----------
{Linear.Attributes}
Examples
--------
Feedforward Neural Networks (FNN)
>>> from neupy.layers import *
>>> network = Input(10) >> PRelu(20) >> PRelu(1)
Convolutional Neural Networks (CNN)
>>> from neupy.layers import *
>>> network = join(
... Input((32, 32, 3)),
... Convolution((3, 3, 16)) >> PRelu(),
... Convolution((3, 3, 32)) >> PRelu(),
... Reshape(),
... Softmax(10),
... )
References
----------
.. [1] Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification.
https://arxiv.org/pdf/1502.01852v1.pdf
"""
alpha_axes = TypedListProperty()
alpha = ParameterProperty()
def __init__(self, n_units=None, alpha_axes=-1, alpha=0.25,
weight=init.HeNormal(gain=2), bias=0, name=None):
self.alpha = alpha
self.alpha_axes = as_tuple(alpha_axes)
if 0 in self.alpha_axes:
raise ValueError("Cannot specify alpha for 0-axis")
super(PRelu, self).__init__(
n_units=n_units, weight=weight, bias=bias, name=name)
def get_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape)
if input_shape and max(self.alpha_axes) >= input_shape.ndims:
max_axis_index = input_shape.ndims - 1
raise LayerConnectionError(
"Cannot specify alpha for the axis #{}. Maximum "
"available axis is {} (0-based indices)."
"".format(max(self.alpha_axes), max_axis_index))
return super(PRelu, self).get_output_shape(input_shape)
def create_variables(self, input_shape):
super(PRelu, self).create_variables(input_shape)
output_shape = self.get_output_shape(input_shape)
self.alpha = self.variable(
value=self.alpha, name='alpha',
shape=[output_shape[axis] for axis in self.alpha_axes])
def activation_function(self, input):
input = tf.convert_to_tensor(input, dtype=tf.float32)
ndim = input.shape.ndims
dimensions = np.arange(ndim)
alpha_axes = dimensions[list(self.alpha_axes)]
alpha = tf_utils.dimshuffle(self.alpha, ndim, alpha_axes)
return tf.maximum(0.0, input) + alpha * tf.minimum(0.0, input)
def __repr__(self):
if self.n_units is None:
return self._repr_arguments(
name=self.name,
alpha_axes=self.alpha_axes,
alpha=self.alpha)
return self._repr_arguments(
self.n_units,
name=self.name,
alpha_axes=self.alpha_axes,
alpha=self.alpha,
weight=self.weight,
bias=self.bias)
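# In PRelu.activation_function above, tf_utils.dimshuffle broadcasts the
# learned alpha tensor to the input's rank so that
#   f(x) = max(0, x) + alpha * min(0, x)
# applies one alpha per element along the non-shared (alpha_axes) axes.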
| [
"tensorflow.clip_by_value",
"tensorflow.nn.tanh",
"tensorflow.maximum",
"neupy.core.properties.ParameterProperty",
"neupy.core.properties.TypedListProperty",
"tensorflow.matmul",
"numpy.arange",
"neupy.core.properties.IntProperty",
"tensorflow.nn.softmax",
"tensorflow.nn.relu",
"tensorflow.nn.el... | [((2107, 2145), 'neupy.core.properties.IntProperty', 'IntProperty', ([], {'minval': '(1)', 'allow_none': '(True)'}), '(minval=1, allow_none=True)\n', (2118, 2145), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((2159, 2178), 'neupy.core.properties.ParameterProperty', 'ParameterProperty', ([], {}), '()\n', (2176, 2178), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((2190, 2224), 'neupy.core.properties.ParameterProperty', 'ParameterProperty', ([], {'allow_none': '(True)'}), '(allow_none=True)\n', (2207, 2224), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((8798, 8822), 'neupy.core.properties.NumberProperty', 'NumberProperty', ([], {'minval': '(0)'}), '(minval=0)\n', (8812, 8822), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((14919, 14938), 'neupy.core.properties.TypedListProperty', 'TypedListProperty', ([], {}), '()\n', (14936, 14938), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((14951, 14970), 'neupy.core.properties.ParameterProperty', 'ParameterProperty', ([], {}), '()\n', (14968, 14970), False, 'from neupy.core.properties import NumberProperty, TypedListProperty, ParameterProperty, IntProperty\n'), ((2270, 2285), 'neupy.init.HeNormal', 'init.HeNormal', ([], {}), '()\n', (2283, 2285), False, 'from neupy import init\n'), ((2527, 2554), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (2541, 2554), True, 'import tensorflow as tf\n'), ((2909, 2950), 'tensorflow.TensorShape', 'tf.TensorShape', (['(n_samples, self.n_units)'], {}), '((n_samples, self.n_units))\n', (2923, 2950), True, 'import tensorflow as tf\n'), ((3072, 3099), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (3086, 3099), True, 'import tensorflow as tf\n'), ((3844, 3889), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input'], {'dtype': 'tf.float32'}), '(input, dtype=tf.float32)\n', (3864, 3889), True, 'import tensorflow as tf\n'), ((5831, 5857), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['input_value'], {}), '(input_value)\n', (5844, 5857), True, 'import tensorflow as tf\n'), ((6611, 6650), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['input_value', '(0.0)', '(1.0)'], {}), '(input_value, 0.0, 1.0)\n', (6627, 6650), True, 'import tensorflow as tf\n'), ((7342, 7365), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['input_value'], {}), '(input_value)\n', (7352, 7365), True, 'import tensorflow as tf\n'), ((8877, 8898), 'neupy.init.HeNormal', 'init.HeNormal', ([], {'gain': '(2)'}), '(gain=2)\n', (8890, 8898), False, 'from neupy import init\n'), ((8922, 8944), 'neupy.init.Constant', 'init.Constant', ([], {'value': '(0)'}), '(value=0)\n', (8935, 8944), False, 'from neupy import init\n'), ((11144, 11171), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['input_value'], {}), '(input_value)\n', (11158, 11171), True, 'import tensorflow as tf\n'), ((12294, 12320), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['input_value'], {}), '(input_value)\n', (12307, 12320), True, 'import tensorflow as tf\n'), ((13103, 13125), 'tensorflow.nn.elu', 'tf.nn.elu', (['input_value'], {}), '(input_value)\n', (13112, 13125), True, 'import tensorflow as tf\n'), ((15060, 15081), 'neupy.init.HeNormal', 'init.HeNormal', ([], 
{'gain': '(2)'}), '(gain=2)\n', (15073, 15081), False, 'from neupy import init\n'), ((15157, 15177), 'neupy.utils.as_tuple', 'as_tuple', (['alpha_axes'], {}), '(alpha_axes)\n', (15165, 15177), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n'), ((15448, 15475), 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (15462, 15475), True, 'import tensorflow as tf\n'), ((16262, 16307), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input'], {'dtype': 'tf.float32'}), '(input, dtype=tf.float32)\n', (16282, 16307), True, 'import tensorflow as tf\n'), ((16363, 16378), 'numpy.arange', 'np.arange', (['ndim'], {}), '(ndim)\n', (16372, 16378), True, 'import numpy as np\n'), ((16451, 16500), 'neupy.utils.tf_utils.dimshuffle', 'tf_utils.dimshuffle', (['self.alpha', 'ndim', 'alpha_axes'], {}), '(self.alpha, ndim, alpha_axes)\n', (16470, 16500), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n'), ((4027, 4056), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.weight'], {}), '(input, self.weight)\n', (4036, 4056), True, 'import tensorflow as tf\n'), ((4127, 4156), 'tensorflow.matmul', 'tf.matmul', (['input', 'self.weight'], {}), '(input, self.weight)\n', (4136, 4156), True, 'import tensorflow as tf\n'), ((9184, 9207), 'tensorflow.nn.relu', 'tf.nn.relu', (['input_value'], {}), '(input_value)\n', (9194, 9207), True, 'import tensorflow as tf\n'), ((9253, 9272), 'neupy.utils.asfloat', 'asfloat', (['self.alpha'], {}), '(self.alpha)\n', (9260, 9272), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n'), ((16516, 16538), 'tensorflow.maximum', 'tf.maximum', (['(0.0)', 'input'], {}), '(0.0, input)\n', (16526, 16538), True, 'import tensorflow as tf\n'), ((3580, 3620), 'neupy.utils.as_tuple', 'as_tuple', (['n_input_features', 'self.n_units'], {}), '(n_input_features, self.n_units)\n', (3588, 3620), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n'), ((10442, 10455), 'neupy.utils.asfloat', 'asfloat', (['(0.01)'], {}), '(0.01)\n', (10449, 10455), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n'), ((16549, 16571), 'tensorflow.minimum', 'tf.minimum', (['(0.0)', 'input'], {}), '(0.0, input)\n', (16559, 16571), True, 'import tensorflow as tf\n'), ((3764, 3786), 'neupy.utils.as_tuple', 'as_tuple', (['self.n_units'], {}), '(self.n_units)\n', (3772, 3786), False, 'from neupy.utils import asfloat, as_tuple, tf_utils\n')] |
import torch
import src
from src import Metrics, utils
import collections
import os
import numpy as np
from PIL import Image
class Eval:
def __init__(self, L):
self.Loader = L
self.scores = collections.OrderedDict()
for val in L.vals:
self.scores[val] = src.Score(val, L)
def eval_Saliency(self, Model, epoch,exp_id, supervised=True,):
savedict = {}
# print(valdata['X'].shape)
Outputs = {val : Metrics.getOutPuts(Model, valdata['X'], self.Loader, supervised=supervised) for val, valdata in self.Loader.valdatas.items()}
# print(Outputs.keys())
for val in self.Loader.valdatas.keys():
Outputs[val]['Name'] = self.Loader.valdatas[val]['Name']
Outputs[val]['Shape'] = self.Loader.valdatas[val]['Shape']
Outputs[val]['gt'] = self.Loader.valdatas[val]['Y']
for valname, output in Outputs.items():
# print(valname)
# print(val)
save = 'output'
save_1 = 'output/'+valname
            # exist_ok=True avoids a crash when the output directories already
            # exist (e.g. save_1 survives from an earlier run but its pred/gt
            # subfolders were removed); parents are created automatically
            os.makedirs(save_1 + '/pred', exist_ok=True)
            os.makedirs(save_1 + '/gt', exist_ok=True)
names, shapes, finals, time,gts = output['Name']['Y'], output['Shape'], output['final'] * 255., output['time'], output['gt']*255
for i in range(len(names)):
pred_p = 'output/'+valname+'/pred/'+names[i]
gt_p = 'output/'+valname+'/gt/'+names[i]
Image.fromarray(np.uint8(finals[i])).resize((shapes[i]), Image.BICUBIC).save(pred_p)
Image.fromarray(np.uint8(gts[i])).resize((shapes[i]), Image.BICUBIC).save(gt_p)
# print(F)
# saves = self.scores[val].update([F, M], epoch)
# savedict[val] = saves
# for val, score in self.scores.items():
# score.print_present()
# print('-----------------------------------------')
# if self.Loader.MODE == 'train':
# torch.save(utils.makeDict(Model.state_dict()), utils.genPath(self.Loader.spath, 'present.pkl'))
# for val, saves in savedict.items():
# for idx, save in enumerate(saves):
# if save:
# torch.save(utils.makeDict(Model.state_dict()), utils.genPath(self.Loader.spath, val+'_'+['F', 'M'][idx]+'.pkl'))
# for val, score in self.scores.items():
# score.print_best()
# else:
# for val in self.Loader.valdatas.keys():
# Outputs[val]['Name'] = self.Loader.valdatas[val]['Name']
# Outputs[val]['Shape'] = self.Loader.valdatas[val]['Shape']
# return Outputs if self.Loader.save else None
| [
"numpy.uint8",
"os.makedirs",
"os.path.exists",
"src.Score",
"collections.OrderedDict",
"src.Metrics.getOutPuts"
] | [((210, 235), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (233, 235), False, 'import collections\n'), ((294, 311), 'src.Score', 'src.Score', (['val', 'L'], {}), '(val, L)\n', (303, 311), False, 'import src\n'), ((464, 539), 'src.Metrics.getOutPuts', 'Metrics.getOutPuts', (['Model', "valdata['X']", 'self.Loader'], {'supervised': 'supervised'}), "(Model, valdata['X'], self.Loader, supervised=supervised)\n", (482, 539), False, 'from src import Metrics, utils\n'), ((1065, 1085), 'os.path.exists', 'os.path.exists', (['save'], {}), '(save)\n', (1079, 1085), False, 'import os\n'), ((1103, 1120), 'os.makedirs', 'os.makedirs', (['save'], {}), '(save)\n', (1114, 1120), False, 'import os\n'), ((1140, 1162), 'os.path.exists', 'os.path.exists', (['save_1'], {}), '(save_1)\n', (1154, 1162), False, 'import os\n'), ((1180, 1199), 'os.makedirs', 'os.makedirs', (['save_1'], {}), '(save_1)\n', (1191, 1199), False, 'import os\n'), ((1216, 1245), 'os.makedirs', 'os.makedirs', (["(save_1 + '/pred')"], {}), "(save_1 + '/pred')\n", (1227, 1245), False, 'import os\n'), ((1260, 1287), 'os.makedirs', 'os.makedirs', (["(save_1 + '/gt')"], {}), "(save_1 + '/gt')\n", (1271, 1287), False, 'import os\n'), ((1617, 1636), 'numpy.uint8', 'np.uint8', (['finals[i]'], {}), '(finals[i])\n', (1625, 1636), True, 'import numpy as np\n'), ((1718, 1734), 'numpy.uint8', 'np.uint8', (['gts[i]'], {}), '(gts[i])\n', (1726, 1734), True, 'import numpy as np\n')] |
"""
bpz: Bayesian Photo-Z estimation
Reference: Benitez 2000, ApJ, 536, p.571
Usage:
python bpz.py catalog.cat
Needs a catalog.columns file which describes the contents of catalog.cat
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import map
from builtins import input
from builtins import range
from past.utils import old_div
from useful import *
import numpy as np
from numpy import *
from bpz_tools import *
from string import *
import os
import glob
import sys
import time
import shelve
from coetools import pause, params_cl
rolex = watch()
rolex.set()
class Printer():
"""Print things to stdout on one line dynamically"""
def __init__(self, data):
sys.stdout.write("\r\x1b[K" + data.__str__())
sys.stdout.flush()
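        # "\r" returns the cursor to the start of the line and the ANSI code
        # "\x1b[K" clears to the end of the line, so successive Printer(...)
        # calls overwrite each other in place.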
def seglist(vals, mask=None):
"""Split vals into lists based on mask > 0"""
if mask is None:
mask = np.greater(vals, 0)
lists = []
i = 0
lastgood = False
list1 = []
for i in range(len(vals)):
        if not mask[i]:  # `is False` silently fails for numpy bools and ints
if lastgood:
lists.append(list1)
list1 = []
lastgood = False
if mask[i]:
list1.append(vals[i])
lastgood = True
if lastgood:
lists.append(list1)
return lists
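# Worked example of the splitting behaviour (hypothetical inputs):
#   seglist([1, 2, 7, 3], mask=[1, 1, 0, 1])  ->  [[1, 2], [3]]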
# Initialization and definitions#
#Current directory
homedir = os.getcwd()
#Parameter definition
pars = params()
pars.d = {
'SPECTRA': 'CWWSB4.list', # template list
#'PRIOR': 'hdfn_SB', # prior name
'PRIOR': 'hdfn_gen', # prior name
'NTYPES':
    None, # Number of Elliptical, Spiral, and Starburst/Irregular templates. Default: 1,2,n-3
'DZ': 0.01, # redshift resolution
'ZMIN': 0.01, # minimum redshift
'ZMAX': 10., # maximum redshift
'MAG': 'yes', # Data in magnitudes?
'MIN_MAGERR': 0.001, # minimum magnitude uncertainty --DC
'ODDS': 0.95, # Odds threshold: affects confidence limits definition
'INTERP':
0, # Number of interpolated templates between each of the original ones
'EXCLUDE': 'none', # Filters to be excluded from the estimation
'NEW_AB': 'no', # If yes, generate new AB files even if they already exist
'CHECK':
'yes', # Perform some checks, compare observed colors with templates, etc.
'VERBOSE': 'yes', # Print estimated redshifts to the standard output
'PROBS':
'no', # Save all the galaxy probability distributions (it will create a very large file)
'PROBS2':
'no', # Save all the galaxy probability distributions P(z,t) (but not priors) -- Compact
'PROBS_LITE': 'yes', # Save only the final probability distribution
'GET_Z': 'yes', # Actually obtain photo-z
'ONLY_TYPE': 'no', # Use spectroscopic redshifts instead of photo-z
'MADAU': 'yes', # Apply Madau correction to spectra
'Z_THR': 0, # Integrate probability for z>z_thr
'COLOR': 'no', # Use colors instead of fluxes
'PLOTS': 'no', # Don't produce plots
'INTERACTIVE': 'yes', # Don't query the user
'PHOTO_ERRORS':
'no', # Define the confidence interval using only the photometric errors
'MIN_RMS':
0.05, # "Intrinsic" photo-z rms in dz /(1+z) (Change to 0.05 for templates from Benitez et al. 2004
'N_PEAKS': 1,
'MERGE_PEAKS': 'no',
'CONVOLVE_P': 'yes',
'P_MIN': 1e-2,
'SED_DIR': sed_dir,
'AB_DIR': ab_dir,
'FILTER_DIR': fil_dir,
'DELTA_M_0': 0.,
'ZP_OFFSETS': 0.,
'ZC': None,
'FC': None,
"ADD_SPEC_PROB": None,
"ADD_CONTINUOUS_PROB": None,
"NMAX": None # Useful for testing
}
if pars.d['PLOTS'] == 'no': plots = 0
else: plots = 1
if plots:
# If pylab installed show plots
plots = 'pylab'
try:
import matplotlib
matplotlib.use('TkAgg')
import pylab as pyl
from pylab import *
# from coeplot2a import *
pyl.plot([1])
pyl.title('KILL THIS WINDOW!')
pyl.show()
pyl.ioff()
except:
try:
from biggles import *
plots = 'biggles'
except:
plots = 0
#Define the default values of the parameters
pars.d['INPUT'] = sys.argv[1] # catalog with the photometry
obs_file = pars.d['INPUT']
root = os.path.splitext(pars.d['INPUT'])[0]
pars.d[
'COLUMNS'] = root + '.columns' # column information for the input catalog
pars.d['OUTPUT'] = root + '.bpz' # output
nargs = len(sys.argv)
ipar = 2
if nargs > 2: # Check for parameter file and update parameters
if sys.argv[2] == '-P':
pars.fromfile(sys.argv[3])
ipar = 4
# Update the parameters using command line additions
#pars.fromcommandline(sys.argv[ipar:])
#for key in pars.d:
# print key, pars.d[key]
#pause()
pars.d.update(
params_cl()) # allows for flag only (no value after), e.g., -CHECK
def updateblank(var, ext):
global pars
if pars.d[var] in [None, 'yes']:
pars.d[var] = root + '.' + ext
updateblank('CHECK', 'flux_comparison')
updateblank('PROBS_LITE', 'probs')
updateblank('PROBS', 'full_probs')
updateblank('PROBS2', 'chisq')
#if pars.d['CHECK'] in [None, 'yes']:
# pars.d['CHECK'] = root+'.flux_comparison'
#This allows to change the auxiliary directories used by BPZ
if pars.d['SED_DIR'] != sed_dir:
print("Changing sed_dir to ", pars.d['SED_DIR'])
sed_dir = pars.d['SED_DIR']
if sed_dir[-1] != '/': sed_dir += '/'
if pars.d['AB_DIR'] != ab_dir:
print("Changing ab_dir to ", pars.d['AB_DIR'])
ab_dir = pars.d['AB_DIR']
if ab_dir[-1] != '/': ab_dir += '/'
if pars.d['FILTER_DIR'] != fil_dir:
print("Changing fil_dir to ", pars.d['FILTER_DIR'])
fil_dir = pars.d['FILTER_DIR']
if fil_dir[-1] != '/': fil_dir += '/'
#Better safe than sorry
if pars.d['OUTPUT'] == obs_file or pars.d['PROBS'] == obs_file or pars.d[
'PROBS2'] == obs_file or pars.d['PROBS_LITE'] == obs_file:
print("This would delete the input file!")
sys.exit()
if pars.d['OUTPUT'] == pars.d['COLUMNS'] or pars.d['PROBS_LITE'] == pars.d[
'COLUMNS'] or pars.d['PROBS'] == pars.d['COLUMNS']:
print("This would delete the .columns file!")
sys.exit()
#Assign the intrinsic rms
if pars.d['SPECTRA'] == 'CWWSB.list':
print('Setting the intrinsic rms to 0.067(1+z)')
pars.d['MIN_RMS'] = 0.067
pars.d['MIN_RMS'] = float(pars.d['MIN_RMS'])
pars.d['MIN_MAGERR'] = float(pars.d['MIN_MAGERR'])
if pars.d['INTERACTIVE'] == 'no': interactive = 0
else: interactive = 1
if pars.d['VERBOSE'] == 'yes':
print("Current parameters")
view_keys(pars.d)
pars.d['N_PEAKS'] = int(pars.d['N_PEAKS'])
if pars.d["ADD_SPEC_PROB"] is not None:
specprob = 1
specfile = pars.d["ADD_SPEC_PROB"]
spec = get_2Darray(specfile)
ns = spec.shape[1]
if old_div(ns, 2) != (old_div(ns, 2.)):
print("Number of columns in SPEC_PROB is odd")
sys.exit()
z_spec = spec[:, :old_div(ns, 2)]
p_spec = spec[:, old_div(ns, 2):]
# Write output file header
header = "#ID "
    header += ns // 2 * " z_spec%i"
    header += ns // 2 * " p_spec%i"
header += "\n"
header = header % tuple(list(range(old_div(ns, 2))) + list(range(old_div(
ns, 2))))
specout = open(specfile.split()[0] + ".p_spec", "w")
specout.write(header)
else:
specprob = 0
pars.d['DELTA_M_0'] = float(pars.d['DELTA_M_0'])
#Some misc. initialization info useful for the .columns file
#nofilters=['M_0','OTHER','ID','Z_S','X','Y']
nofilters = ['M_0', 'OTHER', 'ID', 'Z_S']
#Numerical codes for nondetection, etc. in the photometric catalog
unobs = -99. # Objects not observed
undet = 99. # Objects not detected
#Define the z-grid
zmin = float(pars.d['ZMIN'])
zmax = float(pars.d['ZMAX'])
if zmin > zmax: raise ValueError('zmin must be smaller than zmax!')
dz = float(pars.d['DZ'])
linear = 1
if linear:
z = np.arange(zmin, zmax + dz, dz)
else:
if zmax != 0.:
zi = zmin
z = []
while zi <= zmax:
z.append(zi)
zi = zi + dz * (1. + zi)
z = np.array(z)
else:
z = np.array([0.])
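# Note on the non-linear grid above: the step grows as dz*(1+z), so the
# grid is (roughly) uniform in dz/(1+z), the natural unit for photo-z errors.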
#Now check the contents of the FILTERS, SED and AB directories
#Get the filters in stock
filters_db = []
filters_db = glob.glob(fil_dir + '*.res')
for i in range(len(filters_db)):
filters_db[i] = os.path.basename(filters_db[i])
filters_db[i] = filters_db[i][:-4]
#Get the SEDs in stock
sed_db = []
sed_db = glob.glob(sed_dir + '*.sed')
for i in range(len(sed_db)):
sed_db[i] = os.path.basename(sed_db[i])
sed_db[i] = sed_db[i][:-4]
#Get the ABflux files in stock
ab_db = []
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
ab_db[i] = os.path.basename(ab_db[i])
ab_db[i] = ab_db[i][:-3]
#Get a list with the filter names and check whether they are in stock
col_file = pars.d['COLUMNS']
filters = get_str(col_file, 0)
for cosa in nofilters:
if filters.count(cosa): filters.remove(cosa)
if pars.d['EXCLUDE'] != 'none':
if type(pars.d['EXCLUDE']) == type(' '):
pars.d['EXCLUDE'] = [pars.d['EXCLUDE']]
for cosa in pars.d['EXCLUDE']:
if filters.count(cosa): filters.remove(cosa)
for filter in filters:
if filter[-4:] == '.res': filter = filter[:-4]
if filter not in filters_db:
print('filter ', filter, 'not in database at', fil_dir, ':')
if ask('Print filters in database?'):
for line in filters_db:
print(line)
sys.exit()
#Get a list with the spectrum names and check whether they're in stock
#Look for the list in the home directory first,
#if it's not there, look in the SED directory
spectra_file = os.path.join(homedir, pars.d['SPECTRA'])
if not os.path.exists(spectra_file):
spectra_file = os.path.join(sed_dir, pars.d['SPECTRA'])
spectra = get_str(spectra_file, 0)
for i in range(len(spectra)):
if spectra[i][-4:] == '.sed': spectra[i] = spectra[i][:-4]
nf = len(filters)
nt = len(spectra)
nz = len(z)
#Get the model fluxes
f_mod = np.zeros((nz, nt, nf)) * 0.
abfiles = []
for it in range(nt):
for jf in range(nf):
if filters[jf][-4:] == '.res': filtro = filters[jf][:-4]
else: filtro = filters[jf]
#model = join([spectra[it], filtro, 'AB'], '.')
model = '.'.join([spectra[it], filtro, 'AB'])
model_path = os.path.join(ab_dir, model)
abfiles.append(model)
#Generate new ABflux files if not present
# or if new_ab flag on
if pars.d['NEW_AB'] == 'yes' or model[:-3] not in ab_db:
if spectra[it] not in sed_db:
print('SED ', spectra[it], 'not in database at', sed_dir)
# for line in sed_db:
# print line
sys.exit()
#print spectra[it],filters[jf]
print(' Generating ', model, '....')
ABflux(spectra[it], filtro, madau=pars.d['MADAU'])
#z_ab=np.arange(0.,zmax_ab,dz_ab) #zmax_ab and dz_ab are def. in bpz_tools
# abflux=f_z_sed(spectra[it],filters[jf], z_ab,units='nu',madau=pars.d['MADAU'])
# abflux=clip(abflux,0.,1e400)
# buffer=join(['#',spectra[it],filters[jf], 'AB','\n'])
#for i in range(len(z_ab)):
# buffer=buffer+join([`z_ab[i]`,`abflux[i]`,'\n'])
#open(model_path,'w').write(buffer)
#zo=z_ab
#f_mod_0=abflux
#else:
#Read the data
zo, f_mod_0 = get_data(model_path, (0, 1))
#Rebin the data to the required redshift resolution
f_mod[:, it, jf] = match_resol(zo, f_mod_0, z)
#if sometrue(np.less(f_mod[:,it,jf],0.)):
if np.less(f_mod[:, it, jf], 0.).any():
print('Warning: some values of the model AB fluxes are <0')
print('due to the interpolation ')
print('Clipping them to f>=0 values')
#To avoid rounding errors in the calculation of the likelihood
f_mod[:, it, jf] = clip(f_mod[:, it, jf], 0., 1e300)
#We forbid f_mod to take values in the (0,1e-100) interval
#f_mod[:,it,jf]=np.where(np.less(f_mod[:,it,jf],1e-100)*np.greater(f_mod[:,it,jf],0.),0.,f_mod[:,it,jf])
#Here goes the interpolation between the colors
ninterp = int(pars.d['INTERP'])
ntypes = pars.d['NTYPES']
if ntypes is None:
nt0 = nt
else:
nt0 = list(ntypes)
for i, nt1 in enumerate(nt0):
print(i, nt1)
nt0[i] = int(nt1)
if (len(nt0) != 3) or (sum(nt0) != nt):
print()
        print('%d ellipticals + %d spirals + %d starbursts' % tuple(nt0))
print('does not add up to %d templates' % nt)
print('USAGE: -NTYPES nell,nsp,nsb')
print('nell = # of elliptical templates')
print('nsp = # of spiral templates')
print('nsb = # of starburst templates')
print(
'These must add up to the number of templates in the SPECTRA list')
print('Quitting BPZ.')
sys.exit()
if ninterp:
nti = nt + (nt - 1) * ninterp
buffer = np.zeros((nz, nti, nf)) * 1.
tipos = np.arange(0., float(nti), float(ninterp) + 1.)
xtipos = np.arange(float(nti))
for iz in np.arange(nz):
for jf in range(nf):
buffer[iz, :, jf] = match_resol(tipos, f_mod[iz, :, jf], xtipos)
nt = nti
f_mod = buffer
#for j in range(nf):
# plot=FramedPlot()
# for i in range(nt): plot.add(Curve(z,log(f_mod[:,i,j]+1e-40)))
# plot.show()
# ask('More?')
#Load all the parameters in the columns file to a dictionary
col_pars = params()
col_pars.fromfile(col_file)
# Read which filters are in which columns
flux_cols = []
eflux_cols = []
cals = []
zp_errors = []
zp_offsets = []
for filter in filters:
datos = col_pars.d[filter]
flux_cols.append(int(datos[0]) - 1)
eflux_cols.append(int(datos[1]) - 1)
cals.append(datos[2])
zp_errors.append(datos[3])
zp_offsets.append(datos[4])
zp_offsets = np.array(list(map(float, zp_offsets)))
if pars.d['ZP_OFFSETS']:
zp_offsets += np.array(list(map(float, pars.d['ZP_OFFSETS'])))
flux_cols = tuple(flux_cols)
eflux_cols = tuple(eflux_cols)
#READ the flux and errors from obs_file
f_obs = get_2Darray(obs_file, flux_cols)
ef_obs = get_2Darray(obs_file, eflux_cols)
#Convert them to arbitrary fluxes if they are in magnitudes
if pars.d['MAG'] == 'yes':
seen = np.greater(f_obs, 0.) * np.less(f_obs, undet)
no_seen = equal(f_obs, undet)
no_observed = equal(f_obs, unobs)
todo = seen + no_seen + no_observed
#The minimum photometric error is 0.01
#ef_obs=ef_obs+seen*equal(ef_obs,0.)*0.001
ef_obs = np.where(
np.greater_equal(ef_obs, 0.), clip(ef_obs, pars.d['MIN_MAGERR'], 1e10),
ef_obs)
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected magnitudes!')
print("""Allowed values for magnitudes are
0<m<""" + repr(undet) + " m=" + repr(undet) + "(non detection), m=" + repr(
unobs) + "(not observed)")
for i in range(len(todo)):
if not alltrue(todo[i, :]):
print(i + 1, f_obs[i, :], ef_obs[i, :])
sys.exit()
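    # Arithmetic used below (a sketch, following the AB-flux convention of
    # bpz_tools): a detection with magnitude m maps to f = 10**(-0.4*m) in
    # arbitrary flux units, and its magnitude error dm maps to
    # ef = (10**(0.4*dm) - 1)*f, which tends to ~0.921*dm*f for small dm.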
#Detected objects
try:
f_obs = np.where(seen, 10.**(-.4 * f_obs), f_obs)
except OverflowError:
print(
'Some of the input magnitudes have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(f_obs))
print('Maximum value', max(f_obs))
print('Indexes for minimum values', argmin(f_obs, 0.))
print('Indexes for maximum values', argmax(f_obs, 0.))
print('Bye.')
sys.exit()
try:
ef_obs = np.where(seen, (10.**(.4 * ef_obs) - 1.) * f_obs, ef_obs)
except OverflowError:
print(
'Some of the input magnitude errors have values which are >700 or <-700')
print('Purge the input photometric catalog')
print('Minimum value', min(ef_obs))
print('Maximum value', max(ef_obs))
print('Indexes for minimum values', argmin(ef_obs, 0.))
print('Indexes for maximum values', argmax(ef_obs, 0.))
print('Bye.')
sys.exit()
#print 'ef', ef_obs[0,:nf]
#print 'f', f_obs[1,:nf]
#print 'ef', ef_obs[1,:nf]
#Looked at, but not detected objects (mag=99.)
#We take the flux equal to zero, and the error in the flux equal to the 1-sigma detection error.
    #If m=99, the corresponding error magnitude column is supposed to be dm=m_1sigma; to avoid errors
#with the sign we take the absolute value of dm
f_obs = np.where(no_seen, 0., f_obs)
ef_obs = np.where(no_seen, 10.**(-.4 * abs(ef_obs)), ef_obs)
#Objects not looked at (mag=-99.)
f_obs = np.where(no_observed, 0., f_obs)
ef_obs = np.where(no_observed, 0., ef_obs)
#Flux codes:
# If f>0 and ef>0 : normal objects
# If f==0 and ef>0 :object not detected
# If f==0 and ef==0: object not observed
#Everything else will crash the program
#Check that the observed error fluxes are reasonable
#if sometrue(np.less(ef_obs,0.)): raise 'Negative input flux errors'
if np.less(ef_obs, 0.).any(): raise ValueError('Negative input flux errors')
#Compute the negative-flux mask before zeroing the fluxes; otherwise the
#second test below could never fire
neg_flux = np.less(f_obs, 0.)
ef_obs = np.where(
    neg_flux, maximum(1e-100, f_obs + ef_obs),
    ef_obs)  # Error equivalent to 1 sigma upper limit
f_obs = np.where(neg_flux, 0., f_obs)  #Put non-detections to 0
#if sometrue(np.less(f_obs,0.)) : raise 'Negative input fluxes'
seen = np.greater(f_obs, 0.) * np.greater(ef_obs, 0.)
no_seen = equal(f_obs, 0.) * np.greater(ef_obs, 0.)
no_observed = equal(f_obs, 0.) * equal(ef_obs, 0.)
todo = seen + no_seen + no_observed
if add.reduce(add.reduce(todo)) != todo.shape[0] * todo.shape[1]:
print('Objects with unexpected fluxes/errors')
#Convert (internally) objects with zero flux and zero error(non observed)
#to objects with almost infinite (~1e108) error and still zero flux
#This will yield reasonable likelihoods (flat ones) for these objects
ef_obs = np.where(no_observed, 1e108, ef_obs)
#Include the zero point errors
zp_errors = np.array(list(map(float, zp_errors)))
zp_frac = e_mag2frac(zp_errors)
#zp_frac=10.**(.4*zp_errors)-1.
ef_obs = np.where(seen, sqrt(ef_obs * ef_obs + (zp_frac * f_obs)**2), ef_obs)
ef_obs = np.where(no_seen,
sqrt(ef_obs * ef_obs + (zp_frac * (old_div(ef_obs, 2.)))**2),
ef_obs)
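# The zero-point uncertainty enters as a fractional flux error added in
# quadrature, ef_total = sqrt(ef**2 + (zp_frac*f)**2); for non-detections
# half of the 1-sigma upper limit (ef/2) stands in for the flux.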
#Add the zero-points offset
#The offsets are defined as m_new-m_old
zp_offsets = np.array(list(map(float, zp_offsets)))
zp_offsets = np.where(not_equal(zp_offsets, 0.), 10.**(-.4 * zp_offsets), 1.)
f_obs = f_obs * zp_offsets
ef_obs = ef_obs * zp_offsets
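# Sanity check of the sign convention: a zp_offset of +0.1 mag (objects
# fainter in the new system) scales the fluxes by 10**(-0.04) ~ 0.912.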
#Convert fluxes to AB if needed
for i in range(f_obs.shape[1]):
if cals[i] == 'Vega':
const = mag2flux(VegatoAB(0., filters[i]))
f_obs[:, i] = f_obs[:, i] * const
ef_obs[:, i] = ef_obs[:, i] * const
elif cals[i] == 'AB':
continue
else:
print('AB or Vega?. Check ' + col_file + ' file')
sys.exit()
#Get m_0 (if present)
if 'M_0' in col_pars.d:
m_0_col = int(col_pars.d['M_0']) - 1
m_0 = get_data(obs_file, m_0_col)
m_0 += pars.d['DELTA_M_0']
#Get the objects ID (as a string)
if 'ID' in col_pars.d:
# print col_pars.d['ID']
id_col = int(col_pars.d['ID']) - 1
id = get_str(obs_file, id_col)
else:
id = list(map(str, list(range(1, len(f_obs[:, 0]) + 1))))
#Get spectroscopic redshifts (if present)
if 'Z_S' in col_pars.d:
z_s_col = int(col_pars.d['Z_S']) - 1
z_s = get_data(obs_file, z_s_col)
#Get the X,Y coordinates
if 'X' in col_pars.d:
datos = col_pars.d['X']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
x_col = int(col_pars.d['X']) - 1
x = get_data(obs_file, x_col)
if 'Y' in col_pars.d:
datos = col_pars.d['Y']
if len(datos) == 1: # OTHERWISE IT'S A FILTER!
y_col = int(datos) - 1
y = get_data(obs_file, y_col)
#If 'check' on, initialize some variables
check = pars.d['CHECK']
# This generates a file with m,z,T and observed/expected colors
#if check=='yes': pars.d['FLUX_COMPARISON']=root+'.flux_comparison'
checkSED = check != 'no'
ng = f_obs.shape[0]
if checkSED:
# PHOTOMETRIC CALIBRATION CHECK
#r=np.zeros((ng,nf),float)+1.
#dm=np.zeros((ng,nf),float)+1.
#w=r*0.
# Defaults: r=1, dm=1, w=0
frat = np.ones((ng, nf), float)
dmag = np.ones((ng, nf), float)
fw = np.zeros((ng, nf), float)
#Visualize the colors of the galaxies and the templates
#When there are spectroscopic redshifts available
if interactive and 'Z_S' in col_pars.d and plots and checkSED and ask(
'Plot colors vs spectroscopic redshifts?'):
color_m = np.zeros((nz, nt, nf - 1)) * 1.
if plots == 'pylab':
figure(1)
nrows = 2
ncols = old_div((nf - 1), nrows)
if (nf - 1) % nrows: ncols += 1
for i in range(nf - 1):
##plot=FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = np.greater(fml, 1e-100) * np.greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (z_s, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
subplot(nrows, ncols, i + 1)
plot(zz, colour, "bo")
elif plots == 'biggles':
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = np.greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
if plots == 'pylab':
plot(zz, colour, "r")
elif plots == 'biggles':
d = Curve(zz, colour, color='red')
plot.add(d)
if plots == 'pylab':
xlabel(r'$z$')
ylabel('%s - %s' % (filters[i], filters[i + 1]))
elif plots == 'biggles':
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
if plots == 'pylab':
show()
    inp = input('Hit Enter to continue.')
#Get other information which will go in the output file (as strings)
if 'OTHER' in col_pars.d:
if col_pars.d['OTHER'] != 'all':
other_cols = col_pars.d['OTHER']
if type(other_cols) == type((2, )):
other_cols = tuple(map(int, other_cols))
else:
other_cols = (int(other_cols), )
other_cols = [x - 1 for x in other_cols]
n_other = len(other_cols)
else:
n_other = get_2Darray(obs_file, cols='all', nrows=1).shape[1]
other_cols = list(range(n_other))
others = get_str(obs_file, other_cols)
if len(other_cols) > 1:
other = []
for j in range(len(others[0])):
lista = []
for i in range(len(others)):
lista.append(others[i][j])
other.append(join(lista))
else:
other = others
if pars.d['GET_Z'] == 'no': get_z = 0
else: get_z = 1
#Prepare the output file
out_name = pars.d['OUTPUT']
if get_z:
if os.path.exists(out_name):
os.system('cp %s %s.bak' % (out_name, out_name))
print("File %s exists. Copying it to %s.bak" % (out_name, out_name))
output = open(out_name, 'w')
if pars.d['PROBS_LITE'] == 'no': save_probs = 0
else: save_probs = 1
if pars.d['PROBS'] == 'no': save_full_probs = 0
else: save_full_probs = 1
if pars.d['PROBS2'] == 'no': save_probs2 = 0
else: save_probs2 = 1
#Include some header information
# File name and the date...
time_stamp = time.ctime(time.time())
if get_z: output.write('## File ' + out_name + ' ' + time_stamp + '\n')
#and also the parameters used to run bpz...
if get_z: output.write("""##
##Parameters used to run BPZ:
##
""")
claves = list(pars.d.keys())
claves.sort()
for key in claves:
if type(pars.d[key]) == type((1, )):
cosa = join(list(pars.d[key]), ',')
else:
cosa = str(pars.d[key])
if get_z: output.write('##' + key.upper() + '=' + cosa + '\n')
if save_full_probs:
#Shelve some info on the run
full_probs = shelve.open(pars.d['PROBS'])
full_probs['TIME'] = time_stamp
full_probs['PARS'] = pars.d
if save_probs:
probs = open(pars.d['PROBS_LITE'], 'w')
probs.write('# ID p_bayes(z) where z=arange(%.4f,%.4f,%.4f) \n' %
(zmin, zmax + dz, dz))
if save_probs2:
probs2 = open(pars.d['PROBS2'], 'w')
probs2.write(
'# id t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#probs2.write('# ID\n')
#probs2.write('# t z1 P(z1) P(z1+dz) P(z1+2*dz) ... where dz = %.4f\n' % dz)
#Use an empirical prior?
tipo_prior = pars.d['PRIOR']
useprior = 0
if 'M_0' in col_pars.d:
has_mags = 1
else:
has_mags = 0
if has_mags and tipo_prior != 'none' and tipo_prior != 'flat':
useprior = 1
#Add cluster 'spikes' to the prior?
cluster_prior = 0.
if pars.d['ZC']:
cluster_prior = 1
if type(pars.d['ZC']) == type(""): zc = np.array([float(pars.d['ZC'])])
else: zc = np.array(list(map(float, pars.d['ZC'])))
if type(pars.d['FC']) == type(""): fc = np.array([float(pars.d['FC'])])
else: fc = np.array(list(map(float, pars.d['FC'])))
fcc = add.reduce(fc)
    if fcc > 1.:
        print(fcc)
        raise ValueError('Too many galaxies in clusters!')
pi_c = np.zeros((nz, nt)) * 1.
#Go over the different cluster spikes
for i in range(len(zc)):
#We define the cluster within dz=0.01 limits
cluster_range = np.less_equal(abs(z - zc[i]), .01) * 1.
#Clip values to avoid overflow
exponente = clip(-(z - zc[i])**2 / 2. / (0.00333)**2, -700., 0.)
#Outside the cluster range g is 0
g = exp(exponente) * cluster_range
norm = add.reduce(g)
pi_c[:, 0] = pi_c[:, 0] + g / norm * fc[i]
#Go over the different types
print('We only apply the cluster prior to the early type galaxies')
for i in range(1, 3 + 2 * ninterp):
pi_c[:, i] = pi_c[:, i] + pi_c[:, 0]
#Output format
format = '%' + repr(max(5, len(id[0]))) + 's' #ID format
format = format + pars.d[
'N_PEAKS'] * ' %.3f %.3f %.3f %.3f %.5f' + ' %.3f %.3f %10.3f'
#Add header with variable names to the output file
sxhdr = """##
##Column information
##
# 1 ID"""
k = 1
if pars.d['N_PEAKS'] > 1:
for j in range(pars.d['N_PEAKS']):
sxhdr += """
# %i Z_B_%i
# %i Z_B_MIN_%i
# %i Z_B_MAX_%i
# %i T_B_%i
# %i ODDS_%i""" % (k + 1, j + 1, k + 2, j + 1, k + 3, j + 1, k + 4, j + 1,
k + 5, j + 1)
k += 5
else:
sxhdr += """
# %i Z_B
# %i Z_B_MIN
# %i Z_B_MAX
# %i T_B
# %i ODDS""" % (k + 1, k + 2, k + 3, k + 4, k + 5)
k += 5
sxhdr += """
# %i Z_ML
# %i T_ML
# %i CHI-SQUARED\n""" % (k + 1, k + 2, k + 3)
nh = k + 4
if 'Z_S' in col_pars.d:
sxhdr = sxhdr + '# %i Z_S\n' % nh
format = format + ' %.3f'
nh += 1
if has_mags:
format = format + ' %.3f'
sxhdr = sxhdr + '# %i M_0\n' % nh
nh += 1
if 'OTHER' in col_pars.d:
sxhdr = sxhdr + '# %i OTHER\n' % nh
format = format + ' %s'
nh += n_other
#print sxhdr
if get_z: output.write(sxhdr + '##\n')
odds_i = float(pars.d['ODDS'])
oi = inv_gauss_int(odds_i)
print(odds_i, oi)
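# inv_gauss_int converts the ODDS confidence level into the equivalent
# Gaussian half-width in sigmas; e.g. odds_i = 0.95 gives oi ~ 1.96, the
# familiar two-sided 95% limit.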
#Proceed to redshift estimation
if checkSED: buffer_flux_comparison = ""
if pars.d['CONVOLVE_P'] == 'yes':
# Will Convolve with a dz=0.03 gaussian to make probabilities smoother
    # This is necessary; otherwise there are too many close peaks
sigma_g = 0.03
x = np.arange(-3. * sigma_g, 3. * sigma_g + old_div(dz, 10.),
dz) # made symmetric --DC
gaus = exp(-(old_div(x, sigma_g))**2)
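    # The kernel is an unnormalized Gaussian sampled on the dz grid; its
    # normalization is irrelevant because p_bayes is renormalized after the
    # convolution (see p_bayes = old_div(p_bayes, norm) below).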
if pars.d["NMAX"] != None: ng = int(pars.d["NMAX"])
for ig in range(ng):
currentPercent = ig / ng * 100
status = "{:.3f}% of {} completed.".format(currentPercent, ng)
Printer(status)
    #Don't run BPZ on galaxies which have z_s > z_max
#if col_pars.d.has_key('Z_S'):
# if z_s[ig]<9.9 and z_s[ig]>zmax : continue
if not get_z: continue
if pars.d['COLOR'] == 'yes':
likelihood = p_c_z_t_color(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
else:
likelihood = p_c_z_t(f_obs[ig, :nf], ef_obs[ig, :nf],
f_mod[:nz, :nt, :nf])
if 0:
print(f_obs[ig, :nf])
print(ef_obs[ig, :nf])
iz_ml = likelihood.i_z_ml
t_ml = likelihood.i_t_ml
red_chi2 = old_div(likelihood.min_chi2, float(nf - 1.))
#p=likelihood.Bayes_likelihood
#likelihood.various_plots()
#print 'FULL BAYESAIN LIKELIHOOD'
p = likelihood.likelihood
if not ig:
print('ML * prior -- NOT QUITE BAYESIAN')
if pars.d[
'ONLY_TYPE'] == 'yes': #Use only the redshift information, no priors
p_i = np.zeros((nz, nt)) * 1.
j = searchsorted(z, z_s[ig])
#print j,nt,z_s[ig]
try:
p_i[j, :] = old_div(1., float(nt))
except IndexError:
pass
else:
if useprior:
if pars.d['PRIOR'] == 'lensing':
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp, x[ig], y[ig])
else:
p_i = prior(z, m_0[ig], tipo_prior, nt0, ninterp)
else:
p_i = old_div(np.ones((nz, nt), float), float(nz * nt))
if cluster_prior: p_i = (1. - fcc) * p_i + pi_c
if save_full_probs:
full_probs[id[ig]] = [z, p_i[:nz, :nt], p[:nz, :nt], red_chi2]
#Multiply the prior by the likelihood to find the final probability
pb = p_i[:nz, :nt] * p[:nz, :nt]
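    # This is the central Bayes step of BPZ (Benitez 2000), up to
    # normalization:  p(z,T|C,m)  propto  p(z,T|m) * p(C|z,T),
    # with p_i the magnitude-dependent prior and p the color likelihood.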
#plo=FramedPlot()
#for i in range(p.shape[1]):
# plo.add(Curve(z,p_i[:nz,i]/sum(sum(p_i[:nz,:]))))
#for i in range(p.shape[1]):
# plo.add(Curve(z,p[:nz,i]/sum(sum(p[:nz,:])),color='red'))
#plo.add(Curve(z,pb[:nz,-1]/sum(pb[:nz,-1]),color='blue'))
#plo.show()
#ask('More?')
#Convolve with a gaussian of width \sigma(1+z) to take into
    #account the intrinsic scatter in the redshift estimation 0.06*(1+z)
#(to be done)
#Estimate the bayesian quantities
p_bayes = add.reduce(pb[:nz, :nt], -1)
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
#Convolve with a gaussian
if pars.d['CONVOLVE_P'] == 'yes' and pars.d['ONLY_TYPE'] == 'no':
#print 'GAUSS CONV'
p_bayes = convolve(p_bayes, gaus, 1)
#print 'gaus', gaus
#print p_bayes.shape
#print argmax(p_bayes)
#print p_bayes[300:310]
# Eliminate all low level features in the prob. distribution
pmax = max(p_bayes)
p_bayes = np.where(
np.greater(p_bayes, pmax * float(pars.d['P_MIN'])), p_bayes, 0.)
norm = add.reduce(p_bayes)
p_bayes = old_div(p_bayes, norm)
if specprob:
p_spec[ig, :] = match_resol(z, p_bayes, z_spec[ig, :]) * p_spec[ig, :]
norma = add.reduce(p_spec[ig, :])
if norma == 0.: norma = 1.
p_spec[ig, :] /= norma
#vyjod=tuple([id[ig]]+list(z_spec[ig,:])+list(p_spec[ig,:])+[z_s[ig],
# int(float(other[ig]))])
vyjod = tuple([id[ig]] + list(z_spec[ig, :]) + list(p_spec[ig, :]))
formato = "%s " + 5 * " %.4f"
formato += 5 * " %.3f"
#formato+=" %4f %i"
formato += "\n"
print(formato % vyjod)
specout.write(formato % vyjod)
if pars.d['N_PEAKS'] > 1:
# Identify maxima and minima in the final probability
g_max = np.less(p_bayes[2:], p_bayes[1:-1]) * np.less(p_bayes[:-2],
p_bayes[1:-1])
g_min = np.greater(p_bayes[2:], p_bayes[1:-1]) * np.greater(p_bayes[:-2],
p_bayes[1:-1])
g_min += equal(p_bayes[1:-1], 0.) * np.greater(p_bayes[2:], 0.)
g_min += equal(p_bayes[1:-1], 0.) * np.greater(p_bayes[:-2], 0.)
i_max = compress(g_max, np.arange(nz - 2)) + 1
i_min = compress(g_min, np.arange(nz - 2)) + 1
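        # g_max/g_min flag interior local maxima/minima of p_bayes by
        # comparing each point with both neighbours; the equal(...,0.) terms
        # also count the edges of exactly-zero plateaus as minima.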
# Check that the first point and the last one are not minima or maxima,
# if they are, add them to the index arrays
if p_bayes[0] > p_bayes[1]:
i_max = concatenate([[0], i_max])
i_min = concatenate([[0], i_min])
if p_bayes[-1] > p_bayes[-2]:
i_max = concatenate([i_max, [nz - 1]])
i_min = concatenate([i_min, [nz - 1]])
if p_bayes[0] < p_bayes[1]:
i_min = concatenate([[0], i_min])
if p_bayes[-1] < p_bayes[-2]:
i_min = concatenate([i_min, [nz - 1]])
p_max = take(p_bayes, i_max)
#p_min=take(p_bayes,i_min)
p_tot = []
z_peaks = []
t_peaks = []
# Sort them by probability values
p_max, i_max = multisort(old_div(1., p_max), (p_max, i_max))
# For each maximum, define the minima which sandwich it
# Assign minima to each maximum
jm = searchsorted(i_min, i_max)
p_max = list(p_max)
for i in range(len(i_max)):
z_peaks.append([z[i_max[i]], z[i_min[jm[i] - 1]], z[i_min[jm[i]]]])
t_peaks.append(argmax(pb[i_max[i], :nt]))
p_tot.append(sum(p_bayes[i_min[jm[i] - 1]:i_min[jm[i]]]))
# print z_peaks[-1][0],f_mod[i_max[i],t_peaks[-1]-1,:nf]
if ninterp:
t_peaks = list(old_div(np.array(t_peaks), (1. + ninterp)))
if pars.d['MERGE_PEAKS'] == 'yes':
# Merge peaks which are very close 0.03(1+z)
merged = []
for k in range(len(z_peaks)):
for j in range(len(z_peaks)):
if j > k and k not in merged and j not in merged:
if abs(z_peaks[k][0] - z_peaks[j][0]) < 0.06 * (
1. + z_peaks[j][0]):
# Modify the element which receives the accretion
z_peaks[k][1] = minimum(z_peaks[k][1],
z_peaks[j][1])
z_peaks[k][2] = maximum(z_peaks[k][2],
z_peaks[j][2])
p_tot[k] += p_tot[j]
# Put the merged element in the list
merged.append(j)
#print merged
# Clean up
copia = p_tot[:]
for j in merged:
p_tot.remove(copia[j])
copia = z_peaks[:]
for j in merged:
z_peaks.remove(copia[j])
copia = t_peaks[:]
for j in merged:
t_peaks.remove(copia[j])
copia = p_max[:]
for j in merged:
p_max.remove(copia[j])
if sum(np.array(p_tot)) != 1.:
p_tot = old_div(np.array(p_tot), sum(np.array(p_tot)))
# Define the peak
iz_b = argmax(p_bayes)
zb = z[iz_b]
# OKAY, NOW THAT GAUSSIAN CONVOLUTION BUG IS FIXED
# if pars.d['ONLY_TYPE']=='yes': zb=zb-dz/2. #This corrects a small bias
# else: zb=zb-dz #This corrects another small bias --DC
#Integrate within a ~ oi*sigma interval to estimate
# the odds. (based on a sigma=pars.d['MIN_RMS']*(1+z))
#Look for the number of sigma corresponding
#to the odds_i confidence limit
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if pars.d['Z_THR'] > 0:
zo1 = float(pars.d['Z_THR'])
zo2 = float(pars.d['ZMAX'])
o = odds(p_bayes[:nz], z, zo1, zo2)
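    # The odds are the integral of the normalized p_bayes over [zo1,zo2],
    # i.e. the probability that the true redshift lies within ~oi sigma of
    # z_B (sigma = MIN_RMS*(1+z_B)); values near 1 flag single-peaked P(z).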
# Integrate within the same odds interval to find the type
# izo1=maximum(0,searchsorted(z,zo1)-1)
# izo2=minimum(nz,searchsorted(z,zo2))
# t_b=argmax(add.reduce(p[izo1:izo2,:nt],0))
it_b = argmax(pb[iz_b, :nt])
t_b = it_b + 1
if ninterp:
tt_b = old_div(float(it_b), (1. + ninterp))
tt_ml = old_div(float(t_ml), (1. + ninterp))
else:
tt_b = it_b
tt_ml = t_ml
if max(pb[iz_b, :]) < 1e-300:
print('NO CLEAR BEST t_b; ALL PROBABILITIES ZERO')
t_b = -1.
tt_b = -1.
#print it_b, t_b, tt_b, pb.shape
if 0:
print(f_mod[iz_b, it_b, :nf])
print(min(ravel(p_i)), max(ravel(p_i)))
print(min(ravel(p)), max(ravel(p)))
print(p_i[iz_b, :])
print(p[iz_b, :])
print(p_i[iz_b, it_b]) # prior
print(p[iz_b, it_b]) # chisq
print(likelihood.likelihood[iz_b, it_b])
print(likelihood.chi2[iz_b, it_b])
print(likelihood.ftt[iz_b, it_b])
print(likelihood.foo)
print()
print('t_b', t_b)
print('iz_b', iz_b)
print('nt', nt)
print(max(ravel(pb)))
impb = argmax(ravel(pb))
impbz = old_div(impb, nt)
impbt = impb % nt
print(impb, impbz, impbt)
print(ravel(pb)[impb])
print(pb.shape, (nz, nt))
print(pb[impbz, impbt])
print(pb[iz_b, it_b])
print('z, t', z[impbz], t_b)
print(t_b)
# Redshift confidence limits
z1, z2 = interval(p_bayes[:nz], z, odds_i)
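    # z1,z2 bracket the region around the peak enclosing a fraction odds_i
    # of the probability; with PHOTO_ERRORS='no' (the default) they are
    # widened to at least the +-oi*MIN_RMS*(1+z_B) window used for the odds.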
if pars.d['PHOTO_ERRORS'] == 'no':
zo1 = zb - oi * pars.d['MIN_RMS'] * (1. + zb)
zo2 = zb + oi * pars.d['MIN_RMS'] * (1. + zb)
if zo1 < z1: z1 = maximum(0., zo1)
if zo2 > z2: z2 = zo2
# Print output
if pars.d['N_PEAKS'] == 1:
salida = [id[ig], zb, z1, z2, tt_b + 1, o, z[iz_ml], tt_ml + 1,
red_chi2]
else:
salida = [id[ig]]
for k in range(pars.d['N_PEAKS']):
if k <= len(p_tot) - 1:
salida = salida + list(z_peaks[k]) + [t_peaks[k] + 1, p_tot[k]]
else:
salida += [-1., -1., -1., -1., -1.]
salida += [z[iz_ml], tt_ml + 1, red_chi2]
if 'Z_S' in col_pars.d: salida.append(z_s[ig])
if has_mags: salida.append(m_0[ig] - pars.d['DELTA_M_0'])
if 'OTHER' in col_pars.d: salida.append(other[ig])
if get_z: output.write(format % tuple(salida) + '\n')
if pars.d['VERBOSE'] == 'yes': print(format % tuple(salida))
#try:
# if sometrue(np.greater(z_peaks,7.5)):
# connect(z,p_bayes)
# ask('More?')
#except:
# pass
odd_check = odds_i
if checkSED:
ft = f_mod[iz_b, it_b, :]
fo = f_obs[ig, :]
efo = ef_obs[ig, :]
dfosq = (old_div((ft - fo), efo))**2
if 0:
print(ft)
print(fo)
print(efo)
print(dfosq)
pause()
factor = ft / efo / efo
ftt = add.reduce(ft * factor)
fot = add.reduce(fo * factor)
am = old_div(fot, ftt)
ft = ft * am
if 0:
print(factor)
print(ftt)
print(fot)
print(am)
print(ft)
print()
pause()
flux_comparison = [id[ig], m_0[ig], z[iz_b], t_b, am] + list(
concatenate([ft, fo, efo]))
nfc = len(flux_comparison)
format_fc = '%s %.2f %.2f %i' + (nfc - 4) * ' %.3e' + '\n'
buffer_flux_comparison = buffer_flux_comparison + format_fc % tuple(
flux_comparison)
if o >= odd_check:
# PHOTOMETRIC CALIBRATION CHECK
# Calculate flux ratios, but only for objects with ODDS >= odd_check
# (odd_check = 0.95 by default)
# otherwise, leave weight w = 0 by default
eps = 1e-10
frat[ig, :] = divsafe(fo, ft, inf=eps, nan=eps)
#fw[ig,:] = np.greater(fo, 0)
fw[ig, :] = divsafe(fo, efo, inf=1e8, nan=0)
fw[ig, :] = clip(fw[ig, :], 0, 100)
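            # frat keeps the observed-to-model flux ratio and fw an
            # S/N-based weight (clipped at 100); both feed the zero-point
            # diagnostics printed at the end of the run.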
#print fw[ig,:]
#print
if 0:
bad = np.less_equal(ft, 0.)
#Avoid overflow by setting r to 0.
fo = np.where(bad, 0., fo)
ft = np.where(bad, 1., ft)
r[ig, :] = old_div(fo, ft)
try:
dm[ig, :] = -flux2mag(old_div(fo, ft))
except:
dm[ig, :] = -100
# Clip ratio between 0.01 & 100
r[ig, :] = np.where(np.greater(r[ig, :], 100.), 100., r[ig, :])
r[ig, :] = np.where(np.less_equal(r[ig, :], 0.), 0.01, r[ig, :])
#Weight by flux
w[ig, :] = np.where(np.greater(fo, 0.), 1, 0.)
#w[ig,:]=np.where(np.greater(fo,0.),fo,0.)
#print fo
#print r[ig,:]
#print
                # This is no good because r is always > 0 (has been clipped that way)
#w[ig,:]=np.where(np.greater(r[ig,:],0.),fo,0.)
# The is bad because it would include non-detections:
#w[ig,:]=np.where(np.greater(r[ig,:],0.),1.,0.)
if save_probs:
texto = '%s ' % str(id[ig])
texto += len(p_bayes) * '%.3e ' + '\n'
probs.write(texto % tuple(p_bayes))
# pb[z,t] -> p_bayes[z]
# 1. tb are summed over
# 2. convolved with Gaussian if CONVOLVE_P
# 3. Clipped above P_MIN * max(P), where P_MIN = 0.01 by default
# 4. normalized such that sum(P(z)) = 1
if save_probs2: # P = exp(-chisq / 2)
#probs2.write('%s\n' % id[ig])
pmin = pmax * float(pars.d['P_MIN'])
#pb = np.where(np.less(pb,pmin), 0, pb)
chisq = -2 * log(pb)
for itb in range(nt):
chisqtb = chisq[:, itb]
pqual = np.greater(pb[:, itb], pmin)
chisqlists = seglist(chisqtb, pqual)
if len(chisqlists) == 0:
continue
#print pb[:,itb]
#print chisqlists
zz = np.arange(zmin, zmax + dz, dz)
zlists = seglist(zz, pqual)
for i in range(len(zlists)):
probs2.write('%s %2d %.3f ' %
(id[ig], itb + 1, zlists[i][0]))
fmt = len(chisqlists[i]) * '%4.2f ' + '\n'
probs2.write(fmt % tuple(chisqlists[i]))
#fmt = len(chisqtb) * '%4.2f '+'\n'
#probs2.write('%d ' % itb)
#probs2.write(fmt % tuple(chisqtb))
#if checkSED: open(pars.d['FLUX_COMPARISON'],'w').write(buffer_flux_comparison)
if checkSED: open(pars.d['CHECK'], 'w').write(buffer_flux_comparison)
if get_z: output.close()
#if checkSED and get_z:
if checkSED:
#try:
if 1:
if interactive:
print("")
print("")
print("PHOTOMETRIC CALIBRATION TESTS")
# See PHOTOMETRIC CALIBRATION CHECK above
#ratios=add.reduce(w*r,0)/add.reduce(w,0)
#print "Average, weighted by flux ratios f_obs/f_model for objects with odds >= %g" % odd_check
#print len(filters)*' %s' % tuple(filters)
#print nf*' % 7.3f ' % tuple(ratios)
#print "Corresponding zero point shifts"
#print nf*' % 7.3f ' % tuple(-flux2mag(ratios))
#print
fratavg = old_div(sum(fw * frat, axis=0), sum(fw, axis=0))
dmavg = -flux2mag(fratavg)
fnobj = sum(np.greater(fw, 0), axis=0)
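        # dmavg converts the weighted mean flux ratio <f_obs/f_model> back
        # into a magnitude offset -- the zp_offset that, added to the
        # photometry, would reconcile data and models in that filter.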
#print 'fratavg', fratavg
#print 'dmavg', dmavg
#print 'fnobj', fnobj
#fnobj = sum(np.greater(w[:,i],0))
print(
"If the dmag are large, add them to the .columns file (zp_offset), then re-run BPZ.")
print(
"(For better results, first re-run with -ONLY_TYPE yes to fit SEDs to known spec-z.)")
print()
print(' fo/ft dmag nobj filter')
#print nf
for i in range(nf):
print('% 7.3f % 7.3f %5d %s'\
% (fratavg[i], dmavg[i], fnobj[i], filters[i]))
#% (ratios[i], -flux2mag(ratios)[i], sum(np.greater(w[:,i],0)), filters[i])
#print ' fo/ft dmag filter'
#for i in range(nf):
# print '% 7.3f % 7.3f %s' % (ratios[i], -flux2mag(ratios)[i], filters[i])
print(
"fo/ft = Average f_obs/f_model weighted by f_obs/ef_obs for objects with ODDS >= %g"
% odd_check)
print(
"dmag = magnitude offset which should be applied (added) to the photometry (zp_offset)")
print(
"nobj = # of galaxies considered in that filter (detected and high ODDS >= %g)"
% odd_check)
# print r
# print w
#print
#print "Number of galaxies considered (with ODDS >= %g):" % odd_check
#print ' ', sum(np.greater(w,0)) / float(nf)
#print '(Note a galaxy detected in only 5 / 6 filters counts as 5/6 = 0.833)'
#print sum(np.greater(w,0))
#This part is experimental and may not work in the general case
#print "Median color offsets for objects with odds > "+`odd_check`+" (not weighted)"
#print len(filters)*' %s' % tuple(filters)
#r=flux2mag(r)
#print nf*' %.3f ' % tuple(-median(r))
#print nf*' %.3f ' % tuple(median(dm))
#rms=[]
#efobs=[]
#for j in range(nf):
# ee=np.where(np.greater(f_obs[:,j],0.),f_obs[:,j],2.)
# zz=e_frac2mag(ef_obs[:,j]/ee)
#
# xer=np.arange(0.,1.,.02)
# hr=hist(abs(r[:,j]),xer)
# hee=hist(zz,xer)
# rms.append(std_log(compress(np.less_equal(r[:,j],1.),r[:,j])))
# zz=compress(np.less_equal(zz,1.),zz)
# efobs.append(sqrt(mean(zz*zz)))
#print nf*' %.3f ' % tuple(rms)
#print nf*' %.3f ' % tuple(efobs)
#print nf*' %.3f ' % tuple(sqrt(abs(array(rms)**2-array(efobs)**2)))
#except: pass
if save_full_probs: full_probs.close()
if save_probs: probs.close()
if save_probs2: probs2.close()
if plots and checkSED:
zb, zm, zb1, zb2, o, tb = get_data(out_name, (1, 6, 2, 3, 5, 4))
#Plot the comparison between z_spec and z_B
if 'Z_S' in col_pars.d:
if not interactive or ask('Compare z_B vs z_spec?'):
good = np.less(z_s, 9.99)
print(
'Total initial number of objects with spectroscopic redshifts= ',
sum(good))
od_th = 0.
if ask('Select for galaxy characteristics?\n'):
od_th = eval(input('Odds threshold?\n'))
good *= np.greater_equal(o, od_th)
t_min = eval(input('Minimum spectral type\n'))
t_max = eval(input('Maximum spectral type\n'))
good *= np.less_equal(tb, t_max) * np.greater_equal(tb, t_min)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * np.less_equal(m_0, mg_max) * np.greater_equal(
m_0, mg_min)
zmo, zso, zbo, zb1o, zb2o, tb = multicompress(good, (zm, z_s, zb,
zb1, zb2, tb))
print('Number of objects with odds > %.2f= %i ' %
(od_th, len(zbo)))
deltaz = old_div((zso - zbo), (1. + zso))
sz = stat_robust(deltaz, 3., 3)
sz.run()
outliers = np.greater_equal(abs(deltaz), 3. * sz.rms)
print('Number of outliers [dz >%.2f*(1+z)]=%i' %
(3. * sz.rms, add.reduce(outliers)))
catastrophic = np.greater_equal(deltaz * (1. + zso), 1.)
n_catast = sum(catastrophic)
print('Number of catastrophic outliers [dz >1]=', n_catast)
print('Delta z/(1+z) = %.4f +- %.4f' % (sz.median, sz.rms))
if interactive and plots:
if plots == 'pylab':
figure(2)
subplot(211)
plot(
np.arange(
min(zso), max(zso) + 0.01, 0.01), np.arange(
min(zso), max(zso) + 0.01, 0.01), "r")
errorbar(zso,
zbo, [abs(zbo - zb1o), abs(zb2o - zbo)],
fmt="bo")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{bpz}$')
subplot(212)
plot(zso, zmo, "go", zso, zso, "r")
xlabel(r'$z_{spec}$')
ylabel(r'$z_{ML}$')
show()
elif plots == 'biggles':
plot = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot.add(Points(zso, zbo, symboltype=symbol, color='blue'))
plot.add(Curve(zso, zso, linewidth=2., color='red'))
plot.add(ErrorBarsY(zso, zb1o, zb2o))
plot.xlabel = r'$z_{spec}$'
plot.ylabel = r'$z_{bpz}$'
# plot.xrange=0.,1.5
# plot.yrange=0.,1.5
plot.show()
#
plot_ml = FramedPlot()
if len(zso) > 2000: symbol = 'dot'
else: symbol = 'circle'
plot_ml.add(Points(
zso, zmo, symboltype=symbol,
color='blue'))
plot_ml.add(Curve(zso, zso, linewidth=2., color='red'))
plot_ml.xlabel = r"$z_{spec}$"
plot_ml.ylabel = r"$z_{ML}$"
plot_ml.show()
if interactive and plots and ask('Plot Bayesian photo-z histogram?'):
if plots == 'biggles':
dz = eval(input('Redshift interval?\n'))
od_th = eval(input('Odds threshold?\n'))
good = np.greater_equal(o, od_th)
if has_mags:
mg_min = eval(input('Bright magnitude limit?\n'))
mg_max = eval(input('Faint magnitude limit?\n'))
good = good * np.less_equal(m_0, mg_max) * np.greater_equal(m_0,
mg_min)
z = compress(good, zb)
xz = np.arange(zmin, zmax, dz)
hz = hist(z, xz)
plot = FramedPlot()
h = Histogram(hz, 0., dz, color='blue')
plot.add(h)
plot.xlabel = r'$z_{bpz}$'
plot.ylabel = r'$N(z_{bpz})$'
plot.show()
if ask('Want to save plot as eps file?'):
                file = input('File name?\n')
if file[-2:] != 'ps': file = file + '.eps'
plot.save_as_eps(file)
if interactive and plots and ask(
'Compare colors with photometric redshifts?'):
if plots == 'biggles':
color_m = np.zeros((nz, nt, nf - 1)) * 1.
for i in range(nf - 1):
plot = FramedPlot()
# Check for overflows
fmu = f_obs[:, i + 1]
fml = f_obs[:, i]
good = np.greater(fml, 1e-100) * np.greater(fmu, 1e-100)
zz, fmu, fml = multicompress(good, (zb, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Points(zz, colour, color='blue')
plot.add(d)
for it in range(nt):
#Prevent overflows
fmu = f_mod[:, it, i + 1]
fml = f_mod[:, it, i]
good = np.greater(fml, 1e-100)
zz, fmu, fml = multicompress(good, (z, fmu, fml))
colour = old_div(fmu, fml)
colour = clip(colour, 1e-5, 1e5)
colour = 2.5 * log10(colour)
d = Curve(zz, colour, color='red')
plot.add(d)
plot.xlabel = r'$z$'
plot.ylabel = '%s - %s' % (filters[i], filters[i + 1])
plot.save_as_eps('%s-%s.eps' % (filters[i], filters[i + 1]))
plot.show()
rolex.check()
| [
"coetools.pause",
"past.utils.old_div",
"numpy.greater",
"numpy.ones",
"numpy.arange",
"sys.stdout.flush",
"glob.glob",
"builtins.range",
"os.path.join",
"pylab.title",
"os.path.exists",
"coetools.params_cl",
"numpy.less",
"numpy.less_equal",
"os.path.basename",
"pylab.ioff",
"shelve... | [((1410, 1421), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1419, 1421), False, 'import os\n'), ((8163, 8191), 'glob.glob', 'glob.glob', (["(fil_dir + '*.res')"], {}), "(fil_dir + '*.res')\n", (8172, 8191), False, 'import glob\n'), ((8365, 8393), 'glob.glob', 'glob.glob', (["(sed_dir + '*.sed')"], {}), "(sed_dir + '*.sed')\n", (8374, 8393), False, 'import glob\n'), ((8549, 8575), 'glob.glob', 'glob.glob', (["(ab_dir + '*.AB')"], {}), "(ab_dir + '*.AB')\n", (8558, 8575), False, 'import glob\n'), ((9580, 9620), 'os.path.join', 'os.path.join', (['homedir', "pars.d['SPECTRA']"], {}), "(homedir, pars.d['SPECTRA'])\n", (9592, 9620), False, 'import os\n'), ((9979, 9988), 'builtins.range', 'range', (['nt'], {}), '(nt)\n', (9984, 9988), False, 'from builtins import range\n'), ((17928, 17965), 'numpy.where', 'np.where', (['no_observed', '(1e+108)', 'ef_obs'], {}), '(no_observed, 1e+108, ef_obs)\n', (17936, 17965), True, 'import numpy as np\n'), ((18613, 18634), 'builtins.range', 'range', (['f_obs.shape[1]'], {}), '(f_obs.shape[1])\n', (18618, 18634), False, 'from builtins import range\n'), ((27941, 27950), 'builtins.range', 'range', (['ng'], {}), '(ng)\n', (27946, 27950), False, 'from builtins import range\n'), ((4258, 4291), 'os.path.splitext', 'os.path.splitext', (["pars.d['INPUT']"], {}), "(pars.d['INPUT'])\n", (4274, 4291), False, 'import os\n'), ((4771, 4782), 'coetools.params_cl', 'params_cl', ([], {}), '()\n', (4780, 4782), False, 'from coetools import pause, params_cl\n'), ((5949, 5959), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5957, 5959), False, 'import sys\n'), ((6150, 6160), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6158, 6160), False, 'import sys\n'), ((7806, 7836), 'numpy.arange', 'np.arange', (['zmin', '(zmax + dz)', 'dz'], {}), '(zmin, zmax + dz, dz)\n', (7815, 7836), True, 'import numpy as np\n'), ((8245, 8276), 'os.path.basename', 'os.path.basename', (['filters_db[i]'], {}), '(filters_db[i])\n', (8261, 8276), False, 'import os\n'), ((8439, 8466), 'os.path.basename', 'os.path.basename', (['sed_db[i]'], {}), '(sed_db[i])\n', (8455, 8466), False, 'import os\n'), ((8619, 8645), 'os.path.basename', 'os.path.basename', (['ab_db[i]'], {}), '(ab_db[i])\n', (8635, 8645), False, 'import os\n'), ((9628, 9656), 'os.path.exists', 'os.path.exists', (['spectra_file'], {}), '(spectra_file)\n', (9642, 9656), False, 'import os\n'), ((9677, 9717), 'os.path.join', 'os.path.join', (['sed_dir', "pars.d['SPECTRA']"], {}), "(sed_dir, pars.d['SPECTRA'])\n", (9689, 9717), False, 'import os\n'), ((9927, 9949), 'numpy.zeros', 'np.zeros', (['(nz, nt, nf)'], {}), '((nz, nt, nf))\n', (9935, 9949), True, 'import numpy as np\n'), ((10004, 10013), 'builtins.range', 'range', (['nf'], {}), '(nf)\n', (10009, 10013), False, 'from builtins import range\n'), ((13108, 13121), 'numpy.arange', 'np.arange', (['nz'], {}), '(nz)\n', (13117, 13121), True, 'import numpy as np\n'), ((16541, 16570), 'numpy.where', 'np.where', (['no_seen', '(0.0)', 'f_obs'], {}), '(no_seen, 0.0, f_obs)\n', (16549, 16570), True, 'import numpy as np\n'), ((16686, 16719), 'numpy.where', 'np.where', (['no_observed', '(0.0)', 'f_obs'], {}), '(no_observed, 0.0, f_obs)\n', (16694, 16719), True, 'import numpy as np\n'), ((16732, 16766), 'numpy.where', 'np.where', (['no_observed', '(0.0)', 'ef_obs'], {}), '(no_observed, 0.0, ef_obs)\n', (16740, 16766), True, 'import numpy as np\n'), ((17142, 17161), 'numpy.less', 'np.less', (['f_obs', '(0.0)'], {}), '(f_obs, 0.0)\n', (17149, 17161), True, 'import numpy as np\n'), ((17222, 17241), 
'numpy.less', 'np.less', (['f_obs', '(0.0)'], {}), '(f_obs, 0.0)\n', (17229, 17241), True, 'import numpy as np\n'), ((17402, 17424), 'numpy.greater', 'np.greater', (['f_obs', '(0.0)'], {}), '(f_obs, 0.0)\n', (17412, 17424), True, 'import numpy as np\n'), ((17426, 17449), 'numpy.greater', 'np.greater', (['ef_obs', '(0.0)'], {}), '(ef_obs, 0.0)\n', (17436, 17449), True, 'import numpy as np\n'), ((17478, 17501), 'numpy.greater', 'np.greater', (['ef_obs', '(0.0)'], {}), '(ef_obs, 0.0)\n', (17488, 17501), True, 'import numpy as np\n'), ((20269, 20293), 'numpy.ones', 'np.ones', (['(ng, nf)', 'float'], {}), '((ng, nf), float)\n', (20276, 20293), True, 'import numpy as np\n'), ((20305, 20329), 'numpy.ones', 'np.ones', (['(ng, nf)', 'float'], {}), '((ng, nf), float)\n', (20312, 20329), True, 'import numpy as np\n'), ((20339, 20364), 'numpy.zeros', 'np.zeros', (['(ng, nf)', 'float'], {}), '((ng, nf), float)\n', (20347, 20364), True, 'import numpy as np\n'), ((20711, 20733), 'past.utils.old_div', 'old_div', (['(nf - 1)', 'nrows'], {}), '(nf - 1, nrows)\n', (20718, 20733), False, 'from past.utils import old_div\n'), ((20785, 20798), 'builtins.range', 'range', (['(nf - 1)'], {}), '(nf - 1)\n', (20790, 20798), False, 'from builtins import range\n'), ((23322, 23346), 'os.path.exists', 'os.path.exists', (['out_name'], {}), '(out_name)\n', (23336, 23346), False, 'import os\n'), ((23817, 23828), 'time.time', 'time.time', ([], {}), '()\n', (23826, 23828), False, 'import time\n'), ((24342, 24370), 'shelve.open', 'shelve.open', (["pars.d['PROBS']"], {}), "(pars.d['PROBS'])\n", (24353, 24370), False, 'import shelve\n'), ((26181, 26206), 'builtins.range', 'range', (['(1)', '(3 + 2 * ninterp)'], {}), '(1, 3 + 2 * ninterp)\n', (26186, 26206), False, 'from builtins import range\n'), ((26571, 26595), 'builtins.range', 'range', (["pars.d['N_PEAKS']"], {}), "(pars.d['N_PEAKS'])\n", (26576, 26595), False, 'from builtins import range\n'), ((30964, 30986), 'past.utils.old_div', 'old_div', (['p_bayes', 'norm'], {}), '(p_bayes, norm)\n', (30971, 30986), False, 'from past.utils import old_div\n'), ((806, 824), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (822, 824), False, 'import sys\n'), ((942, 961), 'numpy.greater', 'np.greater', (['vals', '(0)'], {}), '(vals, 0)\n', (952, 961), True, 'import numpy as np\n'), ((3777, 3800), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (3791, 3800), False, 'import matplotlib\n'), ((3899, 3912), 'pylab.plot', 'pyl.plot', (['[1]'], {}), '([1])\n', (3907, 3912), True, 'import pylab as pyl\n'), ((3921, 3951), 'pylab.title', 'pyl.title', (['"""KILL THIS WINDOW!"""'], {}), "('KILL THIS WINDOW!')\n", (3930, 3951), True, 'import pylab as pyl\n'), ((3960, 3970), 'pylab.show', 'pyl.show', ([], {}), '()\n', (3968, 3970), True, 'import pylab as pyl\n'), ((3979, 3989), 'pylab.ioff', 'pyl.ioff', ([], {}), '()\n', (3987, 3989), True, 'import pylab as pyl\n'), ((6765, 6779), 'past.utils.old_div', 'old_div', (['ns', '(2)'], {}), '(ns, 2)\n', (6772, 6779), False, 'from past.utils import old_div\n'), ((6784, 6800), 'past.utils.old_div', 'old_div', (['ns', '(2.0)'], {}), '(ns, 2.0)\n', (6791, 6800), False, 'from past.utils import old_div\n'), ((6865, 6875), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6873, 6875), False, 'import sys\n'), ((7995, 8006), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (8003, 8006), True, 'import numpy as np\n'), ((8029, 8044), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (8037, 8044), True, 'import numpy as np\n'), ((9388, 
9398), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9396, 9398), False, 'import sys\n'), ((10246, 10273), 'os.path.join', 'os.path.join', (['ab_dir', 'model'], {}), '(ab_dir, model)\n', (10258, 10273), False, 'import os\n'), ((12900, 12910), 'sys.exit', 'sys.exit', ([], {}), '()\n', (12908, 12910), False, 'import sys\n'), ((12971, 12994), 'numpy.zeros', 'np.zeros', (['(nz, nti, nf)'], {}), '((nz, nti, nf))\n', (12979, 12994), True, 'import numpy as np\n'), ((13141, 13150), 'builtins.range', 'range', (['nf'], {}), '(nf)\n', (13146, 13150), False, 'from builtins import range\n'), ((13885, 13907), 'builtins.map', 'map', (['float', 'zp_offsets'], {}), '(float, zp_offsets)\n', (13888, 13907), False, 'from builtins import map\n'), ((14287, 14309), 'numpy.greater', 'np.greater', (['f_obs', '(0.0)'], {}), '(f_obs, 0.0)\n', (14297, 14309), True, 'import numpy as np\n'), ((14311, 14332), 'numpy.less', 'np.less', (['f_obs', 'undet'], {}), '(f_obs, undet)\n', (14318, 14332), True, 'import numpy as np\n'), ((14566, 14595), 'numpy.greater_equal', 'np.greater_equal', (['ef_obs', '(0.0)'], {}), '(ef_obs, 0.0)\n', (14582, 14595), True, 'import numpy as np\n'), ((15083, 15093), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15091, 15093), False, 'import sys\n'), ((15138, 15183), 'numpy.where', 'np.where', (['seen', '(10.0 ** (-0.4 * f_obs))', 'f_obs'], {}), '(seen, 10.0 ** (-0.4 * f_obs), f_obs)\n', (15146, 15183), True, 'import numpy as np\n'), ((15634, 15696), 'numpy.where', 'np.where', (['seen', '((10.0 ** (0.4 * ef_obs) - 1.0) * f_obs)', 'ef_obs'], {}), '(seen, (10.0 ** (0.4 * ef_obs) - 1.0) * f_obs, ef_obs)\n', (15642, 15696), True, 'import numpy as np\n'), ((17062, 17082), 'numpy.less', 'np.less', (['ef_obs', '(0.0)'], {}), '(ef_obs, 0.0)\n', (17069, 17082), True, 'import numpy as np\n'), ((18023, 18044), 'builtins.map', 'map', (['float', 'zp_errors'], {}), '(float, zp_errors)\n', (18026, 18044), False, 'from builtins import map\n'), ((18412, 18434), 'builtins.map', 'map', (['float', 'zp_offsets'], {}), '(float, zp_offsets)\n', (18415, 18434), False, 'from builtins import map\n'), ((20610, 20636), 'numpy.zeros', 'np.zeros', (['(nz, nt, nf - 1)'], {}), '((nz, nt, nf - 1))\n', (20618, 20636), True, 'import numpy as np\n'), ((21056, 21073), 'past.utils.old_div', 'old_div', (['fmu', 'fml'], {}), '(fmu, fml)\n', (21063, 21073), False, 'from past.utils import old_div\n'), ((21381, 21390), 'builtins.range', 'range', (['nt'], {}), '(nt)\n', (21386, 21390), False, 'from builtins import range\n'), ((23356, 23404), 'os.system', 'os.system', (["('cp %s %s.bak' % (out_name, out_name))"], {}), "('cp %s %s.bak' % (out_name, out_name))\n", (23365, 23404), False, 'import os\n'), ((24187, 24203), 'builtins.str', 'str', (['pars.d[key]'], {}), '(pars.d[key])\n', (24190, 24203), False, 'from builtins import str\n'), ((25573, 25591), 'numpy.zeros', 'np.zeros', (['(nz, nt)'], {}), '((nz, nt))\n', (25581, 25591), True, 'import numpy as np\n'), ((37051, 37068), 'past.utils.old_div', 'old_div', (['impb', 'nt'], {}), '(impb, nt)\n', (37058, 37068), False, 'from past.utils import old_div\n'), ((37818, 37842), 'builtins.range', 'range', (["pars.d['N_PEAKS']"], {}), "(pars.d['N_PEAKS'])\n", (37823, 37842), False, 'from builtins import range\n'), ((38938, 38955), 'past.utils.old_div', 'old_div', (['fot', 'ftt'], {}), '(fot, ftt)\n', (38945, 38955), False, 'from past.utils import old_div\n'), ((41601, 41610), 'builtins.range', 'range', (['nt'], {}), '(nt)\n', (41606, 41610), False, 'from builtins import range\n'), ((13967, 13999), 
'builtins.map', 'map', (['float', "pars.d['ZP_OFFSETS']"], {}), "(float, pars.d['ZP_OFFSETS'])\n", (13970, 13999), False, 'from builtins import map\n'), ((15596, 15606), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15604, 15606), False, 'import sys\n'), ((16118, 16128), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16126, 16128), False, 'import sys\n'), ((18918, 18928), 'sys.exit', 'sys.exit', ([], {}), '()\n', (18926, 18928), False, 'import sys\n'), ((20929, 20952), 'numpy.greater', 'np.greater', (['fml', '(1e-100)'], {}), '(fml, 1e-100)\n', (20939, 20952), True, 'import numpy as np\n'), ((20955, 20978), 'numpy.greater', 'np.greater', (['fmu', '(1e-100)'], {}), '(fmu, 1e-100)\n', (20965, 20978), True, 'import numpy as np\n'), ((21514, 21537), 'numpy.greater', 'np.greater', (['fml', '(1e-100)'], {}), '(fml, 1e-100)\n', (21524, 21537), True, 'import numpy as np\n'), ((21621, 21638), 'past.utils.old_div', 'old_div', (['fmu', 'fml'], {}), '(fmu, fml)\n', (21628, 21638), False, 'from past.utils import old_div\n'), ((22318, 22349), 'builtins.input', 'input', (['"""Hit Enter to continue."""'], {}), "('Hit Enter to continue.')\n", (22323, 22349), False, 'from builtins import input\n'), ((22870, 22884), 'builtins.range', 'range', (['n_other'], {}), '(n_other)\n', (22875, 22884), False, 'from builtins import range\n'), ((27776, 27793), 'past.utils.old_div', 'old_div', (['dz', '(10.0)'], {}), '(dz, 10.0)\n', (27783, 27793), False, 'from past.utils import old_div\n'), ((29027, 29045), 'numpy.zeros', 'np.zeros', (['(nz, nt)'], {}), '((nz, nt))\n', (29035, 29045), True, 'import numpy as np\n'), ((31697, 31732), 'numpy.less', 'np.less', (['p_bayes[2:]', 'p_bayes[1:-1]'], {}), '(p_bayes[2:], p_bayes[1:-1])\n', (31704, 31732), True, 'import numpy as np\n'), ((31735, 31771), 'numpy.less', 'np.less', (['p_bayes[:-2]', 'p_bayes[1:-1]'], {}), '(p_bayes[:-2], p_bayes[1:-1])\n', (31742, 31771), True, 'import numpy as np\n'), ((31844, 31882), 'numpy.greater', 'np.greater', (['p_bayes[2:]', 'p_bayes[1:-1]'], {}), '(p_bayes[2:], p_bayes[1:-1])\n', (31854, 31882), True, 'import numpy as np\n'), ((31885, 31924), 'numpy.greater', 'np.greater', (['p_bayes[:-2]', 'p_bayes[1:-1]'], {}), '(p_bayes[:-2], p_bayes[1:-1])\n', (31895, 31924), True, 'import numpy as np\n'), ((32032, 32060), 'numpy.greater', 'np.greater', (['p_bayes[2:]', '(0.0)'], {}), '(p_bayes[2:], 0.0)\n', (32042, 32060), True, 'import numpy as np\n'), ((32104, 32133), 'numpy.greater', 'np.greater', (['p_bayes[:-2]', '(0.0)'], {}), '(p_bayes[:-2], 0.0)\n', (32114, 32133), True, 'import numpy as np\n'), ((33026, 33045), 'past.utils.old_div', 'old_div', (['(1.0)', 'p_max'], {}), '(1.0, p_max)\n', (33033, 33045), False, 'from past.utils import old_div\n'), ((38663, 38684), 'past.utils.old_div', 'old_div', (['(ft - fo)', 'efo'], {}), '(ft - fo, efo)\n', (38670, 38684), False, 'from past.utils import old_div\n'), ((38809, 38816), 'coetools.pause', 'pause', ([], {}), '()\n', (38814, 38816), False, 'from coetools import pause, params_cl\n'), ((39139, 39146), 'coetools.pause', 'pause', ([], {}), '()\n', (39144, 39146), False, 'from coetools import pause, params_cl\n'), ((40036, 40058), 'numpy.less_equal', 'np.less_equal', (['ft', '(0.0)'], {}), '(ft, 0.0)\n', (40049, 40058), True, 'import numpy as np\n'), ((40122, 40144), 'numpy.where', 'np.where', (['bad', '(0.0)', 'fo'], {}), '(bad, 0.0, fo)\n', (40130, 40144), True, 'import numpy as np\n'), ((40161, 40183), 'numpy.where', 'np.where', (['bad', '(1.0)', 'ft'], {}), '(bad, 1.0, ft)\n', (40169, 40183), True, 'import 
numpy as np\n'), ((40206, 40221), 'past.utils.old_div', 'old_div', (['fo', 'ft'], {}), '(fo, ft)\n', (40213, 40221), False, 'from past.utils import old_div\n'), ((41058, 41069), 'builtins.str', 'str', (['id[ig]'], {}), '(id[ig])\n', (41061, 41069), False, 'from builtins import str\n'), ((41668, 41696), 'numpy.greater', 'np.greater', (['pb[:, itb]', 'pmin'], {}), '(pb[:, itb], pmin)\n', (41678, 41696), True, 'import numpy as np\n'), ((41884, 41914), 'numpy.arange', 'np.arange', (['zmin', '(zmax + dz)', 'dz'], {}), '(zmin, zmax + dz, dz)\n', (41893, 41914), True, 'import numpy as np\n'), ((43866, 43875), 'builtins.range', 'range', (['nf'], {}), '(nf)\n', (43871, 43875), False, 'from builtins import range\n'), ((46485, 46503), 'numpy.less', 'np.less', (['z_s', '(9.99)'], {}), '(z_s, 9.99)\n', (46492, 46503), True, 'import numpy as np\n'), ((47593, 47622), 'past.utils.old_div', 'old_div', (['(zso - zbo)', '(1.0 + zso)'], {}), '(zso - zbo, 1.0 + zso)\n', (47600, 47622), False, 'from past.utils import old_div\n'), ((47900, 47943), 'numpy.greater_equal', 'np.greater_equal', (['(deltaz * (1.0 + zso))', '(1.0)'], {}), '(deltaz * (1.0 + zso), 1.0)\n', (47916, 47943), True, 'import numpy as np\n'), ((50240, 50266), 'numpy.greater_equal', 'np.greater_equal', (['o', 'od_th'], {}), '(o, od_th)\n', (50256, 50266), True, 'import numpy as np\n'), ((50634, 50659), 'numpy.arange', 'np.arange', (['zmin', 'zmax', 'dz'], {}), '(zmin, zmax, dz)\n', (50643, 50659), True, 'import numpy as np\n'), ((51309, 51322), 'builtins.range', 'range', (['(nf - 1)'], {}), '(nf - 1)\n', (51314, 51322), False, 'from builtins import range\n'), ((6898, 6912), 'past.utils.old_div', 'old_div', (['ns', '(2)'], {}), '(ns, 2)\n', (6905, 6912), False, 'from past.utils import old_div\n'), ((6935, 6949), 'past.utils.old_div', 'old_div', (['ns', '(2)'], {}), '(ns, 2)\n', (6942, 6949), False, 'from past.utils import old_div\n'), ((10669, 10679), 'sys.exit', 'sys.exit', ([], {}), '()\n', (10677, 10679), False, 'import sys\n'), ((11601, 11631), 'numpy.less', 'np.less', (['f_mod[:, it, jf]', '(0.0)'], {}), '(f_mod[:, it, jf], 0.0)\n', (11608, 11631), True, 'import numpy as np\n'), ((22600, 22620), 'builtins.map', 'map', (['int', 'other_cols'], {}), '(int, other_cols)\n', (22603, 22620), False, 'from builtins import map\n'), ((25294, 25318), 'builtins.map', 'map', (['float', "pars.d['ZC']"], {}), "(float, pars.d['ZC'])\n", (25297, 25318), False, 'from builtins import map\n'), ((25426, 25450), 'builtins.map', 'map', (['float', "pars.d['FC']"], {}), "(float, pars.d['FC'])\n", (25429, 25450), False, 'from builtins import map\n'), ((27853, 27872), 'past.utils.old_div', 'old_div', (['x', 'sigma_g'], {}), '(x, sigma_g)\n', (27860, 27872), False, 'from past.utils import old_div\n'), ((29500, 29524), 'numpy.ones', 'np.ones', (['(nz, nt)', 'float'], {}), '((nz, nt), float)\n', (29507, 29524), True, 'import numpy as np\n'), ((32166, 32183), 'numpy.arange', 'np.arange', (['(nz - 2)'], {}), '(nz - 2)\n', (32175, 32183), True, 'import numpy as np\n'), ((32221, 32238), 'numpy.arange', 'np.arange', (['(nz - 2)'], {}), '(nz - 2)\n', (32230, 32238), True, 'import numpy as np\n'), ((35044, 35059), 'numpy.array', 'np.array', (['p_tot'], {}), '(p_tot)\n', (35052, 35059), True, 'import numpy as np\n'), ((35096, 35111), 'numpy.array', 'np.array', (['p_tot'], {}), '(p_tot)\n', (35104, 35111), True, 'import numpy as np\n'), ((40415, 40442), 'numpy.greater', 'np.greater', (['r[ig, :]', '(100.0)'], {}), '(r[ig, :], 100.0)\n', (40425, 40442), True, 'import numpy as 
np\n'), ((40491, 40519), 'numpy.less_equal', 'np.less_equal', (['r[ig, :]', '(0.0)'], {}), '(r[ig, :], 0.0)\n', (40504, 40519), True, 'import numpy as np\n'), ((40596, 40615), 'numpy.greater', 'np.greater', (['fo', '(0.0)'], {}), '(fo, 0.0)\n', (40606, 40615), True, 'import numpy as np\n'), ((43327, 43344), 'numpy.greater', 'np.greater', (['fw', '(0)'], {}), '(fw, 0)\n', (43337, 43344), True, 'import numpy as np\n'), ((46796, 46822), 'numpy.greater_equal', 'np.greater_equal', (['o', 'od_th'], {}), '(o, od_th)\n', (46812, 46822), True, 'import numpy as np\n'), ((50137, 50166), 'builtins.input', 'input', (['"""Redshift interval?\n"""'], {}), "('Redshift interval?\\n')\n", (50142, 50166), False, 'from builtins import input\n'), ((50193, 50219), 'builtins.input', 'input', (['"""Odds threshold?\n"""'], {}), "('Odds threshold?\\n')\n", (50198, 50219), False, 'from builtins import input\n'), ((51256, 51282), 'numpy.zeros', 'np.zeros', (['(nz, nt, nf - 1)'], {}), '((nz, nt, nf - 1))\n', (51264, 51282), True, 'import numpy as np\n'), ((51635, 51652), 'past.utils.old_div', 'old_div', (['fmu', 'fml'], {}), '(fmu, fml)\n', (51642, 51652), False, 'from past.utils import old_div\n'), ((51854, 51863), 'builtins.range', 'range', (['nt'], {}), '(nt)\n', (51859, 51863), False, 'from builtins import range\n'), ((18266, 18286), 'past.utils.old_div', 'old_div', (['ef_obs', '(2.0)'], {}), '(ef_obs, 2.0)\n', (18273, 18286), False, 'from past.utils import old_div\n'), ((33600, 33617), 'numpy.array', 'np.array', (['t_peaks'], {}), '(t_peaks)\n', (33608, 33617), True, 'import numpy as np\n'), ((35117, 35132), 'numpy.array', 'np.array', (['p_tot'], {}), '(p_tot)\n', (35125, 35132), True, 'import numpy as np\n'), ((46744, 46770), 'builtins.input', 'input', (['"""Odds threshold?\n"""'], {}), "('Odds threshold?\\n')\n", (46749, 46770), False, 'from builtins import input\n'), ((46852, 46884), 'builtins.input', 'input', (['"""Minimum spectral type\n"""'], {}), "('Minimum spectral type\\n')\n", (46857, 46884), False, 'from builtins import input\n'), ((46915, 46947), 'builtins.input', 'input', (['"""Maximum spectral type\n"""'], {}), "('Maximum spectral type\\n')\n", (46920, 46947), False, 'from builtins import input\n'), ((46973, 46997), 'numpy.less_equal', 'np.less_equal', (['tb', 't_max'], {}), '(tb, t_max)\n', (46986, 46997), True, 'import numpy as np\n'), ((47000, 47027), 'numpy.greater_equal', 'np.greater_equal', (['tb', 't_min'], {}), '(tb, t_min)\n', (47016, 47027), True, 'import numpy as np\n'), ((50322, 50356), 'builtins.input', 'input', (['"""Bright magnitude limit?\n"""'], {}), "('Bright magnitude limit?\\n')\n", (50327, 50356), False, 'from builtins import input\n'), ((50388, 50421), 'builtins.input', 'input', (['"""Faint magnitude limit?\n"""'], {}), "('Faint magnitude limit?\\n')\n", (50393, 50421), False, 'from builtins import input\n'), ((50482, 50511), 'numpy.greater_equal', 'np.greater_equal', (['m_0', 'mg_min'], {}), '(m_0, mg_min)\n', (50498, 50511), True, 'import numpy as np\n'), ((50984, 51005), 'builtins.input', 'input', (['"""File name?\n"""'], {}), "('File name?\\n')\n", (50989, 51005), False, 'from builtins import input\n'), ((51493, 51516), 'numpy.greater', 'np.greater', (['fml', '(1e-100)'], {}), '(fml, 1e-100)\n', (51503, 51516), True, 'import numpy as np\n'), ((51519, 51542), 'numpy.greater', 'np.greater', (['fmu', '(1e-100)'], {}), '(fmu, 1e-100)\n', (51529, 51542), True, 'import numpy as np\n'), ((52019, 52042), 'numpy.greater', 'np.greater', (['fml', '(1e-100)'], {}), '(fml, 1e-100)\n', 
(52029, 52042), True, 'import numpy as np\n'), ((52142, 52159), 'past.utils.old_div', 'old_div', (['fmu', 'fml'], {}), '(fmu, fml)\n', (52149, 52159), False, 'from past.utils import old_div\n'), ((7131, 7145), 'past.utils.old_div', 'old_div', (['ns', '(2)'], {}), '(ns, 2)\n', (7138, 7145), False, 'from past.utils import old_div\n'), ((7161, 7175), 'past.utils.old_div', 'old_div', (['ns', '(2)'], {}), '(ns, 2)\n', (7168, 7175), False, 'from past.utils import old_div\n'), ((40277, 40292), 'past.utils.old_div', 'old_div', (['fo', 'ft'], {}), '(fo, ft)\n', (40284, 40292), False, 'from past.utils import old_div\n'), ((47091, 47125), 'builtins.input', 'input', (['"""Bright magnitude limit?\n"""'], {}), "('Bright magnitude limit?\\n')\n", (47096, 47125), False, 'from builtins import input\n'), ((47161, 47194), 'builtins.input', 'input', (['"""Faint magnitude limit?\n"""'], {}), "('Faint magnitude limit?\\n')\n", (47166, 47194), False, 'from builtins import input\n'), ((47259, 47288), 'numpy.greater_equal', 'np.greater_equal', (['m_0', 'mg_min'], {}), '(m_0, mg_min)\n', (47275, 47288), True, 'import numpy as np\n'), ((50453, 50479), 'numpy.less_equal', 'np.less_equal', (['m_0', 'mg_max'], {}), '(m_0, mg_max)\n', (50466, 50479), True, 'import numpy as np\n'), ((47230, 47256), 'numpy.less_equal', 'np.less_equal', (['m_0', 'mg_max'], {}), '(m_0, mg_max)\n', (47243, 47256), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
from visualize.feature_plots import load_unsw_feature_names
def autolabel(rects, label):
"""Attach a text label above each bar displaying its height"""
for (i, rect) in enumerate(rects):
height = rect.get_height()
plt.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%s' % label[i],
ha='center', va='bottom')
def plot_feature_importance(X, y, dirname, prefix):
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
criterion='entropy',
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
feature_names = load_unsw_feature_names()
sorted_features = [feature_names[i] for i in indices]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("Feature %s(%d) = %f" % (feature_names[indices[f]],
indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature Importances")
rects = plt.bar(range(X.shape[1]), importances[indices],
color="b", yerr=std[indices], ecolor='r', align="center")
autolabel(rects, sorted_features)
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.savefig('%s/%s_feature_importance.png' % (dirname, prefix), dpi=400)
plt.show()
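# Hypothetical usage sketch (the loader name is an assumption, not part of this
# module): X is an (n_samples, n_features) array and y the class labels.
#
#   X, y = load_unsw_dataset()  # hypothetical helper
#   plot_feature_importance(X, y, dirname='plots', prefix='unsw')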
def plot_pca_components(X, y, dirname, prefix):
pca = PCA(n_components=None, random_state=0)
pca.fit(X)
eigenvalues = pca.explained_variance_
importance = pca.explained_variance_ratio_
indices = np.argsort(eigenvalues)[::-1]
print('PCA ranking:')
for f in range(X.shape[1]):
print("%d. eigenvalue = %f" % (f, eigenvalues[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("PCA Components Ratio")
plt.bar(range(X.shape[1]), importance[indices],
color="b", align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.savefig('%s/%s_principle_components.png' % (dirname, prefix), dpi=400)
plt.show()
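# The same hypothetical inputs work for the PCA variant (y is accepted for a
# uniform signature but unused here):
#
#   plot_pca_components(X, y, dirname='plots', prefix='unsw')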
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.show",
"visualize.feature_plots.load_unsw_feature_names",
"numpy.std",
"numpy.argsort",
"sklearn.ensemble.ExtraTreesClassifier",
"matplotlib.pyplot.figure",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.savefig"
] | [((638, 713), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': '(250)', 'criterion': '"""entropy"""', 'random_state': '(0)'}), "(n_estimators=250, criterion='entropy', random_state=0)\n", (658, 713), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((859, 933), 'numpy.std', 'np.std', (['[tree.feature_importances_ for tree in forest.estimators_]'], {'axis': '(0)'}), '([tree.feature_importances_ for tree in forest.estimators_], axis=0)\n', (865, 933), True, 'import numpy as np\n'), ((1015, 1040), 'visualize.feature_plots.load_unsw_feature_names', 'load_unsw_feature_names', ([], {}), '()\n', (1038, 1040), False, 'from visualize.feature_plots import load_unsw_feature_names\n'), ((1391, 1403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1401, 1403), True, 'import matplotlib.pyplot as plt\n'), ((1408, 1440), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importances"""'], {}), "('Feature Importances')\n", (1417, 1440), True, 'import matplotlib.pyplot as plt\n'), ((1665, 1691), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, X.shape[1]]'], {}), '([-1, X.shape[1]])\n', (1673, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1696, 1768), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s_feature_importance.png' % (dirname, prefix))"], {'dpi': '(400)'}), "('%s/%s_feature_importance.png' % (dirname, prefix), dpi=400)\n", (1707, 1768), True, 'import matplotlib.pyplot as plt\n'), ((1773, 1783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1781, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1844, 1882), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'None', 'random_state': '(0)'}), '(n_components=None, random_state=0)\n', (1847, 1882), False, 'from sklearn.decomposition import PCA\n'), ((2212, 2224), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2222, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2229, 2262), 'matplotlib.pyplot.title', 'plt.title', (['"""PCA Components Ratio"""'], {}), "('PCA Components Ratio')\n", (2238, 2262), True, 'import matplotlib.pyplot as plt\n'), ((2401, 2427), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, X.shape[1]]'], {}), '([-1, X.shape[1]])\n', (2409, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2432, 2506), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s/%s_principle_components.png' % (dirname, prefix))"], {'dpi': '(400)'}), "('%s/%s_principle_components.png' % (dirname, prefix), dpi=400)\n", (2443, 2506), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2521), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2519, 2521), True, 'import matplotlib.pyplot as plt\n'), ((965, 988), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (975, 988), True, 'import numpy as np\n'), ((2001, 2024), 'numpy.argsort', 'np.argsort', (['eigenvalues'], {}), '(eigenvalues)\n', (2011, 2024), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""retrieval_ft"""
import random
import numpy as np
from cytoolz import concat
from config import config
from toolz.sandbox import unzip
from .data_three import DetectFeatTxtTokTwoDataset, get_ids_three
from .data import pad_tensors, pad_tensors_pos, get_gather_index
from .utils import pad_sequence
def _has_overlap(la, lb):
if len(la) < len(lb):
la, lb = lb, la
s = set(la)
return any(b in s for b in lb)
def sample_negative(sample_pool, ground_truths, num_sample):
""" random and retry """
outputs = ground_truths[:1]
while _has_overlap(outputs, ground_truths):
outputs = random.sample(sample_pool, num_sample)
return outputs
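# Minimal sketch of the contract (ids are hypothetical): the returned sample
# never intersects the ground truths.
#
#   sample_negative(['a', 'b', 'c', 'd'], ['a'], 2)  # -> e.g. ['c', 'd']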
class ItmFlickrRankDataset(DetectFeatTxtTokTwoDataset):
"""
ItmFlickrRankDataset
"""
def __init__(self, ids_path, txt_db, img_db, neg_sample_size=1):
        assert neg_sample_size > 0, \
            "ItmRankDataset needs at least 1 negative sample"
        super().__init__(ids_path, txt_db, img_db, use_video=False)
        self.ids = get_ids_three(ids_path)
self.neg_sample_size = neg_sample_size
def __getitem__(self, i):
gt_txt_id = self.ids[i]
gt_img_fname = self.ids[i]
id_pairs = [(gt_txt_id, gt_img_fname)]
# sample negatives
neg_sample_img_ids = sample_negative(
self.ids, [gt_img_fname], self.neg_sample_size)
neg_sample_txt_ids = sample_negative(
self.ids, [gt_txt_id], self.neg_sample_size)
id_pairs.extend([(gt_txt_id, neg_img_id)
for neg_img_id in neg_sample_img_ids] +
[(neg_txt_id, gt_img_fname)
for neg_txt_id in neg_sample_txt_ids])
inputs = self._collect_inputs(id_pairs)
assert len(inputs) == (1 + 2 * self.neg_sample_size)
return inputs
def _collect_inputs(self, id_pairs):
"""
collect_inputs
"""
# create input features
inputs = []
for txt_id, img_id in id_pairs:
example = self.txt_db[txt_id]
# text input
input_ids = example['input_ids'][:config.MAX_TEXT_LEN]
input_ids = self.txt_db.combine_inputs(input_ids)
# img input
img_feat, img_pos_feat, _ = self._get_img_feat(img_id)
# mask
attn_masks = np.ones(len(input_ids) + img_feat.shape[0], dtype=np.int64)
inputs.append((input_ids, img_feat, img_pos_feat, attn_masks))
return inputs
def itm_rank_collate(inputs):
"""
itm_rank_collate
"""
(input_ids, img_feats, img_pos_feats, attn_masks,
) = map(list, unzip(concat(i for i in inputs)))
txt_lens = [i.shape[0] for i in input_ids]
input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
    position_ids = np.expand_dims(
        np.arange(0, input_ids.shape[1], dtype=np.int64), 0)
num_bbs = [f.shape[0] for f in img_feats]
img_feat = pad_tensors(img_feats, num_bbs)
img_pos_feat = pad_tensors_pos(img_pos_feats, num_bbs, img_feat)
attn_masks = pad_sequence(attn_masks, batch_first=True, padding_value=0, max_lens=90)
sample_size = len(inputs[0])
assert all(sample_size == len(i) for i in inputs)
bs, max_tl = input_ids.shape
out_size = attn_masks.shape[1]
gather_index = get_gather_index(txt_lens, num_bbs, bs, max_tl, out_size)
batch = {'input_ids': input_ids,
'position_ids': position_ids,
'img_feat': img_feat,
'img_pos_feat': img_pos_feat,
'attn_masks': attn_masks,
'gather_index': gather_index,
'sample_size': sample_size}
return batch
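# Hypothetical usage sketch: build the dataset and let a data loader feed
# lists of items into the collate function.
#
#   ds = ItmFlickrRankDataset(ids_path, txt_db, img_db, neg_sample_size=1)
#   batch = itm_rank_collate([ds[i] for i in range(4)])
#   # batch['input_ids'], batch['img_feat'], ... are padded to common shapes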
| [
"random.sample",
"numpy.arange",
"cytoolz.concat"
] | [((1287, 1325), 'random.sample', 'random.sample', (['sample_pool', 'num_sample'], {}), '(sample_pool, num_sample)\n', (1300, 1325), False, 'import random\n'), ((3552, 3600), 'numpy.arange', 'np.arange', (['(0)', 'input_ids.shape[1]'], {'dtype': 'np.int64'}), '(0, input_ids.shape[1], dtype=np.int64)\n', (3561, 3600), True, 'import numpy as np\n'), ((3368, 3393), 'cytoolz.concat', 'concat', (['(i for i in inputs)'], {}), '(i for i in inputs)\n', (3374, 3393), False, 'from cytoolz import concat\n')] |
import tensorflow as tf
from src.utils.read_dataset import read_and_parse_sharded_dataset, parse_camera_tfrecord_example
from absl import app
from absl import flags
from time import time
import numpy as np
import os
tf.enable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_string('inference_graph_path', 'saved_models/optimized_faster_rcnn/frozen_inference_graph.pb',
'Path to the inference graph')
flags.DEFINE_string('dataset_file_pattern', "data/camera_data/training/*",
'TFRecord file containing ground truths and detections')
flags.DEFINE_string('metrics_file', None, "Metrics csv file to write average inference time")
flags.DEFINE_integer('num_images', 1000, "Number of images to test")
flags.DEFINE_integer('gpu_device', None, 'Select GPU device')
tf.flags.DEFINE_integer('num_additional_channels', 0, 'Number of additional channels to use')
# Load the Tensorflow model into memory.
def load_detection_graph(frozen_graph_path):
detection_graph = tf.Graph()
with detection_graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.GFile(frozen_graph_path, 'rb') as fid:
serialized_graph = fid.read()
graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(graph_def, name='')
# tf.train.import_meta_graph(graph_def)
return detection_graph
def read_encoded_image(data, num_additional_channels):
image = tf.image.decode_jpeg(data['image/encoded'])
if num_additional_channels > 0:
additional_channels = tf.image.decode_jpeg(data['image/additional_channels/encoded'])
image = tf.concat([image, additional_channels], axis=2)
return image.numpy()
def run_inference_for_single_image(sess, detection_graph, image):
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# The model expects a batch of images, so add an axis
image_expanded = np.expand_dims(image, axis=0)
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Run inference
inference_times = []
for i in range(3):
t1 = time()
(boxes, scores, classes, n_detections) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
t2 = time()
inference_times.append(t2-t1)
# print(t2-t1)
# print("Average inference time (ms) :", np.mean(inference_times[1:]))
avg_inf_time = np.mean(inference_times[1:])
num_detections = int(n_detections)
# Take out batch dimension and get first num_detections
boxes = np.squeeze(boxes)[:num_detections]
scores = np.squeeze(scores)[:num_detections]
classes = np.squeeze(classes)[:num_detections].astype(np.int32)
return boxes, scores, classes, num_detections, avg_inf_time
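# Hypothetical standalone usage (the .pb path and image array are assumptions):
#
#   graph = load_detection_graph('frozen_inference_graph.pb')
#   with tf.Session(graph=graph) as sess:
#       boxes, scores, classes, n, t_avg = run_inference_for_single_image(
#           sess, graph, image)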
def main(_):
if FLAGS.gpu_device:
os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu_device)
detection_graph = load_detection_graph(FLAGS.inference_graph_path)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(graph=detection_graph, config=config)
dataset = read_and_parse_sharded_dataset(FLAGS.dataset_file_pattern,
additional_channels=bool(FLAGS.num_additional_channels))
inference_times = []
for i, d in enumerate(dataset):
image = read_encoded_image(d, 0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num, avg_inf_time) = run_inference_for_single_image(sess, detection_graph, image)
inference_times.append(avg_inf_time)
if i == FLAGS.num_images:
break
avg_time = np.mean(inference_times[1:])
print("AVERAGE INFERENCE TIME:%.6f" % avg_time)
    if FLAGS.metrics_file:
        with open(FLAGS.metrics_file, 'a') as f:
            f.write("INFERENCE TIME,%.6f" % avg_time)
if __name__ == '__main__':
app.run(main)
| [
"tensorflow.Session",
"numpy.expand_dims",
"tensorflow.GraphDef",
"tensorflow.concat",
"absl.flags.DEFINE_string",
"tensorflow.ConfigProto",
"time.time",
"numpy.mean",
"absl.flags.DEFINE_integer",
"absl.app.run",
"tensorflow.Graph",
"tensorflow.enable_eager_execution",
"numpy.squeeze",
"te... | [((217, 244), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (242, 244), True, 'import tensorflow as tf\n'), ((266, 412), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""inference_graph_path"""', '"""saved_models/optimized_faster_rcnn/frozen_inference_graph.pb"""', '"""Path to the inference graph"""'], {}), "('inference_graph_path',\n 'saved_models/optimized_faster_rcnn/frozen_inference_graph.pb',\n 'Path to the inference graph')\n", (285, 412), False, 'from absl import flags\n'), ((425, 560), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_file_pattern"""', '"""data/camera_data/training/*"""', '"""TFRecord file containing ground truths and detections"""'], {}), "('dataset_file_pattern', 'data/camera_data/training/*',\n 'TFRecord file containing ground truths and detections')\n", (444, 560), False, 'from absl import flags\n'), ((577, 674), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""metrics_file"""', 'None', '"""Metrics csv file to write average inference time"""'], {}), "('metrics_file', None,\n 'Metrics csv file to write average inference time')\n", (596, 674), False, 'from absl import flags\n'), ((671, 739), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_images"""', '(1000)', '"""Number of images to test"""'], {}), "('num_images', 1000, 'Number of images to test')\n", (691, 739), False, 'from absl import flags\n'), ((740, 801), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""gpu_device"""', 'None', '"""Select GPU device"""'], {}), "('gpu_device', None, 'Select GPU device')\n", (760, 801), False, 'from absl import flags\n'), ((802, 899), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""num_additional_channels"""', '(0)', '"""Number of additional channels to use"""'], {}), "('num_additional_channels', 0,\n 'Number of additional channels to use')\n", (825, 899), True, 'import tensorflow as tf\n'), ((1006, 1016), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1014, 1016), True, 'import tensorflow as tf\n'), ((1449, 1492), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (["data['image/encoded']"], {}), "(data['image/encoded'])\n", (1469, 1492), True, 'import tensorflow as tf\n'), ((2049, 2078), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2063, 2078), True, 'import numpy as np\n'), ((3195, 3223), 'numpy.mean', 'np.mean', (['inference_times[1:]'], {}), '(inference_times[1:])\n', (3202, 3223), True, 'import numpy as np\n'), ((3745, 3761), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3759, 3761), True, 'import tensorflow as tf\n'), ((3816, 3864), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph', 'config': 'config'}), '(graph=detection_graph, config=config)\n', (3826, 3864), True, 'import tensorflow as tf\n'), ((4695, 4708), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (4702, 4708), False, 'from absl import app\n'), ((1076, 1089), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1087, 1089), True, 'import tensorflow as tf\n'), ((1559, 1622), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (["data['image/additional_channels/encoded']"], {}), "(data['image/additional_channels/encoded'])\n", (1579, 1622), True, 'import tensorflow as tf\n'), ((1639, 1686), 'tensorflow.concat', 'tf.concat', (['[image, additional_channels]'], {'axis': '(2)'}), '([image, additional_channels], axis=2)\n', (1648, 1686), True, 'import tensorflow as tf\n'), ((2815, 2821), 
'time.time', 'time', ([], {}), '()\n', (2819, 2821), False, 'from time import time\n'), ((3032, 3038), 'time.time', 'time', ([], {}), '()\n', (3036, 3038), False, 'from time import time\n'), ((3336, 3353), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3346, 3353), True, 'import numpy as np\n'), ((3384, 3402), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3394, 3402), True, 'import numpy as np\n'), ((4461, 4489), 'numpy.mean', 'np.mean', (['inference_times[1:]'], {}), '(inference_times[1:])\n', (4468, 4489), True, 'import numpy as np\n'), ((1103, 1142), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['frozen_graph_path', '"""rb"""'], {}), "(frozen_graph_path, 'rb')\n", (1117, 1142), True, 'import tensorflow as tf\n'), ((1261, 1300), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (1280, 1300), True, 'import tensorflow as tf\n'), ((3434, 3453), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3444, 3453), True, 'import numpy as np\n')] |
"""Chi distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class chi(Dist):
"""Chi distribution."""
def __init__(self, df=1):
Dist.__init__(self, df=df)
def _pdf(self, x, df):
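        # pdf of the chi distribution with df degrees of freedom:
        # f(x; df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))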
        return x**(df-1.)*numpy.exp(-x*x*0.5) \
            / (2.0**(df*0.5-1)*special.gamma(df*0.5))
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5,q))
def _bnd(self, x, df):
return 0, self._ppf(1-1e-10, df)
def _mom(self, k, df):
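        # raw moments: E[X**k] = 2**(k/2) * gamma((df+k)/2) / gamma(df/2)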
        return 2**(.5*k)*special.gamma(.5*(df+k)) \
            / special.gamma(.5*df)
class Chi(Add):
"""
Chi distribution.
Args:
df (float, Dist) : Degrees of freedom
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
Examples:
>>> distribution = chaospy.Chi(2, 4, 1)
>>> print(distribution)
Chi(df=2, scale=4, shift=1)
>>> q = numpy.linspace(0, 1, 5)
>>> print(numpy.around(distribution.inv(q), 4))
[ 1. 4.0341 5.7096 7.6604 28.1446]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0. 0.25 0.5 0.75 1. ]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0. 0.1422 0.1472 0.1041 0. ]
>>> print(numpy.around(distribution.sample(4), 4))
[ 6.8244 2.9773 10.8003 5.5892]
>>> print(numpy.around(distribution.mom(1), 4))
6.0133
>>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
[[ 7.6671 9.0688 10.2809]
[ 6.8673 12.6824 18.2126]]
"""
def __init__(self, df=1, scale=1, shift=0):
self._repr = {"df": df, "scale": scale, "shift": shift}
Add.__init__(self, left=chi(df)*scale, right=shift)
class Maxwell(Add):
"""
Maxwell-Boltzmann distribution
Chi distribution with 3 degrees of freedom
Args:
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
Examples:
>>> distribution = chaospy.Maxwell(2, 3)
>>> print(distribution)
Maxwell(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> print(numpy.around(distribution.inv(q), 4))
[ 3. 5.2023 6.0763 7.0538 17.0772]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0. 0.25 0.5 0.75 1. ]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0. 0.2638 0.2892 0.2101 0. ]
>>> print(numpy.around(distribution.sample(4), 4))
[6.6381 4.6119 8.5955 6.015 ]
>>> print(numpy.around(distribution.mom(1), 4))
6.1915
>>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
[[6.8457 7.4421 7.9834]
[1.8141 3.3964 4.8716]]
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(3)*scale, right=shift)
class Rayleigh(Add):
"""
Rayleigh distribution
Args:
scale (float, Dist) : Scaling parameter
shift (float, Dist) : Location parameter
Examples:
>>> distribution = chaospy.Rayleigh(2, 3)
>>> print(distribution)
Rayleigh(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> print(numpy.around(distribution.inv(q), 4))
[ 3. 4.5171 5.3548 6.3302 16.5723]
>>> print(numpy.around(distribution.fwd(distribution.inv(q)), 4))
[0. 0.25 0.5 0.75 1. ]
>>> print(numpy.around(distribution.pdf(distribution.inv(q)), 4))
[0. 0.2844 0.2944 0.2081 0. ]
>>> print(numpy.around(distribution.sample(4), 4))
[5.9122 3.9886 7.9001 5.2946]
>>> print(numpy.around(distribution.mom(1), 4))
5.5066
>>> print(numpy.around(distribution.ttr([1, 2, 3]), 4))
[[6.3336 7.0344 7.6405]
[1.7168 3.1706 4.5532]]
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(2)*scale, right=shift)
| [
"scipy.special.gamma",
"numpy.exp",
"scipy.special.gammainc",
"scipy.special.gammaincinv"
] | [((418, 457), 'scipy.special.gammainc', 'special.gammainc', (['(df * 0.5)', '(0.5 * x * x)'], {}), '(df * 0.5, 0.5 * x * x)\n', (434, 457), False, 'from scipy import special\n'), ((353, 376), 'scipy.special.gamma', 'special.gamma', (['(df * 0.5)'], {}), '(df * 0.5)\n', (366, 376), False, 'from scipy import special\n'), ((703, 726), 'scipy.special.gamma', 'special.gamma', (['(0.5 * df)'], {}), '(0.5 * df)\n', (716, 726), False, 'from scipy import special\n'), ((507, 539), 'scipy.special.gammaincinv', 'special.gammaincinv', (['(df * 0.5)', 'q'], {}), '(df * 0.5, q)\n', (526, 539), False, 'from scipy import special\n'), ((660, 689), 'scipy.special.gamma', 'special.gamma', (['(0.5 * (df + k))'], {}), '(0.5 * (df + k))\n', (673, 689), False, 'from scipy import special\n'), ((297, 320), 'numpy.exp', 'numpy.exp', (['(-x * x * 0.5)'], {}), '(-x * x * 0.5)\n', (306, 320), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 18 15:10:19 2017
@author: fd1014
"""
from __future__ import print_function
import types
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import granulometry
import hk
import generate_gaussianRF as grf
import timeit
matplotlib.rcParams.update({'font.size': 14, 'axes.labelsize': 16})
matplotlib.rc('font',family='Times New Roman')
def calc_order(distinct_runs, av_runs, box_size, model, x_i_range, gen_new = True, timed = True):
"""
Calculate order parameter for multiple ionization fractions, with capacity
to average over many runs.
"""
    if timed:
        start_time = timeit.default_timer()
x_i = np.linspace(x_i_range[0], x_i_range[1], num = distinct_runs, endpoint = False)
Pinf = [] # list of order parameter data for each x_i value, averaged over runs.
vol_hist = [] # list of volumes data for each x_i value
    if not gen_new:
field = grf.generate(Pk = model, size = box_size)
fieldR = field.real
av_runs = 1
for i in range(distinct_runs):
order_param = []
vols = []
for j in range(av_runs):
print('\rFilling fraction is %f (%i)' % (x_i[i], j+1), end = '')
if type(model) == list:
RD = granulometry.RandomDisks(DIM = box_size, fillingfraction = x_i[i], params = model, nooverlap = False)
box_inv = RD.box
box = np.ones(box_inv.shape) - box_inv # invert box so filled disks identified as 1's
if type(model) == types.LambdaType:
                if gen_new:
field = grf.generate(Pk = model, size = box_size)
fieldR = field.real
box = grf.binary(fieldR, x_i[i])
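            # label connected filled cells (Hoshen-Kopelman cluster labeling)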
clusters = hk.hoshen_kopelman(box)
volumes, spanning, order = hk.summary_statistics(box, clusters)
order_param.append(order)
if len(volumes) > 1:
vols = np.concatenate((vols,volumes[1:]))
Pinf.append(np.mean(order_param))
if len(vols) > 0:
hist, bin_edges = np.histogram(vols, bins = np.logspace(0, np.log10(int(np.max(vols)+3)), 20))
vol_hist.append([hist, bin_edges])
    if timed:
        elapsed = timeit.default_timer() - start_time
        print('\rCompleted in %f s' % elapsed, end = '')
return x_i, Pinf, vol_hist
def all_plots(x_i_for_size_distr, model_name = False):
# Order parameter
plt.figure(figsize = (6, 6))
plt.plot(x_i, Pinf)
plt.xlabel(r'$x_i$')
plt.ylabel(r'$\frac{P_\infty}{x_i}$')
if model_name:
plt.savefig('Pinf__runs%ix%i_size%i_model-%s_xi(%.2f-%.2f).png'
% (distinct_runs,av_runs,box_size,model_name,x_i_range[0],x_i_range[1]))
# Cluster volume distribution
for x_i_size in x_i_for_size_distr:
plt.figure(figsize = (6, 6))
bin_edges = vol_hist[int(len(vol_hist)*x_i_size)][1]
hist = np.asarray(vol_hist[int(len(vol_hist)*x_i_size)][0], dtype=float)
counts = sum(hist)
plt.plot(np.log10(bin_edges[0:-1]), np.log10(hist/counts), 'ko-', linewidth = 2, drawstyle = 'steps-mid')
plt.xlabel(r'$\log(V)$')
plt.ylabel(r'Frequency')
if model_name:
plt.savefig('vol_distr__runs%ix%i_size%i_model-%s_xi%.2f.png'
% (distinct_runs,av_runs,box_size,model_name,x_i_size))
plt.show()
# Parameters
"""
<model> can be:
[mean, variance] --> random disks with mean and variance parameters
grf.power_law(exponent) --> GRF with power law as power spectrum
grf.gaussian(mean, variance) --> GRF with gaussian power spectrum
grf.expn(scale) --> GRF with exponential power spectrum
or other lambda function.. --> GRF with arbitrary function as power spectrum
"""
distinct_runs = 20
av_runs = 20
box_size = 256
model = grf.power_law(-2.)
x_i_range = [0., 1.]
# Simulate order paramter and cluster volume distribution
x_i, Pinf, vol_hist = calc_order(distinct_runs, av_runs, box_size, model, x_i_range, gen_new = True)
all_plots(x_i_for_size_distr = [0.25, 0.50, 0.75], model_name = 'grf.power_law(-2.)')
| [
"matplotlib.rc",
"hk.summary_statistics",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.mean",
"generate_gaussianRF.generate",
"matplotlib.rcParams.update",
"granulometry.RandomDisks",
"numpy.max",
"numpy.linspace",
"numpy.log10",
"matplotlib.pyplot.show",
"generate_gaussianRF.power_law",... | [((302, 369), 'matplotlib.rcParams.update', 'matplotlib.rcParams.update', (["{'font.size': 14, 'axes.labelsize': 16}"], {}), "({'font.size': 14, 'axes.labelsize': 16})\n", (328, 369), False, 'import matplotlib\n'), ((371, 418), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {'family': '"""Times New Roman"""'}), "('font', family='Times New Roman')\n", (384, 418), False, 'import matplotlib\n'), ((4136, 4155), 'generate_gaussianRF.power_law', 'grf.power_law', (['(-2.0)'], {}), '(-2.0)\n', (4149, 4155), True, 'import generate_gaussianRF as grf\n'), ((748, 822), 'numpy.linspace', 'np.linspace', (['x_i_range[0]', 'x_i_range[1]'], {'num': 'distinct_runs', 'endpoint': '(False)'}), '(x_i_range[0], x_i_range[1], num=distinct_runs, endpoint=False)\n', (759, 822), True, 'import numpy as np\n'), ((2645, 2671), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (2655, 2671), True, 'import matplotlib.pyplot as plt\n'), ((2679, 2698), 'matplotlib.pyplot.plot', 'plt.plot', (['x_i', 'Pinf'], {}), '(x_i, Pinf)\n', (2687, 2698), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2723), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$x_i$"""'], {}), "('$x_i$')\n", (2714, 2723), True, 'import matplotlib.pyplot as plt\n'), ((2730, 2768), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\frac{P_\\\\infty}{x_i}$"""'], {}), "('$\\\\frac{P_\\\\infty}{x_i}$')\n", (2740, 2768), True, 'import matplotlib.pyplot as plt\n'), ((3623, 3633), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3631, 3633), True, 'import matplotlib.pyplot as plt\n'), ((708, 730), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (728, 730), False, 'import timeit\n'), ((1023, 1060), 'generate_gaussianRF.generate', 'grf.generate', ([], {'Pk': 'model', 'size': 'box_size'}), '(Pk=model, size=box_size)\n', (1035, 1060), True, 'import generate_gaussianRF as grf\n'), ((2418, 2440), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (2438, 2440), False, 'import timeit\n'), ((2797, 2943), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('Pinf__runs%ix%i_size%i_model-%s_xi(%.2f-%.2f).png' % (distinct_runs,\n av_runs, box_size, model_name, x_i_range[0], x_i_range[1]))"], {}), "('Pinf__runs%ix%i_size%i_model-%s_xi(%.2f-%.2f).png' % (\n distinct_runs, av_runs, box_size, model_name, x_i_range[0], x_i_range[1]))\n", (2808, 2943), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (3057, 3073), True, 'import matplotlib.pyplot as plt\n'), ((3372, 3396), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\log(V)$"""'], {}), "('$\\\\log(V)$')\n", (3382, 3396), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3429), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (3416, 3429), True, 'import matplotlib.pyplot as plt\n'), ((1918, 1941), 'hk.hoshen_kopelman', 'hk.hoshen_kopelman', (['box'], {}), '(box)\n', (1936, 1941), False, 'import hk\n'), ((1982, 2018), 'hk.summary_statistics', 'hk.summary_statistics', (['box', 'clusters'], {}), '(box, clusters)\n', (2003, 2018), False, 'import hk\n'), ((2182, 2202), 'numpy.mean', 'np.mean', (['order_param'], {}), '(order_param)\n', (2189, 2202), True, 'import numpy as np\n'), ((3266, 3291), 'numpy.log10', 'np.log10', (['bin_edges[0:-1]'], {}), '(bin_edges[0:-1])\n', (3274, 3291), True, 'import numpy as np\n'), ((3293, 3316), 'numpy.log10', 'np.log10', (['(hist / 
counts)'], {}), '(hist / counts)\n', (3301, 3316), True, 'import numpy as np\n'), ((3468, 3594), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('vol_distr__runs%ix%i_size%i_model-%s_xi%.2f.png' % (distinct_runs,\n av_runs, box_size, model_name, x_i_size))"], {}), "('vol_distr__runs%ix%i_size%i_model-%s_xi%.2f.png' % (\n distinct_runs, av_runs, box_size, model_name, x_i_size))\n", (3479, 3594), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1476), 'granulometry.RandomDisks', 'granulometry.RandomDisks', ([], {'DIM': 'box_size', 'fillingfraction': 'x_i[i]', 'params': 'model', 'nooverlap': '(False)'}), '(DIM=box_size, fillingfraction=x_i[i], params=model,\n nooverlap=False)\n', (1403, 1476), False, 'import granulometry\n'), ((1853, 1879), 'generate_gaussianRF.binary', 'grf.binary', (['fieldR', 'x_i[i]'], {}), '(fieldR, x_i[i])\n', (1863, 1879), True, 'import generate_gaussianRF as grf\n'), ((2116, 2151), 'numpy.concatenate', 'np.concatenate', (['(vols, volumes[1:])'], {}), '((vols, volumes[1:]))\n', (2130, 2151), True, 'import numpy as np\n'), ((1538, 1560), 'numpy.ones', 'np.ones', (['box_inv.shape'], {}), '(box_inv.shape)\n', (1545, 1560), True, 'import numpy as np\n'), ((1747, 1784), 'generate_gaussianRF.generate', 'grf.generate', ([], {'Pk': 'model', 'size': 'box_size'}), '(Pk=model, size=box_size)\n', (1759, 1784), True, 'import generate_gaussianRF as grf\n'), ((2326, 2338), 'numpy.max', 'np.max', (['vols'], {}), '(vols)\n', (2332, 2338), True, 'import numpy as np\n')] |
import tensorflow as tf
import time
from tensorflow.python.client import timeline
import numpy as np
import logging as log
IMAGE = tf.placeholder(tf.float64)
DEPTH = tf.placeholder(tf.float64)
def _image_op(x):
y = x ** 0.45 # gamma correction
y = tf.clip_by_value(y, 0, 1)
y = y * 255.
y = tf.cast(y, tf.uint8)
return y
def _depth_op(x):
x = x ** -(1 / 3.)
x = _normalize_op(x)
x = _heatmap_op(x)
return x
def _normalize_op(x):
amax = tf.reduce_max(x)
amin = tf.reduce_min(x)
arange = amax - amin
x = (x - amin) / arange
return x
def _heatmap_op(x):
red = x
green = 1.0 - tf.abs(0.5 - x) * 2.
blue = 1. - x
y = tf.stack([red, green, blue])
y = tf.transpose(y, (1, 2, 0))
y = tf.cast(y * 255, tf.uint8)
return y
image_op = _image_op(IMAGE)
depth_op = _depth_op(DEPTH)
def preprocess_image(image, sess, trace=False):
return _run_op(sess, image_op, IMAGE, image, trace, op_name='preprocess_image')
def preprocess_depth(depth, sess, trace=False):
return _run_op(sess, depth_op, DEPTH, depth, trace, op_name='preprocess_depth')
def _run_op(sess, op, X, x, trace=False, op_name='tf_op'):
start = time.time()
if trace:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
ret = sess.run(op, feed_dict={X: x}, options=run_options, run_metadata=run_metadata)
# Create the Timeline object, and write it to a json
tl = timeline.Timeline(run_metadata.step_stats)
ctf = tl.generate_chrome_trace_format()
with open('timeline.json', 'w') as f:
f.write(ctf)
else:
ret = sess.run(op, feed_dict={X: x})
end = time.time()
log.debug('%r took %rms', op_name, (end - start) * 1000)
return ret
def _main():
h = w = 227
import sys
log.basicConfig(level=log.DEBUG, stream=sys.stdout, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
with tf.Session() as sess:
preprocess_image(np.random.rand(h, w, 3), sess)
preprocess_image(np.random.rand(h, w, 3), sess)
preprocess_image(np.random.rand(h, w, 3), sess)
preprocess_depth(np.random.rand(h, w,), sess)
preprocess_depth(np.random.rand(h, w,), sess)
preprocess_depth(np.random.rand(h, w,), sess)
if __name__ == '__main__':
_main()
| [
"tensorflow.abs",
"logging.debug",
"tensorflow.clip_by_value",
"logging.basicConfig",
"tensorflow.Session",
"tensorflow.transpose",
"tensorflow.stack",
"tensorflow.placeholder",
"tensorflow.cast",
"time.time",
"tensorflow.RunMetadata",
"tensorflow.python.client.timeline.Timeline",
"numpy.ran... | [((133, 159), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {}), '(tf.float64)\n', (147, 159), True, 'import tensorflow as tf\n'), ((168, 194), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float64'], {}), '(tf.float64)\n', (182, 194), True, 'import tensorflow as tf\n'), ((261, 286), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (277, 286), True, 'import tensorflow as tf\n'), ((312, 332), 'tensorflow.cast', 'tf.cast', (['y', 'tf.uint8'], {}), '(y, tf.uint8)\n', (319, 332), True, 'import tensorflow as tf\n'), ((485, 501), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {}), '(x)\n', (498, 501), True, 'import tensorflow as tf\n'), ((513, 529), 'tensorflow.reduce_min', 'tf.reduce_min', (['x'], {}), '(x)\n', (526, 529), True, 'import tensorflow as tf\n'), ((695, 723), 'tensorflow.stack', 'tf.stack', (['[red, green, blue]'], {}), '([red, green, blue])\n', (703, 723), True, 'import tensorflow as tf\n'), ((732, 758), 'tensorflow.transpose', 'tf.transpose', (['y', '(1, 2, 0)'], {}), '(y, (1, 2, 0))\n', (744, 758), True, 'import tensorflow as tf\n'), ((767, 793), 'tensorflow.cast', 'tf.cast', (['(y * 255)', 'tf.uint8'], {}), '(y * 255, tf.uint8)\n', (774, 793), True, 'import tensorflow as tf\n'), ((1206, 1217), 'time.time', 'time.time', ([], {}), '()\n', (1215, 1217), False, 'import time\n'), ((1741, 1752), 'time.time', 'time.time', ([], {}), '()\n', (1750, 1752), False, 'import time\n'), ((1757, 1813), 'logging.debug', 'log.debug', (['"""%r took %rms"""', 'op_name', '((end - start) * 1000)'], {}), "('%r took %rms', op_name, (end - start) * 1000)\n", (1766, 1813), True, 'import logging as log\n'), ((1879, 1998), 'logging.basicConfig', 'log.basicConfig', ([], {'level': 'log.DEBUG', 'stream': 'sys.stdout', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=log.DEBUG, stream=sys.stdout, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (1894, 1998), True, 'import logging as log\n'), ((1254, 1305), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'trace_level': 'tf.RunOptions.FULL_TRACE'}), '(trace_level=tf.RunOptions.FULL_TRACE)\n', (1267, 1305), True, 'import tensorflow as tf\n'), ((1329, 1345), 'tensorflow.RunMetadata', 'tf.RunMetadata', ([], {}), '()\n', (1343, 1345), True, 'import tensorflow as tf\n'), ((1514, 1556), 'tensorflow.python.client.timeline.Timeline', 'timeline.Timeline', (['run_metadata.step_stats'], {}), '(run_metadata.step_stats)\n', (1531, 1556), False, 'from tensorflow.python.client import timeline\n'), ((2003, 2015), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2013, 2015), True, 'import tensorflow as tf\n'), ((648, 663), 'tensorflow.abs', 'tf.abs', (['(0.5 - x)'], {}), '(0.5 - x)\n', (654, 663), True, 'import tensorflow as tf\n'), ((2050, 2073), 'numpy.random.rand', 'np.random.rand', (['h', 'w', '(3)'], {}), '(h, w, 3)\n', (2064, 2073), True, 'import numpy as np\n'), ((2106, 2129), 'numpy.random.rand', 'np.random.rand', (['h', 'w', '(3)'], {}), '(h, w, 3)\n', (2120, 2129), True, 'import numpy as np\n'), ((2162, 2185), 'numpy.random.rand', 'np.random.rand', (['h', 'w', '(3)'], {}), '(h, w, 3)\n', (2176, 2185), True, 'import numpy as np\n'), ((2218, 2238), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (2232, 2238), True, 'import numpy as np\n'), ((2272, 2292), 'numpy.random.rand', 'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (2286, 2292), True, 'import numpy as np\n'), ((2326, 2346), 'numpy.random.rand', 
'np.random.rand', (['h', 'w'], {}), '(h, w)\n', (2340, 2346), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 14:34:08 2020
@author: <NAME> (<EMAIL>,
         Finnish Meteorological Institute)
Olli's python implementation of ESA SNAP s2toolbox biophysical processor and
computation of vegetation indices.
See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf
And java source code at
https://github.com/senbox-org/s2tbx/tree/master/s2tbx-biophysical/src/main/java/org/esa/s2tbx/biophysical
Caveats
Currently changes out-of-bounds inputs and outputs to nan (or to the min or max
value if the output is within tolerance). A future improvement would be to also
output flagging information (i.e. different flags for out-of-bounds inputs and
outputs).
Convex hull input checking is currently disabled: it is computationally slow
and its benefits are unclear. It is better to filter out bad data based on the
L2A quality info/classification and hope that averaging removes some bad
pixels.
"""
import requests
import io
import numpy as np
import xarray as xr
# url to Sentinel 2 Toolbox's auxdata
# This base_url points towards the original toolbox(not the one created by Olli)
base_url = "https://raw.githubusercontent.com/senbox-org/s2tbx/master/s2tbx-biophysical/src/main/resources/auxdata/2_1/{}/{}"
def get_fromurl(var, pattern):
"""
Fetches the contents of a text file from the base url and stores it in a ndarray.
Author: <NAME>
Parameters
----------
var (str) -- type of the product, one of FAPAR, FCOVER, LAI, LAI_Cab and LAI_Cw.
pattern (str) -- name of the file excluding the initial variable part.
Returns
-------
ndarray -- loaded with the contents of the text file.
"""
# attach variable and file name to the base url
res_url = base_url.format(var, str(var) + "%s" % str(pattern))
# make a GET request to the url to fetch the data.
res_url = requests.get(res_url)
# check the HTTP status code to see if any error has occured.
res_url.raise_for_status()
# store the contents of the url in an in-memory buffer and use it to load the ndarray.
return np.loadtxt(io.BytesIO(res_url.content), delimiter=",")
# Read SNAP Biophysical processor neural network parameters
nn_params = {}
for var in ["FAPAR", "FCOVER", "LAI", "LAI_Cab", "LAI_Cw"]:
norm_minmax = get_fromurl(var, "_Normalisation")
denorm_minmax = get_fromurl(var, "_Denormalisation")
layer1_weights = get_fromurl(var, "_Weights_Layer1_Neurons")
layer1_bias = get_fromurl(var, "_Weights_Layer1_Bias").reshape(-1, 1)
layer2_weights = get_fromurl(var, "_Weights_Layer2_Neurons").reshape(1, -1)
layer2_bias = get_fromurl(var, "_Weights_Layer2_Bias").reshape(1, -1)
extreme_cases = get_fromurl(var, "_ExtremeCases")
if var == "FCOVER":
nn_params[var] = {
"norm_minmax": norm_minmax,
"denorm_minmax": denorm_minmax,
"layer1_weights": layer1_weights,
"layer1_bias": layer1_bias,
"layer2_weights": layer2_weights,
"layer2_bias": layer2_bias,
"extreme_cases": extreme_cases,
}
else:
defdom_min = get_fromurl(var, "_DefinitionDomain_MinMax")[0, :].reshape(-1, 1)
defdom_max = get_fromurl(var, "_DefinitionDomain_MinMax")[1, :].reshape(-1, 1)
defdom_grid = get_fromurl(var, "_DefinitionDomain_Grid")
nn_params[var] = {
"norm_minmax": norm_minmax,
"denorm_minmax": denorm_minmax,
"layer1_weights": layer1_weights,
"layer1_bias": layer1_bias,
"layer2_weights": layer2_weights,
"layer2_bias": layer2_bias,
"defdom_min": defdom_min,
"defdom_max": defdom_max,
"defdom_grid": defdom_grid,
"extreme_cases": extreme_cases,
}
def _normalization(x, x_min, x_max):
x_norm = 2 * (x - x_min) / (x_max - x_min) - 1
return x_norm
def _denormalization(y_norm, y_min, y_max):
    y = 0.5 * (y_norm + 1) * (y_max - y_min) + y_min
return y
def _input_out_of_range(x, variable):
    x_copy = x.copy()
    if "defdom_min" not in nn_params[variable]:
        # FCOVER has no definition-domain auxdata, so the input check is skipped
        return x_copy
    x_bands = x_copy[:8, :]
# check min max domain
defdom_min = nn_params[variable]["defdom_min"][:, 0].reshape(-1, 1)
defdom_max = nn_params[variable]["defdom_max"][:, 0].reshape(-1, 1)
bad_input_mask = (x_bands < defdom_min) | (x_bands > defdom_max)
bad_vector = np.any(bad_input_mask, axis=0)
x_bands[:, bad_vector] = np.nan
# convex hull check, currently disabled due to time consumption vs benefit
# gridProject = lambda v: np.floor(10 * (v - defdom_min) / (defdom_max - defdom_min) + 1 ).astype(int)
# x_bands = gridProject(x_bands)
# isInGrid = lambda v: any((v == x).all() for x in nn_params[variable]['defdom_grid'])
# notInGrid = ~np.array([isInGrid(v) for v in x_bands.T])
# x[:,notInGrid | bad_vector] = np.nan
x_copy[:, bad_vector] = np.nan
return x_copy
def _output_out_of_range(output, variable):
new_output = np.copy(output)
tolerance = nn_params[variable]["extreme_cases"][0]
output_min = nn_params[variable]["extreme_cases"][1]
output_max = nn_params[variable]["extreme_cases"][2]
new_output[output < (output_min + tolerance)] = np.nan
new_output[(output > (output_min + tolerance)) & (output < output_min)] = output_min
new_output[(output < (output_max - tolerance)) & (output > output_max)] = output_max
new_output[output > (output_max - tolerance)] = np.nan
return new_output
def _compute_variable(x, variable):
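    # Two-layer perceptron from the ATBD: a tanh hidden layer followed by a
    # linear output neuron, applied to min-max normalized inputs.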
    x = _input_out_of_range(x, variable)
x_norm = _normalization(
x,
nn_params[variable]["norm_minmax"][:, 0].reshape(-1, 1),
nn_params[variable]["norm_minmax"][:, 1].reshape(-1, 1),
)
out_layer1 = np.tanh(
nn_params[variable]["layer1_weights"].dot(x_norm)
+ nn_params[variable]["layer1_bias"]
)
out_layer2 = (
nn_params[variable]["layer2_weights"].dot(out_layer1)
+ nn_params[variable]["layer2_bias"]
)
output = _denormalization(
out_layer2,
nn_params[variable]["denorm_minmax"][0],
nn_params[variable]["denorm_minmax"][1],
)[0]
    output = _output_out_of_range(output, variable)
output = output.reshape(1, np.shape(x)[1])
return output
def run_snap_biophys(dataset, variable):
"""Compute specified variable using the SNAP algorithm.
See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf
Parameters
----------
dataset : xr dataset
xarray dataset.
variable : str
Options 'FAPAR', 'FCOVER', 'LAI', 'LAI_Cab' or 'LAI_Cw'
Returns
-------
xarray dataset
Adds the specified variable array to dataset (variable name in
lowercase).
"""
# generate view angle bands/layers
vz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.view_zenith)).values
)
vz = vz[..., np.newaxis]
vzarr = xr.DataArray(
vz,
coords=[dataset.y, dataset.x, dataset.time, ["view_zenith"]],
dims=["y", "x", "time", "band"],
)
sz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.sun_zenith)).values
)
sz = sz[..., np.newaxis]
szarr = xr.DataArray(
sz,
coords=[dataset.y, dataset.x, dataset.time, ["sun_zenith"]],
dims=["y", "x", "time", "band"],
)
raz = (
np.ones_like(dataset.band_data[:, 0, :, :]).T
* np.cos(np.radians(dataset.sun_azimuth - dataset.view_azimuth)).values
)
raz = raz[..., np.newaxis]
razarr = xr.DataArray(
raz,
coords=[dataset.y, dataset.x, dataset.time, ["relative_azimuth"]],
dims=["y", "x", "time", "band"],
)
newarr = xr.concat([dataset.band_data, vzarr, szarr, razarr], dim="band")
newarr = newarr.stack(xy=("x", "y"))
arr = xr.apply_ufunc(
_compute_variable,
newarr,
input_core_dims=[["band", "xy"]],
output_core_dims=[["xy"]],
kwargs={"variable": variable},
vectorize=True,
).unstack()
return dataset.assign({variable.lower(): arr})
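# Hypothetical usage sketch ('ds' is an assumed xarray dataset with band_data
# plus sun/view geometry variables, shaped as the docstring describes):
#
#   ds = run_snap_biophys(ds, 'LAI')
#   ds['lai']  # DataArray with dims (y, x, time)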
| [
"numpy.radians",
"io.BytesIO",
"numpy.zeros_like",
"numpy.ones_like",
"numpy.copy",
"xarray.concat",
"numpy.any",
"numpy.shape",
"xarray.DataArray",
"requests.get",
"xarray.apply_ufunc"
] | [((1817, 1838), 'requests.get', 'requests.get', (['res_url'], {}), '(res_url)\n', (1829, 1838), False, 'import requests\n'), ((4311, 4341), 'numpy.any', 'np.any', (['bad_input_mask'], {'axis': '(0)'}), '(bad_input_mask, axis=0)\n', (4317, 4341), True, 'import numpy as np\n'), ((4915, 4930), 'numpy.copy', 'np.copy', (['output'], {}), '(output)\n', (4922, 4930), True, 'import numpy as np\n'), ((5472, 5488), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5485, 5488), True, 'import numpy as np\n'), ((6944, 7060), 'xarray.DataArray', 'xr.DataArray', (['vz'], {'coords': "[dataset.y, dataset.x, dataset.time, ['view_zenith']]", 'dims': "['y', 'x', 'time', 'band']"}), "(vz, coords=[dataset.y, dataset.x, dataset.time, ['view_zenith'\n ]], dims=['y', 'x', 'time', 'band'])\n", (6956, 7060), True, 'import xarray as xr\n'), ((7256, 7371), 'xarray.DataArray', 'xr.DataArray', (['sz'], {'coords': "[dataset.y, dataset.x, dataset.time, ['sun_zenith']]", 'dims': "['y', 'x', 'time', 'band']"}), "(sz, coords=[dataset.y, dataset.x, dataset.time, ['sun_zenith']\n ], dims=['y', 'x', 'time', 'band'])\n", (7268, 7371), True, 'import xarray as xr\n'), ((7595, 7717), 'xarray.DataArray', 'xr.DataArray', (['raz'], {'coords': "[dataset.y, dataset.x, dataset.time, ['relative_azimuth']]", 'dims': "['y', 'x', 'time', 'band']"}), "(raz, coords=[dataset.y, dataset.x, dataset.time, [\n 'relative_azimuth']], dims=['y', 'x', 'time', 'band'])\n", (7607, 7717), True, 'import xarray as xr\n'), ((7758, 7822), 'xarray.concat', 'xr.concat', (['[dataset.band_data, vzarr, szarr, razarr]'], {'dim': '"""band"""'}), "([dataset.band_data, vzarr, szarr, razarr], dim='band')\n", (7767, 7822), True, 'import xarray as xr\n'), ((2049, 2076), 'io.BytesIO', 'io.BytesIO', (['res_url.content'], {}), '(res_url.content)\n', (2059, 2076), False, 'import io\n'), ((6215, 6226), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6223, 6226), True, 'import numpy as np\n'), ((6794, 6837), 'numpy.ones_like', 'np.ones_like', (['dataset.band_data[:, 0, :, :]'], {}), '(dataset.band_data[:, 0, :, :])\n', (6806, 6837), True, 'import numpy as np\n'), ((7107, 7150), 'numpy.ones_like', 'np.ones_like', (['dataset.band_data[:, 0, :, :]'], {}), '(dataset.band_data[:, 0, :, :])\n', (7119, 7150), True, 'import numpy as np\n'), ((7419, 7462), 'numpy.ones_like', 'np.ones_like', (['dataset.band_data[:, 0, :, :]'], {}), '(dataset.band_data[:, 0, :, :])\n', (7431, 7462), True, 'import numpy as np\n'), ((7874, 8027), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_compute_variable', 'newarr'], {'input_core_dims': "[['band', 'xy']]", 'output_core_dims': "[['xy']]", 'kwargs': "{'variable': variable}", 'vectorize': '(True)'}), "(_compute_variable, newarr, input_core_dims=[['band', 'xy']],\n output_core_dims=[['xy']], kwargs={'variable': variable}, vectorize=True)\n", (7888, 8027), True, 'import xarray as xr\n'), ((6857, 6888), 'numpy.radians', 'np.radians', (['dataset.view_zenith'], {}), '(dataset.view_zenith)\n', (6867, 6888), True, 'import numpy as np\n'), ((7170, 7200), 'numpy.radians', 'np.radians', (['dataset.sun_zenith'], {}), '(dataset.sun_zenith)\n', (7180, 7200), True, 'import numpy as np\n'), ((7482, 7536), 'numpy.radians', 'np.radians', (['(dataset.sun_azimuth - dataset.view_azimuth)'], {}), '(dataset.sun_azimuth - dataset.view_azimuth)\n', (7492, 7536), True, 'import numpy as np\n')] |
import sys
import typing
import numba as nb
import numpy as np
@nb.njit(
(nb.i8, nb.i8[:, :]),
cache=True,
)
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
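  # Longest path (in edges) of a DAG: visit vertices in Kahn's topological
  # order and relax dist[v] = max(dist[v], dist[u] + 1) along each edge.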
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
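# Worked example (hypothetical stdin): n=4, m=3 with edges 1->2, 2->3, 1->4
# yields dist = [0, 1, 2, 1] and prints 2 (the path 1 -> 2 -> 3).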
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
main()
| [
"numba.njit",
"numpy.zeros",
"numpy.arange",
"sys.stdin.read"
] | [((67, 108), 'numba.njit', 'nb.njit', (['(nb.i8, nb.i8[:, :])'], {'cache': '(True)'}), '((nb.i8, nb.i8[:, :]), cache=True)\n', (74, 108), True, 'import numba as nb\n'), ((184, 211), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (192, 211), True, 'import numpy as np\n'), ((416, 443), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (424, 443), True, 'import numpy as np\n'), ((330, 346), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (339, 346), True, 'import numpy as np\n'), ((790, 806), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (804, 806), False, 'import sys\n')] |
# noinspection PyPackageRequirements
import datawrangler as dw
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
import quail
import warnings
def get_bounds(data, bounds):
data = [d for d in data.values.ravel() if not np.isnan(d)]
return [np.percentile(data, p) for p in bounds]
def distplot(data, bounds, cmap, x='Performance', y='Number of participants', bins=15):
bound_vals = get_bounds(data, bounds)
data = [d for d in data.values.ravel() if not np.isnan(d)]
sns.histplot(data, kde=True, color='k', bins=bins, edgecolor='w')
plt.xlabel(x, fontsize=14)
plt.ylabel(y, fontsize=14)
ylim = plt.ylim()
for i, p in enumerate(bound_vals):
plt.plot([p, p], ylim, color=cmap[i], linewidth=4)
    plt.ylim(ylim)
    plt.xlim([0, 1])
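# Minimal sketch (assumed inputs): 'accuracy' is a per-subject pandas DataFrame
# of scores in [0, 1]; any indexable palette works for cmap.
#
#   distplot(accuracy, bounds=[33, 67], cmap=sns.color_palette(), x='Accuracy')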
def create_fr_plots(fr, bounds, task_cmaps, prefix='', max_lag=8, save=False):
# noinspection PyShadowingNames
def get_fname(n, prefix):
if len(prefix) == 0:
return n + '.pdf'
else:
return f'{prefix}_{n}.pdf'
# noinspection PyShadowingNames
def group(vals, bounds):
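        # assign each value a 0-based bin between consecutive percentile edges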
edges = np.percentile(vals, bounds)
edges[-1] += 1
return np.digitize(vals, edges).ravel() - 1
# noinspection PyTypeChecker
def plot_grouped_dists(data, groups, cmap, core_color='k', alpha1=0.25, alpha2=0.3, x='Feature',
y='Clustering score'):
# noinspection PyShadowingNames
def violin(x, color, alpha1=1, alpha2=0.5, linewidth=1):
for i, c in enumerate(x.columns):
parts = plt.violinplot([v for v in x[c].values if not np.isnan(v)], positions=[i])
for p in parts['bodies']:
p.set_facecolor(color)
p.set_edgecolor(None)
p.set_alpha(alpha1 * alpha2)
for j in ['cmeans', 'cmins', 'cmaxes', 'cbars', 'cmedians', 'cquantiles']:
if j not in parts.keys():
continue
parts[j].set_color(color)
parts[j].set_alpha(alpha1)
parts[j].set_linewidth(linewidth)
unique_groups = np.unique(groups)
for i, g in enumerate(unique_groups):
violin(data.loc[groups == g], cmap[i], alpha1=alpha1, linewidth=1)
violin(data, core_color, alpha1=alpha2, linewidth=2)
plt.xticks(np.arange(len(data.columns)), [t.replace('_', '\n').capitalize() for t in data.columns.values])
plt.xlabel(x, fontsize=14)
plt.ylabel(y, fontsize=14)
def plot_ribbon(data, color='k', alpha1=1, alpha2=0.25, linewidth=2):
x = data.columns.values
y = data.mean(axis=0)
sem = np.divide(data.std(axis=0), np.sqrt(np.sum(1 - np.isnan(data.values), axis=0)))
plt.fill_between(x, y + sem, y - sem, color=color, alpha=alpha1 * alpha2, edgecolor=None)
plt.plot(x, y, c=color, linewidth=linewidth, alpha=alpha1)
# noinspection PyUnusedLocal
def plot_lines(data, color='k', opacity=0.05, linewidth=0.25):
x = data.columns.values
plt.plot(x, data.values.T, color=color, alpha=opacity, linewidth=linewidth)
# noinspection PyTypeChecker
def fr_summary_plot(data, groups, cmap, core_color='k', alpha1=0.5, alpha2=0.25, x='Serial position',
y='p(recall)'):
unique_groups = np.unique(groups)
for i, g in enumerate(unique_groups):
# plotting individual curves looks messy; skip by default... (uncomment to show single-subject data)
# plot_lines(data.loc[groups == g], cmap[i], opacity=alpha3)
plot_ribbon(data.loc[groups == g], cmap[i + 1], alpha1=alpha1, alpha2=alpha2)
plot_ribbon(data, core_color, alpha2=alpha2)
plt.xlabel(x, fontsize=14)
plt.ylabel(y, fontsize=14)
# analyze data
warnings.simplefilter('ignore')
pfr = fr.analyze('pfr').data.groupby('Subject').mean()
crp = fr.analyze('lagcrp').data.groupby('Subject').mean()
spc = fr.analyze('spc').data.groupby('Subject').mean()
fingerprint = fr.analyze('fingerprint').data.groupby('Subject').mean()
accuracy = fr.analyze('accuracy').data.groupby('Subject').mean()
fr_groups = group(accuracy.values, bounds)
# plot accuracy (distribution)
distplot(accuracy, bounds, task_cmaps['Free recall'], x='Accuracy')
if save:
plt.savefig(get_fname('accuracy', prefix))
plt.clf()
# plot fingerprints
plot_grouped_dists(fingerprint, fr_groups, task_cmaps['Free recall'])
if save:
plt.savefig(get_fname('fingerprints', prefix))
plt.clf()
# probability of first recall
fr_summary_plot(pfr, fr_groups, task_cmaps['Free recall'], y='Probability of first recall')
if save:
plt.savefig(get_fname('pfr', prefix))
plt.clf()
# lag-CRP
lags = np.arange(-max_lag, max_lag + 1)
fr_summary_plot(crp[lags], fr_groups, task_cmaps['Free recall'], x='Lag', y='Conditional response probability')
if save:
plt.savefig(get_fname('lag_crp', prefix))
plt.clf()
# serial position curve
fr_summary_plot(spc, fr_groups, task_cmaps['Free recall'], y='Recall probability')
if save:
plt.savefig(get_fname('spc', prefix))
plt.clf()
def split_by_feature(egg, feature):
pres_items = egg.get_pres_items()
pres_features = egg.get_pres_features()
rec_items = egg.get_rec_items()
rec_features = egg.get_rec_features()
vals = np.unique([v[feature] for v in pres_features.values.ravel()])
pres_words = []
rec_words = []
# subjs = pres_items.index.to_frame()['Subject'].values
current_id = pres_items.index[0][0]
subj_pres_words = []
subj_rec_words = []
for i in range(pres_items.shape[0]):
next_id = pres_items.index[i][0]
if next_id != current_id:
pres_words.append(subj_pres_words)
rec_words.append(subj_rec_words)
subj_pres_words = []
subj_rec_words = []
current_id = next_id
for v in vals:
# get next presented and recalled words
next_pres_items = [dw.core.update_dict(x, {'item': w}) for w, x in
zip(pres_items.iloc[i], pres_features.iloc[i]) if x[feature] == v]
next_rec_items = [dw.core.update_dict(x, {'item': w}) for w, x in
zip(rec_items.iloc[i], rec_features.iloc[i]) if w in [y['item'] for y in
next_pres_items]]
# remove "feature" from pres and rec items
[x.pop(feature, None) for x in next_pres_items]
[x.pop(feature, None) for x in next_rec_items]
subj_pres_words.append(next_pres_items)
subj_rec_words.append(next_rec_items)
    # Flush the final subject's lists; the loop above only flushes
    # when the subject ID changes.
    try:
        pres_words.append(subj_pres_words)
        rec_words.append(subj_rec_words)
    except NameError:
        pass
return quail.Egg(pres=pres_words, rec=rec_words) | [
"matplotlib.pyplot.xlim",
"numpy.digitize",
"seaborn.histplot",
"warnings.simplefilter",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.clf",
"numpy.unique",
"numpy.isnan",
"numpy.percentile",
"numpy.arange",
"datawrangler.core.update_dict",
"matplotlib.pyplot.ylabel"... | [((518, 583), 'seaborn.histplot', 'sns.histplot', (['data'], {'kde': '(True)', 'color': '"""k"""', 'bins': 'bins', 'edgecolor': '"""w"""'}), "(data, kde=True, color='k', bins=bins, edgecolor='w')\n", (530, 583), True, 'import seaborn as sns\n'), ((588, 614), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {'fontsize': '(14)'}), '(x, fontsize=14)\n', (598, 614), True, 'from matplotlib import pyplot as plt\n'), ((619, 645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {'fontsize': '(14)'}), '(y, fontsize=14)\n', (629, 645), True, 'from matplotlib import pyplot as plt\n'), ((657, 667), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (665, 667), True, 'from matplotlib import pyplot as plt\n'), ((772, 786), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (780, 786), True, 'from matplotlib import pyplot as plt\n'), ((792, 808), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (800, 808), True, 'from matplotlib import pyplot as plt\n'), ((3901, 3932), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3922, 3932), False, 'import warnings\n'), ((4480, 4489), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4487, 4489), True, 'from matplotlib import pyplot as plt\n'), ((4661, 4670), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4668, 4670), True, 'from matplotlib import pyplot as plt\n'), ((4865, 4874), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4872, 4874), True, 'from matplotlib import pyplot as plt\n'), ((4901, 4933), 'numpy.arange', 'np.arange', (['(-max_lag)', '(max_lag + 1)'], {}), '(-max_lag, max_lag + 1)\n', (4910, 4933), True, 'import numpy as np\n'), ((5117, 5126), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5124, 5126), True, 'from matplotlib import pyplot as plt\n'), ((5306, 5315), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5313, 5315), True, 'from matplotlib import pyplot as plt\n'), ((7042, 7083), 'quail.Egg', 'quail.Egg', ([], {'pres': 'pres_words', 'rec': 'rec_words'}), '(pres=pres_words, rec=rec_words)\n', (7051, 7083), False, 'import quail\n'), ((278, 300), 'numpy.percentile', 'np.percentile', (['data', 'p'], {}), '(data, p)\n', (291, 300), True, 'import numpy as np\n'), ((716, 766), 'matplotlib.pyplot.plot', 'plt.plot', (['[p, p]', 'ylim'], {'color': 'cmap[i]', 'linewidth': '(4)'}), '([p, p], ylim, color=cmap[i], linewidth=4)\n', (724, 766), True, 'from matplotlib import pyplot as plt\n'), ((1150, 1177), 'numpy.percentile', 'np.percentile', (['vals', 'bounds'], {}), '(vals, bounds)\n', (1163, 1177), True, 'import numpy as np\n'), ((2207, 2224), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (2216, 2224), True, 'import numpy as np\n'), ((2534, 2560), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {'fontsize': '(14)'}), '(x, fontsize=14)\n', (2544, 2560), True, 'from matplotlib import pyplot as plt\n'), ((2569, 2595), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {'fontsize': '(14)'}), '(y, fontsize=14)\n', (2579, 2595), True, 'from matplotlib import pyplot as plt\n'), ((2836, 2929), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', '(y + sem)', '(y - sem)'], {'color': 'color', 'alpha': '(alpha1 * alpha2)', 'edgecolor': 'None'}), '(x, y + sem, y - sem, color=color, alpha=alpha1 * alpha2,\n edgecolor=None)\n', (2852, 2929), True, 'from matplotlib import pyplot as plt\n'), ((2934, 2992), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'c': 'color', 'linewidth': 'linewidth', 
'alpha': 'alpha1'}), '(x, y, c=color, linewidth=linewidth, alpha=alpha1)\n', (2942, 2992), True, 'from matplotlib import pyplot as plt\n'), ((3134, 3209), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'data.values.T'], {'color': 'color', 'alpha': 'opacity', 'linewidth': 'linewidth'}), '(x, data.values.T, color=color, alpha=opacity, linewidth=linewidth)\n', (3142, 3209), True, 'from matplotlib import pyplot as plt\n'), ((3414, 3431), 'numpy.unique', 'np.unique', (['groups'], {}), '(groups)\n', (3423, 3431), True, 'import numpy as np\n'), ((3815, 3841), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {'fontsize': '(14)'}), '(x, fontsize=14)\n', (3825, 3841), True, 'from matplotlib import pyplot as plt\n'), ((3850, 3876), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {'fontsize': '(14)'}), '(y, fontsize=14)\n', (3860, 3876), True, 'from matplotlib import pyplot as plt\n'), ((253, 264), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (261, 264), True, 'import numpy as np\n'), ((500, 511), 'numpy.isnan', 'np.isnan', (['d'], {}), '(d)\n', (508, 511), True, 'import numpy as np\n'), ((6194, 6229), 'datawrangler.core.update_dict', 'dw.core.update_dict', (['x', "{'item': w}"], {}), "(x, {'item': w})\n", (6213, 6229), True, 'import datawrangler as dw\n'), ((6370, 6405), 'datawrangler.core.update_dict', 'dw.core.update_dict', (['x', "{'item': w}"], {}), "(x, {'item': w})\n", (6389, 6405), True, 'import datawrangler as dw\n'), ((1216, 1240), 'numpy.digitize', 'np.digitize', (['vals', 'edges'], {}), '(vals, edges)\n', (1227, 1240), True, 'import numpy as np\n'), ((2794, 2815), 'numpy.isnan', 'np.isnan', (['data.values'], {}), '(data.values)\n', (2802, 2815), True, 'import numpy as np\n'), ((1659, 1670), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (1667, 1670), True, 'import numpy as np\n')] |
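The `group` helper in `create_fr_plots` bins participants by accuracy percentile: `np.percentile` turns the requested percentiles into bin edges, the top edge is bumped so the maximum score stays inside the last bin, and `np.digitize` returns 1-based bin indices that are shifted to 0-based. A standalone sketch with made-up scores and bounds:
import numpy as np

scores = np.array([0.12, 0.35, 0.50, 0.64, 0.91])
bounds = [0, 50, 100]                    # percentile cut points
edges = np.percentile(scores, bounds)   # [0.12, 0.50, 0.91]
edges[-1] += 1                           # keep the max score in the top bin
groups = np.digitize(scores, edges) - 1  # 1-based bins shifted to 0-based
print(groups)                            # [0 0 1 1 1]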
# coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract class for Balloon Learning Environment agents."""
import abc
import enum
from typing import Optional, Sequence, Union
from flax.metrics import tensorboard
import numpy as np
class AgentMode(enum.Enum):
"""An enum for the agent mode."""
TRAIN = 'train'
EVAL = 'eval'
class Agent(abc.ABC):
"""Abstract class for defining Balloon Learning Environment agents."""
def __init__(self, num_actions: int, observation_shape: Sequence[int]):
"""Agent constructor.
A child class should have the same two arguments in its constructor
in order to work with `train_lib` and `eval_lib`, which it should pass
to this constructor.
Args:
num_actions: The number of actions available in the environment.
observation_shape: The shape of the observation vector.
"""
self._num_actions = num_actions
self._observation_shape = observation_shape
self.set_mode(AgentMode.TRAIN)
def get_name(self) -> str:
"""Gets the name of the agent."""
return self.__class__.__name__
@abc.abstractmethod
def begin_episode(self, observation: np.ndarray) -> int:
"""Begins the episode.
Must be overridden by child class.
Args:
observation: The first observation of an episode returned by the
environment.
Returns:
The action to be applied to the environment.
"""
@abc.abstractmethod
def step(self, reward: float, observation: np.ndarray) -> int:
"""Steps the agent.
Must be overridden by child class.
Args:
reward: The last reward returned by the environment.
observation: The last observation returned by the environment.
Returns:
A new action to apply to the environment.
"""
@abc.abstractmethod
def end_episode(self, reward: float, terminal: bool = True) -> None:
"""Lets the agent know the episode has ended.
    Must be overridden by child class.
Args:
reward: The final reward returned by the environment.
terminal: Whether the episode ended at a terminal state or not. This
may be False if the episode ended without reaching a terminal state,
for example in the case that we are using fixed-length episodes.
"""
def set_summary_writer(
self, summary_writer: Optional[tensorboard.SummaryWriter]) -> None:
"""Sets a summary writer for logging to tensorboard."""
self.summary_writer = summary_writer
def set_mode(self, mode: Union[AgentMode, str]) -> None:
"""Sets the mode of the agent.
No-op. It is recommended to override this in the child class.
If set to train, then the agent may train when being stepped.
However, if set to eval the agent should be fixed for evaluation.
Args:
mode: The mode to set the agent to. Accepts either an enum, or the
string value of the enum.
"""
def save_checkpoint(self, checkpoint_dir: str, iteration_number: int) -> None:
"""If available, save agent parameters to a checkpoint.
No-op. It is recommended to override this in the child class.
Args:
checkpoint_dir: The directory to write the checkpoint to.
iteration_number: The current iteration number.
"""
def load_checkpoint(self, checkpoint_dir: str, iteration_number: int) -> None:
"""If available, load agent parameters from a checkpoint.
No-op. It is recommended to override this in the child class.
Args:
checkpoint_dir: The directory to load the checkpoint from.
iteration_number: The current iteration number.
"""
def reload_latest_checkpoint(self, checkpoint_dir: str) -> int:
"""If available, load agent parameters from the latest checkpoint.
No-op. It is recommended to override this in the child class.
Args:
checkpoint_dir: Directory in which to look for checkpoints.
Returns:
Latest checkpoint number found, or -1 if none found.
"""
del checkpoint_dir
return -1
class RandomAgent(Agent):
"""An agent that takes uniform random actions."""
def _random_action(self) -> int:
return np.random.randint(self._num_actions)
def begin_episode(self, unused_obs: np.ndarray) -> int:
return self._random_action()
def step(self, reward: float, observation: np.ndarray) -> int:
return self._random_action()
def end_episode(self, reward: float, terminal: bool = True) -> None:
pass
| [
"numpy.random.randint"
] | [((4677, 4713), 'numpy.random.randint', 'np.random.randint', (['self._num_actions'], {}), '(self._num_actions)\n', (4694, 4713), True, 'import numpy as np\n')] |
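Because `Agent` is an abstract base class, a concrete agent only needs a constructor with the same two arguments plus the three episode hooks. A hypothetical sketch, assuming the module above is saved as `agent.py`; `ConstantAgent` and the toy rollout loop are illustrative and not part of the library:
import numpy as np
from agent import Agent  # assumed module name for the file above

class ConstantAgent(Agent):
  """Always picks action 0 -- the smallest possible concrete Agent."""

  def begin_episode(self, observation: np.ndarray) -> int:
    return 0

  def step(self, reward: float, observation: np.ndarray) -> int:
    return 0

  def end_episode(self, reward: float, terminal: bool = True) -> None:
    pass

agent = ConstantAgent(num_actions=4, observation_shape=(8,))
obs = np.zeros(8)
action = agent.begin_episode(obs)
for _ in range(10):  # toy rollout with fake rewards/observations
  action = agent.step(reward=0.0, observation=obs)
agent.end_episode(reward=1.0, terminal=True)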
import xarray as xr
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
def load_all_dataset(
data_path_root,
parameter_list,
show_progress=True
):
dss = []
if show_progress:
ps = tqdm(parameter_list)
else:
ps = parameter_list
for parameter in ps:
name = parameter.get("name", parameter["parameter"])
ds = xr.open_mfdataset(f"{data_path_root}/*.{name}.nc")
dss.append(ds)
ds = xr.merge(dss)
return ds
def get_forecast_time_list(forecast_time):
    # Lead-time grid: 0-162 h every 6 h, then 168-240 h every 12 h.
    forecast_time_range = pd.to_timedelta(np.arange(0, 167, 6), unit="h").append(
        pd.to_timedelta(np.arange(168, 246, 12), unit="h")
    )
if forecast_time > pd.Timedelta(hours=60):
time_list = forecast_time_range[
(forecast_time_range >= forecast_time - pd.Timedelta(hours=60))
&
(forecast_time_range <= forecast_time)
]
else:
time_list = forecast_time_range[
(forecast_time_range <= forecast_time)
]
return time_list
def reshape_array_to_samples(array: np.ndarray):
    # Flatten every trailing dim: (n, d1, d2, ...) -> (n, d1*d2*...).
    s = array.shape
    return array.reshape(s[0], np.prod(s[1:]))
def reshape_array_to_sample(array: np.ndarray):
s = array.shape
return array.reshape(1, np.prod(s)) | [
"xarray.merge",
"tqdm.auto.tqdm",
"numpy.arange",
"pandas.Timedelta",
"xarray.open_mfdataset",
"numpy.prod"
] | [((478, 491), 'xarray.merge', 'xr.merge', (['dss'], {}), '(dss)\n', (486, 491), True, 'import xarray as xr\n'), ((236, 256), 'tqdm.auto.tqdm', 'tqdm', (['parameter_list'], {}), '(parameter_list)\n', (240, 256), False, 'from tqdm.auto import tqdm\n'), ((394, 444), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['f"""{data_path_root}/*.{name}.nc"""'], {}), "(f'{data_path_root}/*.{name}.nc')\n", (411, 444), True, 'import xarray as xr\n'), ((722, 744), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(60)'}), '(hours=60)\n', (734, 744), True, 'import pandas as pd\n'), ((1173, 1187), 'numpy.prod', 'np.prod', (['s[1:]'], {}), '(s[1:])\n', (1180, 1187), True, 'import numpy as np\n'), ((1287, 1297), 'numpy.prod', 'np.prod', (['s'], {}), '(s)\n', (1294, 1297), True, 'import numpy as np\n'), ((657, 680), 'numpy.arange', 'np.arange', (['(168)', '(246)', '(12)'], {}), '(168, 246, 12)\n', (666, 680), True, 'import numpy as np\n'), ((593, 613), 'numpy.arange', 'np.arange', (['(0)', '(167)', '(6)'], {}), '(0, 167, 6)\n', (602, 613), True, 'import numpy as np\n'), ((839, 861), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(60)'}), '(hours=60)\n', (851, 861), True, 'import pandas as pd\n')] |
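`get_forecast_time_list` selects a trailing 60-hour window out of a fixed lead-time grid (6-hourly up to 162 h, then 12-hourly up to 240 h), and the reshape helpers flatten everything behind the sample axis. A standalone illustration of both, with synthetic values:
import numpy as np
import pandas as pd

# The lead-time grid used above: 0-162 h every 6 h, then 168-240 h every 12 h.
grid = pd.to_timedelta(np.arange(0, 167, 6), unit="h").append(
    pd.to_timedelta(np.arange(168, 246, 12), unit="h")
)
ft = pd.Timedelta(hours=192)
window = grid[(grid >= ft - pd.Timedelta(hours=60)) & (grid <= ft)]
print(list(window))  # 132h, 138h, ..., 162h, 168h, 180h, 192h

# reshape_array_to_samples flattens trailing dims: (n, y, x) -> (n, y*x).
arr = np.zeros((8, 5, 7))
flat = arr.reshape(arr.shape[0], np.prod(arr.shape[1:]))
print(flat.shape)  # (8, 35)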
from rsopt.codes.runner.Runner import Runner
from libensemble.message_numbers import WORKER_DONE, WORKER_KILL, TASK_FAILED
import os, logging
import numpy as np
from time import sleep
def sim_function_with_runner(H, persis_info, sim_specs, libE_info):
logger = logging.getLogger('libensemble')
logger.info('sim launching from {}'.format(os.getcwd()))
    # Worker 1 for libEnsemble (with the APOSMM generator) does not do sim evals,
    # but we are farming out to servers based on worker number.
    # Use nworkers = number_of_servers + 1; then workerID - 1 lies in
    # [1, number_of_servers] and maps each worker to a server.
try:
persistant_worker_count = sim_specs['user']['persistant_worker_count']
except KeyError:
logger.warning("persistant_worker_count was not explicitly set. Assuming 1 persistant worker.")
persistant_worker_count = 1
server_id = libE_info['workerID'] - persistant_worker_count
    # Sleep time is used to make sure we do not start rsmpi jobs too closely together;
    # past experience has shown this leads to fatal errors, possibly due to temporary
    # files being created in the same location.
st = server_id * 5.
sleep(st)
x = H['x'][0]
base_schema = sim_specs['user']['base_schema']
objective_function = sim_specs['user']['objective_function']
try:
processing_funcs = sim_specs['user']['processing']
except KeyError:
processing_funcs = None
# Run Simulations
runner = Runner(base_schema.format(server_id), objective_function=objective_function, processing=processing_funcs)
result = runner.run(x)
print('result on {} is {}'.format(server_id, result))
# Evaluate Result
try:
# Catch if runner encountered an error (most likely full loss from opal)
error = result[1]
print("Error detected.")
print("Runner settings:\n{}".format(result[0]))
outspecs = sim_specs['out']
output = np.zeros(1, dtype=outspecs)
        output['f'][0] = 100e3  # penalize the failed run with a large objective value
return output, persis_info, TASK_FAILED
except (TypeError, IndexError) as e:
pass
outspecs = sim_specs['out']
output = np.zeros(1, dtype=outspecs)
output['f'][0] = result
print("Worker {} finished. Result: {}".format(server_id, result))
return output, persis_info, WORKER_DONE | [
"os.getcwd",
"numpy.zeros",
"logging.getLogger",
"time.sleep"
] | [((267, 299), 'logging.getLogger', 'logging.getLogger', (['"""libensemble"""'], {}), "('libensemble')\n", (284, 299), False, 'import os, logging\n'), ((1211, 1220), 'time.sleep', 'sleep', (['st'], {}), '(st)\n', (1216, 1220), False, 'from time import sleep\n'), ((2220, 2247), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'outspecs'}), '(1, dtype=outspecs)\n', (2228, 2247), True, 'import numpy as np\n'), ((1993, 2020), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'outspecs'}), '(1, dtype=outspecs)\n', (2001, 2020), True, 'import numpy as np\n'), ((347, 358), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (356, 358), False, 'import os, logging\n')] |
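`sim_specs['out']` in libEnsemble is a list of NumPy dtype tuples, so `np.zeros(1, dtype=outspecs)` allocates a one-row structured array whose fields can be filled by name, as the sim function does with `output['f'][0]`. A minimal standalone sketch of the pattern (the `('f', float)` spec is illustrative):
import numpy as np

outspecs = [('f', float)]             # one scalar objective field named 'f'
output = np.zeros(1, dtype=outspecs)  # one-row structured array
output['f'][0] = 3.14                 # fill the field by name
print(output)                         # [(3.14,)]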
from molSimplify.Scripts.cellbuilder_tools import *
from molSimplify.Classes import mol3D
from molSimplify.Informatics.autocorrelation import*
from molSimplify.Informatics.misc_descriptors import*
from molSimplify.Informatics.graph_analyze import*
from molSimplify.Informatics.RACassemble import *
import os
import numpy as np
import pandas as pd
from scipy.spatial import distance
from scipy import sparse
import itertools
from molSimplify.Informatics.MOF.PBC_functions import *
#### NOTE: In addition to molSimplify's dependencies, this portion requires
#### pymatgen to be installed. The RACs are intended to be computed
#### on the primitive cell of the material. You can compute them
#### using the commented out snippet of code if necessary.
# Example usage is given at the bottom of the script.
'''<<<< CODE TO COMPUTE PRIMITIVE UNIT CELLS >>>>'''
#########################################################################################
# This MOF RAC generator assumes that pymatgen is installed. #
# Pymatgen is used to get the primitive cell. #
#########################################################################################
from pymatgen.io.cif import CifParser
def get_primitive(datapath, writepath):
s = CifParser(datapath, occupancy_tolerance=1).get_structures()[0]
sprim = s.get_primitive_structure()
sprim.to("cif",writepath)
'''<<<< END OF CODE TO COMPUTE PRIMITIVE UNIT CELLS >>>>'''
#########################################################################################
# The RAC functions here average over the different SBUs or linkers present. This is #
# because one MOF could have multiple different linkers or multiple SBUs, and we need #
# the vector to be of constant dimension so we can correlate the output property. #
#########################################################################################
def make_MOF_SBU_RACs(SBUlist, SBU_subgraph, molcif, depth, name,cell,anchoring_atoms, sbupath=False, connections_list=False, connections_subgraphlist=False):
descriptor_list = []
lc_descriptor_list = []
lc_names = []
names = []
n_sbu = len(SBUlist)
descriptor_names = []
descriptors = []
if sbupath:
sbu_descriptor_path = os.path.dirname(sbupath)
if os.path.getsize(sbu_descriptor_path+'/sbu_descriptors.csv')>0:
sbu_descriptors = pd.read_csv(sbu_descriptor_path+'/sbu_descriptors.csv')
else:
sbu_descriptors = pd.DataFrame()
if os.path.getsize(sbu_descriptor_path+'/lc_descriptors.csv')>0:
lc_descriptors = pd.read_csv(sbu_descriptor_path+'/lc_descriptors.csv')
else:
lc_descriptors = pd.DataFrame()
"""""""""
Loop over all SBUs as identified by subgraphs. Then create the mol3Ds for each SBU.
"""""""""
for i, SBU in enumerate(SBUlist):
descriptor_names = []
descriptors = []
SBU_mol = mol3D()
for val in SBU:
SBU_mol.addAtom(molcif.getAtom(val))
SBU_mol.graph = SBU_subgraph[i].todense()
"""""""""
For each linker connected to the SBU, find the lc atoms for the lc-RACs.
"""""""""
for j, linker in enumerate(connections_list):
descriptor_names = []
descriptors = []
if len(set(SBU).intersection(linker))>0:
#### This means that the SBU and linker are connected.
temp_mol = mol3D()
link_list = []
for jj, val2 in enumerate(linker):
if val2 in anchoring_atoms:
link_list.append(jj)
# This builds a mol object for the linker --> even though it is in the SBU section.
temp_mol.addAtom(molcif.getAtom(val2))
temp_mol.graph = connections_subgraphlist[j].todense()
"""""""""
Generate all of the lc autocorrelations (from the connecting atoms)
"""""""""
results_dictionary = generate_atomonly_autocorrelations(temp_mol, link_list, loud=False, depth=depth, oct=False, polarizability=True)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'lc','all')
# print('1',len(descriptor_names),len(descriptors))
results_dictionary = generate_atomonly_deltametrics(temp_mol, link_list, loud=False, depth=depth, oct=False, polarizability=True)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'D_lc','all')
# print('2',len(descriptor_names),len(descriptors))
"""""""""
If heteroatom functional groups exist (anything that is not C or H, so methyl is missed, also excludes anything lc, so carboxylic metal-coordinating oxygens skipped),
compile the list of atoms
"""""""""
functional_atoms = []
for jj in range(len(temp_mol.graph)):
if not jj in link_list:
if not set({temp_mol.atoms[jj].sym}) & set({"C","H"}):
functional_atoms.append(jj)
if len(functional_atoms) > 0:
results_dictionary = generate_atomonly_autocorrelations(temp_mol, functional_atoms , loud=False, depth=depth, oct=False, polarizability=True)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'func','all')
# print('3',len(descriptor_names),len(descriptors))
results_dictionary = generate_atomonly_deltametrics(temp_mol, functional_atoms , loud=False, depth=depth, oct=False, polarizability=True)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'D_func','all')
# print('4',len(descriptor_names),len(descriptors))
else:
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],list([numpy.zeros(int(6*(depth + 1)))]),'func','all')
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],list([numpy.zeros(int(6*(depth + 1)))]),'D_func','all')
# print('5b',len(descriptor_names),len(descriptors))
for val in descriptors:
if not (type(val) == float or isinstance(val, numpy.float64)):
print('Mixed typing. Please convert to python float, and avoid np float')
raise AssertionError('Mixed typing creates issues. Please convert your typing.')
descriptor_names += ['name']
descriptors += [name]
desc_dict = {key2: descriptors[kk] for kk, key2 in enumerate(descriptor_names)}
descriptors.remove(name)
descriptor_names.remove('name')
lc_descriptors = lc_descriptors.append(desc_dict, ignore_index=True)
lc_descriptor_list.append(descriptors)
if j == 0:
lc_names = descriptor_names
averaged_lc_descriptors = np.mean(np.array(lc_descriptor_list), axis=0)
lc_descriptors.to_csv(sbu_descriptor_path+'/lc_descriptors.csv',index=False)
descriptors = []
descriptor_names = []
SBU_mol_cart_coords=np.array([atom.coords() for atom in SBU_mol.atoms])
SBU_mol_atom_labels=[atom.sym for atom in SBU_mol.atoms]
SBU_mol_adj_mat = np.array(SBU_mol.graph)
###### WRITE THE SBU MOL TO THE PLACE
if sbupath and not os.path.exists(sbupath+"/"+str(name)+str(i)+'.xyz'):
xyzname = sbupath+"/"+str(name)+"_sbu_"+str(i)+".xyz"
SBU_mol_fcoords_connected = XYZ_connected(cell , SBU_mol_cart_coords , SBU_mol_adj_mat )
writeXYZandGraph(xyzname , SBU_mol_atom_labels , cell , SBU_mol_fcoords_connected,SBU_mol_adj_mat)
"""""""""
Generate all of the SBU based RACs (full scope, mc)
"""""""""
results_dictionary = generate_full_complex_autocorrelations(SBU_mol,depth=depth,loud=False,flag_name=False)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'f','all')
# print('6',len(descriptor_names),len(descriptors))
#### Now starts at every metal on the graph and autocorrelates
results_dictionary = generate_multimetal_autocorrelations(molcif,depth=depth,loud=False)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors, results_dictionary['colnames'],results_dictionary['results'],'mc','all')
# print('7',len(descriptor_names),len(descriptors))
results_dictionary = generate_multimetal_deltametrics(molcif,depth=depth,loud=False)
descriptor_names, descriptors = append_descriptors(descriptor_names, descriptors,results_dictionary['colnames'],results_dictionary['results'],'D_mc','all')
# print('8',len(descriptor_names),len(descriptors))
descriptor_names += ['name']
descriptors += [name]
        descriptors = list(descriptors)
desc_dict = {key: descriptors[ii] for ii, key in enumerate(descriptor_names)}
descriptors.remove(name)
descriptor_names.remove('name')
sbu_descriptors = sbu_descriptors.append(desc_dict, ignore_index=True)
descriptor_list.append(descriptors)
if i == 0:
names = descriptor_names
sbu_descriptors.to_csv(sbu_descriptor_path+'/sbu_descriptors.csv',index=False)
averaged_SBU_descriptors = np.mean(np.array(descriptor_list), axis=0)
return names, averaged_SBU_descriptors, lc_names, averaged_lc_descriptors
def make_MOF_linker_RACs(linkerlist, linker_subgraphlist, molcif, depth, name, cell, linkerpath=False):
#### This function makes full scope linker RACs for MOFs ####
descriptor_list = []
nlink = len(linkerlist)
descriptor_names = []
descriptors = []
if linkerpath:
linker_descriptor_path = os.path.dirname(linkerpath)
if os.path.getsize(linker_descriptor_path+'/linker_descriptors.csv')>0:
linker_descriptors = pd.read_csv(linker_descriptor_path+'/linker_descriptors.csv')
else:
linker_descriptors = pd.DataFrame()
for i, linker in enumerate(linkerlist):
linker_mol = mol3D()
for val in linker:
linker_mol.addAtom(molcif.getAtom(val))
linker_mol.graph = linker_subgraphlist[i].todense()
linker_mol_cart_coords=np.array([atom.coords() for atom in linker_mol.atoms])
linker_mol_atom_labels=[atom.sym for atom in linker_mol.atoms]
linker_mol_adj_mat = np.array(linker_mol.graph)
###### WRITE THE LINKER MOL TO THE PLACE
if linkerpath and not os.path.exists(linkerpath+"/"+str(name)+str(i)+".xyz"):
xyzname = linkerpath+"/"+str(name)+"_linker_"+str(i)+".xyz"
linker_mol_fcoords_connected = XYZ_connected(cell, linker_mol_cart_coords, linker_mol_adj_mat)
writeXYZandGraph(xyzname, linker_mol_atom_labels, cell, linker_mol_fcoords_connected, linker_mol_adj_mat)
allowed_strings = ['electronegativity', 'nuclear_charge', 'ident', 'topology', 'size']
labels_strings = ['chi', 'Z', 'I', 'T', 'S']
colnames = []
lig_full = list()
for ii, properties in enumerate(allowed_strings):
if not list(descriptors):
ligand_ac_full = full_autocorrelation(linker_mol, properties, depth)
else:
ligand_ac_full += full_autocorrelation(linker_mol, properties, depth)
this_colnames = []
for j in range(0,depth+1):
this_colnames.append('f-lig-'+labels_strings[ii] + '-' + str(j))
colnames.append(this_colnames)
lig_full.append(ligand_ac_full)
lig_full = [item for sublist in lig_full for item in sublist] #flatten lists
colnames = [item for sublist in colnames for item in sublist]
colnames += ['name']
lig_full += [name]
desc_dict = {key: lig_full[i] for i, key in enumerate(colnames)}
linker_descriptors = linker_descriptors.append(desc_dict, ignore_index = True)
lig_full.remove(name)
colnames.remove('name')
descriptor_list.append(lig_full)
#### We dump the standard lc descriptors without averaging or summing so that the user
#### can make the modifications that they want. By default we take the average ones.
linker_descriptors.to_csv(linker_descriptor_path+'/linker_descriptors.csv', index=False)
averaged_ligand_descriptors = np.mean(np.array(descriptor_list), axis=0)
return colnames, averaged_ligand_descriptors
def get_MOF_descriptors(data, depth, path=False, xyzpath = False):
if not path:
print('Need a directory to place all of the linker, SBU, and ligand objects. Exiting now.')
raise ValueError('Base path must be specified in order to write descriptors.')
else:
if path.endswith('/'):
path = path[:-1]
if not os.path.isdir(path+'/ligands'):
os.mkdir(path+'/ligands')
if not os.path.isdir(path+'/linkers'):
os.mkdir(path+'/linkers')
if not os.path.isdir(path+'/sbus'):
os.mkdir(path+'/sbus')
if not os.path.isdir(path+'/xyz'):
os.mkdir(path+'/xyz')
if not os.path.isdir(path+'/logs'):
os.mkdir(path+'/logs')
if not os.path.exists(path+'/sbu_descriptors.csv'):
with open(path+'/sbu_descriptors.csv','w') as f:
f.close()
if not os.path.exists(path+'/linker_descriptors.csv'):
with open(path+'/linker_descriptors.csv','w') as g:
g.close()
if not os.path.exists(path+'/lc_descriptors.csv'):
with open(path+'/lc_descriptors.csv','w') as h:
h.close()
ligandpath = path+'/ligands'
linkerpath = path+'/linkers'
sbupath = path+'/sbus'
logpath = path+"/logs"
"""""""""
Input cif file and get the cell parameters and adjacency matrix. If overlap, do not featurize.
Simultaneously prepare mol3D class for MOF for future RAC featurization (molcif)
"""""""""
cpar, allatomtypes, fcoords = readcif(data)
cell_v = mkcell(cpar)
cart_coords = fractional2cart(fcoords,cell_v)
name = os.path.basename(data).strip(".cif")
if len(cart_coords) > 2000:
print("Too large cif file, skipping it for now...")
full_names = [0]
full_descriptors = [0]
tmpstr = "Failed to featurize %s: large primitive cell\n"%(name)
write2file(path,"/FailedStructures.log",tmpstr)
return full_names, full_descriptors
distance_mat = compute_distance_matrix2(cell_v,cart_coords)
try:
adj_matrix=compute_adj_matrix(distance_mat,allatomtypes)
except NotImplementedError:
full_names = [0]
full_descriptors = [0]
tmpstr = "Failed to featurize %s: atomic overlap\n"%(name)
write2file(path,"/FailedStructures.log",tmpstr)
return full_names, full_descriptors
writeXYZandGraph(xyzpath, allatomtypes, cell_v, fcoords, adj_matrix.todense())
molcif,_,_,_,_ = import_from_cif(data, True)
molcif.graph = adj_matrix.todense()
"""""""""
check number of connected components.
if more than 1: it checks if the structure is interpenetrated. Fails if no metal in one of the connected components (identified by the graph).
This includes floating solvent molecules.
"""""""""
n_components, labels_components = sparse.csgraph.connected_components(csgraph=adj_matrix, directed=False, return_labels=True)
metal_list = set([at for at in molcif.findMetal(transition_metals_only=False)])
# print('##### METAL LIST', metal_list, [molcif.getAtom(val).symbol() for val in list(metal_list)])
# print('##### METAL LIST', metal_list, [val.symbol() for val in molcif.atoms])
if not len(metal_list) > 0:
full_names = [0]
full_descriptors = [0]
tmpstr = "Failed to featurize %s: no metal found\n"%(name)
write2file(path,"/FailedStructures.log",tmpstr)
return full_names, full_descriptors
for comp in range(n_components):
inds_in_comp = [i for i in range(len(labels_components)) if labels_components[i]==comp]
if not set(inds_in_comp)&metal_list:
full_names = [0]
full_descriptors = [0]
tmpstr = "Failed to featurize %s: solvent molecules\n"%(name)
write2file(path,"/FailedStructures.log",tmpstr)
return full_names, full_descriptors
if n_components > 1 :
print("structure is interpenetrated")
tmpstr = "%s found to be an interpenetrated structure\n"%(name)
write2file(logpath,"/%s.log"%name,tmpstr)
"""""""""
step 1: metallic part
removelist = metals (1) + atoms only connected to metals (2) + H connected to (1+2)
SBUlist = removelist + 1st coordination shell of the metals
removelist = set()
Logs the atom types of the connecting atoms to the metal in logpath.
"""""""""
SBUlist = set()
metal_list = set([at for at in molcif.findMetal(transition_metals_only=False)])
# print('##### METAL LIST2', metal_list, [molcif.getAtom(val).symbol() for val in list(metal_list)])
# print('##### all LIST2', metal_list, [val.symbol() for val in molcif.atoms])
[SBUlist.update(set([metal])) for metal in molcif.findMetal(transition_metals_only=False)] #Remove all metals as part of the SBU
[SBUlist.update(set(molcif.getBondedAtomsSmart(metal))) for metal in molcif.findMetal(transition_metals_only=False)]
removelist = set()
[removelist.update(set([metal])) for metal in molcif.findMetal(transition_metals_only=False)] #Remove all metals as part of the SBU
for metal in removelist:
bonded_atoms = set(molcif.getBondedAtomsSmart(metal))
bonded_atoms_types = set([str(allatomtypes[at]) for at in set(molcif.getBondedAtomsSmart(metal))])
cn = len(bonded_atoms)
cn_atom = ",".join([at for at in bonded_atoms_types])
tmpstr = "atom %i with type of %s found to have %i coordinates with atom types of %s\n"%(metal,allatomtypes[metal],cn,cn_atom)
write2file(logpath,"/%s.log"%name,tmpstr)
[removelist.update(set([atom])) for atom in SBUlist if all((molcif.getAtom(val).ismetal() or molcif.getAtom(val).symbol().upper() == 'H') for val in molcif.getBondedAtomsSmart(atom))]
"""""""""
adding hydrogens connected to atoms which are only connected to metals. In particular interstitial OH, like in UiO SBU.
"""""""""
for atom in SBUlist:
for val in molcif.getBondedAtomsSmart(atom):
if molcif.getAtom(val).symbol().upper() == 'H':
removelist.update(set([val]))
"""""""""
At this point:
The remove list only removes metals and things ONLY connected to metals or hydrogens.
Thus the coordinating atoms are double counted in the linker.
    step 2: organic part
        linkers = all atoms - removelist (assuming no bonds between
        organic linkers)
"""""""""
allatoms = set(range(0, adj_matrix.shape[0]))
linkers = allatoms - removelist
linker_list, linker_subgraphlist = get_closed_subgraph(linkers.copy(), removelist.copy(), adj_matrix)
connections_list = copy.deepcopy(linker_list)
connections_subgraphlist = copy.deepcopy(linker_subgraphlist)
linker_length_list = [len(linker_val) for linker_val in linker_list]
adjmat = adj_matrix.todense()
"""""""""
find all anchoring atoms on linkers and ligands (lc identification)
"""""""""
anc_atoms = set()
for linker in linker_list:
for atom_linker in linker:
bonded2atom = np.nonzero(adj_matrix[atom_linker,:])[1]
if set(bonded2atom) & metal_list:
anc_atoms.add(atom_linker)
"""""""""
step 3: linker or ligand ?
checking to find the anchors and #SBUs that are connected to an organic part
anchor <= 1 -> ligand
anchor > 1 and #SBU > 1 -> linker
else: walk over the linker graph and count #crossing PBC
if #crossing is odd -> linker
else -> ligand
"""""""""
initial_SBU_list, initial_SBU_subgraphlist = get_closed_subgraph(removelist.copy(), linkers.copy(), adj_matrix)
templist = linker_list[:]
tempgraphlist = linker_subgraphlist[:]
long_ligands = False
max_min_linker_length , min_max_linker_length = (0,100)
for ii, atoms_list in reversed(list(enumerate(linker_list))): #Loop over all linker subgraphs
linkeranchors_list = set()
linkeranchors_atoms = set()
sbuanchors_list = set()
sbu_connect_list = set()
"""""""""
Here, we are trying to identify what is actually a linker and what is a ligand.
        To do this, we check if something is connected to more than one SBU. Sets are
        used to handle cases where the primitive cell is small; ambiguous cases are recorded.
"""""""""
for iii,atoms in enumerate(atoms_list): #loop over all atoms in a linker
connected_atoms = np.nonzero(adj_matrix[atoms,:])[1]
for kk, sbu_atoms_list in enumerate(initial_SBU_list): #loop over all SBU subgraphs
for sbu_atoms in sbu_atoms_list: #Loop over SBU
if sbu_atoms in connected_atoms:
linkeranchors_list.add(iii)
linkeranchors_atoms.add(atoms)
sbuanchors_list.add(sbu_atoms)
sbu_connect_list.add(kk) #Add if unique SBUs
min_length,max_length = linker_length(linker_subgraphlist[ii].todense(),linkeranchors_list)
        if len(linkeranchors_list) >= 2: # linker, and in one ambiguous case, could be a ligand.
if len(sbu_connect_list) >= 2: #Something that connects two SBUs is certain to be a linker
max_min_linker_length = max(min_length,max_min_linker_length)
min_max_linker_length = min(max_length,min_max_linker_length)
continue
else:
# check number of times we cross PBC :
                # TODO: this can still fail on multidentate ligands!
linker_cart_coords=np.array([at.coords() \
for at in [molcif.getAtom(val) for val in atoms_list]])
linker_adjmat = np.array(linker_subgraphlist[ii].todense())
pr_image_organic = ligand_detect(cell_v,linker_cart_coords,linker_adjmat,linkeranchors_list)
sbu_temp = linkeranchors_atoms.copy()
sbu_temp.update({val for val in initial_SBU_list[list(sbu_connect_list)[0]]})
sbu_temp = list(sbu_temp)
sbu_cart_coords=np.array([at.coords() \
for at in [molcif.getAtom(val) for val in sbu_temp]])
sbu_adjmat = slice_mat(adj_matrix.todense(),sbu_temp)
pr_image_sbu = ligand_detect(cell_v,sbu_cart_coords,sbu_adjmat,set(range(len(linkeranchors_list))))
if not (len(np.unique(pr_image_sbu, axis=0))==1 and len(np.unique(pr_image_organic, axis=0))==1): # linker
max_min_linker_length = max(min_length,max_min_linker_length)
min_max_linker_length = min(max_length,min_max_linker_length)
tmpstr = str(name)+','+' Anchors list: '+str(sbuanchors_list) \
+','+' SBU connectlist: '+str(sbu_connect_list)+' set to be linker\n'
write2file(ligandpath,"/ambiguous.txt",tmpstr)
continue
else: # all anchoring atoms are in the same unitcell -> ligand
removelist.update(set(templist[ii])) # we also want to remove these ligands
SBUlist.update(set(templist[ii])) # we also want to remove these ligands
linker_list.pop(ii)
linker_subgraphlist.pop(ii)
tmpstr = str(name)+','+' Anchors list: '+str(sbuanchors_list) \
+','+' SBU connectlist: '+str(sbu_connect_list)+' set to be ligand\n'
write2file(ligandpath,"/ambiguous.txt",tmpstr)
tmpstr = str(name)+str(ii)+','+' Anchors list: '+ \
str(sbuanchors_list)+','+' SBU connectlist: '+str(sbu_connect_list)+'\n'
write2file(ligandpath,"/ligand.txt",tmpstr)
else: #definite ligand
write2file(logpath,"/%s.log"%name,"found ligand\n")
removelist.update(set(templist[ii])) # we also want to remove these ligands
SBUlist.update(set(templist[ii])) # we also want to remove these ligands
linker_list.pop(ii)
linker_subgraphlist.pop(ii)
tmpstr = str(name)+','+' Anchors list: '+str(sbuanchors_list) \
+','+' SBU connectlist: '+str(sbu_connect_list)+'\n'
write2file(ligandpath,"/ligand.txt",tmpstr)
tmpstr = str(name) + ", (min_max_linker_length,max_min_linker_length): " + \
str(min_max_linker_length) + " , " +str(max_min_linker_length) + "\n"
write2file(logpath,"/%s.log"%name,tmpstr)
if min_max_linker_length < 3:
write2file(linkerpath,"/short_ligands.txt",tmpstr)
if min_max_linker_length > 2:
# for N-C-C-N ligand ligand
if max_min_linker_length == min_max_linker_length:
long_ligands = True
elif min_max_linker_length > 3:
long_ligands = True
"""""""""
In the case of long linkers, add second coordination shell without further checks. In the case of short linkers, start from metal
and grow outwards using the include_extra_shells function
"""""""""
linker_length_list = [len(linker_val) for linker_val in linker_list]
if len(set(linker_length_list)) != 1:
write2file(linkerpath,"/uneven.txt",str(name)+'\n')
if not min_max_linker_length < 2: # treating the 2 atom ligands differently! Need caution
if long_ligands:
tmpstr = "\nStructure has LONG ligand\n\n"
write2file(logpath,"/%s.log"%name,tmpstr)
[[SBUlist.add(val) for val in molcif.getBondedAtomsSmart(zero_first_shell)] for zero_first_shell in SBUlist.copy()] #First account for all of the carboxylic acid type linkers, add in the carbons.
truncated_linkers = allatoms - SBUlist
SBU_list, SBU_subgraphlist = get_closed_subgraph(SBUlist, truncated_linkers, adj_matrix)
if not long_ligands:
tmpstr = "\nStructure has SHORT ligand\n\n"
write2file(logpath,"/%s.log"%name,tmpstr)
SBU_list , SBU_subgraphlist = include_extra_shells(SBU_list,SBU_subgraphlist,molcif ,adj_matrix)
else:
tmpstr = "Structure %s has extreamly short ligands, check the outputs\n"%name
write2file(ligandpath,"/ambiguous.txt",tmpstr)
tmpstr = "Structure has extreamly short ligands\n"
write2file(logpath,"/%s.log"%name,tmpstr)
tmpstr = "Structure has extreamly short ligands\n"
write2file(logpath,"/%s.log"%name,tmpstr)
truncated_linkers = allatoms - removelist
SBU_list, SBU_subgraphlist = get_closed_subgraph(removelist, truncated_linkers, adj_matrix)
SBU_list, SBU_subgraphlist = include_extra_shells(SBU_list,SBU_subgraphlist,molcif ,adj_matrix)
SBU_list, SBU_subgraphlist = include_extra_shells(SBU_list,SBU_subgraphlist,molcif ,adj_matrix)
"""""""""
For the cases that have a linker subgraph, do the featurization.
"""""""""
if len(linker_subgraphlist)>=1: #Featurize cases that did not fail
try:
# if True:
descriptor_names, descriptors, lc_descriptor_names, lc_descriptors = make_MOF_SBU_RACs(SBU_list, SBU_subgraphlist, molcif, depth, name , cell_v,anc_atoms, sbupath, connections_list, connections_subgraphlist)
lig_descriptor_names, lig_descriptors = make_MOF_linker_RACs(linker_list, linker_subgraphlist, molcif, depth, name, cell_v, linkerpath)
full_names = descriptor_names+lig_descriptor_names+lc_descriptor_names #+ ECFP_names
full_descriptors = list(descriptors)+list(lig_descriptors)+list(lc_descriptors)
print(len(full_names),len(full_descriptors))
# else:
except:
full_names = [0]
full_descriptors = [0]
elif len(linker_subgraphlist) == 1: # this never happens, right?
print('Suspicious featurization')
full_names = [1]
full_descriptors = [1]
else:
print('Failed to featurize this MOF.')
full_names = [0]
full_descriptors = [0]
if (len(full_names) <= 1) and (len(full_descriptors) <= 1):
tmpstr = "Failed to featurize %s\n"%(name)
write2file(path,"/FailedStructures.log",tmpstr)
return full_names, full_descriptors
##### Example of usage over a set of cif files.
# featurization_list = []
# import sys
# featurization_directory = sys.argv[1]
# for cif_file in os.listdir(featurization_directory+'/cif/'):
# #### This first part gets the primitive cells ####
# get_primitive(featurization_directory+'/cif/'+cif_file, featurization_directory+'/primitive/'+cif_file)
# full_names, full_descriptors = get_MOF_descriptors(featurization_directory+'/primitive/'+cif_file,3,path=featurization_directory+'/',
# xyzpath=featurization_directory+'/xyz/'+cif_file.replace('cif','xyz'))
# full_names.append('filename')
# full_descriptors.append(cif_file)
# featurization = dict(zip(full_names, full_descriptors))
# featurization_list.append(featurization)
# df = pd.DataFrame(featurization_list)
# df.to_csv('./full_featurization_frame.csv',index=False)
| [
"pandas.DataFrame",
"os.mkdir",
"os.path.basename",
"pandas.read_csv",
"os.path.getsize",
"molSimplify.Classes.mol3D",
"os.path.dirname",
"os.path.isdir",
"os.path.exists",
"numpy.nonzero",
"numpy.array",
"scipy.sparse.csgraph.connected_components",
"pymatgen.io.cif.CifParser",
"numpy.uniq... | [((16145, 16240), 'scipy.sparse.csgraph.connected_components', 'sparse.csgraph.connected_components', ([], {'csgraph': 'adj_matrix', 'directed': '(False)', 'return_labels': '(True)'}), '(csgraph=adj_matrix, directed=False,\n return_labels=True)\n', (16180, 16240), False, 'from scipy import sparse\n'), ((2313, 2337), 'os.path.dirname', 'os.path.dirname', (['sbupath'], {}), '(sbupath)\n', (2328, 2337), False, 'import os\n'), ((3012, 3019), 'molSimplify.Classes.mol3D', 'mol3D', ([], {}), '()\n', (3017, 3019), False, 'from molSimplify.Classes import mol3D\n'), ((7962, 7985), 'numpy.array', 'np.array', (['SBU_mol.graph'], {}), '(SBU_mol.graph)\n', (7970, 7985), True, 'import numpy as np\n'), ((10100, 10125), 'numpy.array', 'np.array', (['descriptor_list'], {}), '(descriptor_list)\n', (10108, 10125), True, 'import numpy as np\n'), ((10536, 10563), 'os.path.dirname', 'os.path.dirname', (['linkerpath'], {}), '(linkerpath)\n', (10551, 10563), False, 'import os\n'), ((10866, 10873), 'molSimplify.Classes.mol3D', 'mol3D', ([], {}), '()\n', (10871, 10873), False, 'from molSimplify.Classes import mol3D\n'), ((11201, 11227), 'numpy.array', 'np.array', (['linker_mol.graph'], {}), '(linker_mol.graph)\n', (11209, 11227), True, 'import numpy as np\n'), ((13168, 13193), 'numpy.array', 'np.array', (['descriptor_list'], {}), '(descriptor_list)\n', (13176, 13193), True, 'import numpy as np\n'), ((2349, 2410), 'os.path.getsize', 'os.path.getsize', (["(sbu_descriptor_path + '/sbu_descriptors.csv')"], {}), "(sbu_descriptor_path + '/sbu_descriptors.csv')\n", (2364, 2410), False, 'import os\n'), ((2454, 2511), 'pandas.read_csv', 'pd.read_csv', (["(sbu_descriptor_path + '/sbu_descriptors.csv')"], {}), "(sbu_descriptor_path + '/sbu_descriptors.csv')\n", (2465, 2511), True, 'import pandas as pd\n'), ((2554, 2568), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2566, 2568), True, 'import pandas as pd\n'), ((2580, 2640), 'os.path.getsize', 'os.path.getsize', (["(sbu_descriptor_path + '/lc_descriptors.csv')"], {}), "(sbu_descriptor_path + '/lc_descriptors.csv')\n", (2595, 2640), False, 'import os\n'), ((2671, 2727), 'pandas.read_csv', 'pd.read_csv', (["(sbu_descriptor_path + '/lc_descriptors.csv')"], {}), "(sbu_descriptor_path + '/lc_descriptors.csv')\n", (2682, 2727), True, 'import pandas as pd\n'), ((2769, 2783), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2781, 2783), True, 'import pandas as pd\n'), ((7611, 7639), 'numpy.array', 'np.array', (['lc_descriptor_list'], {}), '(lc_descriptor_list)\n', (7619, 7639), True, 'import numpy as np\n'), ((10575, 10642), 'os.path.getsize', 'os.path.getsize', (["(linker_descriptor_path + '/linker_descriptors.csv')"], {}), "(linker_descriptor_path + '/linker_descriptors.csv')\n", (10590, 10642), False, 'import os\n'), ((10677, 10740), 'pandas.read_csv', 'pd.read_csv', (["(linker_descriptor_path + '/linker_descriptors.csv')"], {}), "(linker_descriptor_path + '/linker_descriptors.csv')\n", (10688, 10740), True, 'import pandas as pd\n'), ((10786, 10800), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10798, 10800), True, 'import pandas as pd\n'), ((13610, 13642), 'os.path.isdir', 'os.path.isdir', (["(path + '/ligands')"], {}), "(path + '/ligands')\n", (13623, 13642), False, 'import os\n'), ((13654, 13681), 'os.mkdir', 'os.mkdir', (["(path + '/ligands')"], {}), "(path + '/ligands')\n", (13662, 13681), False, 'import os\n'), ((13695, 13727), 'os.path.isdir', 'os.path.isdir', (["(path + '/linkers')"], {}), "(path + '/linkers')\n", (13708, 13727), 
False, 'import os\n'), ((13739, 13766), 'os.mkdir', 'os.mkdir', (["(path + '/linkers')"], {}), "(path + '/linkers')\n", (13747, 13766), False, 'import os\n'), ((13780, 13809), 'os.path.isdir', 'os.path.isdir', (["(path + '/sbus')"], {}), "(path + '/sbus')\n", (13793, 13809), False, 'import os\n'), ((13821, 13845), 'os.mkdir', 'os.mkdir', (["(path + '/sbus')"], {}), "(path + '/sbus')\n", (13829, 13845), False, 'import os\n'), ((13859, 13887), 'os.path.isdir', 'os.path.isdir', (["(path + '/xyz')"], {}), "(path + '/xyz')\n", (13872, 13887), False, 'import os\n'), ((13899, 13922), 'os.mkdir', 'os.mkdir', (["(path + '/xyz')"], {}), "(path + '/xyz')\n", (13907, 13922), False, 'import os\n'), ((13936, 13965), 'os.path.isdir', 'os.path.isdir', (["(path + '/logs')"], {}), "(path + '/logs')\n", (13949, 13965), False, 'import os\n'), ((13977, 14001), 'os.mkdir', 'os.mkdir', (["(path + '/logs')"], {}), "(path + '/logs')\n", (13985, 14001), False, 'import os\n'), ((14015, 14060), 'os.path.exists', 'os.path.exists', (["(path + '/sbu_descriptors.csv')"], {}), "(path + '/sbu_descriptors.csv')\n", (14029, 14060), False, 'import os\n'), ((14162, 14210), 'os.path.exists', 'os.path.exists', (["(path + '/linker_descriptors.csv')"], {}), "(path + '/linker_descriptors.csv')\n", (14176, 14210), False, 'import os\n'), ((14315, 14359), 'os.path.exists', 'os.path.exists', (["(path + '/lc_descriptors.csv')"], {}), "(path + '/lc_descriptors.csv')\n", (14329, 14359), False, 'import os\n'), ((14914, 14936), 'os.path.basename', 'os.path.basename', (['data'], {}), '(data)\n', (14930, 14936), False, 'import os\n'), ((1305, 1347), 'pymatgen.io.cif.CifParser', 'CifParser', (['datapath'], {'occupancy_tolerance': '(1)'}), '(datapath, occupancy_tolerance=1)\n', (1314, 1347), False, 'from pymatgen.io.cif import CifParser\n'), ((3537, 3544), 'molSimplify.Classes.mol3D', 'mol3D', ([], {}), '()\n', (3542, 3544), False, 'from molSimplify.Classes import mol3D\n'), ((20404, 20442), 'numpy.nonzero', 'np.nonzero', (['adj_matrix[atom_linker, :]'], {}), '(adj_matrix[atom_linker, :])\n', (20414, 20442), True, 'import numpy as np\n'), ((21786, 21818), 'numpy.nonzero', 'np.nonzero', (['adj_matrix[atoms, :]'], {}), '(adj_matrix[atoms, :])\n', (21796, 21818), True, 'import numpy as np\n'), ((23751, 23782), 'numpy.unique', 'np.unique', (['pr_image_sbu'], {'axis': '(0)'}), '(pr_image_sbu, axis=0)\n', (23760, 23782), True, 'import numpy as np\n'), ((23795, 23830), 'numpy.unique', 'np.unique', (['pr_image_organic'], {'axis': '(0)'}), '(pr_image_organic, axis=0)\n', (23804, 23830), True, 'import numpy as np\n')] |
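The interpenetration and floating-solvent checks above hinge on `scipy.sparse.csgraph.connected_components` applied to the periodic adjacency matrix: each connected component must contain a metal, and more than one component flags interpenetration. A toy standalone version on two disconnected triangles (the adjacency matrix is made up):
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

# Two disconnected triangles: "atoms" {0, 1, 2} and {3, 4, 5}.
rows = [0, 1, 1, 2, 2, 0, 3, 4, 4, 5, 5, 3]
cols = [1, 0, 2, 1, 0, 2, 4, 3, 5, 4, 3, 5]
adj = csr_matrix((np.ones(len(rows)), (rows, cols)), shape=(6, 6))

n_components, labels = connected_components(csgraph=adj, directed=False,
                                     return_labels=True)
print(n_components, labels)  # 2 [0 0 0 1 1 1]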
#!/usr/bin/python
# Imports
import pandas as pd
import numpy as np
from collections import Counter
import tqdm
import math, os
from sklearn.metrics import mean_squared_error
from scipy.sparse.csgraph import minimum_spanning_tree as mst_nsim
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from scipy import sparse
import implicit
from .data import preprocess_binary
# Methods
def compute_rmse(preds, ground_truth):
grouped = pd.DataFrame({'count' : ground_truth.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
pred_values = []
real_values = []
for index, row in tqdm.tqdm(grouped.iterrows(), total=grouped.shape[0], position=0):
user_index = preds.index.tolist().index(row['user_nickname'])
town_index = preds.columns.tolist().index(row['town'])
#pred_values.append(predictions[(predictions.index==row['user_nickname'])][row['town']][0])
pred_values.append(preds.iloc[user_index,town_index])
real_values.append(float(row['count']))
rms = math.sqrt(mean_squared_error(real_values, pred_values))
return rms
def compute_precision_recall_N(PR, valid, N):
grouped_val = pd.DataFrame({'count' : valid.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
concat_preds = pd.DataFrame()
    for interval in range(1000,PR.shape[0]+1000,1000):  # melt predictions in batches of 1000 users
flat_preds = pd.melt(PR.iloc[interval-1000:interval],
id_vars='user_nickname',
value_vars=PR.iloc[interval-1000:interval].columns, # list of days of the week
var_name='town',
value_name='predicted_count')
flat_preds['user_nickname'] = PR.iloc[interval-1000:interval].index.tolist() * len(PR.columns)
flat_preds = flat_preds[flat_preds.predicted_count >= 0.]
flat_preds = flat_preds.groupby('user_nickname')[['user_nickname','town','predicted_count']].apply(lambda grp: grp.nlargest(N,'predicted_count'))
concat_preds = pd.concat([concat_preds, flat_preds], axis=0)
tp, fp, fn = 0.,0.,0.
for user in tqdm.tqdm(grouped_val.user_nickname.unique().tolist(), total=len(grouped_val.user_nickname.unique().tolist()), position=0):
tmp_val_df = grouped_val[grouped_val.user_nickname==user]
if tmp_val_df.shape[0] != 0:
tmp_pr_towns = concat_preds[concat_preds.user_nickname==user].town.tolist()
tmp_val_towns = tmp_val_df.town.tolist()
for gt_town in tmp_val_towns:
if gt_town in tmp_pr_towns:
#print('TP')
tp+=1.
elif gt_town not in tmp_pr_towns:
#print('FN')
fn+=1.
for pr_town in tmp_pr_towns[:len(tmp_val_towns)]:
if pr_town not in tmp_val_towns:
fp+=1.
#print('FP')
return tp,fp,fn
def svd_model(user_item_df, latent_dimension, N):
    # Scale by the global mean (despite the name, this divides rather than
    # subtracts), factorize with truncated SVD, then rebuild the dense
    # user-by-town prediction matrix.
    Checkins_demeaned = user_item_df.values/np.mean(user_item_df.values)
    U, sigma, Vt = svds(Checkins_demeaned, latent_dimension)
    sigma = np.diag(sigma)  # svds returns the singular values as a 1-D array
    all_user_predicted_checkins = np.dot(np.dot(U, sigma), Vt) + np.mean(user_item_df.values)
    preds = pd.DataFrame(all_user_predicted_checkins, columns = user_item_df.columns, index=user_item_df.index)
return preds
def implicit_model(user_item_df, train, validate, latent_dimension, N, preproccesing):
if preproccesing==2:
validate = pd.DataFrame({'count' : validate.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
validate['count'] = [1]*validate.shape[0]
train = pd.DataFrame({'count' : train.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
train['count'] = [1]*train.shape[0]
elif preproccesing==1:
validate = pd.DataFrame({'count' : validate.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
train = pd.DataFrame({'count' : train.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
# initialize a model
model = implicit.als.AlternatingLeastSquares(factors=latent_dimension)
# train the model on a sparse matrix of item/user/confidence weights
#model.fit(csr_matrix(user_item_df.values.T))
# recommend items for a user
user_items = csr_matrix(user_item_df).T.tocsr()
# get top N recommendations
user_dict = dict(zip(list(range(len(user_item_df.index.tolist()))), user_item_df.index.tolist()))
user_dict_inverse = dict(zip(user_item_df.index.tolist(), list(range(len(user_item_df.index.tolist())))))
town_dict = dict(zip(list(range(len(user_item_df.columns.tolist()))), user_item_df.columns.tolist()))
town_dict_inverse = dict(zip(user_item_df.columns.tolist(), list(range(len(user_item_df.columns.tolist())))))
# recommend items for a user
user_items = csr_matrix(user_item_df).T.tocsr()
item_users = user_items.T
# train the model on a sparse matrix of item/user/confidence weights
    print('Training model...')
model.fit(user_items, show_progress=True)
print('Computing RMSE for training set')
rmse_train = rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, train, item_users)
print('Computing RMSE for validation set')
rmse_valid = rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, validate, item_users)
print("Calculating precision, recall on training set")
precisionN_train, recallN_train = prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, train, item_users, N)
print("Calculating precision, recall on validation set")
precisionN_val, recallN_val = prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, validate, item_users, N)
return model, precisionN_train, recallN_train, precisionN_val, recallN_val, rmse_train, rmse_valid
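# Note (assumption): the fit()/recommend() calls above follow the pre-0.5
# `implicit` API, where fit() takes an item-by-user matrix and recommend()
# returns a list of (item_id, score) tuples. A rough sketch of the same
# lookup on implicit >= 0.5, where fit() takes user-by-item input and
# recommend() returns parallel arrays of ids and scores -- verify against
# the installed version before relying on this:
def _recommend_new_api(model, user_item_csr, userid, N=10):
    # user_item_csr is user-by-item; pass only the user's own row
    ids, scores = model.recommend(userid, user_item_csr[userid], N=N)
    return list(zip(ids, scores))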
def prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, validate, item_users, N):
    # Precision/recall over the first 3000 users, comparing the model's
    # recommendations against the towns actually visited in `validate`.
    tp, fp, fn = 0., 0., 0.
    user_ids = list(user_dict.keys())[:3000]
    for userid in tqdm.tqdm(user_ids, total=len(user_ids), position=0):
        recs = model.recommend(userid, item_users, N=N)
        gt = validate[validate.user_nickname == user_dict[userid]].town.unique().tolist()
        # score at most len(gt) recommendations against the ground truth
        for enum, recommendation in enumerate(recs):
            if enum == len(gt):
                break
            if town_dict[recommendation[0]] in gt:
                tp += 1.
            else:
                fp += 1.
        # ground-truth towns the model never recommended are false negatives
        for real in gt:
            if town_dict_inverse[real] not in [r[0] for r in recs]:
                fn += 1.
    print('tp:{}, fp:{}, fn:{}'.format(tp, fp, fn))
    precisionN = tp / (tp + fp) if (tp + fp) > 0 else 0.
    recallN = tp / (tp + fn) if (tp + fn) > 0 else 0.
    return precisionN, recallN
def rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, validate, item_users):
    # RMSE between the model scores and the observed visit counts, again
    # restricted to the first 3000 users.
    rmse_pred, rmse_gt = [], []
    for userid in list(user_dict.keys())[:3000]:
        # ask for a score for every town so each ground-truth town can be looked up
        recs = model.recommend(userid, item_users, N=len(user_item_df.columns.tolist()))
        gt = validate[validate.user_nickname == user_dict[userid]]
        if len(gt) == 0:
            continue
        for ind, row in gt.iterrows():
            try:
                pred_val = recs[[r[0] for r in recs].index(town_dict_inverse[row['town']])][1]
            except ValueError:
                # the town was filtered out of the recommendations
                print('town not in recommendations:', row['town'])
                continue
            rmse_gt.append(row['count'])
            rmse_pred.append(pred_val)
    rmse = math.sqrt(mean_squared_error(rmse_gt, rmse_pred))
    return rmse
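# Quick sanity check for the RMSE helper above (illustrative values only):
def _demo_rmse():
    # identical predictions give RMSE 0; a constant offset of 1 gives exactly 1
    assert math.sqrt(mean_squared_error([1., 2.], [1., 2.])) == 0.
    assert math.sqrt(mean_squared_error([1., 2.], [2., 3.])) == 1.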
class locationRec(object):
    """Location recommender wrapping the SVD and implicit-ALS models, with
    helpers for loading data and evaluating RMSE and precision/recall@N."""
def __init__(self):
self.user_item_df = None
self.train = None
self.validate = None
self.test = None
self.preds = None
self.rmse_train = None
self.rmse_val = None
self.rmse_test = None
self.precision_train = None
self.recall_train = None
self.precision_val = None
self.recall_val = None
self.precision_test = None
self.recall_test = None
self.model = None
        self.preprocessing = None
    def datapipeline(self, preprocessing=1):
        # preprocessing == 1 loads the raw-count pickles; == 2 loads the
        # binarized pickles, building them first if they are missing.
        self.preprocessing = preprocessing
        if preprocessing == 1:
            self.user_item_df = pd.read_pickle('User_Item.pckl')
            self.train = pd.read_pickle('train.pckl')
            self.validate = pd.read_pickle('validate.pckl')
            self.test = pd.read_pickle('test.pckl')
        elif preprocessing == 2:
            if not (os.path.isfile('User_Item2.pckl') and os.path.isfile('train2.pckl')
                    and os.path.isfile('validate2.pckl') and os.path.isfile('test2.pckl')):
                preprocess_binary()
            self.user_item_df = pd.read_pickle('User_Item2.pckl')
            self.train = pd.read_pickle('train2.pckl')
            self.validate = pd.read_pickle('validate2.pckl')
            self.test = pd.read_pickle('test2.pckl')
    def train_model(self, model_type='SVD', latent_dimension=50, N=10):
        if model_type == 'SVD':
            print('Training using SVD model...')
            self.preds = svd_model(self.user_item_df, latent_dimension, N)
        elif model_type == 'SVD_implicit':
            print('Training using SVD_implicit model...')
            self.model, self.precision_train, self.recall_train, self.precision_val, self.recall_val, self.rmse_train, self.rmse_val = \
                implicit_model(self.user_item_df, self.train, self.validate, latent_dimension, N, self.preprocessing)
            # TODO: compute self.preds using self.model
        elif model_type == 'AutoEncoder':
            pass  # not implemented yet
        print('Done')
def eval_rmse(self, data='val'):
if data == 'val':
self.rmse_val = compute_rmse(self.preds, self.validate)
elif data == 'train':
self.rmse_train = compute_rmse(self.preds, self.train)
elif data == 'test':
self.rmse_test = compute_rmse(self.preds, self.test)
    def eval_precision_N(self, N, data='val'):
        if data == 'val':
            tp, fp, fn = compute_precision_recall_N(self.preds, self.validate, N)
            self.precision_val = tp / (tp + fp)
            self.recall_val = tp / (tp + fn)
        elif data == 'train':
            tp, fp, fn = compute_precision_recall_N(self.preds, self.train, N)
            self.precision_train = tp / (tp + fp)
            self.recall_train = tp / (tp + fn)
        elif data == 'test':
            tp, fp, fn = compute_precision_recall_N(self.preds, self.test, N)
            self.precision_test = tp / (tp + fp)
            self.recall_test = tp / (tp + fn)
        else:
            raise ValueError("'data' should be equal to 'val', 'train', or 'test'!!!")
        return (tp / (tp + fp), tp / (tp + fn))
    def recommend_N_cities_for_user(self, N, user, data='val'):
        # position of the user in the prediction matrix
        user_index = self.preds.index.tolist().index(user)
        # towns sorted by predicted score, truncated to the top N
        recommended_list_sorted = self.preds.iloc[user_index, :].sort_values(ascending=False).head(N).index.tolist()
        if data == 'val':
            eval_user_df = self.validate[self.validate.user_nickname == user]
        elif data == 'test':
            eval_user_df = self.test[self.test.user_nickname == user]
        else:
            raise ValueError("'data' should be equal to 'val' or 'test'!!!")
        actual_list = eval_user_df.town.tolist()
        return recommended_list_sorted, actual_list
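# End-to-end usage sketch (illustrative): assumes the pickled splits loaded
# by datapipeline() exist on disk, and 'some_user' stands in for a real
# nickname from the data.
if __name__ == '__main__':
    rec = locationRec()
    rec.datapipeline(preprocessing=1)
    rec.train_model(model_type='SVD', latent_dimension=50, N=10)
    rec.eval_rmse(data='val')
    precision, recall = rec.eval_precision_N(10, data='val')
    print('precision@10: {:.3f}, recall@10: {:.3f}'.format(precision, recall))
    recs, actual = rec.recommend_N_cities_for_user(10, 'some_user', data='val')
    print('recommended:', recs)
    print('actually visited:', actual)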
| [
"pandas.DataFrame",
"pandas.melt",
"scipy.sparse.linalg.svds",
"pandas.read_pickle",
"os.path.isfile",
"numpy.mean",
"scipy.sparse.csr_matrix",
"numpy.dot",
"numpy.diag",
"implicit.als.AlternatingLeastSquares",
"pandas.concat",
"sklearn.metrics.mean_squared_error"
] | [((1312, 1326), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1324, 1326), True, 'import pandas as pd\n'), ((3102, 3143), 'scipy.sparse.linalg.svds', 'svds', (['Checkins_demeaned', 'latent_dimension'], {}), '(Checkins_demeaned, latent_dimension)\n', (3106, 3143), False, 'from scipy.sparse.linalg import svds\n'), ((3156, 3170), 'numpy.diag', 'np.diag', (['sigma'], {}), '(sigma)\n', (3163, 3170), True, 'import numpy as np\n'), ((3277, 3378), 'pandas.DataFrame', 'pd.DataFrame', (['all_user_predicted_checkins'], {'columns': 'user_item_df.columns', 'index': 'user_item_df.index'}), '(all_user_predicted_checkins, columns=user_item_df.columns,\n index=user_item_df.index)\n', (3289, 3378), True, 'import pandas as pd\n'), ((4140, 4202), 'implicit.als.AlternatingLeastSquares', 'implicit.als.AlternatingLeastSquares', ([], {'factors': 'latent_dimension'}), '(factors=latent_dimension)\n', (4176, 4202), False, 'import implicit\n'), ((1065, 1109), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['real_values', 'pred_values'], {}), '(real_values, pred_values)\n', (1083, 1109), False, 'from sklearn.metrics import mean_squared_error\n'), ((1403, 1579), 'pandas.melt', 'pd.melt', (['PR.iloc[interval - 1000:interval]'], {'id_vars': '"""user_nickname"""', 'value_vars': 'PR.iloc[interval - 1000:interval].columns', 'var_name': '"""town"""', 'value_name': '"""predicted_count"""'}), "(PR.iloc[interval - 1000:interval], id_vars='user_nickname',\n value_vars=PR.iloc[interval - 1000:interval].columns, var_name='town',\n value_name='predicted_count')\n", (1410, 1579), True, 'import pandas as pd\n'), ((2042, 2087), 'pandas.concat', 'pd.concat', (['[concat_preds, flat_preds]'], {'axis': '(0)'}), '([concat_preds, flat_preds], axis=0)\n', (2051, 2087), True, 'import pandas as pd\n'), ((3054, 3082), 'numpy.mean', 'np.mean', (['user_item_df.values'], {}), '(user_item_df.values)\n', (3061, 3082), True, 'import numpy as np\n'), ((3236, 3264), 'numpy.mean', 'np.mean', (['user_item_df.values'], {}), '(user_item_df.values)\n', (3243, 3264), True, 'import numpy as np\n'), ((8327, 8365), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['rmse_gt', 'rmse_pred'], {}), '(rmse_gt, rmse_pred)\n', (8345, 8365), False, 'from sklearn.metrics import mean_squared_error\n'), ((3212, 3228), 'numpy.dot', 'np.dot', (['U', 'sigma'], {}), '(U, sigma)\n', (3218, 3228), True, 'import numpy as np\n'), ((9109, 9141), 'pandas.read_pickle', 'pd.read_pickle', (['"""User_Item.pckl"""'], {}), "('User_Item.pckl')\n", (9123, 9141), True, 'import pandas as pd\n'), ((9167, 9195), 'pandas.read_pickle', 'pd.read_pickle', (['"""train.pckl"""'], {}), "('train.pckl')\n", (9181, 9195), True, 'import pandas as pd\n'), ((9224, 9255), 'pandas.read_pickle', 'pd.read_pickle', (['"""validate.pckl"""'], {}), "('validate.pckl')\n", (9238, 9255), True, 'import pandas as pd\n'), ((9280, 9307), 'pandas.read_pickle', 'pd.read_pickle', (['"""test.pckl"""'], {}), "('test.pckl')\n", (9294, 9307), True, 'import pandas as pd\n'), ((9598, 9631), 'pandas.read_pickle', 'pd.read_pickle', (['"""User_Item2.pckl"""'], {}), "('User_Item2.pckl')\n", (9612, 9631), True, 'import pandas as pd\n'), ((9657, 9686), 'pandas.read_pickle', 'pd.read_pickle', (['"""train2.pckl"""'], {}), "('train2.pckl')\n", (9671, 9686), True, 'import pandas as pd\n'), ((9715, 9747), 'pandas.read_pickle', 'pd.read_pickle', (['"""validate2.pckl"""'], {}), "('validate2.pckl')\n", (9729, 9747), True, 'import pandas as pd\n'), ((9772, 9800), 'pandas.read_pickle', 'pd.read_pickle', 
(['"""test2.pckl"""'], {}), "('test2.pckl')\n", (9786, 9800), True, 'import pandas as pd\n'), ((4376, 4400), 'scipy.sparse.csr_matrix', 'csr_matrix', (['user_item_df'], {}), '(user_item_df)\n', (4386, 4400), False, 'from scipy.sparse import csr_matrix\n'), ((4930, 4954), 'scipy.sparse.csr_matrix', 'csr_matrix', (['user_item_df'], {}), '(user_item_df)\n', (4940, 4954), False, 'from scipy.sparse import csr_matrix\n'), ((9352, 9385), 'os.path.isfile', 'os.path.isfile', (['"""User_Item2.pckl"""'], {}), "('User_Item2.pckl')\n", (9366, 9385), False, 'import math, os\n'), ((9390, 9419), 'os.path.isfile', 'os.path.isfile', (['"""train2.pckl"""'], {}), "('train2.pckl')\n", (9404, 9419), False, 'import math, os\n'), ((9424, 9456), 'os.path.isfile', 'os.path.isfile', (['"""validate2.pckl"""'], {}), "('validate2.pckl')\n", (9438, 9456), False, 'import math, os\n'), ((9461, 9489), 'os.path.isfile', 'os.path.isfile', (['"""test2.pckl"""'], {}), "('test2.pckl')\n", (9475, 9489), False, 'import math, os\n')] |