code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import json
import os
import re
from pathlib import Path
import numpy as np
import pandas as pd
# Matches Simrad-style raw file names, e.g. "SURVEY-D20170101-T120000-extra.raw":
# optional survey prefix, a D-prefixed date, a T-prefixed time, optional postfix.
SIMRAD_FILENAME_MATCHER = re.compile(
    r"(?P<survey>.+)?-?D(?P<date>\w{1,8})-T(?P<time>\w{1,6})-?(?P<postfix>\w+)?\..+"
)
def from_JSON(j):
    """Parse JSON from a file path or a raw JSON string.

    Parameters
    ----------
    j : str
        Valid JSON string or path to JSON file

    Returns
    -------
    dict
        The decoded JSON content.

    Raises
    ------
    ValueError
        If ``j`` is neither an existing file nor valid JSON text.
    """
    # A path that exists on disk wins; anything else is treated as raw JSON.
    if not os.path.isfile(j):
        try:
            return json.loads(j)
        except json.decoder.JSONDecodeError:
            raise ValueError("Invalid JSON string")
    with open(j, "r") as fp:
        return json.load(fp)
def validate_path(save_path=None, input_file=None, ext=".json"):
    """Resolve and validate an output path, creating parent folders as needed.

    Parameters
    ----------
    save_path : str or pathlib.Path, optional
        Desired output path; if it has no suffix it is treated as a directory.
    input_file : str or pathlib.Path, optional
        Source file whose stem names the output when ``save_path`` is missing
        or is a directory.
    ext : str
        Required output extension (compared case-insensitively).

    Returns
    -------
    str
        The validated output path.

    Raises
    ------
    ValueError
        If no usable path can be derived or the extension does not match.
    """
    if save_path is None:
        # Derive the output path next to the input file.
        if input_file is None:
            raise ValueError("No paths given")
        if ext is None:
            raise ValueError("No extension given")
        src = Path(input_file)
        target = src.parent / (src.stem + ext)
    else:
        target = Path(save_path)
        # No suffix means the caller gave a directory; name the file
        # after the input file's stem.
        if target.suffix == "":
            if input_file is None:
                raise ValueError("No filename given")
            src = Path(input_file)
            target = target / (src.stem + ext)
    # Check if extension of save path matches desired file format
    if target.suffix.lower() != ext.lower():
        raise ValueError(f"{target} is not a {ext} file")
    # Create directories if they do not exist
    if not target.parent.is_dir():
        target.parent.mkdir(parents=True, exist_ok=True)
    return str(target)
def parse_time(ev_time, datetime_format="%Y%m%d %H%M%S%f", unix=False):
    """Convert EV datetime string(s) to pandas/numpy datetime values.

    Parameters
    ----------
    ev_time : str, list, or np.ndarray
        EV datetime string, list of strings, or an already-converted
        ``datetime64[ms]`` array (returned unchanged).
    datetime_format : str
        strptime-style format of the input, CCYYMMDD HHmmSSssss by default.
    unix : bool, default False
        If True, return seconds since the Unix epoch instead of datetimes.

    Returns
    -------
    pandas datetime (or float when ``unix=True``)
        Converted input datetime.

    Raises
    ------
    ValueError
        If ``ev_time`` is not a str, list, or datetime64 array.
    """
    # Already-converted datetime64[ms] arrays pass straight through.
    if isinstance(ev_time, np.ndarray) and np.issubdtype(
        ev_time.dtype, "datetime64[ms]"
    ):
        return ev_time
    if not isinstance(ev_time, (str, list)):
        raise ValueError("'ev_time' must be type str or list")
    converted = pd.to_datetime(ev_time, format=datetime_format)
    if unix:
        # Seconds elapsed since the Unix epoch.
        converted = (converted - pd.Timestamp("1970-01-01")) / pd.Timedelta("1s")
    return converted
def parse_simrad_fname_time(filenames):
    """Convert Simrad-style filename(s) to numpy datetime64 values.

    Parameters
    ----------
    filenames : str, list
        Simrad-style filename or list of such filenames.

    Returns
    -------
    datetime : np.datetime64
        Converted datetimes extracted from the D.../T... filename fields.

    Raises
    ------
    ValueError
        If ``filenames`` is neither a str nor a list.
    """
    if isinstance(filenames, str):
        filenames = [filenames]
    elif not isinstance(filenames, list):
        raise ValueError("'filenames' must be type str or list")
    # Pull the date/time capture groups out of each filename.
    stamps = []
    for fname in filenames:
        match = SIMRAD_FILENAME_MATCHER.match(fname)
        stamps.append(match["date"] + " " + match["time"])
    return parse_time(stamps, "%Y%m%d %H%M%S")
| [
"json.load",
"pandas.Timestamp",
"json.loads",
"os.path.isfile",
"pathlib.Path",
"pandas.to_datetime",
"pandas.Timedelta",
"numpy.issubdtype",
"re.compile"
] | [((124, 229), 're.compile', 're.compile', (['"""(?P<survey>.+)?-?D(?P<date>\\\\w{1,8})-T(?P<time>\\\\w{1,6})-?(?P<postfix>\\\\w+)?\\\\..+"""'], {}), "(\n '(?P<survey>.+)?-?D(?P<date>\\\\w{1,8})-T(?P<time>\\\\w{1,6})-?(?P<postfix>\\\\w+)?\\\\..+'\n )\n", (134, 229), False, 'import re\n'), ((373, 390), 'os.path.isfile', 'os.path.isfile', (['j'], {}), '(j)\n', (387, 390), False, 'import os\n'), ((2685, 2732), 'pandas.to_datetime', 'pd.to_datetime', (['ev_time'], {'format': 'datetime_format'}), '(ev_time, format=datetime_format)\n', (2699, 2732), True, 'import pandas as pd\n'), ((1207, 1222), 'pathlib.Path', 'Path', (['save_path'], {}), '(save_path)\n', (1211, 1222), False, 'from pathlib import Path\n'), ((2456, 2502), 'numpy.issubdtype', 'np.issubdtype', (['ev_time.dtype', '"""datetime64[ms]"""'], {}), "(ev_time.dtype, 'datetime64[ms]')\n", (2469, 2502), True, 'import numpy as np\n'), ((448, 460), 'json.load', 'json.load', (['f'], {}), '(f)\n', (457, 460), False, 'import json\n'), ((508, 521), 'json.loads', 'json.loads', (['j'], {}), '(j)\n', (518, 521), False, 'import json\n'), ((2793, 2811), 'pandas.Timedelta', 'pd.Timedelta', (['"""1s"""'], {}), "('1s')\n", (2805, 2811), True, 'import pandas as pd\n'), ((1026, 1042), 'pathlib.Path', 'Path', (['input_file'], {}), '(input_file)\n', (1030, 1042), False, 'from pathlib import Path\n'), ((1456, 1472), 'pathlib.Path', 'Path', (['input_file'], {}), '(input_file)\n', (1460, 1472), False, 'from pathlib import Path\n'), ((2763, 2789), 'pandas.Timestamp', 'pd.Timestamp', (['"""1970-01-01"""'], {}), "('1970-01-01')\n", (2775, 2789), True, 'import pandas as pd\n')] |
import copy
import dace
import dace.sdfg.nodes
import numpy as np
# Python version of the SDFG below
# @dace.program
# def reduce_with_offsets(A: dace.float64[50, 50], B: dace.float64[25]):
# B[4:11] = dace.reduce(lambda a,b: a+b, A[25:50, 13:20], axis=0,
# identity=0)
# Hand-built SDFG equivalent of the commented-out program above: a sum
# reduction over A[25:50, 13:20] along axis 0, written into B[4:11].
reduce_with_offsets = dace.SDFG('reduce_with_offsets')
reduce_with_offsets.add_array('A', [50, 50], dace.float64)
reduce_with_offsets.add_array('B', [25], dace.float64)
state = reduce_with_offsets.add_state()
node_a = state.add_read('A')
node_b = state.add_write('B')
# Sum reduction over axis 0 with identity element 0.
red = state.add_reduce('lambda a,b: a+b', [0], 0)
state.add_nedge(node_a, red, dace.Memlet.simple('A', '25:50, 13:20'))
state.add_nedge(red, node_b, dace.Memlet.simple('B', '4:11'))
def test_offset_reduce():
    """Offset reduction on a copy of the shared SDFG matches numpy's sum."""
    inp = np.random.rand(50, 50)
    out = np.random.rand(25)
    # Deep-copy so the module-level SDFG stays pristine for other tests.
    graph = copy.deepcopy(reduce_with_offsets)
    graph(A=inp, B=out)
    assert np.allclose(out[4:11], np.sum(inp[25:50, 13:20], axis=0))
def test_offset_reduce_sequential():
    """Same offset reduction, with every map forced onto a sequential schedule."""
    inp = np.random.rand(50, 50)
    out = np.random.rand(25)
    graph = copy.deepcopy(reduce_with_offsets)
    graph.expand_library_nodes()
    # Expand then demote all map entries to sequential execution.
    for node, _ in graph.all_nodes_recursive():
        if isinstance(node, dace.sdfg.nodes.MapEntry):
            node.map.schedule = dace.ScheduleType.Sequential
    graph(A=inp, B=out)
    assert np.allclose(out[4:11], np.sum(inp[25:50, 13:20], axis=0))
def test_offset_reduce_indices():
    """Max-reduction over a fully indexed memlet writing one output element."""
    inp = np.random.rand(10, 10, 10, 10)
    out = np.ndarray([1], dtype=np.float64)
    # Seed with -inf so the max reduction has a neutral starting value.
    out[0] = -np.inf
    sdfg = dace.SDFG('reduce_with_indices')
    sdfg.add_array('A', [10, 10, 10, 10], dace.float64)
    sdfg.add_array('B', [1], dace.float64)
    st = sdfg.add_state()
    read_a = st.add_read('A')
    write_b = st.add_write('B')
    red = st.add_reduce('lambda a,b: max(a,b)', [0, 1, 2, 3])
    st.add_nedge(read_a, red, dace.Memlet.simple('A', '0, 1, 2, 0:10'))
    st.add_nedge(red, write_b, dace.Memlet.simple('B', '0'))
    sdfg(A=inp, B=out)
    assert np.allclose(out, np.max(inp[0, 1, 2, :], axis=0))
if __name__ == '__main__':
    # Run all tests directly when invoked as a script.
    test_offset_reduce()
    test_offset_reduce_sequential()
    test_offset_reduce_indices()
| [
"copy.deepcopy",
"numpy.sum",
"dace.Memlet.simple",
"numpy.max",
"numpy.random.rand",
"dace.SDFG",
"numpy.ndarray"
] | [((324, 356), 'dace.SDFG', 'dace.SDFG', (['"""reduce_with_offsets"""'], {}), "('reduce_with_offsets')\n", (333, 356), False, 'import dace\n'), ((650, 689), 'dace.Memlet.simple', 'dace.Memlet.simple', (['"""A"""', '"""25:50, 13:20"""'], {}), "('A', '25:50, 13:20')\n", (668, 689), False, 'import dace\n'), ((720, 751), 'dace.Memlet.simple', 'dace.Memlet.simple', (['"""B"""', '"""4:11"""'], {}), "('B', '4:11')\n", (738, 751), False, 'import dace\n'), ((789, 811), 'numpy.random.rand', 'np.random.rand', (['(50)', '(50)'], {}), '(50, 50)\n', (803, 811), True, 'import numpy as np\n'), ((820, 838), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (834, 838), True, 'import numpy as np\n'), ((851, 885), 'copy.deepcopy', 'copy.deepcopy', (['reduce_with_offsets'], {}), '(reduce_with_offsets)\n', (864, 885), False, 'import copy\n'), ((1018, 1040), 'numpy.random.rand', 'np.random.rand', (['(50)', '(50)'], {}), '(50, 50)\n', (1032, 1040), True, 'import numpy as np\n'), ((1049, 1067), 'numpy.random.rand', 'np.random.rand', (['(25)'], {}), '(25)\n', (1063, 1067), True, 'import numpy as np\n'), ((1080, 1114), 'copy.deepcopy', 'copy.deepcopy', (['reduce_with_offsets'], {}), '(reduce_with_offsets)\n', (1093, 1114), False, 'import copy\n'), ((1440, 1470), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)', '(10)', '(10)'], {}), '(10, 10, 10, 10)\n', (1454, 1470), True, 'import numpy as np\n'), ((1479, 1512), 'numpy.ndarray', 'np.ndarray', (['[1]'], {'dtype': 'np.float64'}), '([1], dtype=np.float64)\n', (1489, 1512), True, 'import numpy as np\n'), ((1559, 1591), 'dace.SDFG', 'dace.SDFG', (['"""reduce_with_indices"""'], {}), "('reduce_with_indices')\n", (1568, 1591), False, 'import dace\n'), ((938, 969), 'numpy.sum', 'np.sum', (['A[25:50, 13:20]'], {'axis': '(0)'}), '(A[25:50, 13:20], axis=0)\n', (944, 969), True, 'import numpy as np\n'), ((1363, 1394), 'numpy.sum', 'np.sum', (['A[25:50, 13:20]'], {'axis': '(0)'}), '(A[25:50, 13:20], axis=0)\n', (1369, 1394), 
True, 'import numpy as np\n'), ((1931, 1971), 'dace.Memlet.simple', 'dace.Memlet.simple', (['"""A"""', '"""0, 1, 2, 0:10"""'], {}), "('A', '0, 1, 2, 0:10')\n", (1949, 1971), False, 'import dace\n'), ((2006, 2034), 'dace.Memlet.simple', 'dace.Memlet.simple', (['"""B"""', '"""0"""'], {}), "('B', '0')\n", (2024, 2034), False, 'import dace\n'), ((2098, 2127), 'numpy.max', 'np.max', (['A[0, 1, 2, :]'], {'axis': '(0)'}), '(A[0, 1, 2, :], axis=0)\n', (2104, 2127), True, 'import numpy as np\n')] |
from setuptools import setup
from Cython.Build import cythonize
import numpy
# Build the Cython extension module "matmul" from matmul.pyx, compiling
# against the NumPy C headers.
setup(
    name='matmul',
    ext_modules=cythonize('matmul.pyx', language_level=3),
    include_dirs=[numpy.get_include()],  # NumPy headers for the C compiler
    setup_requires=[
        'Cython',
        'NumPy',
    ],
    install_requires=[
        'NumPy',
    ],
)
"Cython.Build.cythonize",
"numpy.get_include"
] | [((120, 161), 'Cython.Build.cythonize', 'cythonize', (['"""matmul.pyx"""'], {'language_level': '(3)'}), "('matmul.pyx', language_level=3)\n", (129, 161), False, 'from Cython.Build import cythonize\n'), ((181, 200), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (198, 200), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Functions to train the readout module to perform
tasks
@author: <NAME>
"""
import numpy as np
import pandas as pd
import scipy as sp
import mdp
from sklearn import metrics
from sklearn.model_selection import ParameterGrid
from sklearn.linear_model import Ridge, RidgeClassifier
from sklearn.multioutput import MultiOutputRegressor, MultiOutputClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
from sklearn.ensemble import RandomForestRegressor
import matplotlib.pyplot as plt
def check_xy_dims(x, y):
    """Squeeze train/test inputs and targets into the expected dimensions.

    Parameters
    ----------
    x : tuple of numpy.ndarray
        (x_train, x_test) reservoir states.
    y : tuple of numpy.ndarray
        (y_train, y_test) targets.

    Returns
    -------
    tuple of numpy.ndarray
        x_train, x_test, y_train, y_test with singleton axes removed; inputs
        that would not be 2D after squeezing get a trailing axis restored.
    """
    x_train, x_test = x
    y_train, y_test = y
    if x_train.squeeze().ndim == 2 and x_test.ndim == 2:
        x_train = x_train.squeeze()
        x_test = x_test.squeeze()
    else:
        # Promote 1D (single-feature) inputs back to column vectors so
        # downstream estimators always receive 2D matrices.
        x_train = x_train.squeeze()[:, np.newaxis]
        x_test = x_test.squeeze()[:, np.newaxis]
    return x_train, x_test, y_train.squeeze(), y_test.squeeze()
def regression(x, y, **kwargs):
    """Fit a ridge-regression readout and return its test R^2 score.

    Parameters
    ----------
    x : tuple of numpy.ndarray
        (train, test) reservoir states.
    y : tuple of numpy.ndarray
        (train, test) continuous targets.
    kwargs : dict
        Extra keyword arguments forwarded to sklearn's Ridge.

    Returns
    -------
    float
        R^2 score on the test split.
    """
    x_train, x_test, y_train, y_test = check_xy_dims(x, y)
    estimator = Ridge(fit_intercept=False, alpha=0.5, **kwargs)
    estimator.fit(x_train, y_train)
    return estimator.score(x_test, y_test)
def multiOutputRegression(x, y, **kwargs):
    """Fit one ridge regressor per output and score by correlation.

    Parameters
    ----------
    x : tuple of numpy.ndarray
        (train, test) reservoir states.
    y : tuple of numpy.ndarray
        (train, test) multi-column continuous targets.
    kwargs : dict
        Extra keyword arguments forwarded to sklearn's Ridge.

    Returns
    -------
    float
        Sum over outputs of the absolute Pearson correlation between the
        predicted and observed test targets.
    """
    x_train, x_test, y_train, y_test = check_xy_dims(x, y)
    base = Ridge(fit_intercept=False, alpha=0.5, **kwargs)
    model = MultiOutputRegressor(base).fit(x_train, y_train)
    y_pred = model.predict(x_test)
    # |Pearson r| between prediction and target, accumulated per output.
    scores = [
        np.abs(np.corrcoef(y_test[:, k], y_pred[:, k])[0][1])
        for k in range(y_pred.shape[1])
    ]
    return np.sum(scores)
def classification(x, y, **kwargs):
    """Fit a ridge-classifier readout and return its test accuracy.

    Parameters
    ----------
    x : tuple of numpy.ndarray
        (train, test) reservoir states.
    y : tuple of numpy.ndarray
        (train, test) integer class labels.
    kwargs : dict
        Extra keyword arguments forwarded to sklearn's RidgeClassifier.

    Returns
    -------
    float
        Accuracy on the test split.
    """
    x_train, x_test, y_train, y_test = check_xy_dims(x, y)
    clf = RidgeClassifier(alpha=0.0, fit_intercept=True, **kwargs)
    clf.fit(x_train, y_train)
    # Plain accuracy on the held-out split.
    return accuracy_score(y_test, clf.predict(x_test))
def multiOutputClassification(x, y, **kwargs):
    """Fit one ridge classifier per output and return the test accuracy.

    NOTE(review): the classifier is wrapped in MultiOutputRegressor rather
    than MultiOutputClassifier; kept as-is to preserve existing behavior.

    Parameters
    ----------
    x : tuple of numpy.ndarray
        (train, test) reservoir states.
    y : tuple of numpy.ndarray
        (train, test) multi-column integer labels.
    kwargs : dict
        Extra keyword arguments forwarded to sklearn's RidgeClassifier.

    Returns
    -------
    float
        Accuracy on the test split.
    """
    x_train, x_test, y_train, y_test = check_xy_dims(x, y)
    base = RidgeClassifier(alpha=0.5, fit_intercept=True, **kwargs)
    model = MultiOutputRegressor(base).fit(x_train, y_train)
    return accuracy_score(y_test, model.predict(x_test))
def run_task(reservoir_states, target, **kwargs):
    """Run the readout task that matches the target's dtype and shape.

    Parameters
    ----------
    reservoir_states : tuple of numpy.ndarrays
        Simulated reservoir states for training and test; the shape of each
        numpy.ndarray is n_samples, n_reservoir_nodes.
    target : tuple of numpy.ndarrays
        Training and test targets or output labels; the shape of each
        numpy.ndarray is n_samples, n_labels.
    kwargs : dict
        Extra keyword arguments passed to the selected task function.

    Returns
    -------
    pandas.DataFrame
        Single-row data frame with the task score.
    """
    # Dispatch on the target's dtype/shape, then score the task.
    task_fn = select_stat_model(y=target)
    score = task_fn(x=reservoir_states, y=target, **kwargs)
    return pd.DataFrame(data=[score], columns=['score'])
def select_stat_model(y):
    """Pick the readout model matching the target's dtype and shape.

    Parameters
    ----------
    y : numpy.ndarray or tuple of numpy.ndarray
        Target variable; when a (train, test) tuple is given, the training
        split determines the model.

    Returns
    -------
    callable
        One of regression, multiOutputRegression, classification,
        multiOutputClassification.

    Raises
    ------
    ValueError
        If the target dtype is neither floating point nor integer
        (the original implementation silently returned None here).
    """
    if isinstance(y, tuple):
        y = y[0]
    # np.issubdtype generalizes the original explicit {float32, float64} /
    # {int32, int64} membership tests to any float/int width.
    if np.issubdtype(y.dtype, np.floating):
        return multiOutputRegression if y.ndim > 1 else regression
    if np.issubdtype(y.dtype, np.integer):
        return multiOutputClassification if y.ndim > 1 else classification
    raise ValueError(f"Unsupported target dtype: {y.dtype}")
| [
"pandas.DataFrame",
"numpy.sum",
"numpy.corrcoef",
"sklearn.linear_model.RidgeClassifier",
"sklearn.linear_model.Ridge"
] | [((2131, 2144), 'numpy.sum', 'np.sum', (['score'], {}), '(score)\n', (2137, 2144), True, 'import numpy as np\n'), ((4265, 4310), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': '[score]', 'columns': "['score']"}), "(data=[score], columns=['score'])\n", (4277, 4310), True, 'import pandas as pd\n'), ((1213, 1260), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'fit_intercept': '(False)', 'alpha': '(0.5)'}), '(fit_intercept=False, alpha=0.5, **kwargs)\n', (1218, 1260), False, 'from sklearn.linear_model import Ridge, RidgeClassifier\n'), ((2306, 2362), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'alpha': '(0.0)', 'fit_intercept': '(True)'}), '(alpha=0.0, fit_intercept=True, **kwargs)\n', (2321, 2362), False, 'from sklearn.linear_model import Ridge, RidgeClassifier\n'), ((1542, 1589), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'fit_intercept': '(False)', 'alpha': '(0.5)'}), '(fit_intercept=False, alpha=0.5, **kwargs)\n', (1547, 1589), False, 'from sklearn.linear_model import Ridge, RidgeClassifier\n'), ((2922, 2978), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {'alpha': '(0.5)', 'fit_intercept': '(True)'}), '(alpha=0.5, fit_intercept=True, **kwargs)\n', (2937, 2978), False, 'from sklearn.linear_model import Ridge, RidgeClassifier\n'), ((1787, 1836), 'numpy.corrcoef', 'np.corrcoef', (['y_test[:, output]', 'y_pred[:, output]'], {}), '(y_test[:, output], y_pred[:, output])\n', (1798, 1836), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import matplotlib.pyplot as plt
def running_average(data, subset_size=10):
    """Moving average of ``data`` over windows of length ``subset_size``.

    Parameters
    ----------
    data : numpy.ndarray
        1D series to smooth.
    subset_size : int, default 10
        Window length; the output has len(data) - subset_size + 1 entries.

    Returns
    -------
    numpy.ndarray
        Mean of each consecutive window.
    """
    n_windows = data.shape[0] - subset_size + 1
    smoothed = np.zeros(n_windows)
    for start in range(n_windows):
        smoothed[start] = np.mean(data[start:start + subset_size])
    return smoothed
# Location of the stored per-episode reward series for this training run.
path_name = "models/wall/maac_ccr1/run5/"
reward_file_name = "rewards.pkl"
with open(path_name + reward_file_name, 'rb') as f:
    reward_data = np.array(pickle.load(f))
# hack to multiply with agent number and episode length
reward_data *= 6  # presumably the agent count -- TODO confirm
reward_data *= 35  # presumably the episode length -- TODO confirm
# Smooth the series before plotting (default window of 10 episodes).
reward_data = running_average(reward_data)
plt.plot(reward_data)
plt.savefig(path_name + 'new_rewards.png')
"matplotlib.pyplot.plot",
"numpy.zeros",
"pickle.load",
"numpy.mean",
"matplotlib.pyplot.savefig"
] | [((602, 623), 'matplotlib.pyplot.plot', 'plt.plot', (['reward_data'], {}), '(reward_data)\n', (610, 623), True, 'import matplotlib.pyplot as plt\n'), ((625, 667), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_name + 'new_rewards.png')"], {}), "(path_name + 'new_rewards.png')\n", (636, 667), True, 'import matplotlib.pyplot as plt\n'), ((124, 165), 'numpy.zeros', 'np.zeros', (['(data.shape[0] - subset_size + 1)'], {}), '(data.shape[0] - subset_size + 1)\n', (132, 165), True, 'import numpy as np\n'), ((227, 259), 'numpy.mean', 'np.mean', (['data[i:i + subset_size]'], {}), '(data[i:i + subset_size])\n', (234, 259), True, 'import numpy as np\n'), ((438, 452), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (449, 452), False, 'import pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 16:53:19 2019
@author: gparkes
"""
from numba import jit
import numpy as np
import matplotlib.pyplot as plt
# task 1
@jit(nopython=True)
def calc_positional_e(spins, J):
    """Neighbour-sum field for every lattice site with periodic boundaries.

    Parameters
    ----------
    spins : np.ndarray
        (N, N) lattice of +/-1 spins.
    J : float
        Coupling constant (not used in the per-site sum itself).

    Returns
    -------
    np.ndarray
        (N, N) array where E[i, j] is the sum of the four nearest
        neighbours of site (i, j).
    """
    N = spins.shape[0]
    E = np.zeros_like(spins)
    for i in range(N):
        for j in range(N):
            # Periodic boundary conditions along the i (row) axis.
            if i == 0:
                spin_l = spins[N-1,j]
                spin_r = spins[i+1,j]
            elif i == N-1:
                spin_l = spins[i-1,j]
                spin_r = spins[0,j]
            else:
                # BUG FIX: the original interior branch indexed along j
                # (spins[i,j-1] / spins[i,j+1]), duplicating the up/down
                # neighbours; use the row neighbours as delta_e does.
                spin_l = spins[i-1,j]
                spin_r = spins[i+1,j]
            # Periodic boundary conditions along the j (column) axis.
            if j == 0:
                spin_u = spins[i,N-1]
                spin_d = spins[i,j+1]
            elif j == N-1:
                spin_u = spins[i,j-1]
                spin_d = spins[i,0]
            else:
                spin_u = spins[i,j-1]
                spin_d = spins[i,j+1]
            # NOTE(review): only the neighbour sum is stored; the site's own
            # spin is not multiplied in, and total_mag_e relies on this form.
            E[i,j] = spin_l + spin_r + spin_u + spin_d
    return E
# task 2
@jit
def delta_e(spins, i, j, J):
    """Energy change from flipping spin (i, j) on a periodic lattice.

    dE = 2 * J * s_ij * (sum of the four nearest neighbours).
    """
    N = spins.shape[0]
    # Modular indexing realizes the periodic (torus) boundary conditions,
    # equivalent to the explicit edge checks it replaces.
    left = spins[(i - 1) % N, j]
    right = spins[(i + 1) % N, j]
    up = spins[i, (j - 1) % N]
    down = spins[i, (j + 1) % N]
    return 2. * J * spins[i, j] * (left + right + up + down)
# task 3
def total_mag_e(spins, J):
    """Total magnetic energy: -J times the summed per-site neighbour field."""
    return -J * calc_positional_e(spins, J).sum()
# task 4
def metropolis_hastings(N, n_steps, beta):
    """Run single-spin-flip Metropolis-Hastings on an N x N Ising lattice.

    Parameters
    ----------
    N : int
        Lattice side length.
    n_steps : int
        Number of proposed spin flips.
    beta : float
        Inverse temperature.

    Returns
    -------
    tuple
        (final spin lattice, energy series of length n_steps)
    """
    spins = np.random.choice([-1, 1], size=(N, N))
    J = 1
    energy = np.ones(n_steps)
    energy[0] = total_mag_e(spins, J)
    for step in range(1, n_steps):
        # Propose flipping one uniformly random site.
        i = np.random.randint(N)
        j = np.random.randint(N)
        dE = delta_e(spins, i, j, J)
        # Accept when the energy drops, otherwise with Boltzmann probability
        # (the random draw only happens when dE >= 0, by short-circuiting).
        if dE < 0. or np.exp(-beta * dE) > np.random.rand():
            spins[i, j] *= -1
            energy[step] = energy[step - 1] + dE
        else:
            energy[step] = energy[step - 1]
    return spins, energy
# task 5
def task_5():
    """Run two chains at different temperatures and plot energy + lattices."""
    # Low vs. high inverse temperature on a 40x40 lattice, 5e5 steps each.
    A = metropolis_hastings(40, int(5*10**5), 0.1)
    B = metropolis_hastings(40, int(5*10**5), 1.0)
    fig,ax=plt.subplots(ncols=3, figsize=(16,4))
    # Energy traces on the left panel.
    ax[0].plot(A[1], label=r"$\beta=0.1$")
    ax[0].plot(B[1], label=r"$\beta=1.0$")
    ax[0].legend()
    # Final spin configurations on the middle/right panels.
    ax[1].imshow(A[0], cmap="gray")
    ax[2].imshow(B[0], cmap='gray')
    for a in [ax[1], ax[2]]:
        a.get_xaxis().set_visible(False)
        a.get_yaxis().set_visible(False)
    ax[0].set_xlabel("steps")
    ax[0].set_ylabel(r"Energy $E$")
    plt.show()
# task 6
def task_6():
    """Sweep beta and plot the resulting average magnetization."""
    bvals = 50
    betas = np.linspace(0.1, 0.6, bvals)
    m_ser = np.zeros(bvals)
    def average_magnetization(spins, N):
        # Mean spin, normalized by the full lattice size N^2.
        return (1 / N**2) * np.sum(spins)
    for b in range(bvals):
        # print("Running beta=%.4f" % betas[b])
        C, E = metropolis_hastings(20, 500000, betas[b])
        # NOTE(review): C is already the spin lattice, so C[0] passes only its
        # first row while dividing by N^2 -- looks like it should be C; verify.
        m_ser[b] = average_magnetization(C[0], 20)
    # plot
    fig = plt.figure(figsize=(13,5))
    plt.plot(betas, m_ser, "g*")
    plt.xlabel(r"$\beta$")
    plt.ylabel(r"$M$")
    plt.show()
# task 7
"""
Uncomment this code below to run in Jupyter notebook
"""
# %prun metropolis_hastings(40, 500000, 0.5)
# %timeit metropolis_hastings(40, 500000, 0.5)
# after running cython block...
# %timeit metropolis_hastings2(40, 500000, 0.5)
def task_7():
    """Compare pure-Python and Cython chains at two temperatures.

    NOTE(review): metropolis_hastings2 is not defined in this file -- per the
    comments above it comes from a Cython cell run in the notebook first.
    """
    C1, E1 = metropolis_hastings(40, 500000, .6)
    C2, E2 = metropolis_hastings2(40, 500000, .6)
    C3, E3 = metropolis_hastings(40, 500000, .1)
    C4, E4 = metropolis_hastings2(40, 500000, .1)
    fig,ax=plt.subplots(ncols=2, figsize=(16,4))
    # Left: high beta; right: low beta.
    ax[0].plot(E1, label=r"python")
    ax[0].plot(E2, label="cython")
    ax[0].legend()
    ax[1].plot(E3, label=r"python")
    ax[1].plot(E4, label="cython")
    ax[1].legend()
| [
"numpy.zeros_like",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.ones",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numba.jit",
"numpy.exp",
"numpy.linspace",
"numpy.random.choice",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matpl... | [((194, 212), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (197, 212), False, 'from numba import jit\n'), ((375, 395), 'numpy.zeros_like', 'np.zeros_like', (['spins'], {}), '(spins)\n', (388, 395), True, 'import numpy as np\n'), ((1880, 1918), 'numpy.random.choice', 'np.random.choice', (['[-1, 1]'], {'size': '(N, N)'}), '([-1, 1], size=(N, N))\n', (1896, 1918), True, 'import numpy as np\n'), ((1942, 1958), 'numpy.ones', 'np.ones', (['n_steps'], {}), '(n_steps)\n', (1949, 1958), True, 'import numpy as np\n'), ((2618, 2656), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(3)', 'figsize': '(16, 4)'}), '(ncols=3, figsize=(16, 4))\n', (2630, 2656), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3025, 3027), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3108), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.6)', 'bvals'], {}), '(0.1, 0.6, bvals)\n', (3091, 3108), True, 'import numpy as np\n'), ((3121, 3136), 'numpy.zeros', 'np.zeros', (['bvals'], {}), '(bvals)\n', (3129, 3136), True, 'import numpy as np\n'), ((3427, 3454), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(13, 5)'}), '(figsize=(13, 5))\n', (3437, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3458, 3486), 'matplotlib.pyplot.plot', 'plt.plot', (['betas', 'm_ser', '"""g*"""'], {}), "(betas, m_ser, 'g*')\n", (3466, 3486), True, 'import matplotlib.pyplot as plt\n'), ((3491, 3513), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\beta$"""'], {}), "('$\\\\beta$')\n", (3501, 3513), True, 'import matplotlib.pyplot as plt\n'), ((3518, 3535), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$M$"""'], {}), "('$M$')\n", (3528, 3535), True, 'import matplotlib.pyplot as plt\n'), ((3541, 3551), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3549, 3551), True, 'import matplotlib.pyplot as plt\n'), ((4024, 4062), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], 
{'ncols': '(2)', 'figsize': '(16, 4)'}), '(ncols=2, figsize=(16, 4))\n', (4036, 4062), True, 'import matplotlib.pyplot as plt\n'), ((2048, 2068), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (2065, 2068), True, 'import numpy as np\n'), ((2085, 2105), 'numpy.random.randint', 'np.random.randint', (['N'], {}), '(N)\n', (2102, 2105), True, 'import numpy as np\n'), ((3207, 3220), 'numpy.sum', 'np.sum', (['spins'], {}), '(spins)\n', (3213, 3220), True, 'import numpy as np\n'), ((2278, 2296), 'numpy.exp', 'np.exp', (['(-beta * dE)'], {}), '(-beta * dE)\n', (2284, 2296), True, 'import numpy as np\n'), ((2297, 2313), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2311, 2313), True, 'import numpy as np\n')] |
""" Tests for the Deep explainer.
"""
from distutils.version import LooseVersion
from urllib.error import HTTPError
import numpy as np
import pandas as pd
import pytest
import shap
from shap import DeepExplainer
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# pylint: disable=import-outside-toplevel
def test_tf_eager():
    """ This is a basic eager example from keras.
    """
    tf = pytest.importorskip('tensorflow')
    # Eager-mode DeepExplainer is known to break on TF >= 2.4, so skip there.
    if LooseVersion(tf.__version__) >= LooseVersion("2.4.0"):
        pytest.skip("Deep explainer does not work for TF 2.4 in eager mode.")
    # Single random feature; labels are the feature bucketed into one-hot classes.
    x = pd.DataFrame({"B": np.random.random(size=(100,))})
    y = x.B
    y = y.map(lambda zz: chr(int(zz * 2 + 65))).str.get_dummies()
    # Tiny dense softmax classifier trained for two epochs.
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Dense(10, input_shape=(x.shape[1],), activation="relu"))
    model.add(tf.keras.layers.Dense(y.shape[1], input_shape=(10,), activation="softmax"))
    model.summary()
    model.compile(loss="categorical_crossentropy", optimizer="Adam")
    model.fit(x.values, y.values, epochs=2)
    # Explain against a single background sample.
    e = DeepExplainer(model, x.values[:1])
    sv = e.shap_values(x.values)
    # Base value plus SHAP values should reconstruct the class-0 output.
    assert np.abs(e.expected_value[0] + sv[0].sum(-1) - model(x.values)[:, 0]).max() < 1e-4
def test_tf_keras_mnist_cnn(): # pylint: disable=too-many-locals
    """ This is the basic mnist cnn example from keras.
    """
    tf = pytest.importorskip('tensorflow')
    from tensorflow import keras
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
    from tensorflow.keras.layers import Conv2D, MaxPooling2D
    from tensorflow.keras import backend as K
    # DeepExplainer here relies on the TF1-style graph/session API.
    tf.compat.v1.disable_eager_execution()
    batch_size = 128
    num_classes = 10
    epochs = 1
    # input image dimensions
    img_rows, img_cols = 28, 28
    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    # Reshape to NCHW or NHWC depending on the backend's data format.
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)
    # Scale pixel values into [0, 1].
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)
    # Small CNN; trained on only 1000 samples for one epoch to keep the test fast.
    model = Sequential()
    model.add(Conv2D(8, kernel_size=(3, 3),
                     activation='relu',
                     input_shape=input_shape))
    model.add(Conv2D(16, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(32, activation='relu')) # 128
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    model.fit(x_train[:1000, :], y_train[:1000, :],
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test[:1000, :], y_test[:1000, :]))
    # explain by passing the tensorflow inputs and outputs
    np.random.seed(0)
    inds = np.random.choice(x_train.shape[0], 10, replace=False)
    # Explain the pre-softmax layer input against a 10-sample background.
    e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].input), x_train[inds, :, :])
    shap_values = e.shap_values(x_test[:1])
    sess = tf.compat.v1.keras.backend.get_session()
    # SHAP values should sum to (model output) - (mean background output).
    diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \
           sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds, :, :]}).mean(0)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % d
def test_tf_keras_linear():
    """Test verifying that a linear model with linear data gives the correct result.
    """
    tf = pytest.importorskip('tensorflow')
    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Dense, Input
    from tensorflow.keras.optimizers import SGD
    # DeepExplainer here relies on the TF1-style graph/session API.
    tf.compat.v1.disable_eager_execution()
    np.random.seed(0)
    # coefficients relating y with x1 and x2.
    coef = np.array([1, 2]).T
    # generate data following a linear relationship
    x = np.random.normal(1, 10, size=(1000, len(coef)))
    y = np.dot(x, coef) + 1 + np.random.normal(scale=0.1, size=1000)
    # create a linear model
    inputs = Input(shape=(2,))
    preds = Dense(1, activation='linear')(inputs)
    model = Model(inputs=inputs, outputs=preds)
    model.compile(optimizer=SGD(), loss='mse', metrics=['mse'])
    model.fit(x, y, epochs=30, shuffle=False, verbose=0)
    # Fitted weights of the single Dense layer (approximate the true coef).
    fit_coef = model.layers[1].get_weights()[0].T[0]
    # explain
    e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), x)
    shap_values = e.shap_values(x)
    # verify that the explanation follows the equation in LinearExplainer
    values = shap_values[0] # since this is a "multi-output" model with one output
    assert values.shape == (1000, 2)
    # For a linear model, SHAP values are (x - mean(x)) * coefficient.
    expected = (x - x.mean(0)) * fit_coef
    np.testing.assert_allclose(expected - values, 0, atol=1e-5)
def test_tf_keras_imdb_lstm():
    """ Basic LSTM example using the keras API defined in tensorflow
    """
    tf = pytest.importorskip('tensorflow')
    from tensorflow.keras.datasets import imdb
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.layers import LSTM
    from tensorflow.keras.layers import Embedding
    from tensorflow.keras.preprocessing import sequence
    # DeepExplainer here relies on the TF1-style graph/session API.
    tf.compat.v1.disable_eager_execution()
    # load the data from keras
    np.random.seed(7)
    max_features = 1000
    try:
        (X_train, _), (X_test, _) = imdb.load_data(num_words=max_features)
    except Exception: # pylint: disable=broad-except
        return # this hides a bug in the most recent version of keras that prevents data loading
    # Pad/truncate every review to 100 tokens.
    X_train = sequence.pad_sequences(X_train, maxlen=100)
    X_test = sequence.pad_sequences(X_test, maxlen=100)
    # create the model. note that this is model is very small to make the test
    # run quick and we don't care about accuracy here
    mod = Sequential()
    mod.add(Embedding(max_features, 8))
    mod.add(LSTM(10, dropout=0.2, recurrent_dropout=0.2))
    mod.add(Dense(1, activation='sigmoid'))
    mod.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # select the background and test samples
    inds = np.random.choice(X_train.shape[0], 3, replace=False)
    background = X_train[inds]
    testx = X_test[10:11]
    # Question for Scott: we can explain without fitting?
    # mod.fit(X_train, y_train, epochs=1, shuffle=False, verbose=1)
    # explain a prediction and make sure it sums to the difference between the average output
    # over the background samples and the current output
    sess = tf.compat.v1.keras.backend.get_session()
    sess.run(tf.compat.v1.global_variables_initializer())
    # For debugging, can view graph:
    # writer = tf.compat.v1.summary.FileWriter("c:\\tmp", sess.graph)
    # writer.close()
    e = shap.DeepExplainer((mod.layers[0].input, mod.layers[-1].output), background)
    shap_values = e.shap_values(testx)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    # SHAP values should sum to (model output) - (mean background output).
    diff = sess.run(mod.layers[-1].output, feed_dict={mod.layers[0].input: testx})[0, :] - \
           sess.run(mod.layers[-1].output, feed_dict={mod.layers[0].input: background}).mean(0)
    assert np.allclose(sums, diff, atol=1e-02), "Sum of SHAP values does not match difference!"
def test_pytorch_mnist_cnn(tmpdir):
    """Check DeepExplainer's PyTorch backend on a small MNIST conv net.

    Trains a tiny CNN for a few batches, then verifies the SHAP summation
    property (attributions must sum to the difference between the model
    output on the explained sample and the mean output over the background
    samples), both for an interim layer and for the whole model.
    """
    torch = pytest.importorskip('torch')
    torchvision = pytest.importorskip('torchvision')
    datasets = torchvision.datasets
    transforms = torchvision.transforms
    from torch import nn
    from torch.nn import functional as F
    def run_test(train_loader, test_loader, interim):
        # Train briefly, explain one test sample, and assert the SHAP
        # summation property; `interim` selects whether attributions are
        # computed w.r.t. the first conv layer instead of the model input.
        class Net(nn.Module):
            """ Basic conv net.
            """
            def __init__(self):
                super().__init__()
                # Testing several different activations
                self.conv_layers = nn.Sequential(
                    nn.Conv2d(1, 10, kernel_size=5),
                    nn.MaxPool2d(2),
                    nn.Tanh(),
                    nn.Conv2d(10, 20, kernel_size=5),
                    nn.ConvTranspose2d(20, 20, 1),
                    nn.AdaptiveAvgPool2d(output_size=(4, 4)),
                    nn.Softplus(),
                )
                self.fc_layers = nn.Sequential(
                    nn.Linear(320, 50),
                    nn.BatchNorm1d(50),
                    nn.ReLU(),
                    nn.Linear(50, 10),
                    nn.ELU(),
                    nn.Softmax(dim=1)
                )
            def forward(self, x):
                """ Run the model.
                """
                x = self.conv_layers(x)
                # 20 channels * 4 * 4 after the adaptive pool = 320 features.
                x = x.view(-1, 320)
                x = self.fc_layers(x)
                return x
        model = Net()
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
        def train(model, device, train_loader, optimizer, epoch, cutoff=2000):
            # Short training loop; stops after `cutoff` examples so the
            # test stays fast — accuracy is irrelevant to the SHAP check.
            model.train()
            num_examples = 0
            for batch_idx, (data, target) in enumerate(train_loader):
                num_examples += target.shape[0]
                data, target = data.to(device), target.to(device)
                optimizer.zero_grad()
                output = model(data)
                # MSE against one-hot targets instead of NLL.
                loss = F.mse_loss(output, torch.eye(10)[target])
                # loss = F.nll_loss(output, target)
                loss.backward()
                optimizer.step()
                if batch_idx % 10 == 0:
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item()))
                if num_examples > cutoff:
                    break
        device = torch.device('cpu')
        train(model, device, train_loader, optimizer, 1)
        next_x, _ = next(iter(train_loader))
        # Fixed seed so the background selection is reproducible.
        np.random.seed(0)
        inds = np.random.choice(next_x.shape[0], 20, replace=False)
        if interim:
            e = shap.DeepExplainer((model, model.conv_layers[0]), next_x[inds, :, :, :])
        else:
            e = shap.DeepExplainer(model, next_x[inds, :, :, :])
        test_x, _ = next(iter(test_loader))
        input_tensor = test_x[:1]
        input_tensor.requires_grad = True
        shap_values = e.shap_values(input_tensor)
        model.eval()
        model.zero_grad()
        with torch.no_grad():
            diff = (model(test_x[:1]) - model(next_x[inds, :, :, :])).detach().numpy().mean(0)
        sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
        d = np.abs(sums - diff).sum()
        # Attributions must account for essentially all of the output delta.
        assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
    batch_size = 128
    try:
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(tmpdir, train=True, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=batch_size, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST(tmpdir, train=False, download=True,
                           transform=transforms.Compose([
                               transforms.ToTensor(),
                               transforms.Normalize((0.1307,), (0.3081,))
                           ])),
            batch_size=batch_size, shuffle=True)
    except HTTPError:
        # MNIST mirrors are flaky: skip rather than fail when the download
        # errors out. (HTTPError is imported at file level — presumably
        # from urllib.error; confirm against the full file.)
        pytest.skip()
    #print('Running test on interim layer')
    run_test(train_loader, test_loader, interim=True)
    #print('Running test on whole model')
    run_test(train_loader, test_loader, interim=False)
def test_pytorch_custom_nested_models():
    """Check DeepExplainer on a model built from nested custom sub-modules.

    Trains briefly on a synthetic regression set, explains one sample and
    asserts that the SHAP values sum to the difference between the model
    output for that sample and the mean output over the background set.
    """
    torch = pytest.importorskip('torch')
    from torch import nn
    from torch.nn import functional as F
    from torch.utils.data import TensorDataset, DataLoader
    # `sklearn.datasets.load_boston` was removed in scikit-learn 1.2, so
    # build a synthetic regression problem with the same shape instead
    # (506 samples, 13 features).
    rs = np.random.RandomState(0)
    X = rs.normal(size=(506, 13))
    y = rs.normal(size=506)
    num_features = X.shape[1]
    data = TensorDataset(torch.tensor(X).float(),
                         torch.tensor(y).float())
    loader = DataLoader(data, batch_size=128)
    class CustomNet1(nn.Module):
        """ Model 1: 1d conv stack that pools the features down to 6.
        """
        def __init__(self):
            super().__init__()
            self.net = nn.Sequential(
                nn.Sequential(
                    nn.Conv1d(1, 1, 1),
                    nn.ConvTranspose1d(1, 1, 1),
                ),
                nn.AdaptiveAvgPool1d(output_size=6),
            )
        def forward(self, X):
            """ Run the model.
            """
            # Add/remove a channel dimension around the 1d conv stack.
            return self.net(X.unsqueeze(1)).squeeze(1)
    class CustomNet2(nn.Module):
        """ Model 2: nonlinearity followed by a linear head.
        """
        def __init__(self, num_features):
            super().__init__()
            self.net = nn.Sequential(
                nn.LeakyReLU(),
                nn.Linear(num_features // 2, 2)
            )
        def forward(self, X):
            """ Run the model.
            """
            return self.net(X).unsqueeze(1)
    class CustomNet(nn.Module):
        """ Model 3: composes CustomNet1 and CustomNet2 with a max-pool.
        """
        def __init__(self, num_features):
            super().__init__()
            self.net1 = CustomNet1()
            self.net2 = CustomNet2(num_features)
            self.maxpool2 = nn.MaxPool1d(kernel_size=2)
        def forward(self, X):
            """ Run the model.
            """
            x = self.net1(X)
            return self.maxpool2(self.net2(x)).squeeze(1)
    model = CustomNet(num_features)
    optimizer = torch.optim.Adam(model.parameters())
    def train(model, device, train_loader, optimizer, epoch):
        # Brief training pass to move weights off their initialization;
        # accuracy is irrelevant to the SHAP check below.
        model.train()
        num_examples = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            num_examples += target.shape[0]
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.mse_loss(output.squeeze(1), target)
            loss.backward()
            optimizer.step()
            if batch_idx % 2 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
    device = torch.device('cpu')
    train(model, device, loader, optimizer, 1)
    next_x, _ = next(iter(loader))
    # Fixed seed so the background selection is reproducible.
    np.random.seed(0)
    inds = np.random.choice(next_x.shape[0], 20, replace=False)
    e = shap.DeepExplainer(model, next_x[inds, :])
    test_x, _ = next(iter(loader))
    shap_values = e.shap_values(test_x[:1])
    model.eval()
    model.zero_grad()
    with torch.no_grad():
        diff = (model(test_x[:1]) - model(next_x[inds, :])).detach().numpy().mean(0)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    # Attributions must account for essentially all of the output difference.
    assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
def test_pytorch_single_output():
    """Check DeepExplainer on a flat single-output PyTorch model.

    Trains briefly on a synthetic regression set, explains one sample and
    asserts that the SHAP values sum to the difference between the model
    output for that sample and the mean output over the background set.
    """
    torch = pytest.importorskip('torch')
    from torch import nn
    from torch.nn import functional as F
    from torch.utils.data import TensorDataset, DataLoader
    # `sklearn.datasets.load_boston` was removed in scikit-learn 1.2, so
    # build a synthetic regression problem with the same shape instead
    # (506 samples, 13 features).
    rs = np.random.RandomState(0)
    X = rs.normal(size=(506, 13))
    y = rs.normal(size=506)
    num_features = X.shape[1]
    data = TensorDataset(torch.tensor(X).float(),
                         torch.tensor(y).float())
    loader = DataLoader(data, batch_size=128)
    class Net(nn.Module):
        """ Test model mixing conv, pooling and linear layers.
        """
        def __init__(self, num_features):
            super().__init__()
            self.linear = nn.Linear(num_features // 2, 2)
            self.conv1d = nn.Conv1d(1, 1, 1)
            self.convt1d = nn.ConvTranspose1d(1, 1, 1)
            self.leaky_relu = nn.LeakyReLU()
            self.aapool1d = nn.AdaptiveAvgPool1d(output_size=6)
            self.maxpool2 = nn.MaxPool1d(kernel_size=2)
        def forward(self, X):
            """ Run the model.
            """
            # Conv stack pools the features down to 6 (= num_features // 2).
            x = self.aapool1d(self.convt1d(self.conv1d(X.unsqueeze(1)))).squeeze(1)
            return self.maxpool2(self.linear(self.leaky_relu(x)).unsqueeze(1)).squeeze(1)
    model = Net(num_features)
    optimizer = torch.optim.Adam(model.parameters())
    def train(model, device, train_loader, optimizer, epoch):
        # Brief training pass; accuracy is irrelevant to the SHAP check.
        model.train()
        num_examples = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            num_examples += target.shape[0]
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.mse_loss(output.squeeze(1), target)
            loss.backward()
            optimizer.step()
            if batch_idx % 2 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
    device = torch.device('cpu')
    train(model, device, loader, optimizer, 1)
    next_x, _ = next(iter(loader))
    # Fixed seed so the background selection is reproducible.
    np.random.seed(0)
    inds = np.random.choice(next_x.shape[0], 20, replace=False)
    e = shap.DeepExplainer(model, next_x[inds, :])
    test_x, _ = next(iter(loader))
    shap_values = e.shap_values(test_x[:1])
    model.eval()
    model.zero_grad()
    with torch.no_grad():
        diff = (model(test_x[:1]) - model(next_x[inds, :])).detach().numpy().mean(0)
    sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
    d = np.abs(sums - diff).sum()
    # Attributions must account for essentially all of the output difference.
    assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
def test_pytorch_multiple_inputs():
    """ Check a multi-input scenario.

    Runs the check twice: once with a model whose second input is entirely
    disconnected from the output, and once with both inputs connected.
    """
    torch = pytest.importorskip('torch')
    def _run_pytorch_multiple_inputs_test(disconnected):
        """ Testing multiple inputs.

        Parameters
        ----------
        disconnected : bool
            When True the model ignores its second input, exercising
            DeepExplainer's handling of inputs with no gradient path.
        """
        from torch import nn
        from torch.nn import functional as F
        from torch.utils.data import TensorDataset, DataLoader
        torch.manual_seed(1)
        # `sklearn.datasets.load_boston` was removed in scikit-learn 1.2,
        # so build a synthetic regression problem with the same shape
        # instead (506 samples, 13 features).
        rs = np.random.RandomState(0)
        X = rs.normal(size=(506, 13))
        y = rs.normal(size=506)
        num_features = X.shape[1]
        x1 = X[:, num_features // 2:]
        x2 = X[:, :num_features // 2]
        data = TensorDataset(torch.tensor(x1).float(),
                             torch.tensor(x2).float(),
                             torch.tensor(y).float())
        loader = DataLoader(data, batch_size=128)
        class Net(nn.Module):
            """ Testing model.
            """
            def __init__(self, num_features, disconnected):
                super().__init__()
                self.disconnected = disconnected
                if disconnected:
                    # Only the first input (num_features // 2 + 1 columns)
                    # feeds the linear layer.
                    num_features = num_features // 2 + 1
                self.linear = nn.Linear(num_features, 2)
                self.output = nn.Sequential(
                    nn.MaxPool1d(2),
                    nn.ReLU()
                )
            def forward(self, x1, x2):
                """ Run the model.
                """
                if self.disconnected:
                    x = self.linear(x1).unsqueeze(1)
                else:
                    x = self.linear(torch.cat((x1, x2), dim=-1)).unsqueeze(1)
                return self.output(x).squeeze(1)
        model = Net(num_features, disconnected)
        optimizer = torch.optim.Adam(model.parameters())
        def train(model, device, train_loader, optimizer, epoch):
            # Brief training pass; accuracy is irrelevant to the SHAP check.
            model.train()
            num_examples = 0
            for batch_idx, (data1, data2, target) in enumerate(train_loader):
                num_examples += target.shape[0]
                data1, data2, target = data1.to(device), data2.to(device), target.to(device)
                optimizer.zero_grad()
                output = model(data1, data2)
                loss = F.mse_loss(output.squeeze(1), target)
                loss.backward()
                optimizer.step()
                if batch_idx % 2 == 0:
                    # Bug fix: this print used len(data), which resolved to
                    # the whole TensorDataset from the enclosing scope (the
                    # loop variables here are data1/data2), so the progress
                    # count was wrong. Use the current batch size instead.
                    print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                        epoch, batch_idx * len(data1), len(train_loader.dataset),
                        100. * batch_idx / len(train_loader), loss.item()))
        device = torch.device('cpu')
        train(model, device, loader, optimizer, 1)
        next_x1, next_x2, _ = next(iter(loader))
        # Fixed seed so the background selection is reproducible.
        np.random.seed(0)
        inds = np.random.choice(next_x1.shape[0], 20, replace=False)
        background = [next_x1[inds, :], next_x2[inds, :]]
        e = shap.DeepExplainer(model, background)
        test_x1, test_x2, _ = next(iter(loader))
        shap_x1, shap_x2 = e.shap_values([test_x1[:1], test_x2[:1]])
        model.eval()
        model.zero_grad()
        with torch.no_grad():
            diff = (model(test_x1[:1], test_x2[:1]) - model(*background)).detach().numpy().mean(0)
        # SHAP values across both inputs must jointly sum to the output delta.
        sums = np.array([shap_x1[i].sum() + shap_x2[i].sum() for i in range(len(shap_x1))])
        d = np.abs(sums - diff).sum()
        assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
    _run_pytorch_multiple_inputs_test(disconnected=True)
    _run_pytorch_multiple_inputs_test(disconnected=False)
| [
"numpy.random.seed",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.abs",
"tensorflow.keras.layers.Dense",
"numpy.allclose",
"tensorflow.keras.optimizers.SGD",
"torch.nn.MaxPool1d",
"sklearn.datasets.load_boston",
"torch.nn.Softmax",
"tensorflow.keras.models.Sequential",
"numpy.random.normal",
... | [((390, 423), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {}), "('tensorflow')\n", (409, 423), False, 'import pytest\n'), ((1063, 1097), 'shap.DeepExplainer', 'DeepExplainer', (['model', 'x.values[:1]'], {}), '(model, x.values[:1])\n', (1076, 1097), False, 'from shap import DeepExplainer\n'), ((1363, 1396), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {}), "('tensorflow')\n", (1382, 1396), False, 'import pytest\n'), ((1923, 1955), 'tensorflow.keras.datasets.mnist.load_data', 'keras.datasets.mnist.load_data', ([], {}), '()\n', (1953, 1955), False, 'from tensorflow import keras\n'), ((2587, 2635), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2613, 2635), False, 'from tensorflow import keras\n'), ((2649, 2696), 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2675, 2696), False, 'from tensorflow import keras\n'), ((2710, 2722), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2720, 2722), False, 'from tensorflow.keras.models import Sequential\n'), ((3592, 3609), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3606, 3609), True, 'import numpy as np\n'), ((3621, 3674), 'numpy.random.choice', 'np.random.choice', (['x_train.shape[0]', '(10)'], {'replace': '(False)'}), '(x_train.shape[0], 10, replace=False)\n', (3637, 3674), True, 'import numpy as np\n'), ((3683, 3776), 'shap.DeepExplainer', 'shap.DeepExplainer', (['(model.layers[0].input, model.layers[-1].input)', 'x_train[inds, :, :]'], {}), '((model.layers[0].input, model.layers[-1].input), x_train\n [inds, :, :])\n', (3701, 3776), False, 'import shap\n'), ((4407, 4440), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {}), "('tensorflow')\n", (4426, 4440), False, 'import pytest\n'), ((4638, 4655), 'numpy.random.seed', 'np.random.seed', 
(['(0)'], {}), '(0)\n', (4652, 4655), True, 'import numpy as np\n'), ((4953, 4970), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (4958, 4970), False, 'from tensorflow.keras.layers import Dense, Input\n'), ((5034, 5069), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'preds'}), '(inputs=inputs, outputs=preds)\n', (5039, 5069), False, 'from tensorflow.keras.models import Model\n'), ((5268, 5339), 'shap.DeepExplainer', 'shap.DeepExplainer', (['(model.layers[0].input, model.layers[-1].output)', 'x'], {}), '((model.layers[0].input, model.layers[-1].output), x)\n', (5286, 5339), False, 'import shap\n'), ((5618, 5678), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(expected - values)', '(0)'], {'atol': '(1e-05)'}), '(expected - values, 0, atol=1e-05)\n', (5644, 5678), True, 'import numpy as np\n'), ((5797, 5830), 'pytest.importorskip', 'pytest.importorskip', (['"""tensorflow"""'], {}), "('tensorflow')\n", (5816, 5830), False, 'import pytest\n'), ((6207, 6224), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (6221, 6224), True, 'import numpy as np\n'), ((6497, 6540), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X_train'], {'maxlen': '(100)'}), '(X_train, maxlen=100)\n', (6519, 6540), False, 'from tensorflow.keras.preprocessing import sequence\n'), ((6554, 6596), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['X_test'], {'maxlen': '(100)'}), '(X_test, maxlen=100)\n', (6576, 6596), False, 'from tensorflow.keras.preprocessing import sequence\n'), ((6741, 6753), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6751, 6753), False, 'from tensorflow.keras.models import Sequential\n'), ((7037, 7089), 'numpy.random.choice', 'np.random.choice', (['X_train.shape[0]', '(3)'], {'replace': '(False)'}), '(X_train.shape[0], 3, replace=False)\n', (7053, 7089), True, 'import numpy as 
np\n'), ((7672, 7748), 'shap.DeepExplainer', 'shap.DeepExplainer', (['(mod.layers[0].input, mod.layers[-1].output)', 'background'], {}), '((mod.layers[0].input, mod.layers[-1].output), background)\n', (7690, 7748), False, 'import shap\n'), ((8062, 8096), 'numpy.allclose', 'np.allclose', (['sums', 'diff'], {'atol': '(0.01)'}), '(sums, diff, atol=0.01)\n', (8073, 8096), True, 'import numpy as np\n'), ((8252, 8280), 'pytest.importorskip', 'pytest.importorskip', (['"""torch"""'], {}), "('torch')\n", (8271, 8280), False, 'import pytest\n'), ((8299, 8333), 'pytest.importorskip', 'pytest.importorskip', (['"""torchvision"""'], {}), "('torchvision')\n", (8318, 8333), False, 'import pytest\n'), ((12779, 12807), 'pytest.importorskip', 'pytest.importorskip', (['"""torch"""'], {}), "('torch')\n", (12798, 12807), False, 'import pytest\n'), ((12991, 13019), 'sklearn.datasets.load_boston', 'load_boston', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (13002, 13019), False, 'from sklearn.datasets import load_boston\n'), ((13163, 13195), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': '(128)'}), '(data, batch_size=128)\n', (13173, 13195), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((15477, 15494), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (15491, 15494), True, 'import numpy as np\n'), ((15506, 15558), 'numpy.random.choice', 'np.random.choice', (['next_x.shape[0]', '(20)'], {'replace': '(False)'}), '(next_x.shape[0], 20, replace=False)\n', (15522, 15558), True, 'import numpy as np\n'), ((15567, 15609), 'shap.DeepExplainer', 'shap.DeepExplainer', (['model', 'next_x[inds, :]'], {}), '(model, next_x[inds, :])\n', (15585, 15609), False, 'import shap\n'), ((16158, 16186), 'pytest.importorskip', 'pytest.importorskip', (['"""torch"""'], {}), "('torch')\n", (16177, 16186), False, 'import pytest\n'), ((16370, 16398), 'sklearn.datasets.load_boston', 'load_boston', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', 
(16381, 16398), False, 'from sklearn.datasets import load_boston\n'), ((16542, 16574), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': '(128)'}), '(data, batch_size=128)\n', (16552, 16574), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((18217, 18234), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (18231, 18234), True, 'import numpy as np\n'), ((18246, 18298), 'numpy.random.choice', 'np.random.choice', (['next_x.shape[0]', '(20)'], {'replace': '(False)'}), '(next_x.shape[0], 20, replace=False)\n', (18262, 18298), True, 'import numpy as np\n'), ((18307, 18349), 'shap.DeepExplainer', 'shap.DeepExplainer', (['model', 'next_x[inds, :]'], {}), '(model, next_x[inds, :])\n', (18325, 18349), False, 'import shap\n'), ((18908, 18936), 'pytest.importorskip', 'pytest.importorskip', (['"""torch"""'], {}), "('torch')\n", (18927, 18936), False, 'import pytest\n'), ((431, 459), 'distutils.version.LooseVersion', 'LooseVersion', (['tf.__version__'], {}), '(tf.__version__)\n', (443, 459), False, 'from distutils.version import LooseVersion\n'), ((463, 484), 'distutils.version.LooseVersion', 'LooseVersion', (['"""2.4.0"""'], {}), "('2.4.0')\n", (475, 484), False, 'from distutils.version import LooseVersion\n'), ((494, 563), 'pytest.skip', 'pytest.skip', (['"""Deep explainer does not work for TF 2.4 in eager mode."""'], {}), "('Deep explainer does not work for TF 2.4 in eager mode.')\n", (505, 563), False, 'import pytest\n'), ((1964, 1985), 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (1983, 1985), True, 'from tensorflow.keras import backend as K\n'), ((2737, 2810), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(8)'], {'kernel_size': '(3, 3)', 'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(8, kernel_size=(3, 3), activation='relu', input_shape=input_shape)\n", (2743, 2810), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((2868, 2905), 
'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""'}), "(16, (3, 3), activation='relu')\n", (2874, 2905), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((2921, 2951), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2933, 2951), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((2967, 2980), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (2974, 2980), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation\n'), ((2996, 3005), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3003, 3005), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation\n'), ((3021, 3049), 'tensorflow.keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (3026, 3049), False, 'from tensorflow.keras.layers import Dense\n'), ((3071, 3083), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3078, 3083), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation\n'), ((3099, 3117), 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), '(num_classes)\n', (3104, 3117), False, 'from tensorflow.keras.layers import Dense\n'), ((3133, 3154), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3143, 3154), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation\n'), ((4714, 4730), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (4722, 4730), True, 'import numpy as np\n'), ((4872, 4910), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.1)', 'size': '(1000)'}), '(scale=0.1, size=1000)\n', (4888, 4910), True, 'import numpy as np\n'), ((4983, 5012), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (4988, 5012), 
False, 'from tensorflow.keras.layers import Dense\n'), ((6294, 6332), 'tensorflow.keras.datasets.imdb.load_data', 'imdb.load_data', ([], {'num_words': 'max_features'}), '(num_words=max_features)\n', (6308, 6332), False, 'from tensorflow.keras.datasets import imdb\n'), ((6766, 6792), 'tensorflow.keras.layers.Embedding', 'Embedding', (['max_features', '(8)'], {}), '(max_features, 8)\n', (6775, 6792), False, 'from tensorflow.keras.layers import Embedding\n'), ((6806, 6850), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(10)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.2)'}), '(10, dropout=0.2, recurrent_dropout=0.2)\n', (6810, 6850), False, 'from tensorflow.keras.layers import LSTM\n'), ((6864, 6894), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (6869, 6894), False, 'from tensorflow.keras.layers import Dense\n'), ((10815, 10832), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (10829, 10832), True, 'import numpy as np\n'), ((10848, 10900), 'numpy.random.choice', 'np.random.choice', (['next_x.shape[0]', '(20)'], {'replace': '(False)'}), '(next_x.shape[0], 20, replace=False)\n', (10864, 10900), True, 'import numpy as np\n'), ((19273, 19301), 'sklearn.datasets.load_boston', 'load_boston', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (19284, 19301), False, 'from sklearn.datasets import load_boston\n'), ((19593, 19625), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': '(128)'}), '(data, batch_size=128)\n', (19603, 19625), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((21543, 21560), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (21557, 21560), True, 'import numpy as np\n'), ((21576, 21629), 'numpy.random.choice', 'np.random.choice', (['next_x1.shape[0]', '(20)'], {'replace': '(False)'}), '(next_x1.shape[0], 20, replace=False)\n', (21592, 21629), True, 'import numpy as np\n'), ((21700, 21737), 'shap.DeepExplainer', 
'shap.DeepExplainer', (['model', 'background'], {}), '(model, background)\n', (21718, 21737), False, 'import shap\n'), ((592, 621), 'numpy.random.random', 'np.random.random', ([], {'size': '(100,)'}), '(size=(100,))\n', (608, 621), True, 'import numpy as np\n'), ((3247, 3274), 'tensorflow.keras.optimizers.Adadelta', 'keras.optimizers.Adadelta', ([], {}), '()\n', (3272, 3274), False, 'from tensorflow import keras\n'), ((4151, 4170), 'numpy.abs', 'np.abs', (['(sums - diff)'], {}), '(sums - diff)\n', (4157, 4170), True, 'import numpy as np\n'), ((4850, 4865), 'numpy.dot', 'np.dot', (['x', 'coef'], {}), '(x, coef)\n', (4856, 4865), True, 'import numpy as np\n'), ((5098, 5103), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {}), '()\n', (5101, 5103), False, 'from tensorflow.keras.optimizers import SGD\n'), ((10937, 11009), 'shap.DeepExplainer', 'shap.DeepExplainer', (['(model, model.conv_layers[0])', 'next_x[inds, :, :, :]'], {}), '((model, model.conv_layers[0]), next_x[inds, :, :, :])\n', (10955, 11009), False, 'import shap\n'), ((11040, 11088), 'shap.DeepExplainer', 'shap.DeepExplainer', (['model', 'next_x[inds, :, :, :]'], {}), '(model, next_x[inds, :, :, :])\n', (11058, 11088), False, 'import shap\n'), ((12476, 12489), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12487, 12489), False, 'import pytest\n'), ((14346, 14373), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (14358, 14373), False, 'from torch import nn\n'), ((15925, 15944), 'numpy.abs', 'np.abs', (['(sums - diff)'], {}), '(sums - diff)\n', (15931, 15944), True, 'import numpy as np\n'), ((16737, 16768), 'torch.nn.Linear', 'nn.Linear', (['(num_features // 2)', '(2)'], {}), '(num_features // 2, 2)\n', (16746, 16768), False, 'from torch import nn\n'), ((16795, 16813), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (16804, 16813), False, 'from torch import nn\n'), ((16841, 16868), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(1)', 
'(1)', '(1)'], {}), '(1, 1, 1)\n', (16859, 16868), False, 'from torch import nn\n'), ((16899, 16913), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (16911, 16913), False, 'from torch import nn\n'), ((16942, 16977), 'torch.nn.AdaptiveAvgPool1d', 'nn.AdaptiveAvgPool1d', ([], {'output_size': '(6)'}), '(output_size=6)\n', (16962, 16977), False, 'from torch import nn\n'), ((17006, 17033), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (17018, 17033), False, 'from torch import nn\n'), ((18665, 18684), 'numpy.abs', 'np.abs', (['(sums - diff)'], {}), '(sums - diff)\n', (18671, 18684), True, 'import numpy as np\n'), ((11525, 11544), 'numpy.abs', 'np.abs', (['(sums - diff)'], {}), '(sums - diff)\n', (11531, 11544), True, 'import numpy as np\n'), ((13515, 13550), 'torch.nn.AdaptiveAvgPool1d', 'nn.AdaptiveAvgPool1d', ([], {'output_size': '(6)'}), '(output_size=6)\n', (13535, 13550), False, 'from torch import nn\n'), ((13893, 13907), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (13905, 13907), False, 'from torch import nn\n'), ((13925, 13956), 'torch.nn.Linear', 'nn.Linear', (['(num_features // 2)', '(2)'], {}), '(num_features // 2, 2)\n', (13934, 13956), False, 'from torch import nn\n'), ((19968, 19994), 'torch.nn.Linear', 'nn.Linear', (['num_features', '(2)'], {}), '(num_features, 2)\n', (19977, 19994), False, 'from torch import nn\n'), ((22137, 22156), 'numpy.abs', 'np.abs', (['(sums - diff)'], {}), '(sums - diff)\n', (22143, 22156), True, 'import numpy as np\n'), ((4192, 4204), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4198, 4204), True, 'import numpy as np\n'), ((8805, 8836), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (8814, 8836), False, 'from torch import nn\n'), ((8858, 8873), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (8870, 8873), False, 'from torch import nn\n'), ((8895, 8904), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), 
'()\n', (8902, 8904), False, 'from torch import nn\n'), ((8926, 8958), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (8935, 8958), False, 'from torch import nn\n'), ((8980, 9009), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(20)', '(20)', '(1)'], {}), '(20, 20, 1)\n', (8998, 9009), False, 'from torch import nn\n'), ((9031, 9071), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ([], {'output_size': '(4, 4)'}), '(output_size=(4, 4))\n', (9051, 9071), False, 'from torch import nn\n'), ((9093, 9106), 'torch.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (9104, 9106), False, 'from torch import nn\n'), ((9194, 9212), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (9203, 9212), False, 'from torch import nn\n'), ((9234, 9252), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(50)'], {}), '(50)\n', (9248, 9252), False, 'from torch import nn\n'), ((9274, 9283), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9281, 9283), False, 'from torch import nn\n'), ((9305, 9322), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 10)\n', (9314, 9322), False, 'from torch import nn\n'), ((9344, 9352), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (9350, 9352), False, 'from torch import nn\n'), ((9374, 9391), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (9384, 9391), False, 'from torch import nn\n'), ((13411, 13429), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (13420, 13429), False, 'from torch import nn\n'), ((13451, 13478), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (13469, 13478), False, 'from torch import nn\n'), ((15966, 15978), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (15972, 15978), True, 'import numpy as np\n'), ((16052, 16064), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (16058, 16064), True, 'import numpy as np\n'), ((18706, 18718), 'numpy.abs', 'np.abs', 
(['diff'], {}), '(diff)\n', (18712, 18718), True, 'import numpy as np\n'), ((18792, 18804), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (18798, 18804), True, 'import numpy as np\n'), ((20060, 20075), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(2)'], {}), '(2)\n', (20072, 20075), False, 'from torch import nn\n'), ((20097, 20106), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (20104, 20106), False, 'from torch import nn\n'), ((11570, 11582), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (11576, 11582), True, 'import numpy as np\n'), ((11656, 11668), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (11662, 11668), True, 'import numpy as np\n'), ((22182, 22194), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (22188, 22194), True, 'import numpy as np\n'), ((22268, 22280), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (22274, 22280), True, 'import numpy as np\n')] |
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import time
import tensorflow as tf
import json
import tensorflow_hub as hub
import numpy as np
# Address and port the HTTP server binds to (LAN address hard-coded;
# adjust per deployment).
hostName = "192.168.1.3"
hostPort = 80
# TF-Hub handle for the Universal Sentence Encoder v4.
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
# Load the encoder once at import time (downloads/caches on first run);
# `model(list_of_strings)` returns one embedding per input string.
model = hub.load(module_url)
print("module %s loaded" % module_url)
def similarity(message1, message2):
    """Return the inner-product similarity between two sentence embeddings.

    Both messages are embedded with the module-level Universal Sentence
    Encoder `model`; the score is the inner product of the two vectors.
    """
    embeddings = model([message1, message2])
    inner_products = np.inner(embeddings, embeddings)
    # Off-diagonal entry = <embedding(message1), embedding(message2)>.
    return inner_products[0][1]
class MyServer(BaseHTTPRequestHandler):
    """HTTP handler that scores semantic similarity between two strings.

    Expects GET requests of the form ``/?title=<text>&area=<text>`` and
    responds 200 with the plain-text similarity score computed by the
    module-level ``similarity`` function. Malformed requests answer "0".
    """

    def do_GET(self):
        """Parse the title/area query parameters and reply with their similarity."""
        # Local import so the file-level import block stays untouched.
        from urllib.parse import parse_qs
        query = urlparse(self.path).query
        result = 0
        print("client address: ", self.client_address[0])
        try:
            # parse_qs percent-decodes values properly (the previous manual
            # split only handled "%20") and tolerates repeated keys.
            query_components = parse_qs(query)
            title = query_components["title"][0]
            area = query_components["area"][0]
            result = similarity(area, title)
            print("title = ", title)
            print("area = ", area)
            print("response = ", result)
        except Exception:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # still propagate; any malformed query falls back to result = 0.
            print("Invalid request")
        self.send_response(200)
        # self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(bytes(str(result), "utf-8"))
# Smoke-test the loaded encoder before serving. NOTE(review): the message
# says "learning python" but the argument passed is "learning java" — the
# printed label does not match the actual input.
print("testing ts model: similarity between programmer and learning python in one hour: ", similarity("programmer", "learning java in one hour"))
myServer = HTTPServer((hostName, hostPort), MyServer)
print(time.asctime(), "Server Starts - %s:%s" % (hostName, hostPort))
try:
    # Block serving requests until interrupted with Ctrl+C.
    myServer.serve_forever()
except KeyboardInterrupt:
    pass
myServer.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort))
print(time.asctime(), "Server Stops - %s:%s" % (hostName, hostPort)) | [
"time.asctime",
"http.server.HTTPServer",
"tensorflow_hub.load",
"numpy.inner",
"urllib.parse.urlparse"
] | [((308, 328), 'tensorflow_hub.load', 'hub.load', (['module_url'], {}), '(module_url)\n', (316, 328), True, 'import tensorflow_hub as hub\n'), ((1508, 1550), 'http.server.HTTPServer', 'HTTPServer', (['(hostName, hostPort)', 'MyServer'], {}), '((hostName, hostPort), MyServer)\n', (1518, 1550), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n'), ((1557, 1571), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1569, 1571), False, 'import time\n'), ((1722, 1736), 'time.asctime', 'time.asctime', ([], {}), '()\n', (1734, 1736), False, 'import time\n'), ((465, 515), 'numpy.inner', 'np.inner', (['message_embeddings_', 'message_embeddings_'], {}), '(message_embeddings_, message_embeddings_)\n', (473, 515), True, 'import numpy as np\n'), ((602, 621), 'urllib.parse.urlparse', 'urlparse', (['self.path'], {}), '(self.path)\n', (610, 621), False, 'from urllib.parse import urlparse\n')] |
"""
Copyright (c) 2020 University of Massachusetts
All rights reserved.
This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
Authors: <NAME> <<EMAIL>>
"""
"""
Run WASR inference on a single image, return the segmented image mask
Based on wasr_inference_noimu_general.py at https://github.com/bborja/wasr_network (Apache-2 License)
"""
import tensorflow as tf
import numpy as np
#from wasr_models import wasr_NOIMU2, ImageReader, decode_labels, prepare_label
from wasr_models import wasr_NOIMU2
# COLOR MEANS OF IMAGES FROM MODDv1 DATASET
# Per-channel means subtracted from the input tensor; channel ordering is
# assumed to match the RGB tensor built in WASR_net.__init__ -- TODO confirm
# against the training pipeline.
IMG_MEAN = np.array((148.8430, 171.0260, 162.4082), dtype=np.float32)
# Number of classes
NUM_CLASSES = 3
# Input image size. Our network expects images of resolution 512x384
IMG_SIZE = [384, 512]  # [height, width]
""" WASR network object for inference"""
class WASR_net(object):
    """TensorFlow 1.x inference wrapper for the WASR segmentation network.

    Builds the graph once in ``__init__`` (placeholder -> BGR->RGB swap ->
    mean subtraction -> wasr_NOIMU2 -> bilinear upsample -> argmax), opens a
    session, and restores the weights from ``weights_path``.
    """
    def __init__( self, weights_path, per_process_gpu_memory_fraction = 0 ):
        # Create network
        # Input placeholder: a single uint8 image of shape (height, width, 3).
        self.img_input = tf.placeholder(dtype=tf.uint8, shape=(IMG_SIZE[0], IMG_SIZE[1], 3))
        # Convert from opencv BGR to tensorflow's RGB format
        img_b, img_g, img_r = tf.split(axis=2, num_or_size_splits=3, value=self.img_input)
        # Join and subtract means
        img = tf.cast(tf.concat(axis=2, values=[img_r, img_g, img_b]), dtype=tf.float32)
        img -= IMG_MEAN
        # Expand first dimension
        #img = tf.expand_dims(img, dim=0) # tf 1.2
        img = tf.expand_dims(img, axis=0)
        with tf.variable_scope('', reuse=False):
            net = wasr_NOIMU2({'data': img}, is_training=False, num_classes=NUM_CLASSES)
        # Which variables to load...
        restore_var = tf.global_variables()
        # Predictions (last layer of the decoder)
        raw_output = net.layers['fc1_voc12']
        # Upsample image to the original resolution
        raw_output = tf.image.resize_bilinear(raw_output, tf.shape(img)[1:3, ])
        #raw_output = tf.argmax(raw_output, dimension=3) # tf 1.2
        #pred = tf.expand_dims(raw_output, dim=3) # tf 1.2
        # Per-pixel class label, kept as a trailing singleton channel.
        raw_output = tf.argmax(raw_output, axis=3)
        self.pred = tf.expand_dims(raw_output, axis=3)
        # Set up TF session and initialize variables.
        config = tf.ConfigProto()
        # limit gpu,if specified
        if per_process_gpu_memory_fraction > 0:
            config.gpu_options.per_process_gpu_memory_fraction=per_process_gpu_memory_fraction
        else:
            config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        init = tf.global_variables_initializer()
        self.sess.run(init)
        # Load weights
        self.loader = tf.train.Saver(var_list=restore_var)
        self.loader.restore( self.sess, weights_path )
    def predict( self, img_in ):
        # Run inference
        # img_in must match the placeholder shape (IMG_SIZE[0], IMG_SIZE[1], 3).
        preds = self.sess.run( self.pred, feed_dict={self.img_input: img_in})
        # just convert prediction mask to uint8, return it in 2D
        result = np.squeeze(preds[0]).astype('uint8')
        return result | [
"tensorflow.train.Saver",
"wasr_models.wasr_NOIMU2",
"tensorflow.argmax",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.global_variables",
"tensorflow.ConfigProto",
"numpy.array",
"tensor... | [((628, 684), 'numpy.array', 'np.array', (['(148.843, 171.026, 162.4082)'], {'dtype': 'np.float32'}), '((148.843, 171.026, 162.4082), dtype=np.float32)\n', (636, 684), True, 'import numpy as np\n'), ((1011, 1078), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8', 'shape': '(IMG_SIZE[0], IMG_SIZE[1], 3)'}), '(dtype=tf.uint8, shape=(IMG_SIZE[0], IMG_SIZE[1], 3))\n', (1025, 1078), True, 'import tensorflow as tf\n'), ((1171, 1231), 'tensorflow.split', 'tf.split', ([], {'axis': '(2)', 'num_or_size_splits': '(3)', 'value': 'self.img_input'}), '(axis=2, num_or_size_splits=3, value=self.img_input)\n', (1179, 1231), True, 'import tensorflow as tf\n'), ((1480, 1507), 'tensorflow.expand_dims', 'tf.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1494, 1507), True, 'import tensorflow as tf\n'), ((1707, 1728), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1726, 1728), True, 'import tensorflow as tf\n'), ((2104, 2133), 'tensorflow.argmax', 'tf.argmax', (['raw_output'], {'axis': '(3)'}), '(raw_output, axis=3)\n', (2113, 2133), True, 'import tensorflow as tf\n'), ((2154, 2188), 'tensorflow.expand_dims', 'tf.expand_dims', (['raw_output'], {'axis': '(3)'}), '(raw_output, axis=3)\n', (2168, 2188), True, 'import tensorflow as tf\n'), ((2261, 2277), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2275, 2277), True, 'import tensorflow as tf\n'), ((2541, 2566), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2551, 2566), True, 'import tensorflow as tf\n'), ((2582, 2615), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2613, 2615), True, 'import tensorflow as tf\n'), ((2691, 2727), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'restore_var'}), '(var_list=restore_var)\n', (2705, 2727), True, 'import tensorflow as tf\n'), ((1289, 1336), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(2)', 'values': 
'[img_r, img_g, img_b]'}), '(axis=2, values=[img_r, img_g, img_b])\n', (1298, 1336), True, 'import tensorflow as tf\n'), ((1522, 1556), 'tensorflow.variable_scope', 'tf.variable_scope', (['""""""'], {'reuse': '(False)'}), "('', reuse=False)\n", (1539, 1556), True, 'import tensorflow as tf\n'), ((1576, 1646), 'wasr_models.wasr_NOIMU2', 'wasr_NOIMU2', (["{'data': img}"], {'is_training': '(False)', 'num_classes': 'NUM_CLASSES'}), "({'data': img}, is_training=False, num_classes=NUM_CLASSES)\n", (1587, 1646), False, 'from wasr_models import wasr_NOIMU2\n'), ((1936, 1949), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (1944, 1949), True, 'import tensorflow as tf\n'), ((3011, 3031), 'numpy.squeeze', 'np.squeeze', (['preds[0]'], {}), '(preds[0])\n', (3021, 3031), True, 'import numpy as np\n')] |
"""Classes to compute cluster centers with different algorithms.
This file contains the generic centering class and various different
implementations of different centering algorithms. The main algorithm is
CenteringWcenZred, but others are included as part of the centering training
procedure.
"""
import fitsio
import esutil
import numpy as np
from .utilities import gaussFunction
from .utilities import interpol
class Centering(object):
    """
    Generic centering base class for computing cluster centers.
    """
    def __init__(self, cluster, zlambda_corr=None):
        """
        Instantiate a Centering object for a cluster.
        Parameters
        ----------
        cluster: `redmapper.Cluster`
            Cluster to compute centering
        zlambda_corr: `redmapper.ZlambdaCorrectionPar`, optional
            z_lambda correction parameters, if desired.  Default is None.
        """
        # Hold on to the cluster and the optional z_lambda correction.
        self.cluster = cluster
        self.zlambda_corr = zlambda_corr
        # Convenience aliases into the cluster structure.
        self.zredstr = cluster.zredstr
        self.config = cluster.config
        self.cosmo = cluster.cosmo
        ncand = self.config.percolation_maxcen
        # Candidate sky positions start at an impossible location (-400 deg).
        self.ra = np.full(ncand, -400.0)
        self.dec = np.full(ncand, -400.0)
        # Candidate member indices start unset (-1).
        self.index = np.full(ncand, -1, dtype=np.int32)
        self.ngood = 0
        self.maxind = -1
        self.lnlamlike = -1.0
        self.lnbcglike = -1.0
        # Per-candidate probabilities, all initialized to zero.
        self.p_cen = np.zeros(ncand)
        self.q_cen = np.zeros(ncand)
        self.p_fg = np.zeros(ncand)
        self.p_sat = np.zeros(ncand)
        self.p_c = np.zeros(ncand)
        self.q_miss = 0.0
    def find_center(self):
        """
        Stub to be overridden by concrete centering implementations.
        Returns
        -------
        success: `bool`
            Always False in the base class.
        """
        return False
class CenteringBCG(Centering):
    """
    Centering class using the brightest cluster galaxy (BCG) algorithm.
    """
    def find_center(self):
        """
        Find the center with the BCG prescription.
        Candidates are the neighbors inside r_lambda that either have
        pmem above a fixed cut (0.8) or a zred consistent with the cluster
        redshift to within two sigma.  The brightest candidate (smallest
        refmag) is declared the central galaxy with probability 1.
        Sets self.maxind, self.ra, self.dec, self.ngood, self.index[0],
        self.p_cen[0], self.q_cen[0], and self.p_sat[0].
        Returns
        -------
        success: `bool`
            True when at least one candidate exists; False otherwise.
        """
        # This is somewhat arbitrary, and is not yet configurable
        pmem_cut = 0.8
        gals = self.cluster.neighbors
        within_radius = gals.r < self.cluster.r_lambda
        zred_consistent = np.abs(gals.zred - self.cluster.redshift) < 2.0 * gals.zred_e
        candidates, = np.where(within_radius & ((gals.pmem > pmem_cut) | zred_consistent))
        if candidates.size == 0:
            return False
        # Brightest galaxy == smallest magnitude.
        brightest = np.argmin(gals.refmag[candidates])
        self.maxind = candidates[brightest]
        self.ra = np.array([gals.ra[self.maxind]])
        self.dec = np.array([gals.dec[self.maxind]])
        self.ngood = 1
        self.index[0] = self.maxind
        # The single BCG candidate carries all of the centering probability.
        self.p_cen[0] = 1.0
        self.q_cen[0] = 1.0
        self.p_sat[0] = 0.0
        return True
class CenteringWcenZred(Centering):
    """
    Centering class using the "wcen-zred" algorithm.
    This algorithm computes the primary centering likelihood algorithm by
    computing the connectivity of the members, as well as ensuring
    consistency between zred of the candidates and the cluster redshift.
    """
    def find_center(self):
        """
        Find the center using the CenteringWcenZred algorithm.
        This algorithm computes the primary centering likelihood algorithm by
        computing the connectivity of the members, as well as ensuring
        consistency between zred of the candidates and the cluster redshift.
        Will set self.maxind (index of best center); self.ra, self.dec
        (position of best center); self.ngood (number of good candidates);
        self.index[:] (indices of all the candidates); self.p_cen[:] (pcen
        centering probabilities); self.q_cen[:] (qcen unused miss
        probabilities); self.p_sat[:] (p_sat satellite probabilities).
        Returns
        -------
        success: `bool`
            True when a center is successfully found. (Always True).
        """
        # These are the galaxies considered as candidate centers:
        # inside r_lambda, sufficiently unmasked (pfree), an acceptable zred
        # fit, and either a member (pmem > 0) or zred consistent with the
        # cluster redshift within five sigma.
        use, = np.where((self.cluster.neighbors.r < self.cluster.r_lambda) &
                        (self.cluster.neighbors.pfree >= self.config.percolation_pbcg_cut) &
                        (self.cluster.neighbors.zred_chisq < self.config.wcen_zred_chisq_max) &
                        ((self.cluster.neighbors.pmem > 0.0) |
                         (np.abs(self.cluster.redshift - self.cluster.neighbors.zred) < 5.0 * self.cluster.neighbors.zred_e)))
        # Do the phi_cen filter
        # Gaussian in magnitude around mbar, the richness-dependent expected
        # central magnitude.
        mbar = self.cluster.mstar + self.config.wcen_Delta0 + self.config.wcen_Delta1 * np.log(self.cluster.Lambda / self.config.wcen_pivot)
        phi_cen = gaussFunction(self.cluster.neighbors.refmag[use],
                              1. / (np.sqrt(2. * np.pi) * self.config.wcen_sigma_m),
                              mbar,
                              self.config.wcen_sigma_m)
        # zred consistency filter, optionally evaluated against the
        # uncorrected-zred model when a z_lambda correction is supplied.
        if self.zlambda_corr is not None:
            zrmod = interpol(self.zlambda_corr.zred_uncorr, self.zlambda_corr.z, self.cluster.redshift)
            gz = gaussFunction(self.cluster.neighbors.zred[use],
                               1. / (np.sqrt(2. * np.pi) * self.cluster.neighbors.zred_e[use]),
                               zrmod,
                               self.cluster.neighbors.zred_e[use])
        else:
            gz = gaussFunction(self.cluster.neighbors.zred[use],
                               1. / (np.sqrt(2. * np.pi) * self.cluster.neighbors.zred_e[use]),
                               self.cluster.redshift,
                               self.cluster.neighbors.zred_e[use])
        # and the w filter. We need w for each galaxy that is considered a candidate center.
        # Note that in order to calculate w we need to know all the galaxies that are
        # around it, but only within r_lambda *of that galaxy*. This is tricky.
        u, = np.where(self.cluster.neighbors.p > 0.0)
        # This is the maximum radius in units of degrees (r_lambda is Mpc; mpc_scale is Mpc / degree)
        maxrad = 1.1 * self.cluster.r_lambda / self.cluster.mpc_scale
        # Match every candidate against every galaxy with p > 0.
        htm_matcher = esutil.htm.Matcher(self.cluster.neighbors.depth,
                                         self.cluster.neighbors.ra[use],
                                         self.cluster.neighbors.dec[use])
        i2, i1, dist = htm_matcher.match(self.cluster.neighbors.ra[u],
                                         self.cluster.neighbors.dec[u],
                                         maxrad, maxmatch=0)
        # Drop self-matches (a candidate matched to itself).
        subdifferent, = np.where(~(use[i1] == u[i2]))
        i1 = i1[subdifferent]
        i2 = i2[subdifferent]
        # Projected distance in Mpc, softened so nearby pairs cannot blow up.
        pdis = dist[subdifferent] * self.cluster.mpc_scale
        pdis = np.sqrt(pdis**2. + self.config.wcen_rsoft**2.)
        lum = 10.**((self.cluster.mstar - self.cluster.neighbors.refmag) / (2.5))
        # Put a floor on w when we have a strange candidate at the edge that doesn't
        # match any good galaxies
        w = np.zeros(use.size) + 1e-3
        for i in range(use.size):
            # need to filter on r_lambda...
            subgal, = np.where(i1 == i)
            if subgal.size > 0:
                inside, = np.where(pdis[subgal] < self.cluster.r_lambda)
                if inside.size > 0:
                    indices = u[i2[subgal[inside]]]
                    # w is the log of a (optionally luminosity-weighted)
                    # mean inverse distance, normalized to r_lambda.
                    if self.config.wcen_uselum:
                        w[i] = np.log(np.sum(self.cluster.neighbors.p[indices] * lum[indices] /
                                              pdis[subgal[inside]]) /
                                       ((1. / self.cluster.r_lambda) *
                                        np.sum(self.cluster.neighbors.p[indices] * lum[indices])))
                    else:
                        w[i] = np.log(np.sum(self.cluster.neighbors.p[indices] /
                                              pdis[subgal[inside]]) /
                                       ((1. / self.cluster.r_lambda) *
                                        np.sum(self.cluster.neighbors.p[indices])))
        sigscale = np.sqrt((np.clip(self.cluster.Lambda, None, self.config.wcen_maxlambda) / self.cluster.scaleval) / self.config.wcen_pivot)
        # scale with richness for Poisson errors
        sig = self.config.lnw_cen_sigma / sigscale
        # Central filter on ln(w).
        fw = gaussFunction(np.log(w),
                           1. / (np.sqrt(2. * np.pi) * sig),
                           self.config.lnw_cen_mean,
                           sig)
        # Combined central likelihood for each candidate.
        ucen = phi_cen * gz * fw
        lo, = np.where(ucen < 1e-10)
        ucen[lo] = 0.0
        # and the satellite function
        maxmag = self.cluster.mstar - 2.5 * np.log10(self.config.lval_reference)
        phi_sat = self.cluster._calc_luminosity(maxmag, idx=use)
        satsig = self.config.lnw_sat_sigma / sigscale
        fsat = gaussFunction(np.log(w),
                             1. / (np.sqrt(2. * np.pi) * satsig),
                             self.config.lnw_sat_mean,
                             satsig)
        # Combined satellite likelihood for each candidate.
        usat = phi_sat * gz * fsat
        lo, = np.where(usat < 1e-10)
        usat[lo] = 0.0
        # and the background/foreground
        fgsig = self.config.lnw_fg_sigma / sigscale
        ffg = gaussFunction(np.log(w),
                            1. / (np.sqrt(2. * np.pi) * fgsig),
                            self.config.lnw_fg_mean,
                            fgsig)
        # we want to divide out the r, and we don't want small r's messing this up
        rtest = np.zeros(use.size) + 0.1
        # Expected background counts within r_lambda at each candidate.
        bcounts = ffg * (self.cluster.calc_zred_bkg_density(rtest,
                                                            self.cluster.neighbors.zred[use],
                                                            self.cluster.neighbors.refmag[use]) /
                         (2. * np.pi * rtest)) * np.pi * self.cluster.r_lambda**2.
        # The start of Pcen
        # Raw centering probability, capped just below 1.
        Pcen_basic = np.clip(self.cluster.neighbors.pfree[use] * (ucen / (ucen + (self.cluster.Lambda / self.cluster.scaleval - 1.0) * usat + bcounts)),None, 0.99999)
        # make sure we don't have any bad values
        bad, = np.where(~np.isfinite(Pcen_basic))
        Pcen_basic[bad] = 0.0
        okay, = np.where(Pcen_basic > 0.0)
        if okay.size == 0:
            # There are literally NO centers
            self.q_miss = 1.0
            # Set the same as the input galaxy...
            # We need this to be an array of length 1
            good = np.atleast_1d(np.argmin(self.cluster.neighbors.r[use]))
            maxind = use[good[0]]
            Pcen = np.zeros(use.size)
            Qcen = np.zeros(use.size)
        else:
            # Do the renormalization
            Pcen_unnorm = np.zeros(use.size)
            # Only consider centrals that have a non-zero probability
            ok, = np.where(Pcen_basic > 0)
            # Keep the top percolation_maxcen candidates, sorted descending.
            st = np.argsort(Pcen_basic[ok])[::-1]
            if st.size < self.config.percolation_maxcen:
                good = ok[st]
            else:
                good = ok[st[0: self.config.percolation_maxcen]]
            self.ngood = good.size
            # Each candidate's probability of being *the* center is its own
            # Pcen times the probability that none of the others is a center.
            for i in range(self.ngood):
                Pcen0 = Pcen_basic[good[i]]
                Pcen_basic[good[i]] = 0.0
                Pcen_unnorm[good[i]] = Pcen0 * np.prod(1.0 - Pcen_basic[good])
                Pcen_basic[good[i]] = Pcen0
            Qmiss = np.prod(1.0 - Pcen_basic[good])
            KQ = 1./(Qmiss + np.sum(Pcen_unnorm))
            KP = 1./np.sum(Pcen_unnorm)
            Pcen = KP * Pcen_unnorm
            Qcen = KQ * Pcen_unnorm
            # NOTE(review): mod1 and mod2 are computed but never used below;
            # confirm whether they are kept deliberately before removing.
            mod1 = np.sum(np.log(ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good]))
            mod2 = np.sum(np.log(self.cluster.Lambda * usat[good] + bcounts[good]))
            # A new statistic that doesn't quite work
            # NOTE(review): this overwrites the Qmiss computed above, and the
            # result is local (self.q_miss is not updated on this branch).
            Qmiss = -2.0 * np.sum(np.log((ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good]) / (self.cluster.Lambda * usat[good] + bcounts[good])))
            maxind = use[good[0]]
        # Foreground probabilities for the surviving candidates.
        Pfg_basic = bcounts[good] / ((self.cluster.Lambda - 1.0) * usat[good] + bcounts[good])
        inf, = np.where(~np.isfinite(Pfg_basic))
        Pfg_basic[inf] = 0.0
        Pfg = (1.0 - Pcen[good]) * Pfg_basic
        # Satellite probabilities for the surviving candidates.
        Psat_basic = (self.cluster.Lambda - 1.0) * usat[good] / ((self.cluster.Lambda - 1.0) * usat[good] + bcounts[good])
        inf, = np.where(~np.isfinite(Psat_basic))
        Psat_basic[inf] = 0.0
        Psat = (1.0 - Pcen[good]) * Psat_basic
        # Record the ranked candidate list on the object.
        self.ra[0: good.size] = self.cluster.neighbors.ra[use[good]]
        self.dec[0: good.size] = self.cluster.neighbors.dec[use[good]]
        self.maxind = use[good[0]]
        self.index[0: good.size] = use[good]
        self.p_cen[0: good.size] = Pcen[good]
        self.q_cen[0: good.size] = Qcen[good]
        self.p_fg[0: good.size] = Pfg
        self.p_sat[0: good.size] = Psat
        self.p_c[0: good.size] = Pcen_basic[good]
        return True
class CenteringRandom(Centering):
    """
    Centering class using the random-position algorithm.
    This is used for filter calibration.
    """
    def find_center(self):
        """
        Pick a uniformly random position inside r_lambda as the center.
        Not a good centering algorithm; used only for filter calibration.
        Sets self.ra[0], self.dec[0], self.ngood, self.index[0] (-1, no real
        galaxy), self.maxind (-1), and puts all centering probability on the
        single random candidate.
        Returns
        -------
        success: `bool`
            Always True.
        """
        # Draw a radius uniform in enclosed area (sqrt of a uniform deviate)
        # and a uniform position angle.  The two np.random calls must stay
        # in this order so the random stream matches previous behavior.
        radius = self.cluster.r_lambda * np.sqrt(np.random.random(size=1))
        angle = 2. * np.pi * np.random.random(size=1)
        # Offsets in degrees; mpc_scale converts Mpc to degrees.
        dx = radius * np.cos(angle) / (self.cluster.mpc_scale)
        dy = radius * np.sin(angle) / (self.cluster.mpc_scale)
        # Correct the RA offset for the convergence of meridians at this dec.
        center_ra = self.cluster.ra + dx / np.cos(np.radians(self.cluster.dec))
        center_dec = self.cluster.dec + dy
        self.ra[0] = center_ra
        self.dec[0] = center_dec
        self.ngood = 1
        # A random point corresponds to no real neighbor.
        self.index[0] = -1
        self.maxind = -1
        # The single random candidate carries all the probability.
        for attr, value in (("p_cen", 1.0), ("q_cen", 1.0), ("p_sat", 0.0),
                            ("p_fg", 0.0), ("p_c", 1.0)):
            getattr(self, attr)[0] = value
        return True
class CenteringRandomSatellite(Centering):
    """
    Centering class using the random-satellite algorithm.
    This is used for filter calibration.
    """
    def find_center(self):
        """
        Pick a random member, weighted by pmem, and call it the center.
        Not a good centering algorithm (but better than pure random!);
        used only for filter calibration.
        Sets self.ra[0], self.dec[0], self.index[0], self.maxind,
        self.ngood, and puts all centering probability on the chosen member.
        Returns
        -------
        success: `bool`
            Always True.
        """
        members = self.cluster.neighbors
        # Order members by decreasing membership probability and build a
        # normalized cumulative table over that ordering.
        order = np.argsort(members.pmem)[::-1]
        weights = members.pmem[order]
        weights /= np.sum(weights)
        cumulative = np.cumsum(weights, dtype=np.float64)
        # Quantize both the CDF and a single uniform draw onto an integer
        # grid of size n, then take the first bin at or above the draw.
        # (Sampling scheme kept identical to the original implementation.)
        grid = (cumulative * order.size).astype(np.int32)
        draw = (np.random.uniform(size=1) * order.size).astype(np.int32)
        pick = np.where(grid >= draw[0])[0][0]
        chosen = order[pick]
        self.ra[0] = members.ra[chosen]
        self.dec[0] = members.dec[chosen]
        self.index[0] = chosen
        self.maxind = chosen
        self.ngood = 1
        # The single sampled candidate carries all the probability.
        self.p_cen[0] = 1.0
        self.q_cen[0] = 1.0
        self.p_sat[0] = 0.0
        self.p_fg[0] = 0.0
        self.p_c[0] = 1.0
        return True
| [
"numpy.sum",
"numpy.abs",
"numpy.argmin",
"numpy.clip",
"numpy.argsort",
"numpy.sin",
"numpy.prod",
"numpy.isfinite",
"numpy.cumsum",
"numpy.log10",
"numpy.radians",
"numpy.cos",
"numpy.random.uniform",
"numpy.log",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.random.random"... | [((1621, 1661), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1629, 1661), True, 'import numpy as np\n'), ((1683, 1723), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1691, 1723), True, 'import numpy as np\n'), ((1744, 1784), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1752, 1784), True, 'import numpy as np\n'), ((1832, 1872), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1840, 1872), True, 'import numpy as np\n'), ((1892, 1932), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1900, 1932), True, 'import numpy as np\n'), ((3298, 3343), 'numpy.argmin', 'np.argmin', (['self.cluster.neighbors.refmag[use]'], {}), '(self.cluster.neighbors.refmag[use])\n', (3307, 3343), True, 'import numpy as np\n'), ((3395, 3445), 'numpy.array', 'np.array', (['[self.cluster.neighbors.ra[self.maxind]]'], {}), '([self.cluster.neighbors.ra[self.maxind]])\n', (3403, 3445), True, 'import numpy as np\n'), ((3465, 3516), 'numpy.array', 'np.array', (['[self.cluster.neighbors.dec[self.maxind]]'], {}), '([self.cluster.neighbors.dec[self.maxind]])\n', (3473, 3516), True, 'import numpy as np\n'), ((6759, 6799), 'numpy.where', 'np.where', (['(self.cluster.neighbors.p > 0.0)'], {}), '(self.cluster.neighbors.p > 0.0)\n', (6767, 6799), True, 'import numpy as np\n'), ((6996, 7114), 'esutil.htm.Matcher', 'esutil.htm.Matcher', (['self.cluster.neighbors.depth', 'self.cluster.neighbors.ra[use]', 'self.cluster.neighbors.dec[use]'], {}), '(self.cluster.neighbors.depth, self.cluster.neighbors.ra[\n use], self.cluster.neighbors.dec[use])\n', (7014, 7114), False, 'import esutil\n'), ((7421, 7450), 'numpy.where', 'np.where', (['(~(use[i1] == u[i2]))'], {}), '(~(use[i1] == u[i2]))\n', 
(7429, 7450), True, 'import numpy as np\n'), ((7585, 7637), 'numpy.sqrt', 'np.sqrt', (['(pdis ** 2.0 + self.config.wcen_rsoft ** 2.0)'], {}), '(pdis ** 2.0 + self.config.wcen_rsoft ** 2.0)\n', (7592, 7637), True, 'import numpy as np\n'), ((9372, 9394), 'numpy.where', 'np.where', (['(ucen < 1e-10)'], {}), '(ucen < 1e-10)\n', (9380, 9394), True, 'import numpy as np\n'), ((9906, 9928), 'numpy.where', 'np.where', (['(usat < 1e-10)'], {}), '(usat < 1e-10)\n', (9914, 9928), True, 'import numpy as np\n'), ((10754, 10905), 'numpy.clip', 'np.clip', (['(self.cluster.neighbors.pfree[use] * (ucen / (ucen + (self.cluster.Lambda /\n self.cluster.scaleval - 1.0) * usat + bcounts)))', 'None', '(0.99999)'], {}), '(self.cluster.neighbors.pfree[use] * (ucen / (ucen + (self.cluster.\n Lambda / self.cluster.scaleval - 1.0) * usat + bcounts)), None, 0.99999)\n', (10761, 10905), True, 'import numpy as np\n'), ((11047, 11073), 'numpy.where', 'np.where', (['(Pcen_basic > 0.0)'], {}), '(Pcen_basic > 0.0)\n', (11055, 11073), True, 'import numpy as np\n'), ((16394, 16405), 'numpy.sum', 'np.sum', (['pdf'], {}), '(pdf)\n', (16400, 16405), True, 'import numpy as np\n'), ((16420, 16452), 'numpy.cumsum', 'np.cumsum', (['pdf'], {'dtype': 'np.float64'}), '(pdf, dtype=np.float64)\n', (16429, 16452), True, 'import numpy as np\n'), ((1293, 1333), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1301, 1333), True, 'import numpy as np\n'), ((1361, 1401), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {}), '(self.config.percolation_maxcen)\n', (1369, 1401), True, 'import numpy as np\n'), ((1454, 1510), 'numpy.zeros', 'np.zeros', (['self.config.percolation_maxcen'], {'dtype': 'np.int32'}), '(self.config.percolation_maxcen, dtype=np.int32)\n', (1462, 1510), True, 'import numpy as np\n'), ((7847, 7865), 'numpy.zeros', 'np.zeros', (['use.size'], {}), '(use.size)\n', (7855, 7865), True, 'import numpy as np\n'), ((7973, 7990), 
'numpy.where', 'np.where', (['(i1 == i)'], {}), '(i1 == i)\n', (7981, 7990), True, 'import numpy as np\n'), ((9166, 9175), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (9172, 9175), True, 'import numpy as np\n'), ((9686, 9695), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (9692, 9695), True, 'import numpy as np\n'), ((10073, 10082), 'numpy.log', 'np.log', (['w'], {}), '(w)\n', (10079, 10082), True, 'import numpy as np\n'), ((10336, 10354), 'numpy.zeros', 'np.zeros', (['use.size'], {}), '(use.size)\n', (10344, 10354), True, 'import numpy as np\n'), ((11411, 11429), 'numpy.zeros', 'np.zeros', (['use.size'], {}), '(use.size)\n', (11419, 11429), True, 'import numpy as np\n'), ((11449, 11467), 'numpy.zeros', 'np.zeros', (['use.size'], {}), '(use.size)\n', (11457, 11467), True, 'import numpy as np\n'), ((11547, 11565), 'numpy.zeros', 'np.zeros', (['use.size'], {}), '(use.size)\n', (11555, 11565), True, 'import numpy as np\n'), ((11655, 11679), 'numpy.where', 'np.where', (['(Pcen_basic > 0)'], {}), '(Pcen_basic > 0)\n', (11663, 11679), True, 'import numpy as np\n'), ((12208, 12239), 'numpy.prod', 'np.prod', (['(1.0 - Pcen_basic[good])'], {}), '(1.0 - Pcen_basic[good])\n', (12215, 12239), True, 'import numpy as np\n'), ((14775, 14799), 'numpy.random.random', 'np.random.random', ([], {'size': '(1)'}), '(size=1)\n', (14791, 14799), True, 'import numpy as np\n'), ((16286, 16325), 'numpy.argsort', 'np.argsort', (['self.cluster.neighbors.pmem'], {}), '(self.cluster.neighbors.pmem)\n', (16296, 16325), True, 'import numpy as np\n'), ((5470, 5522), 'numpy.log', 'np.log', (['(self.cluster.Lambda / self.config.wcen_pivot)'], {}), '(self.cluster.Lambda / self.config.wcen_pivot)\n', (5476, 5522), True, 'import numpy as np\n'), ((8049, 8095), 'numpy.where', 'np.where', (['(pdis[subgal] < self.cluster.r_lambda)'], {}), '(pdis[subgal] < self.cluster.r_lambda)\n', (8057, 8095), True, 'import numpy as np\n'), ((9500, 9536), 'numpy.log10', 'np.log10', (['self.config.lval_reference'], {}), 
'(self.config.lval_reference)\n', (9508, 9536), True, 'import numpy as np\n'), ((10975, 10998), 'numpy.isfinite', 'np.isfinite', (['Pcen_basic'], {}), '(Pcen_basic)\n', (10986, 10998), True, 'import numpy as np\n'), ((11314, 11354), 'numpy.argmin', 'np.argmin', (['self.cluster.neighbors.r[use]'], {}), '(self.cluster.neighbors.r[use])\n', (11323, 11354), True, 'import numpy as np\n'), ((11698, 11724), 'numpy.argsort', 'np.argsort', (['Pcen_basic[ok]'], {}), '(Pcen_basic[ok])\n', (11708, 11724), True, 'import numpy as np\n'), ((12311, 12330), 'numpy.sum', 'np.sum', (['Pcen_unnorm'], {}), '(Pcen_unnorm)\n', (12317, 12330), True, 'import numpy as np\n'), ((12431, 12506), 'numpy.log', 'np.log', (['(ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good])'], {}), '(ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good])\n', (12437, 12506), True, 'import numpy as np\n'), ((12534, 12590), 'numpy.log', 'np.log', (['(self.cluster.Lambda * usat[good] + bcounts[good])'], {}), '(self.cluster.Lambda * usat[good] + bcounts[good])\n', (12540, 12590), True, 'import numpy as np\n'), ((12969, 12991), 'numpy.isfinite', 'np.isfinite', (['Pfg_basic'], {}), '(Pfg_basic)\n', (12980, 12991), True, 'import numpy as np\n'), ((13217, 13240), 'numpy.isfinite', 'np.isfinite', (['Psat_basic'], {}), '(Psat_basic)\n', (13228, 13240), True, 'import numpy as np\n'), ((14722, 14746), 'numpy.random.random', 'np.random.random', ([], {'size': '(1)'}), '(size=1)\n', (14738, 14746), True, 'import numpy as np\n'), ((14817, 14828), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (14823, 14828), True, 'import numpy as np\n'), ((14872, 14883), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (14878, 14883), True, 'import numpy as np\n'), ((16586, 16611), 'numpy.where', 'np.where', (['(cdfi >= rand[0])'], {}), '(cdfi >= rand[0])\n', (16594, 16611), True, 'import numpy as np\n'), ((5629, 5649), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (5636, 5649), True, 
'import numpy as np\n'), ((8923, 8985), 'numpy.clip', 'np.clip', (['self.cluster.Lambda', 'None', 'self.config.wcen_maxlambda'], {}), '(self.cluster.Lambda, None, self.config.wcen_maxlambda)\n', (8930, 8985), True, 'import numpy as np\n'), ((9210, 9230), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (9217, 9230), True, 'import numpy as np\n'), ((9732, 9752), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (9739, 9752), True, 'import numpy as np\n'), ((10118, 10138), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (10125, 10138), True, 'import numpy as np\n'), ((12111, 12142), 'numpy.prod', 'np.prod', (['(1.0 - Pcen_basic[good])'], {}), '(1.0 - Pcen_basic[good])\n', (12118, 12142), True, 'import numpy as np\n'), ((12270, 12289), 'numpy.sum', 'np.sum', (['Pcen_unnorm'], {}), '(Pcen_unnorm)\n', (12276, 12289), True, 'import numpy as np\n'), ((12681, 12816), 'numpy.log', 'np.log', (['((ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good]) / (\n self.cluster.Lambda * usat[good] + bcounts[good]))'], {}), '((ucen[good] + (self.cluster.Lambda - 1) * usat[good] + bcounts[good]\n ) / (self.cluster.Lambda * usat[good] + bcounts[good]))\n', (12687, 12816), True, 'import numpy as np\n'), ((14958, 14986), 'numpy.radians', 'np.radians', (['self.cluster.dec'], {}), '(self.cluster.dec)\n', (14968, 14986), True, 'import numpy as np\n'), ((16518, 16543), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1)'}), '(size=1)\n', (16535, 16543), True, 'import numpy as np\n'), ((3129, 3188), 'numpy.abs', 'np.abs', (['(self.cluster.neighbors.zred - self.cluster.redshift)'], {}), '(self.cluster.neighbors.zred - self.cluster.redshift)\n', (3135, 3188), True, 'import numpy as np\n'), ((5248, 5307), 'numpy.abs', 'np.abs', (['(self.cluster.redshift - self.cluster.neighbors.zred)'], {}), '(self.cluster.redshift - self.cluster.neighbors.zred)\n', (5254, 5307), True, 'import numpy as np\n'), ((6023, 6043), 
'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (6030, 6043), True, 'import numpy as np\n'), ((6303, 6323), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (6310, 6323), True, 'import numpy as np\n'), ((8270, 8349), 'numpy.sum', 'np.sum', (['(self.cluster.neighbors.p[indices] * lum[indices] / pdis[subgal[inside]])'], {}), '(self.cluster.neighbors.p[indices] * lum[indices] / pdis[subgal[inside]])\n', (8276, 8349), True, 'import numpy as np\n'), ((8629, 8693), 'numpy.sum', 'np.sum', (['(self.cluster.neighbors.p[indices] / pdis[subgal[inside]])'], {}), '(self.cluster.neighbors.p[indices] / pdis[subgal[inside]])\n', (8635, 8693), True, 'import numpy as np\n'), ((8506, 8562), 'numpy.sum', 'np.sum', (['(self.cluster.neighbors.p[indices] * lum[indices])'], {}), '(self.cluster.neighbors.p[indices] * lum[indices])\n', (8512, 8562), True, 'import numpy as np\n'), ((8850, 8891), 'numpy.sum', 'np.sum', (['self.cluster.neighbors.p[indices]'], {}), '(self.cluster.neighbors.p[indices])\n', (8856, 8891), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 16:48:58 2021
@author: Alex
"""
# NOTE: the original script began with ``reset -f``, an IPython magic that is
# a NameError in a plain Python script; it has been removed.
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Change working directory (hard-coded course path) and echo it back.
os.chdir('C:/Programacion Estadistica PEP/code_and_data')
os.getcwd()
# Read the 2011 rentals data from CSV and sanity-check shape/head/tail.
rentals_2011 = pd.read_csv('washington_bike_rentals_2011.csv', sep=';', decimal=',')
rentals_2011.shape
rentals_2011.head()
rentals_2011.tail()
# QC OK
# Select the rental-count column and describe it numerically.
rentals_2011.cnt
np.mean(rentals_2011.cnt)  # mean (quantitative variable)
np.std(rentals_2011.cnt)  # standard deviation (quantitative variable)
rentals_2011.cnt.mean()  # same, via the Series method
rentals_2011.cnt.describe()  # full summary in one call
# Describe the variable graphically.
plt.hist(rentals_2011.cnt)  # histogram (quantitative variable)
rentals_2011.cnt.hist()
x = rentals_2011['cnt']
plt.hist(x, edgecolor='black', bins=20)  # black bar edges
plt.xticks(np.arange(0, 7000, step=1000))  # x-axis ticks
plt.title('Figure 1. Registered rental in Washington')
plt.ylabel('Frequency')
plt.xlabel('Number of rentals')
plt.show()
# Expand the dataset: read the 2011 weather CSV.
weather_2011 = pd.read_csv('weather_washington_2011.csv', sep=';', decimal=',')
del x
weather_2011.shape
weather_2011.head()
weather_2011.tail()
# QC OK
weather_2011.dtypes  # column dtypes of the dataframe
# Merge rentals and weather on the day key.
rentals_weather_2011 = pd.merge(weather_2011, rentals_2011, on='day')
rentals_weather_2011.shape
rentals_weather_2011.head()
del rentals_weather_2011['dteday_y']  # drop the duplicated date column
# Rename the surviving date column back to its plain name.
rentals_weather_2011 = rentals_weather_2011.rename(columns={'dteday_x': 'dteday'})
# Append the 2012 rows below the 2011 rows.
rentals_weather_2012 = pd.read_csv('rentals_weather_2012.csv', sep=';', decimal=',')
rentals_weather_2012.shape
rentals_weather_2012.head()
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent (same row order, indexes preserved).
rentals_weather_11_12 = pd.concat([rentals_weather_2011, rentals_weather_2012])
rentals_weather_11_12.shape
rentals_weather_11_12.head()
rentals_weather_11_12.tail()
# Simplify the dataframe name.
wbr = rentals_weather_11_12
del rentals_weather_11_12
# Describe a nominal variable: counts per weather situation.
mytable = wbr.groupby(['weathersit']).size()
print(mytable)
mytable.sum()
# Percentages
n = mytable.sum()
mytable2 = (mytable / n) * 100
print(mytable2)
# Bar chart over the raw category index.
plt.bar(mytable.index, mytable2)
# Bar chart with readable category labels.
bar_list = ['Sunny', 'Cloudy', 'Rainy']
plt.bar(bar_list, mytable2)
plt.ylabel('Percentage')
plt.title('Figure 1. Percentage of weather situation')
props = dict(boxstyle='round', facecolor='white', lw=0.5)
# Raw string: '\m' is an invalid escape sequence in an ordinary string
# literal (a future SyntaxError); the rendered mathtext is unchanged.
textstr = r'$\mathrm{n}=%.0f$' % (n)
plt.text(2, 50, textstr, bbox=props)
# Save the figure in several formats before showing it.
plt.savefig('bar1.eps')
plt.savefig('bar1.jpg')
plt.savefig('bar1.svg')
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"os.getcwd",
"pandas.read_csv",
"numpy.std",
"pandas.merge",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.text",
"numpy.mean",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.chdir",
... | [((293, 350), 'os.chdir', 'os.chdir', (['"""C:/Programacion Estadistica PEP/code_and_data"""'], {}), "('C:/Programacion Estadistica PEP/code_and_data')\n", (301, 350), False, 'import os\n'), ((351, 362), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (360, 362), False, 'import os\n'), ((404, 473), 'pandas.read_csv', 'pd.read_csv', (['"""washington_bike_rentals_2011.csv"""'], {'sep': '""";"""', 'decimal': '""","""'}), "('washington_bike_rentals_2011.csv', sep=';', decimal=',')\n", (415, 473), True, 'import pandas as pd\n'), ((585, 610), 'numpy.mean', 'np.mean', (['rentals_2011.cnt'], {}), '(rentals_2011.cnt)\n', (592, 610), True, 'import numpy as np\n'), ((642, 666), 'numpy.std', 'np.std', (['rentals_2011.cnt'], {}), '(rentals_2011.cnt)\n', (648, 666), True, 'import numpy as np\n'), ((841, 867), 'matplotlib.pyplot.hist', 'plt.hist', (['rentals_2011.cnt'], {}), '(rentals_2011.cnt)\n', (849, 867), True, 'import matplotlib.pyplot as plt\n'), ((952, 991), 'matplotlib.pyplot.hist', 'plt.hist', (['x'], {'edgecolor': '"""black"""', 'bins': '(20)'}), "(x, edgecolor='black', bins=20)\n", (960, 991), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1117), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 1. Registered rental in Washington"""'], {}), "('Figure 1. 
Registered rental in Washington')\n", (1072, 1117), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1156), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency"""'], {}), "('Frequency')\n", (1143, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1210), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of rentals"""'], {}), "('Number of rentals')\n", (1189, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1242), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1240, 1242), True, 'import matplotlib.pyplot as plt\n'), ((1304, 1368), 'pandas.read_csv', 'pd.read_csv', (['"""weather_washington_2011.csv"""'], {'sep': '""";"""', 'decimal': '""","""'}), "('weather_washington_2011.csv', sep=';', decimal=',')\n", (1315, 1368), True, 'import pandas as pd\n'), ((1554, 1600), 'pandas.merge', 'pd.merge', (['weather_2011', 'rentals_2011'], {'on': '"""day"""'}), "(weather_2011, rentals_2011, on='day')\n", (1562, 1600), True, 'import pandas as pd\n'), ((1868, 1929), 'pandas.read_csv', 'pd.read_csv', (['"""rentals_weather_2012.csv"""'], {'sep': '""";"""', 'decimal': '""","""'}), "('rentals_weather_2012.csv', sep=';', decimal=',')\n", (1879, 1929), True, 'import pandas as pd\n'), ((2442, 2474), 'matplotlib.pyplot.bar', 'plt.bar', (['mytable.index', 'mytable2'], {}), '(mytable.index, mytable2)\n', (2449, 2474), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2571), 'matplotlib.pyplot.bar', 'plt.bar', (['bar_list', 'mytable2'], {}), '(bar_list, mytable2)\n', (2551, 2571), True, 'import matplotlib.pyplot as plt\n'), ((2572, 2596), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Percentage"""'], {}), "('Percentage')\n", (2582, 2596), True, 'import matplotlib.pyplot as plt\n'), ((2597, 2651), 'matplotlib.pyplot.title', 'plt.title', (['"""Figure 1. Percentage of weather situation"""'], {}), "('Figure 1. 
Percentage of weather situation')\n", (2606, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2744, 2780), 'matplotlib.pyplot.text', 'plt.text', (['(2)', '(50)', 'textstr'], {'bbox': 'props'}), '(2, 50, textstr, bbox=props)\n', (2752, 2780), True, 'import matplotlib.pyplot as plt\n'), ((2798, 2821), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar1.eps"""'], {}), "('bar1.eps')\n", (2809, 2821), True, 'import matplotlib.pyplot as plt\n'), ((2822, 2845), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar1.jpg"""'], {}), "('bar1.jpg')\n", (2833, 2845), True, 'import matplotlib.pyplot as plt\n'), ((2846, 2869), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""bar1.svg"""'], {}), "('bar1.svg')\n", (2857, 2869), True, 'import matplotlib.pyplot as plt\n'), ((2870, 2880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2878, 2880), True, 'import matplotlib.pyplot as plt\n'), ((1025, 1054), 'numpy.arange', 'np.arange', (['(0)', '(7000)'], {'step': '(1000)'}), '(0, 7000, step=1000)\n', (1034, 1054), True, 'import numpy as np\n')] |
import scipy.optimize as sopt
import scipy.linalg as slin
import numpy as np
import networkx as nx
from multidag.common.consts import DEVICE
import torch
# from notears.linear import notears_linear
from tqdm import tqdm
import math
import torch.nn.functional as F
def matrix_poly(W):
    """Polynomial surrogate for the matrix exponential: (I + W/d)^d.

    W : [d, d] matrix or [m, d, d] batch of matrices (DAG-GNN style).
    Returns a tensor of the same shape as W.
    """
    if len(W.shape) == 2:
        d = W.shape[0]
        assert d == W.shape[1]
        base = torch.eye(d).to(DEVICE) + 1/d * W
        return torch.matrix_power(base, d)
    elif len(W.shape) == 3:
        m, d = W.shape[0], W.shape[1]
        eye_batch = torch.eye(d).unsqueeze(0).repeat(m, 1, 1).detach().to(DEVICE)
        return torch.matrix_power(eye_batch + 1/d * W, d)
    else:
        raise NotImplementedError('Shape should has length 2 or 3.')
def DAGGNN_h_W(W):
    """DAG-GNN acyclicity penalty h(W) = tr[(I + (W∘W)/d)^d] - d.

    W : [d, d] matrix or [batch, d, d] stack of matrices.
    Returns a scalar (2-D input) or a [batch] tensor (3-D input); zero iff
    W is (numerically) acyclic.
    """
    expd_W = matrix_poly(W * W)
    # Bug fix: `d` was previously read from a global that only exists when the
    # module is run as a script (d=2 under __main__) — a NameError or a silently
    # wrong value otherwise. Derive it from W itself.
    d = W.shape[-1]
    if len(W.shape) == 2:
        h_W = torch.trace(expd_W) - d
    elif len(W.shape) == 3:
        h_W = torch.einsum('bii->b', expd_W) - d
    else:
        raise NotImplementedError('Shape should has length 2 or 3.')
    return h_W
class NOTEARS_h_W(torch.autograd.Function):
    """NOTEARS acyclicity measure h(W) = tr(exp(W ∘ W)) - d with a hand-written
    backward pass (gradient: exp(W∘W)^T ∘ 2W)."""

    @staticmethod
    def forward(ctx, input):
        """
        input: [d, d] matrix or [batch, d, d] tensor containing batch many matrices
        """
        ndim = input.dim()
        if ndim == 2:
            num_nodes = input.shape[0]
            expm = torch.matrix_exp(input * input)
            trace_val = torch.trace(expm)
        elif ndim == 3:
            num_nodes = input.shape[1]
            assert num_nodes == input.shape[2]
            expm = torch.matrix_exp(input * input)
            trace_val = torch.einsum('bii->b', expm)  # [batch]
        else:
            raise NotImplementedError('Shape should has length 2 or 3.')
        ctx.save_for_backward(input, expm)
        return trace_val - num_nodes

    @staticmethod
    def backward(ctx, grad_output):
        W, expm = ctx.saved_tensors
        if W.dim() == 2:
            # d/dW tr(exp(W∘W)) = exp(W∘W)^T ∘ 2W  (chain rule through W∘W)
            return grad_output * (expm.t() * 2 * W)
        elif W.dim() == 3:
            batch = W.shape[0]
            per_sample = expm.permute(0, 2, 1) * 2 * W  # [batch, d, d]
            return per_sample * grad_output.view(batch, 1, 1)
# Dispatch table mapping an acyclicity-constraint name to its implementation:
# 'notears' -> tr(exp(W∘W)) - d (custom autograd Function above),
# 'daggnn'  -> polynomial surrogate tr[(I + (W∘W)/d)^d] - d.
h_W = {'notears': NOTEARS_h_W.apply,
       'daggnn': DAGGNN_h_W}
class TraceExpm(torch.autograd.Function):
    """Differentiable tr(exp(A)); the analytic gradient is exp(A)^T."""

    @staticmethod
    def forward(ctx, input):
        """
        input : A = [d, d] tensor
        output: tr(e^A)
        """
        expA = torch.matrix_exp(input)
        ctx.save_for_backward(expA)
        return torch.trace(expA)

    @staticmethod
    def backward(ctx, grad_output):
        expA, = ctx.saved_tensors
        # d tr(e^A) / dA = (e^A)^T
        return grad_output * expA.t()
trace_expm = TraceExpm.apply
# def run_notears_linear(X):
# """
# :param X: [m, n, d]
# :param d:
# :return: W_est: [m, d, d]
# """
# assert len(X.shape) == 3
# num_dag = X.shape[0]
# d = X.shape[2]
# W_est = np.zeros([num_dag, d, d])
# progress_bar = tqdm(range(num_dag))
# for i in progress_bar:
# W_est[i] = notears_linear(X[i], lambda1=0.1, loss_type='l2')
# assert is_dag(W_est[i])
# return W_est.astype(np.float32)
def project_to_dag(W, sparsity=1.0, max_iter=20, h_tol=1e-3, rho_max=1e+16, w_threshold=0.1):
    """
    :param W: (np.ndarray) [d, d] matrix as a general directed graph, not necessarily acyclic
    :return:
        W: (np.ndarray) [d, d] approximate projection to DAGs
        return None if it takes to long to project to DAGs
    """
    pruned = W * (np.abs(W) > w_threshold)  # drop near-zero edges first
    for _attempt in range(5):  # retry the projection at most 5 times
        try:
            pruned, mask = project_notears(pruned, sparsity, max_iter, h_tol, rho_max, w_threshold)
        except ValueError:
            print('numerical instability error')
            # in case of some numerical instability error
            return None, None
        if is_dag(mask):
            return pruned, mask
    return None, None
def is_dag(W: np.ndarray):
    """Return True iff W (read as a weighted adjacency matrix) encodes an
    acyclic directed graph; None counts as not a DAG."""
    if W is None:
        return False
    return nx.is_directed_acyclic_graph(nx.DiGraph(W))
def project_notears(X, sparsity=1.0, max_iter=100, h_tol=1e-3, rho_max=1e+16, w_threshold=0.1):
    """
    Projection onto (a neighbourhood of) the space of DAGs, following the
    augmented-Lagrangian scheme of 'DAGs with NO TEARS'.
    Based on https://github.com/xunzheng/notears/blob/master/notears/linear.py
    Solves min_W 1/2 ‖W - X‖_F^2 + sparsity ‖W‖_1  s.t. h(W) = 0.
    With sparsity > 0 this is an l1-regularized projection rather than a pure one.
    Args:
        X (np.ndarray): [d, d] matrix as a general directed graph, not necessarily acyclic
        sparsity (float): l1 penalty parameter
        max_iter (int): max num of dual ascent steps
        h_tol (float): exit if |h(w_est)| <= htol
        rho_max (float): exit if rho >= rho_max
        w_threshold (float): drop edge if |weight| < threshold
    Returns:
        W_est (np.ndarray): [d, d] approximate projection to DAGs
    """
    n, d = X.shape
    assert n == d

    def _vec_to_mat(w):
        """Fold the doubled variables ([2 d^2] array, W = w_pos - w_neg) back to [d, d]."""
        return (w[:d * d] - w[d * d:]).reshape([d, d])

    def _acyclicity(W):
        """Value and gradient of the NOTEARS acyclicity constraint h(W)."""
        E = torch.matrix_exp(torch.tensor(W * W)).numpy()
        return np.trace(E) - d, E.T * W * 2

    def _objective(w):
        """Augmented-Lagrangian value and gradient in the doubled parametrisation."""
        W = _vec_to_mat(w)
        residual = W - X
        loss = 0.5 * (residual ** 2).sum()
        h_val, G_h = _acyclicity(W)
        value = loss + 0.5 * rho * h_val * h_val + alpha * h_val + sparsity * w.sum()
        smooth = residual + (rho * h_val + alpha) * G_h
        return value, np.concatenate((smooth + sparsity, -smooth + sparsity), axis=None)

    w_est, rho, alpha, h = np.ones(2 * d * d), 1.0, 0.0, np.inf  # doubled variables (w_pos, w_neg)
    bnds = [(0, 0) if i == j else (0, None) for _ in range(2) for i in range(d) for j in range(d)]
    for _ in range(max_iter):
        w_new, h_new = None, None
        while rho < rho_max:
            sol = sopt.minimize(_objective, w_est, method='L-BFGS-B', jac=True, bounds=bnds)
            w_new = sol.x
            h_new, _unused = _acyclicity(_vec_to_mat(w_new))
            if h_new > 0.25 * h:
                rho *= 10  # constraint not shrinking fast enough: strengthen the penalty
            else:
                break
        w_est, h = w_new, h_new
        alpha += rho * h  # dual ascent step
        if h <= h_tol or rho >= rho_max:
            break
    W_est = _vec_to_mat(w_est)
    # Threshold, then copy the surviving entries back from X.
    P = np.abs(W_est) >= w_threshold
    W_est.fill(0)
    W_est[P] = X[P]
    return W_est, P
def sampler(W, n, f=None, g=None):
    """
    Sample n observations from the SEM defined by W, f, and g.
    :param W: weighted adjacency matrix. size=[d, d]
    :param n: number of samples
    :param f: optional per-node mean networks; linear model W^T x when absent
    :param g: optional per-node scale networks; unit noise when absent
    :return: X: [n, d] sample matrix, and the per-sample negative log-likelihood
    """
    if isinstance(W, np.ndarray):
        W = torch.tensor(W)
    elif not torch.is_tensor(W):
        raise NotImplementedError('Adjacency matrix should be np.ndarray or torch.tensor.')
    d = W.shape[0]
    X = torch.zeros([n, d])
    neg_log_likelihood = torch.zeros([n, d])
    z = torch.normal(0, 1, size=(n, d)).float()
    # Sample parents before children: follow a topological order of the DAG.
    dag = nx.DiGraph(W.detach().cpu().numpy())
    order = list(nx.topological_sort(dag))
    assert len(order) == d
    for j in order:
        WX = W[:, j] * X  # [n, d]
        m_j = f[j](WX).view(n) if f is not None else WX.sum(dim=-1)  # linear model when f is absent
        if g is None:
            sigma_j = 1.0
            log_z = 0.5 * math.log(2 * math.pi)
        else:
            sigma_j = torch.abs(g[j](WX).view(n))
            log_z = 0.5 * math.log(2 * math.pi) + torch.log(sigma_j)
        X[:, j] = m_j + z[:, j] * sigma_j
        neg_log_likelihood[:, j] = log_z + 0.5 * ((X[:, j] - m_j) / sigma_j) ** 2
    return X, torch.sum(neg_log_likelihood, dim=-1)
def count_accuracy(B_true, B_est, verbose=False):
    """Compute various accuracy metrics for B_est.
    true positive = predicted association exists in condition in correct direction
    reverse = predicted association exists in condition in opposite direction
    false positive = predicted association does not exist in condition
    Args:
        B_true (np.ndarray): [d, d] ground truth graph, {0, 1}
        B_est (np.ndarray): [d, d] estimate, {0, 1} for a DAG or {0, 1, -1} for a CPDAG
        Returns:
        fdr: (reverse + false positive) / prediction positive
        tpr: (true positive) / condition positive
        fpr: (reverse + false positive) / condition negative
        shd: undirected extra + undirected missing + reverse
        nnz: prediction positive
    """
    # Validate the estimate; -1 entries mark undirected (CPDAG) edges.
    if (B_est == -1).any():  # cpdag
        if not ((B_est == 0) | (B_est == 1) | (B_est == -1)).all():
            raise ValueError('B_est should take value in {0,1,-1}')
        if ((B_est == -1) & (B_est.T == -1)).any():
            raise ValueError('undirected edge should only appear once')
    else:  # dag
        if not ((B_est == 0) | (B_est == 1)).all():
            raise ValueError('B_est should take value in {0,1}')
        if not is_dag(B_est):
            # Fall back to the approximate DAG projection.
            B_est, _ = project_to_dag(B_est)
            if B_est is None:
                raise ValueError('B_est should be a DAG, fail to project to DAG')
            elif verbose:
                print("Warning: B_est is not DAG, use projection")
    d = B_true.shape[0]
    # Linear indices of predicted / true edges.
    und_pred = np.flatnonzero(B_est == -1)
    dir_pred = np.flatnonzero(B_est == 1)
    true_edges = np.flatnonzero(B_true)
    rev_edges = np.flatnonzero(B_true.T)
    skeleton = np.concatenate([true_edges, rev_edges])
    # True positives: directed hits plus undirected predictions matching either direction.
    tp = np.concatenate([
        np.intersect1d(dir_pred, true_edges, assume_unique=True),
        np.intersect1d(und_pred, skeleton, assume_unique=True),
    ])
    # False positives: predictions outside the true skeleton.
    fp = np.concatenate([
        np.setdiff1d(dir_pred, skeleton, assume_unique=True),
        np.setdiff1d(und_pred, skeleton, assume_unique=True),
    ])
    # Reversed edges: predicted against the true direction.
    reverse = np.intersect1d(np.setdiff1d(dir_pred, true_edges, assume_unique=True),
                             rev_edges, assume_unique=True)
    # Ratio metrics.
    pred_size = len(dir_pred) + len(und_pred)
    cond_neg_size = 0.5 * d * (d - 1) - len(true_edges)
    fdr = float(len(reverse) + len(fp)) / max(pred_size, 1)
    tpr = float(len(tp)) / max(len(true_edges), 1)
    fpr = float(len(reverse) + len(fp)) / max(cond_neg_size, 1)
    # Structural Hamming distance over the lower-triangular skeletons.
    pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))
    cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))
    shd = (len(np.setdiff1d(pred_lower, cond_lower, assume_unique=True))
           + len(np.setdiff1d(cond_lower, pred_lower, assume_unique=True))
           + len(reverse))
    return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'shd': shd, 'nnz': pred_size}
if __name__ == '__main__':
    # Smoke test: draw a small random weight matrix and sparsify it by magnitude.
    d = 2
    threshold = 0.1
    sparsity = 1.0
    x = np.random.uniform(low=-2, high=2, size=[d, d])
    x[np.abs(x)<threshold] = 0  # zero out near-zero entries
    print(x)
    # y, p = project_to_dag(x, max_iter=10, w_threshold=threshold, sparsity=sparsity)
    # print('projected')
    # print(y)
    # print(p)
    # print('dagness')
    # print(is_dag(p))
    #
    # n = 30
    # mu = np.zeros(d)
    # sigma = np.ones(d)
    # x_np = sampler(y, n, mu, sigma)
    # print(x_np)
    # x_torch = sampler(torch.tensor(y), n, torch.tensor(mu), torch.tensor(sigma))
    # print(x_torch)
    #
    # import matplotlib.pyplot as plt
    # fig = plt.figure()
    # plt.scatter(x_np[:,0], x_np[:,1])
    # plt.scatter(x_torch.detach().numpy()[:,0], x_torch.detach().numpy()[:,1])
    # plt.show()
| [
"numpy.trace",
"numpy.abs",
"torch.eye",
"torch.matrix_exp",
"numpy.ones",
"scipy.optimize.minimize",
"networkx.topological_sort",
"torch.zeros",
"numpy.intersect1d",
"torch.is_tensor",
"math.log",
"torch.log",
"torch.trace",
"torch.matrix_power",
"torch.normal",
"torch.einsum",
"net... | [((7429, 7448), 'torch.zeros', 'torch.zeros', (['[n, d]'], {}), '([n, d])\n', (7440, 7448), False, 'import torch\n'), ((7474, 7493), 'torch.zeros', 'torch.zeros', (['[n, d]'], {}), '([n, d])\n', (7485, 7493), False, 'import torch\n'), ((9931, 9958), 'numpy.flatnonzero', 'np.flatnonzero', (['(B_est == -1)'], {}), '(B_est == -1)\n', (9945, 9958), True, 'import numpy as np\n'), ((9970, 9996), 'numpy.flatnonzero', 'np.flatnonzero', (['(B_est == 1)'], {}), '(B_est == 1)\n', (9984, 9996), True, 'import numpy as np\n'), ((10008, 10030), 'numpy.flatnonzero', 'np.flatnonzero', (['B_true'], {}), '(B_true)\n', (10022, 10030), True, 'import numpy as np\n'), ((10051, 10075), 'numpy.flatnonzero', 'np.flatnonzero', (['B_true.T'], {}), '(B_true.T)\n', (10065, 10075), True, 'import numpy as np\n'), ((10096, 10133), 'numpy.concatenate', 'np.concatenate', (['[cond, cond_reversed]'], {}), '([cond, cond_reversed])\n', (10110, 10133), True, 'import numpy as np\n'), ((10164, 10210), 'numpy.intersect1d', 'np.intersect1d', (['pred', 'cond'], {'assume_unique': '(True)'}), '(pred, cond, assume_unique=True)\n', (10178, 10210), True, 'import numpy as np\n'), ((10268, 10327), 'numpy.intersect1d', 'np.intersect1d', (['pred_und', 'cond_skeleton'], {'assume_unique': '(True)'}), '(pred_und, cond_skeleton, assume_unique=True)\n', (10282, 10327), True, 'import numpy as np\n'), ((10343, 10383), 'numpy.concatenate', 'np.concatenate', (['[true_pos, true_pos_und]'], {}), '([true_pos, true_pos_und])\n', (10357, 10383), True, 'import numpy as np\n'), ((10416, 10469), 'numpy.setdiff1d', 'np.setdiff1d', (['pred', 'cond_skeleton'], {'assume_unique': '(True)'}), '(pred, cond_skeleton, assume_unique=True)\n', (10428, 10469), True, 'import numpy as np\n'), ((10490, 10547), 'numpy.setdiff1d', 'np.setdiff1d', (['pred_und', 'cond_skeleton'], {'assume_unique': '(True)'}), '(pred_und, cond_skeleton, assume_unique=True)\n', (10502, 10547), True, 'import numpy as np\n'), ((10564, 10606), 'numpy.concatenate', 
'np.concatenate', (['[false_pos, false_pos_und]'], {}), '([false_pos, false_pos_und])\n', (10578, 10606), True, 'import numpy as np\n'), ((10633, 10677), 'numpy.setdiff1d', 'np.setdiff1d', (['pred', 'cond'], {'assume_unique': '(True)'}), '(pred, cond, assume_unique=True)\n', (10645, 10677), True, 'import numpy as np\n'), ((10692, 10748), 'numpy.intersect1d', 'np.intersect1d', (['extra', 'cond_reversed'], {'assume_unique': '(True)'}), '(extra, cond_reversed, assume_unique=True)\n', (10706, 10748), True, 'import numpy as np\n'), ((11220, 11276), 'numpy.setdiff1d', 'np.setdiff1d', (['pred_lower', 'cond_lower'], {'assume_unique': '(True)'}), '(pred_lower, cond_lower, assume_unique=True)\n', (11232, 11276), True, 'import numpy as np\n'), ((11297, 11353), 'numpy.setdiff1d', 'np.setdiff1d', (['cond_lower', 'pred_lower'], {'assume_unique': '(True)'}), '(cond_lower, pred_lower, assume_unique=True)\n', (11309, 11353), True, 'import numpy as np\n'), ((11581, 11627), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2)', 'high': '(2)', 'size': '[d, d]'}), '(low=-2, high=2, size=[d, d])\n', (11598, 11627), True, 'import numpy as np\n'), ((427, 451), 'torch.matrix_power', 'torch.matrix_power', (['x', 'd'], {}), '(x, d)\n', (445, 451), False, 'import torch\n'), ((2414, 2437), 'torch.matrix_exp', 'torch.matrix_exp', (['input'], {}), '(input)\n', (2430, 2437), False, 'import torch\n'), ((2455, 2471), 'torch.trace', 'torch.trace', (['E_A'], {}), '(E_A)\n', (2466, 2471), False, 'import torch\n'), ((3997, 4010), 'networkx.DiGraph', 'nx.DiGraph', (['W'], {}), '(W)\n', (4007, 4010), True, 'import networkx as nx\n'), ((4026, 4057), 'networkx.is_directed_acyclic_graph', 'nx.is_directed_acyclic_graph', (['G'], {}), '(G)\n', (4054, 4057), True, 'import networkx as nx\n'), ((6084, 6154), 'numpy.concatenate', 'np.concatenate', (['(G_smooth + sparsity, -G_smooth + sparsity)'], {'axis': 'None'}), '((G_smooth + sparsity, -G_smooth + sparsity), axis=None)\n', (6098, 6154), True, 
'import numpy as np\n'), ((6210, 6228), 'numpy.ones', 'np.ones', (['(2 * d * d)'], {}), '(2 * d * d)\n', (6217, 6228), True, 'import numpy as np\n'), ((6876, 6889), 'numpy.abs', 'np.abs', (['W_est'], {}), '(W_est)\n', (6882, 6889), True, 'import numpy as np\n'), ((7259, 7274), 'torch.tensor', 'torch.tensor', (['W'], {}), '(W)\n', (7271, 7274), False, 'import torch\n'), ((7660, 7682), 'networkx.topological_sort', 'nx.topological_sort', (['G'], {}), '(G)\n', (7679, 7682), True, 'import networkx as nx\n'), ((8288, 8325), 'torch.sum', 'torch.sum', (['neg_log_likelihood'], {'dim': '(-1)'}), '(neg_log_likelihood, dim=-1)\n', (8297, 8325), False, 'import torch\n'), ((11116, 11140), 'numpy.tril', 'np.tril', (['(B_est + B_est.T)'], {}), '(B_est + B_est.T)\n', (11123, 11140), True, 'import numpy as np\n'), ((11174, 11200), 'numpy.tril', 'np.tril', (['(B_true + B_true.T)'], {}), '(B_true + B_true.T)\n', (11181, 11200), True, 'import numpy as np\n'), ((617, 641), 'torch.matrix_power', 'torch.matrix_power', (['x', 'd'], {}), '(x, d)\n', (635, 641), False, 'import torch\n'), ((814, 833), 'torch.trace', 'torch.trace', (['expd_W'], {}), '(expd_W)\n', (825, 833), False, 'import torch\n'), ((1280, 1311), 'torch.matrix_exp', 'torch.matrix_exp', (['(input * input)'], {}), '(input * input)\n', (1296, 1311), False, 'import torch\n'), ((1335, 1353), 'torch.trace', 'torch.trace', (['e_W_W'], {}), '(e_W_W)\n', (1346, 1353), False, 'import torch\n'), ((3528, 3537), 'numpy.abs', 'np.abs', (['W'], {}), '(W)\n', (3534, 3537), True, 'import numpy as np\n'), ((5507, 5518), 'numpy.trace', 'np.trace', (['E'], {}), '(E)\n', (5515, 5518), True, 'import numpy as np\n'), ((6494, 6563), 'scipy.optimize.minimize', 'sopt.minimize', (['_func', 'w_est'], {'method': '"""L-BFGS-B"""', 'jac': '(True)', 'bounds': 'bnds'}), "(_func, w_est, method='L-BFGS-B', jac=True, bounds=bnds)\n", (6507, 6563), True, 'import scipy.optimize as sopt\n'), ((7288, 7306), 'torch.is_tensor', 'torch.is_tensor', (['W'], {}), 
'(W)\n', (7303, 7306), False, 'import torch\n'), ((7503, 7534), 'torch.normal', 'torch.normal', (['(0)', '(1)'], {'size': '(n, d)'}), '(0, 1, size=(n, d))\n', (7515, 7534), False, 'import torch\n'), ((11634, 11643), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (11640, 11643), True, 'import numpy as np\n'), ((880, 910), 'torch.einsum', 'torch.einsum', (['"""bii->b"""', 'expd_W'], {}), "('bii->b', expd_W)\n", (892, 910), False, 'import torch\n'), ((1480, 1511), 'torch.matrix_exp', 'torch.matrix_exp', (['(input * input)'], {}), '(input * input)\n', (1496, 1511), False, 'import torch\n'), ((1535, 1564), 'torch.einsum', 'torch.einsum', (['"""bii->b"""', 'e_W_W'], {}), "('bii->b', e_W_W)\n", (1547, 1564), False, 'import torch\n'), ((8041, 8059), 'torch.log', 'torch.log', (['sigma_j'], {}), '(sigma_j)\n', (8050, 8059), False, 'import torch\n'), ((8126, 8147), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (8134, 8147), False, 'import math\n'), ((378, 390), 'torch.eye', 'torch.eye', (['d'], {}), '(d)\n', (387, 390), False, 'import torch\n'), ((5466, 5485), 'torch.tensor', 'torch.tensor', (['(W * W)'], {}), '(W * W)\n', (5478, 5485), False, 'import torch\n'), ((8017, 8038), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (8025, 8038), False, 'import math\n'), ((530, 542), 'torch.eye', 'torch.eye', (['d'], {}), '(d)\n', (539, 542), False, 'import torch\n')] |
# Author: <NAME>
# Last modification: 2021/22/09
import numpy as np
import torch
import pycuda.autoinit
from pycuda.gpuarray import to_gpu
from cufinufft import cufinufft
from nufftbindings.basenufft import *
# Note: in order for cuFINUFFT to work with pytorch tensors (and not GPUArray from pycuda), we have to make the following changes in the 'execute' method in lib/python3.XX/site-packages/cufinufft/cufinufft.py:
# Comment out the check ```if not c.dtype == fk.dtype == self.complex_dtype:```
# Replace c.ptr with c and fk.ptr with fk in ier = self._exec_plan(c.ptr, fk.ptr, self.plan)
class Nufft(baseNUFFT):
    def _set_dims(self):
        # Build centred pixel-coordinate grids (origin at the image centre);
        # the backward_* methods use them to differentiate w.r.t. the trajectory xi.
        xx = np.arange(self.nx)-self.nx/2.
        xy = np.arange(self.ny)-self.ny/2.
        if self.ndim==2:
            self.XX, self.XY = np.meshgrid(xx, xy)
        if self.ndim==3:
            xz = np.arange(self.nz)-self.nz/2.
            self.XX, self.XY, self.XZ = np.meshgrid(xx, xy, xz)
        # .T compensates for np.meshgrid's default 'xy' indexing
        # (NOTE(review): for the 3-D case .T reverses all three axes — confirm
        # this matches the intended axis order).
        self.XX = torch.tensor(self.XX.T, device=self.device)
        self.XY = torch.tensor(self.XY.T, device=self.device)
        if self.ndim==3:
            self.XZ = torch.tensor(self.XZ.T, device=self.device)
        if self.Nbatch is None:
            raise Exception("The batch size should be specified in set_dims")
        # cuFINUFFT plans are created lazily in precompute().
        self.plan_forward = None
        self.plan_adjoint = None
        self.plan_forward_batch = None
        self.plan_adjoint_batch = None
    def precompute(self, xi):
        """Build (or rebuild) the cuFINUFFT plans for the trajectory xi.

        xi: tensor of nonuniform sample points, one row per point with one
        column per dimension. Creates type-2 (forward) and type-1 (adjoint)
        plans, both for a single image and for a batch of self.Nbatch images,
        and registers xi's coordinates with each plan.
        """
        xinp = xi.detach().cpu().numpy()
        # Drop any plans built for a previous trajectory before re-planning.
        if self.plan_forward is not None:
            del self.plan_forward
            del self.plan_adjoint
            del self.plan_forward_batch
            del self.plan_adjoint_batch
        if self.ndim==2:
            self.plan_forward = cufinufft(2, (self.nx, self.ny), 1, eps=self.eps, dtype=self.np_dtype)
            self.plan_adjoint = cufinufft(1, (self.nx, self.ny), 1, eps=self.eps, dtype=self.np_dtype)
            self.plan_forward_batch = cufinufft(2, (self.nx, self.ny), self.Nbatch, eps=self.eps, dtype=self.np_dtype)
            self.plan_adjoint_batch = cufinufft(1, (self.nx, self.ny), self.Nbatch, eps=self.eps, dtype=self.np_dtype)
            # Each plan gets its own GPU copy of the point coordinates.
            self.plan_forward.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)))
            self.plan_adjoint.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)))
            self.plan_forward_batch.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)))
            self.plan_adjoint_batch.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)))
        elif self.ndim==3:
            self.plan_forward = cufinufft(2, (self.nx, self.ny, self.nz), 1, eps=self.eps, dtype=self.np_dtype)
            self.plan_adjoint = cufinufft(1, (self.nx, self.ny, self.nz), 1, eps=self.eps, dtype=self.np_dtype)
            self.plan_forward_batch = cufinufft(2, (self.nx, self.ny, self.nz), self.Nbatch, eps=self.eps, dtype=self.np_dtype)
            self.plan_adjoint_batch = cufinufft(1, (self.nx, self.ny, self.nz), self.Nbatch, eps=self.eps, dtype=self.np_dtype)
            self.plan_forward.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)), to_gpu(xinp[:,2].astype(self.np_dtype)))
            self.plan_adjoint.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)), to_gpu(xinp[:,2].astype(self.np_dtype)))
            self.plan_forward_batch.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)), to_gpu(xinp[:,2].astype(self.np_dtype)))
            self.plan_adjoint_batch.set_pts(to_gpu(xinp[:,0].astype(self.np_dtype)), to_gpu(xinp[:,1].astype(self.np_dtype)), to_gpu(xinp[:,2].astype(self.np_dtype)))
        # Remember the trajectory the plans were built for.
        self.xiprecomputed = xi.clone()
        self.precomputedTrig = True
    def _forward2D(self, f, xi):
        """Type-2 (uniform -> nonuniform) 2D NUFFT of the image batch f.

        f: complex [Nbatch, nx, ny] tensor, or real [Nbatch, nx, ny, 2] tensor
        with a trailing real/imag axis. Returns y of shape [Nbatch, K]
        (complex) or [Nbatch, K, 2] (real pairs). Requires precompute(xi).
        """
        self.test_xi(xi)
        self.test_f(f)
        ndim = len(f.shape)
        iscpx = f.is_complex()
        # Accept either the real-pair layout (4 dims) or a complex tensor (3 dims).
        if ndim==4 and not iscpx or ndim==3 and iscpx:
            Nbatch = f.shape[0]
            if iscpx:
                y = torch.zeros(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)
                fcpx = f.type(self.torch_cpxdtype).contiguous()
            else:
                y = torch.zeros(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)
                fcpx = f.type(self.torch_dtype).contiguous()
            # The patched cufinufft.execute writes directly through the raw pointers.
            if Nbatch==1:
                self.plan_forward.execute(y.data_ptr(), fcpx.data_ptr())
            else:
                self.plan_forward_batch.execute(y.data_ptr(), fcpx.data_ptr())
            return y
        else:
            raise Exception("Error: f should have 4 dimensions (one axis for real/imaginary parts) or 3 dimensions (complex)")
    def _adjoint2D(self, y, xi):
        """Type-1 (nonuniform -> uniform) 2D NUFFT adjoint of the sample batch y.

        y: complex [Nbatch, K] tensor, or real [Nbatch, K, 2] tensor with a
        trailing real/imag axis. Returns f of shape [Nbatch, nx, ny] (complex)
        or [Nbatch, nx, ny, 2] (real pairs). Requires precompute(xi).
        """
        self.test_xi(xi)
        self.test_f(y)
        ndim = len(y.shape)
        iscpx = y.is_complex()
        # Accept either the real-pair layout (3 dims) or a complex tensor (2 dims).
        if ndim==3 and not iscpx or ndim==2 and iscpx:
            Nbatch = y.shape[0]
            if iscpx:
                f = torch.zeros(Nbatch, self.nx, self.ny, device=self.device, dtype=self.torch_cpxdtype)
                ycpx = y.type(self.torch_cpxdtype).contiguous()
            else:
                f = torch.zeros(Nbatch, self.nx, self.ny, 2, device=self.device, dtype=self.torch_dtype)
                ycpx = y.type(self.torch_dtype).contiguous()
            # The patched cufinufft.execute writes directly through the raw pointers.
            if Nbatch==1:
                self.plan_adjoint.execute(ycpx.data_ptr(), f.data_ptr())
            else:
                self.plan_adjoint_batch.execute(ycpx.data_ptr(), f.data_ptr())
            return f
        else:
            raise Exception("Error: y should have 3 dimensions (one axis for real/imaginary parts) or 2 dimensions (complex)")
    def _backward_forward2D(self, f, g, xi):
        """Gradient of the forward 2D NUFFT w.r.t. the trajectory xi.

        f: the forward input image batch; g: the upstream gradient w.r.t. the
        forward output. Returns a tensor shaped like xi. Transforms the
        coordinate-weighted images XX*f and XY*f and combines real/imaginary
        parts with g, summing over the batch.
        """
        self.test_xi(xi)
        ndim = len(f.shape)
        iscpx = f.is_complex()
        if ndim==4 and not iscpx or ndim==3 and iscpx:
            Nbatch = f.shape[0]
            # Weight the image by the pixel coordinates along each axis.
            if iscpx:
                vec_fx = torch.mul(self.XX[None,:,:].contiguous(), f.contiguous())
                vec_fy = torch.mul(self.XY[None,:,:].contiguous(), f.contiguous())
            else:
                vec_fx = torch.mul(self.XX[None,:,:,None].contiguous(), f.contiguous())
                vec_fy = torch.mul(self.XY[None,:,:,None].contiguous(), f.contiguous())
            grad = torch.zeros_like(xi)
            if iscpx:
                tmp = torch.zeros(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)
                vec_fx = vec_fx.type(self.torch_cpxdtype).contiguous()
            else:
                tmp = torch.zeros(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)
                vec_fx = vec_fx.type(self.torch_dtype).contiguous()
            if Nbatch==1:
                self.plan_forward.execute(tmp.data_ptr(), vec_fx.data_ptr())
            else:
                self.plan_forward_batch.execute(tmp.data_ptr(), vec_fx.data_ptr())
            # grad_x = sum_batch Im(F{x*f}) * Re(g) - Re(F{x*f}) * Im(g)
            if iscpx:
                grad[:,0] = ( torch.mul(tmp.imag, g.real) - torch.mul(tmp.real, g.imag) ).sum(axis=0)
            else:
                grad[:,0] = ( torch.mul(tmp[...,1], g[...,0]) - torch.mul(tmp[...,0], g[...,1]) ).sum(axis=0)
            # Same transform and combination for the y axis.
            if iscpx:
                vec_fy = vec_fy.type(self.torch_cpxdtype).contiguous()
            else:
                vec_fy = vec_fy.type(self.torch_dtype).contiguous()
            if Nbatch==1:
                self.plan_forward.execute(tmp.data_ptr(), vec_fy.data_ptr())
            else:
                self.plan_forward_batch.execute(tmp.data_ptr(), vec_fy.data_ptr())
            if iscpx:
                grad[:,1] = ( torch.mul(tmp.imag, g.real) - torch.mul(tmp.real, g.imag) ).sum(axis=0)
            else:
                grad[:,1] = ( torch.mul(tmp[...,1], g[...,0]) - torch.mul(tmp[...,0], g[...,1]) ).sum(axis=0)
            return grad
        else:
            raise Exception("Error: f should have 4 dimensions (one axis for real/imaginary parts) or 3 dimensions (complex)")
    def _backward_adjoint2D(self, y, g, xi):
        """Gradient of the adjoint 2D NUFFT w.r.t. the trajectory xi.

        y: the adjoint input sample batch; g: the upstream gradient w.r.t. the
        adjoint output image. Returns a tensor shaped like xi. Forward-transforms
        the coordinate-weighted gradients XX*g and XY*g and combines
        real/imaginary parts with y, summing over the batch.
        """
        self.test_xi(xi)
        ndim = len(y.shape)
        iscpx = y.is_complex()
        if ndim==3 and not iscpx or ndim==2 and iscpx:
            Nbatch = y.shape[0]
            # Weight the upstream image gradient by the pixel coordinates.
            if iscpx:
                vecx_grad_output = torch.mul(self.XX[None,:,:].contiguous(), g.contiguous())
                vecy_grad_output = torch.mul(self.XY[None,:,:].contiguous(), g.contiguous())
            else:
                vecx_grad_output = torch.mul(self.XX[None,:,:,None].contiguous(), g.contiguous())
                vecy_grad_output = torch.mul(self.XY[None,:,:,None].contiguous(), g.contiguous())
            grad = torch.zeros_like(xi)
            if iscpx:
                tmp = torch.zeros(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)
                vecx_grad_output = vecx_grad_output.type(self.torch_cpxdtype).contiguous()
            else:
                tmp = torch.zeros(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)
                vecx_grad_output = vecx_grad_output.type(self.torch_dtype).contiguous()
            if Nbatch==1:
                self.plan_forward.execute(tmp.data_ptr(), vecx_grad_output.data_ptr())
            else:
                self.plan_forward_batch.execute(tmp.data_ptr(), vecx_grad_output.data_ptr())
            # grad_x = sum_batch Im(F{x*g}) * Re(y) - Re(F{x*g}) * Im(y)
            if iscpx:
                grad[:,0] = ( torch.mul(tmp.imag, y.real) - torch.mul(tmp.real, y.imag) ).sum(axis=0)
            else:
                grad[:,0] = ( torch.mul(tmp[...,1], y[...,0]) - torch.mul(tmp[...,0], y[...,1]) ).sum(axis=0)
            # Same transform and combination for the y axis.
            if iscpx:
                vecy_grad_output = vecy_grad_output.type(self.torch_cpxdtype).contiguous()
            else:
                vecy_grad_output = vecy_grad_output.type(self.torch_dtype).contiguous()
            if Nbatch==1:
                self.plan_forward.execute(tmp.data_ptr(), vecy_grad_output.data_ptr())
            else:
                self.plan_forward_batch.execute(tmp.data_ptr(), vecy_grad_output.data_ptr())
            if iscpx:
                grad[:,1] = ( torch.mul(tmp.imag, y.real) - torch.mul(tmp.real, y.imag) ).sum(axis=0)
            else:
                grad[:,1] = ( torch.mul(tmp[...,1], y[...,0]) - torch.mul(tmp[...,0], y[...,1]) ).sum(axis=0)
            return grad
        else:
            raise Exception("Error: y should have 3 dimensions (one axis for real/imaginary parts) or 2 dimensions (complex)")
    def _forward3D(self, f, xi):
        """Type-2 (uniform -> nonuniform) 3D NUFFT of the image batch f.

        f: real [Nbatch, nx, ny, nz, 2] tensor with a trailing real/imag axis
        (unlike the 2D path, no complex-dtype variant is handled here).
        Returns y of shape [Nbatch, K, 2]. Requires precompute(xi).
        """
        self.test_xi(xi)
        self.test_f(f)
        ndim = len(f.shape)
        if ndim==5:
            Nbatch = f.shape[0]
            y = torch.zeros(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)
            fcpx = f.type(self.torch_dtype).contiguous()
            # The patched cufinufft.execute writes directly through the raw pointers.
            if Nbatch==1:
                self.plan_forward.execute(y.data_ptr(), fcpx.data_ptr())
            else:
                self.plan_forward_batch.execute(y.data_ptr(), fcpx.data_ptr())
            return y
        else:
            raise Exception("Error: f should have 5 dimensions")
def _adjoint3D(self, y, xi):
self.test_xi(xi)
self.test_f(y)
ndim = len(y.shape)
if ndim==3:
Nbatch = y.shape[0]
f = torch.zeros(Nbatch, self.nx, self.ny, self.nz, 2, device=self.device, dtype=self.torch_dtype)
ycpx = y.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_adjoint.execute(ycpx.data_ptr(), f.data_ptr())
else:
self.plan_adjoint_batch.execute(ycpx.data_ptr(), f.data_ptr())
return f
else:
raise Exception("Error: y should have 3 dimensions")
def _backward_forward3D(self, f, g, xi):
self.test_xi(xi)
ndim = len(f.shape)
if ndim==5:
Nbatch = f.shape[0]
vec_fx = torch.mul(self.XX[None,:,:,:,None].contiguous(), f.contiguous())
vec_fy = torch.mul(self.XY[None,:,:,:,None].contiguous(), f.contiguous())
vec_fz = torch.mul(self.XZ[None,:,:,:,None].contiguous(), f.contiguous())
grad = torch.zeros_like(xi)
tmp = torch.zeros(Nbatch, self.K, 3, device=self.device, dtype=self.torch_dtype)
vec_fx = vec_fx.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vec_fx.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vec_fx.data_ptr())
grad[:,0] = ( torch.mul(tmp[...,1], g[...,0]) - torch.mul(tmp[...,0], g[...,1]) ).sum(axis=0)
vec_fy = vec_fy.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vec_fy.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vec_fy.data_ptr())
grad[:,1] = ( torch.mul(tmp[...,1], g[...,0]) - torch.mul(tmp[...,0], g[...,1]) ).sum(axis=0)
vec_fz = vec_fz.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vec_fz.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vec_fz.data_ptr())
grad[:,2] = ( torch.mul(tmp[...,1], g[...,0]) - torch.mul(tmp[...,0], g[...,1]) ).sum(axis=0)
return grad
else:
raise Exception("Error: f should have 5 dimensions")
def _backward_adjoint3D(self, y, g, xi):
self.test_xi(xi)
ndim = len(y.shape)
if ndim==3:
Nbatch = y.shape[0]
vecx_grad_output = torch.mul(self.XX[None,:,:,:,None].contiguous(), g.contiguous())
vecy_grad_output = torch.mul(self.XY[None,:,:,:,None].contiguous(), g.contiguous())
vecz_grad_output = torch.mul(self.XZ[None,:,:,:,None].contiguous(), g.contiguous())
grad = torch.zeros_like(xi)
tmp = torch.zeros(Nbatch, self.K, 3, device=self.device, dtype=self.torch_dtype)
vecx_grad_output = vecx_grad_output.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vecx_grad_output.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vecx_grad_output.data_ptr())
grad[:,0] = ( torch.mul(tmp[...,1], y[...,0]) - torch.mul(tmp[...,0], y[...,1]) ).sum(axis=0)
vecy_grad_output = vecy_grad_output.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vecy_grad_output.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vecy_grad_output.data_ptr())
grad[:,1] = ( torch.mul(tmp[...,1], y[...,0]) - torch.mul(tmp[...,0], y[...,1]) ).sum(axis=0)
vecz_grad_output = vecz_grad_output.type(self.torch_dtype).contiguous()
if Nbatch==1:
self.plan_forward.execute(tmp.data_ptr(), vecz_grad_output.data_ptr())
else:
self.plan_forward_batch.execute(tmp.data_ptr(), vecz_grad_output.data_ptr())
grad[:,2] = ( torch.mul(tmp[...,1], y[...,0]) - torch.mul(tmp[...,0], y[...,1]) ).sum(axis=0)
return grad
else:
raise Exception("Error: y should have 3 dimensions")
nufft=Nufft()
class FClass(torch.autograd.Function):
@staticmethod
def forward(ctx, xi, f):
ctx.save_for_backward(xi, f)
output = nufft.forward(f, xi)
return output
@staticmethod
def backward(ctx, grad_output):
xi, f = ctx.saved_tensors
grad_input = nufft.backward_forward(f, grad_output, xi)
grad_input_f = nufft.adjoint(grad_output, xi)
return grad_input, grad_input_f
class FtClass(torch.autograd.Function):
@staticmethod
def forward(ctx, xi, y):
ctx.save_for_backward(xi, y)
output = nufft.adjoint(y, xi)
return output
@staticmethod
def backward(ctx, grad_output):
xi, y = ctx.saved_tensors
grad_input = nufft.backward_adjoint(y, grad_output, xi)
grad_input_y = nufft.forward(grad_output, xi)
return grad_input, grad_input_y
forward = FClass.apply
adjoint = FtClass.apply
| [
"cufinufft.cufinufft",
"numpy.meshgrid",
"torch.zeros_like",
"torch.mul",
"numpy.arange",
"torch.zeros",
"torch.tensor"
] | [((949, 992), 'torch.tensor', 'torch.tensor', (['self.XX.T'], {'device': 'self.device'}), '(self.XX.T, device=self.device)\n', (961, 992), False, 'import torch\n'), ((1011, 1054), 'torch.tensor', 'torch.tensor', (['self.XY.T'], {'device': 'self.device'}), '(self.XY.T, device=self.device)\n', (1023, 1054), False, 'import torch\n'), ((646, 664), 'numpy.arange', 'np.arange', (['self.nx'], {}), '(self.nx)\n', (655, 664), True, 'import numpy as np\n'), ((689, 707), 'numpy.arange', 'np.arange', (['self.ny'], {}), '(self.ny)\n', (698, 707), True, 'import numpy as np\n'), ((775, 794), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'xy'], {}), '(xx, xy)\n', (786, 794), True, 'import numpy as np\n'), ((907, 930), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'xy', 'xz'], {}), '(xx, xy, xz)\n', (918, 930), True, 'import numpy as np\n'), ((1102, 1145), 'torch.tensor', 'torch.tensor', (['self.XZ.T'], {'device': 'self.device'}), '(self.XZ.T, device=self.device)\n', (1114, 1145), False, 'import torch\n'), ((1722, 1792), 'cufinufft.cufinufft', 'cufinufft', (['(2)', '(self.nx, self.ny)', '(1)'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(2, (self.nx, self.ny), 1, eps=self.eps, dtype=self.np_dtype)\n', (1731, 1792), False, 'from cufinufft import cufinufft\n'), ((1825, 1895), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(self.nx, self.ny)', '(1)'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(1, (self.nx, self.ny), 1, eps=self.eps, dtype=self.np_dtype)\n', (1834, 1895), False, 'from cufinufft import cufinufft\n'), ((1934, 2019), 'cufinufft.cufinufft', 'cufinufft', (['(2)', '(self.nx, self.ny)', 'self.Nbatch'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(2, (self.nx, self.ny), self.Nbatch, eps=self.eps, dtype=self.np_dtype\n )\n', (1943, 2019), False, 'from cufinufft import cufinufft\n'), ((2053, 2138), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(self.nx, self.ny)', 'self.Nbatch'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(1, (self.nx, self.ny), self.Nbatch, 
eps=self.eps, dtype=self.np_dtype\n )\n', (2062, 2138), False, 'from cufinufft import cufinufft\n'), ((6389, 6409), 'torch.zeros_like', 'torch.zeros_like', (['xi'], {}), '(xi)\n', (6405, 6409), False, 'import torch\n'), ((8704, 8724), 'torch.zeros_like', 'torch.zeros_like', (['xi'], {}), '(xi)\n', (8720, 8724), False, 'import torch\n'), ((10657, 10731), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)\n', (10668, 10731), False, 'import torch\n'), ((11262, 11360), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.nx', 'self.ny', 'self.nz', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.nx, self.ny, self.nz, 2, device=self.device, dtype\n =self.torch_dtype)\n', (11273, 11360), False, 'import torch\n'), ((12138, 12158), 'torch.zeros_like', 'torch.zeros_like', (['xi'], {}), '(xi)\n', (12154, 12158), False, 'import torch\n'), ((12178, 12252), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(3)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 3, device=self.device, dtype=self.torch_dtype)\n', (12189, 12252), False, 'import torch\n'), ((13944, 13964), 'torch.zeros_like', 'torch.zeros_like', (['xi'], {}), '(xi)\n', (13960, 13964), False, 'import torch\n'), ((13984, 14058), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(3)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 3, device=self.device, dtype=self.torch_dtype)\n', (13995, 14058), False, 'import torch\n'), ((837, 855), 'numpy.arange', 'np.arange', (['self.nz'], {}), '(self.nz)\n', (846, 855), True, 'import numpy as np\n'), ((2686, 2765), 'cufinufft.cufinufft', 'cufinufft', (['(2)', '(self.nx, self.ny, self.nz)', '(1)'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(2, (self.nx, self.ny, self.nz), 1, eps=self.eps, dtype=self.np_dtype)\n', (2695, 2765), False, 'from cufinufft import 
cufinufft\n'), ((2798, 2877), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(self.nx, self.ny, self.nz)', '(1)'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(1, (self.nx, self.ny, self.nz), 1, eps=self.eps, dtype=self.np_dtype)\n', (2807, 2877), False, 'from cufinufft import cufinufft\n'), ((2916, 3010), 'cufinufft.cufinufft', 'cufinufft', (['(2)', '(self.nx, self.ny, self.nz)', 'self.Nbatch'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(2, (self.nx, self.ny, self.nz), self.Nbatch, eps=self.eps, dtype=\n self.np_dtype)\n', (2925, 3010), False, 'from cufinufft import cufinufft\n'), ((3044, 3138), 'cufinufft.cufinufft', 'cufinufft', (['(1)', '(self.nx, self.ny, self.nz)', 'self.Nbatch'], {'eps': 'self.eps', 'dtype': 'self.np_dtype'}), '(1, (self.nx, self.ny, self.nz), self.Nbatch, eps=self.eps, dtype=\n self.np_dtype)\n', (3053, 3138), False, 'from cufinufft import cufinufft\n'), ((4139, 4213), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K'], {'device': 'self.device', 'dtype': 'self.torch_cpxdtype'}), '(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)\n', (4150, 4213), False, 'import torch\n'), ((4316, 4390), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)\n', (4327, 4390), False, 'import torch\n'), ((5079, 5168), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.nx', 'self.ny'], {'device': 'self.device', 'dtype': 'self.torch_cpxdtype'}), '(Nbatch, self.nx, self.ny, device=self.device, dtype=self.\n torch_cpxdtype)\n', (5090, 5168), False, 'import torch\n'), ((5266, 5355), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.nx', 'self.ny', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.nx, self.ny, 2, device=self.device, dtype=self.\n torch_dtype)\n', (5277, 5355), False, 'import torch\n'), ((6455, 6529), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K'], {'device': 
'self.device', 'dtype': 'self.torch_cpxdtype'}), '(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)\n', (6466, 6529), False, 'import torch\n'), ((6641, 6715), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)\n', (6652, 6715), False, 'import torch\n'), ((8770, 8844), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K'], {'device': 'self.device', 'dtype': 'self.torch_cpxdtype'}), '(Nbatch, self.K, device=self.device, dtype=self.torch_cpxdtype)\n', (8781, 8844), False, 'import torch\n'), ((8976, 9050), 'torch.zeros', 'torch.zeros', (['Nbatch', 'self.K', '(2)'], {'device': 'self.device', 'dtype': 'self.torch_dtype'}), '(Nbatch, self.K, 2, device=self.device, dtype=self.torch_dtype)\n', (8987, 9050), False, 'import torch\n'), ((12548, 12581), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'g[..., 0]'], {}), '(tmp[..., 1], g[..., 0])\n', (12557, 12581), False, 'import torch\n'), ((12582, 12615), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'g[..., 1]'], {}), '(tmp[..., 0], g[..., 1])\n', (12591, 12615), False, 'import torch\n'), ((12924, 12957), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'g[..., 0]'], {}), '(tmp[..., 1], g[..., 0])\n', (12933, 12957), False, 'import torch\n'), ((12958, 12991), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'g[..., 1]'], {}), '(tmp[..., 0], g[..., 1])\n', (12967, 12991), False, 'import torch\n'), ((13300, 13333), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'g[..., 0]'], {}), '(tmp[..., 1], g[..., 0])\n', (13309, 13333), False, 'import torch\n'), ((13334, 13367), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'g[..., 1]'], {}), '(tmp[..., 0], g[..., 1])\n', (13343, 13367), False, 'import torch\n'), ((14394, 14427), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'y[..., 0]'], {}), '(tmp[..., 1], y[..., 0])\n', (14403, 14427), False, 'import torch\n'), ((14428, 14461), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 
'y[..., 1]'], {}), '(tmp[..., 0], y[..., 1])\n', (14437, 14461), False, 'import torch\n'), ((14810, 14843), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'y[..., 0]'], {}), '(tmp[..., 1], y[..., 0])\n', (14819, 14843), False, 'import torch\n'), ((14844, 14877), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'y[..., 1]'], {}), '(tmp[..., 0], y[..., 1])\n', (14853, 14877), False, 'import torch\n'), ((15226, 15259), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'y[..., 0]'], {}), '(tmp[..., 1], y[..., 0])\n', (15235, 15259), False, 'import torch\n'), ((15260, 15293), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'y[..., 1]'], {}), '(tmp[..., 0], y[..., 1])\n', (15269, 15293), False, 'import torch\n'), ((7041, 7068), 'torch.mul', 'torch.mul', (['tmp.imag', 'g.real'], {}), '(tmp.imag, g.real)\n', (7050, 7068), False, 'import torch\n'), ((7071, 7098), 'torch.mul', 'torch.mul', (['tmp.real', 'g.imag'], {}), '(tmp.real, g.imag)\n', (7080, 7098), False, 'import torch\n'), ((7161, 7194), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'g[..., 0]'], {}), '(tmp[..., 1], g[..., 0])\n', (7170, 7194), False, 'import torch\n'), ((7195, 7228), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'g[..., 1]'], {}), '(tmp[..., 0], g[..., 1])\n', (7204, 7228), False, 'import torch\n'), ((7678, 7705), 'torch.mul', 'torch.mul', (['tmp.imag', 'g.real'], {}), '(tmp.imag, g.real)\n', (7687, 7705), False, 'import torch\n'), ((7708, 7735), 'torch.mul', 'torch.mul', (['tmp.real', 'g.imag'], {}), '(tmp.real, g.imag)\n', (7717, 7735), False, 'import torch\n'), ((7798, 7831), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'g[..., 0]'], {}), '(tmp[..., 1], g[..., 0])\n', (7807, 7831), False, 'import torch\n'), ((7832, 7865), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'g[..., 1]'], {}), '(tmp[..., 0], g[..., 1])\n', (7841, 7865), False, 'import torch\n'), ((9416, 9443), 'torch.mul', 'torch.mul', (['tmp.imag', 'y.real'], {}), '(tmp.imag, y.real)\n', (9425, 9443), False, 'import torch\n'), ((9446, 9473), 'torch.mul', 'torch.mul', 
(['tmp.real', 'y.imag'], {}), '(tmp.real, y.imag)\n', (9455, 9473), False, 'import torch\n'), ((9536, 9569), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'y[..., 0]'], {}), '(tmp[..., 1], y[..., 0])\n', (9545, 9569), False, 'import torch\n'), ((9570, 9603), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'y[..., 1]'], {}), '(tmp[..., 0], y[..., 1])\n', (9579, 9603), False, 'import torch\n'), ((10113, 10140), 'torch.mul', 'torch.mul', (['tmp.imag', 'y.real'], {}), '(tmp.imag, y.real)\n', (10122, 10140), False, 'import torch\n'), ((10143, 10170), 'torch.mul', 'torch.mul', (['tmp.real', 'y.imag'], {}), '(tmp.real, y.imag)\n', (10152, 10170), False, 'import torch\n'), ((10233, 10266), 'torch.mul', 'torch.mul', (['tmp[..., 1]', 'y[..., 0]'], {}), '(tmp[..., 1], y[..., 0])\n', (10242, 10266), False, 'import torch\n'), ((10267, 10300), 'torch.mul', 'torch.mul', (['tmp[..., 0]', 'y[..., 1]'], {}), '(tmp[..., 0], y[..., 1])\n', (10276, 10300), False, 'import torch\n')] |
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC San Diego.
# Created by <NAME>, <NAME>
from final_env import FinalEnv, SolutionBase
import numpy as np
from sapien.core import Pose
from transforms3d.euler import euler2quat, quat2euler
from transforms3d.quaternions import quat2axangle, qmult, qinverse
class Solution(SolutionBase):
"""
This is a very bad baseline solution
It operates in the following ways:
1. roughly align the 2 spades
2. move the spades towards the center
3. lift 1 spade and move the other away
4. somehow transport the lifted spade to the bin
5. pour into the bin
6. go back to 1
"""
def init(self, env: FinalEnv):
self.phase = 0
self.drive = 0
meta = env.get_metadata()
self.box_ids = meta['box_ids']
r1, r2, c1, c2, c3, c4 = env.get_agents()
self.ps = [1000, 800, 600, 600, 200, 200, 100]
self.ds = [1000, 800, 600, 600, 200, 200, 100]
r1.configure_controllers(self.ps, self.ds)
r2.configure_controllers(self.ps, self.ds)
def act(self, env: FinalEnv, current_timestep: int):
r1, r2, c1, c2, c3, c4 = env.get_agents()
pf_left = f = r1.get_compute_functions()['passive_force'](True, True, False)
pf_right = f = r2.get_compute_functions()['passive_force'](True, True, False)
if self.phase == 0:
t1 = [2, 1, 0, -1.5, -1, 1, -2]
t2 = [-2, 1, 0, -1.5, 1, 1, -2]
r1.set_action(t1, [0] * 7, pf_left)
r2.set_action(t2, [0] * 7, pf_right)
if np.allclose(r1.get_observation()[0], t1, 0.05, 0.05) and np.allclose(
r2.get_observation()[0], t2, 0.05, 0.05):
self.phase = 1
self.counter = 0
self.selected_x = None
if self.phase == 1:
self.counter += 1
if (self.counter == 1):
selected = self.pick_box(c4)
self.selected_x = selected[0]
if self.selected_x is None:
return False
target_pose_left = Pose([self.selected_x, 0.5, 0.67], euler2quat(np.pi, -np.pi / 3, -np.pi / 2))
self.diff_drive(r1, 9, target_pose_left)
target_pose_right = Pose([self.selected_x, -0.5, 0.6], euler2quat(np.pi, -np.pi / 3, np.pi / 2))
self.diff_drive(r2, 9, target_pose_right)
if self.counter == 2000 / 5:
self.phase = 2
self.counter = 0
pose = r1.get_observation()[2][9]
p, q = pose.p, pose.q
p[1] = 0.07
self.pose_left = Pose(p, q)
pose = r2.get_observation()[2][9]
p, q = pose.p, pose.q
p[1] = -0.07
self.pose_right = Pose(p, q)
if self.phase == 2:
self.counter += 1
self.diff_drive(r1, 9, self.pose_left)
self.diff_drive(r2, 9, self.pose_right)
if self.counter == 2000 / 5:
self.phase = 3
pose = r2.get_observation()[2][9]
p, q = pose.p, pose.q
p[2] += 0.2
self.pose_right = Pose(p, q)
pose = r1.get_observation()[2][9]
p, q = pose.p, pose.q
p[1] = 0.5
q = euler2quat(np.pi, -np.pi / 2, -np.pi / 2)
self.pose_left = Pose(p, q)
self.counter = 0
if self.phase == 3:
self.counter += 1
self.diff_drive(r1, 9, self.pose_left)
self.diff_drive(r2, 9, self.pose_right)
if self.counter == 200 / 5:
self.phase = 4
self.counter = 0
if self.phase == 4:
self.counter += 1
if (self.counter < 3000 / 5):
pose = r2.get_observation()[2][9]
p, q = pose.p, pose.q
q = euler2quat(np.pi, -np.pi / 1.5, quat2euler(q)[2])
self.diff_drive2(r2, 9, Pose(p, q), [4, 5, 6], [0, 0, 0, -1, 0], [0, 1, 2, 3, 4])
elif (self.counter < 6000 / 5):
pose = r2.get_observation()[2][9]
p, q = pose.p, pose.q
q = euler2quat(np.pi, -np.pi / 1.5, quat2euler(q)[2])
self.diff_drive2(r2, 9, Pose(p, q), [4, 5, 6], [0, 0, 1, -1, 0], [0, 1, 2, 3, 4])
elif (self.counter < 9000 / 5):
p = [-1, 0, 1.5]
q = euler2quat(0, -np.pi / 1.5, 0)
self.diff_drive(r2, 9, Pose(p, q))
else:
self.phase = 0
# return False
def diff_drive(self, robot, index, target_pose):
"""
this diff drive is very hacky
it tries to transport the target pose to match an end pose
by computing the pose difference between current pose and target pose
then it estimates a cartesian velocity for the end effector to follow.
It uses differential IK to compute the required joint velocity, and set
the joint velocity as current step target velocity.
This technique makes the trajectory very unstable but it still works some times.
"""
pf = robot.get_compute_functions()['passive_force'](True, True, False)
max_v = 0.1
max_w = np.pi
qpos, qvel, poses = robot.get_observation()
current_pose: Pose = poses[index]
delta_p = target_pose.p - current_pose.p
delta_q = qmult(target_pose.q, qinverse(current_pose.q))
axis, theta = quat2axangle(delta_q)
if (theta > np.pi):
theta -= np.pi * 2
t1 = np.linalg.norm(delta_p) / max_v
t2 = theta / max_w
t = max(np.abs(t1), np.abs(t2), 0.001)
thres = 0.1
if t < thres:
k = (np.exp(thres) - 1) / thres
t = np.log(k * t + 1)
v = delta_p / t
w = theta / t * axis
target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](np.concatenate((v, w)), 9)
robot.set_action(qpos, target_qvel, pf)
def diff_drive2(self, robot, index, target_pose, js1, joint_target, js2):
"""
This is a hackier version of the diff_drive
It uses specified joints to achieve the target pose of the end effector
while asking some other specified joints to match a global pose
"""
pf = robot.get_compute_functions()['passive_force'](True, True, False)
max_v = 0.1
max_w = np.pi
qpos, qvel, poses = robot.get_observation()
current_pose: Pose = poses[index]
delta_p = target_pose.p - current_pose.p
delta_q = qmult(target_pose.q, qinverse(current_pose.q))
axis, theta = quat2axangle(delta_q)
if (theta > np.pi):
theta -= np.pi * 2
t1 = np.linalg.norm(delta_p) / max_v
t2 = theta / max_w
t = max(np.abs(t1), np.abs(t2), 0.001)
thres = 0.1
if t < thres:
k = (np.exp(thres) - 1) / thres
t = np.log(k * t + 1)
v = delta_p / t
w = theta / t * axis
target_qvel = robot.get_compute_functions()['cartesian_diff_ik'](np.concatenate((v, w)), 9)
for j, target in zip(js2, joint_target):
qpos[j] = target
robot.set_action(qpos, target_qvel, pf)
def get_global_position_from_camera(self, camera, depth, x, y):
"""
camera: an camera agent
depth: the depth obsrevation
x, y: the horizontal, vertical index for a pixel, you would access the images by image[y, x]
"""
cm = camera.get_metadata()
proj, model = cm['projection_matrix'], cm['model_matrix']
w, h = cm['width'], cm['height']
# get 0 to 1 coordinate for (x, y) coordinates
xf, yf = (x + 0.5) / w, 1 - (y + 0.5) / h
# get 0 to 1 depth value at (x,y)
zf = depth[int(y), int(x)]
# get the -1 to 1 (x,y,z) coordinates
ndc = np.array([xf, yf, zf, 1]) * 2 - 1
# transform from image space to view space
v = np.linalg.inv(proj) @ ndc
v /= v[3]
# transform from view space to world space
v = model @ v
return v
def pick_box(self, c):
color, depth, segmentation = c.get_observation()
np.random.shuffle(self.box_ids)
for i in self.box_ids:
m = np.where(segmentation == i)
if len(m[0]):
min_x = 10000
max_x = -1
min_y = 10000
max_y = -1
for y, x in zip(m[0], m[1]):
min_x = min(min_x, x)
max_x = max(max_x, x)
min_y = min(min_y, y)
max_y = max(max_y, y)
x, y = round((min_x + max_x) / 2), round((min_y + max_y) / 2)
return self.get_global_position_from_camera(c, depth, x, y)
return False
if __name__ == '__main__':
np.random.seed(0)
env = FinalEnv()
# env.run(Solution(), render=True, render_interval=5, debug=True)
env.run(Solution(), render=True, render_interval=5)
env.close()
| [
"transforms3d.euler.euler2quat",
"numpy.random.seed",
"numpy.abs",
"numpy.log",
"numpy.concatenate",
"final_env.FinalEnv",
"transforms3d.euler.quat2euler",
"sapien.core.Pose",
"numpy.where",
"numpy.linalg.norm",
"transforms3d.quaternions.qinverse",
"numpy.linalg.inv",
"numpy.array",
"numpy... | [((9204, 9221), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (9218, 9221), True, 'import numpy as np\n'), ((9232, 9242), 'final_env.FinalEnv', 'FinalEnv', ([], {}), '()\n', (9240, 9242), False, 'from final_env import FinalEnv, SolutionBase\n'), ((5776, 5797), 'transforms3d.quaternions.quat2axangle', 'quat2axangle', (['delta_q'], {}), '(delta_q)\n', (5788, 5797), False, 'from transforms3d.quaternions import quat2axangle, qmult, qinverse\n'), ((6957, 6978), 'transforms3d.quaternions.quat2axangle', 'quat2axangle', (['delta_q'], {}), '(delta_q)\n', (6969, 6978), False, 'from transforms3d.quaternions import quat2axangle, qmult, qinverse\n'), ((8535, 8566), 'numpy.random.shuffle', 'np.random.shuffle', (['self.box_ids'], {}), '(self.box_ids)\n', (8552, 8566), True, 'import numpy as np\n'), ((5727, 5751), 'transforms3d.quaternions.qinverse', 'qinverse', (['current_pose.q'], {}), '(current_pose.q)\n', (5735, 5751), False, 'from transforms3d.quaternions import quat2axangle, qmult, qinverse\n'), ((5871, 5894), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_p'], {}), '(delta_p)\n', (5885, 5894), True, 'import numpy as np\n'), ((5946, 5956), 'numpy.abs', 'np.abs', (['t1'], {}), '(t1)\n', (5952, 5956), True, 'import numpy as np\n'), ((5958, 5968), 'numpy.abs', 'np.abs', (['t2'], {}), '(t2)\n', (5964, 5968), True, 'import numpy as np\n'), ((6079, 6096), 'numpy.log', 'np.log', (['(k * t + 1)'], {}), '(k * t + 1)\n', (6085, 6096), True, 'import numpy as np\n'), ((6223, 6245), 'numpy.concatenate', 'np.concatenate', (['(v, w)'], {}), '((v, w))\n', (6237, 6245), True, 'import numpy as np\n'), ((6908, 6932), 'transforms3d.quaternions.qinverse', 'qinverse', (['current_pose.q'], {}), '(current_pose.q)\n', (6916, 6932), False, 'from transforms3d.quaternions import quat2axangle, qmult, qinverse\n'), ((7052, 7075), 'numpy.linalg.norm', 'np.linalg.norm', (['delta_p'], {}), '(delta_p)\n', (7066, 7075), True, 'import numpy as np\n'), ((7127, 7137), 'numpy.abs', 
'np.abs', (['t1'], {}), '(t1)\n', (7133, 7137), True, 'import numpy as np\n'), ((7139, 7149), 'numpy.abs', 'np.abs', (['t2'], {}), '(t2)\n', (7145, 7149), True, 'import numpy as np\n'), ((7260, 7277), 'numpy.log', 'np.log', (['(k * t + 1)'], {}), '(k * t + 1)\n', (7266, 7277), True, 'import numpy as np\n'), ((7404, 7426), 'numpy.concatenate', 'np.concatenate', (['(v, w)'], {}), '((v, w))\n', (7418, 7426), True, 'import numpy as np\n'), ((8305, 8324), 'numpy.linalg.inv', 'np.linalg.inv', (['proj'], {}), '(proj)\n', (8318, 8324), True, 'import numpy as np\n'), ((8614, 8641), 'numpy.where', 'np.where', (['(segmentation == i)'], {}), '(segmentation == i)\n', (8622, 8641), True, 'import numpy as np\n'), ((2331, 2372), 'transforms3d.euler.euler2quat', 'euler2quat', (['np.pi', '(-np.pi / 3)', '(-np.pi / 2)'], {}), '(np.pi, -np.pi / 3, -np.pi / 2)\n', (2341, 2372), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((2495, 2535), 'transforms3d.euler.euler2quat', 'euler2quat', (['np.pi', '(-np.pi / 3)', '(np.pi / 2)'], {}), '(np.pi, -np.pi / 3, np.pi / 2)\n', (2505, 2535), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((2847, 2857), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (2851, 2857), False, 'from sapien.core import Pose\n'), ((3010, 3020), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (3014, 3020), False, 'from sapien.core import Pose\n'), ((3406, 3416), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (3410, 3416), False, 'from sapien.core import Pose\n'), ((3553, 3594), 'transforms3d.euler.euler2quat', 'euler2quat', (['np.pi', '(-np.pi / 2)', '(-np.pi / 2)'], {}), '(np.pi, -np.pi / 2, -np.pi / 2)\n', (3563, 3594), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((3628, 3638), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (3632, 3638), False, 'from sapien.core import Pose\n'), ((8207, 8232), 'numpy.array', 'np.array', (['[xf, yf, zf, 1]'], {}), '([xf, yf, zf, 
1])\n', (8215, 8232), True, 'import numpy as np\n'), ((4238, 4248), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (4242, 4248), False, 'from sapien.core import Pose\n'), ((6036, 6049), 'numpy.exp', 'np.exp', (['thres'], {}), '(thres)\n', (6042, 6049), True, 'import numpy as np\n'), ((7217, 7230), 'numpy.exp', 'np.exp', (['thres'], {}), '(thres)\n', (7223, 7230), True, 'import numpy as np\n'), ((4180, 4193), 'transforms3d.euler.quat2euler', 'quat2euler', (['q'], {}), '(q)\n', (4190, 4193), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((4538, 4548), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (4542, 4548), False, 'from sapien.core import Pose\n'), ((4693, 4723), 'transforms3d.euler.euler2quat', 'euler2quat', (['(0)', '(-np.pi / 1.5)', '(0)'], {}), '(0, -np.pi / 1.5, 0)\n', (4703, 4723), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((4480, 4493), 'transforms3d.euler.quat2euler', 'quat2euler', (['q'], {}), '(q)\n', (4490, 4493), False, 'from transforms3d.euler import euler2quat, quat2euler\n'), ((4763, 4773), 'sapien.core.Pose', 'Pose', (['p', 'q'], {}), '(p, q)\n', (4767, 4773), False, 'from sapien.core import Pose\n')] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import onnxruntime
from PIL import Image, ImageDraw, ImageFont
import numpy as np
import time
import io
import json
import os
from datetime import datetime
# Imports for the REST API
from flask import Flask, request, jsonify, Response
session = None
tags = []
output_dir = 'images'
# Called when the deployed service starts
def init():
global session
global tags
global output_dir
model_path = 'yolov3/yolov3.onnx'
# Initialize an inference session with yoloV3 model
session = onnxruntime.InferenceSession(model_path)
if (session != None):
print('Session initialized')
else:
print('Session is not initialized')
tags_file = 'tags.txt'
with open(tags_file) as f:
for line in f:
line = line.strip()
tags.append(line)
if (os.path.exists(output_dir)):
print(output_dir + " already exits")
else:
os.mkdir(output_dir)
def letterbox_image(image, size):
'''Resize image with unchanged aspect ratio using padding'''
iw, ih = image.size
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', size, (128,128,128))
new_image.paste(image, ((w-nw)//2, (h-nh)//2))
return new_image
def preprocess(img):
model_image_size = (416, 416)
boxed_image = letterbox_image(img, tuple(reversed(model_image_size)))
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.transpose(image_data, [2, 0, 1])
image_data = np.expand_dims(image_data, 0)
return image_data
def postprocess(boxes, scores, indices, iw, ih):
detected_objects = []
for idx_ in indices:
idx_1 = (idx_[0], idx_[2])
y1, x1, y2, x2 = boxes[idx_1].tolist()
x2 = (x2 - x1) / iw
y2 = (y2 - y1) / ih
x1 = x1 / iw
y1 = y1 / ih
dobj = {
"type" : "entity",
"entity" : {
"tag" : {
"value" : tags[idx_[1].tolist()],
"confidence" : scores[tuple(idx_)].tolist()
},
"box" : {
"l" : x1,
"t" : y1,
"w" : x2,
"h" : y2
}
}
}
detected_objects.append(dobj)
return detected_objects
def processImage(img):
try:
# Preprocess input according to the functions specified above
img_data = preprocess(img)
img_size = np.array([img.size[1], img.size[0]], dtype=np.float32).reshape(1, 2)
inference_time_start = time.time()
boxes, scores, indices = session.run(None, {"input_1": img_data, "image_shape":img_size})
inference_time_end = time.time()
inference_duration = np.round(inference_time_end - inference_time_start, 2)
iw, ih = img.size
detected_objects = postprocess(boxes, scores, indices, iw, ih)
return inference_duration, detected_objects
except Exception as e:
print('EXCEPTION:', str(e))
return 'Error processing image', 500
def drawBboxes(image, detected_objects):
objects_identified = len(detected_objects)
iw, ih = image.size
draw = ImageDraw.Draw(image)
textfont = ImageFont.load_default()
for pos in range(objects_identified):
entity = detected_objects[pos]['entity']
box = entity["box"]
x1 = box["l"]
y1 = box["t"]
x2 = box["w"]
y2 = box["h"]
x1 = x1 * iw
y1 = y1 * ih
x2 = (x2 * iw) + x1
y2 = (y2 * ih) + y1
tag = entity['tag']
objClass = tag['value']
draw.rectangle((x1, y1, x2, y2), outline = 'blue', width = 2)
print('rectangle drawn')
draw.text((x1, y1), str(objClass), fill = "white", font = textfont)
return image
app = Flask(__name__)
# / routes to the default function which returns 'Hello World'
@app.route('/', methods=['GET'])
def defaultPage():
return Response(response='Hello from Yolov3 inferencing based on ONNX', status=200)
# /score routes to scoring function
# This function returns a JSON object with inference duration and detected objects
@app.route('/score', methods=['POST'])
def score():
try:
imageData = io.BytesIO(request.get_data())
# load the image
img = Image.open(imageData)
inference_duration, detected_objects = processImage(img)
print('Inference duration was ', str(inference_duration))
if len(detected_objects) > 0:
respBody = {
"inferences" : detected_objects
}
respBody = json.dumps(respBody)
return Response(respBody, status= 200, mimetype ='application/json')
else:
return Response(status= 204)
except Exception as e:
print('EXCEPTION:', str(e))
return Response(response='Error processing image', status= 500)
# /score-debug routes to score_debug
# This function scores the image and stores an annotated image for debugging purposes
@app.route('/score-debug', methods=['POST'])
def score_debug():
    """Run inference on the POSTed image, save an annotated copy under
    output_dir for debugging, and return the detections as JSON.

    Responds 200 with {"inferences": [...]} or 500 on any processing error.
    """
    try:
        imageData = io.BytesIO(request.get_data())
        # load the image
        img = Image.open(imageData)
        inference_duration, detected_objects = processImage(img)
        print('Inference duration was ', str(inference_duration))
        output_img = drawBboxes(img, detected_objects)
        # Timestamped file name for the annotated debug image.
        # NOTE(review): one-second resolution means two requests in the same
        # second overwrite each other -- confirm that is acceptable here.
        now = datetime.now()
        output_img_file = now.strftime("%d_%m_%Y_%H_%M_%S.jpeg")
        # os.path.join instead of manual "/" concatenation.
        output_img.save(os.path.join(output_dir, output_img_file))
        respBody = {
            "inferences": detected_objects
        }
        # Serialize explicitly, mirroring /score, instead of relying on
        # Flask's implicit dict-to-JSON conversion.
        return Response(json.dumps(respBody), status=200, mimetype='application/json')
    except Exception as e:
        print('EXCEPTION:', str(e))
        return Response(response='Error processing image', status=500)
# /annotate routes to annotation function
# This function returns an image with bounding boxes drawn around detected objects
@app.route('/annotate', methods=['POST'])
def annotate():
    """Return the POSTed image as a JPEG annotated with detection boxes.

    Responds 200 with image/jpeg content, or 500 on any processing error.
    """
    try:
        payload = io.BytesIO(request.get_data())
        # load the image
        image = Image.open(payload)
        inference_duration, detections = processImage(image)
        print('Inference duration was ', str(inference_duration))
        annotated = drawBboxes(image, detections)
        buffer = io.BytesIO()
        annotated.save(buffer, format='JPEG')
        return Response(response=buffer.getvalue(), status=200, mimetype="image/jpeg")
    except Exception as e:
        print('EXCEPTION:', str(e))
        return Response(response='Error processing image', status=500)
# Load and initialize the model.
# init() runs at import time (before the __main__ guard), so the model is
# ready whether the module is run directly or imported by a server.
init()
if __name__ == '__main__':
    # Run the server, listening on all interfaces.
    app.run(host='0.0.0.0', port=8888)
| [
"os.mkdir",
"PIL.Image.new",
"io.BytesIO",
"PIL.ImageFont.load_default",
"flask.Flask",
"os.path.exists",
"numpy.transpose",
"numpy.expand_dims",
"time.time",
"onnxruntime.InferenceSession",
"PIL.Image.open",
"datetime.datetime.now",
"flask.request.get_data",
"numpy.array",
"json.dumps",... | [((4119, 4134), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (4124, 4134), False, 'from flask import Flask, request, jsonify, Response\n'), ((584, 624), 'onnxruntime.InferenceSession', 'onnxruntime.InferenceSession', (['model_path'], {}), '(model_path)\n', (612, 624), False, 'import onnxruntime\n'), ((903, 929), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (917, 929), False, 'import os\n'), ((1301, 1340), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', '(128, 128, 128)'], {}), "('RGB', size, (128, 128, 128))\n", (1310, 1340), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((1559, 1597), 'numpy.array', 'np.array', (['boxed_image'], {'dtype': '"""float32"""'}), "(boxed_image, dtype='float32')\n", (1567, 1597), True, 'import numpy as np\n'), ((1638, 1673), 'numpy.transpose', 'np.transpose', (['image_data', '[2, 0, 1]'], {}), '(image_data, [2, 0, 1])\n', (1650, 1673), True, 'import numpy as np\n'), ((1691, 1720), 'numpy.expand_dims', 'np.expand_dims', (['image_data', '(0)'], {}), '(image_data, 0)\n', (1705, 1720), True, 'import numpy as np\n'), ((3446, 3467), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (3460, 3467), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3488, 3512), 'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (3510, 3512), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((4262, 4338), 'flask.Response', 'Response', ([], {'response': '"""Hello from Yolov3 inferencing based on ONNX"""', 'status': '(200)'}), "(response='Hello from Yolov3 inferencing based on ONNX', status=200)\n", (4270, 4338), False, 'from flask import Flask, request, jsonify, Response\n'), ((995, 1015), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (1003, 1015), False, 'import os\n'), ((2814, 2825), 'time.time', 'time.time', ([], {}), '()\n', (2823, 2825), False, 'import time\n'), ((2953, 2964), 'time.time', 'time.time', 
([], {}), '()\n', (2962, 2964), False, 'import time\n'), ((2994, 3048), 'numpy.round', 'np.round', (['(inference_time_end - inference_time_start)', '(2)'], {}), '(inference_time_end - inference_time_start, 2)\n', (3002, 3048), True, 'import numpy as np\n'), ((4612, 4633), 'PIL.Image.open', 'Image.open', (['imageData'], {}), '(imageData)\n', (4622, 4633), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5533, 5554), 'PIL.Image.open', 'Image.open', (['imageData'], {}), '(imageData)\n', (5543, 5554), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5817, 5831), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5829, 5831), False, 'from datetime import datetime\n'), ((6553, 6574), 'PIL.Image.open', 'Image.open', (['imageData'], {}), '(imageData)\n', (6563, 6574), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((6786, 6798), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (6796, 6798), False, 'import io\n'), ((6936, 7000), 'flask.Response', 'Response', ([], {'response': 'imgByteArr', 'status': '(200)', 'mimetype': '"""image/jpeg"""'}), "(response=imgByteArr, status=200, mimetype='image/jpeg')\n", (6944, 7000), False, 'from flask import Flask, request, jsonify, Response\n'), ((4553, 4571), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (4569, 4571), False, 'from flask import Flask, request, jsonify, Response\n'), ((4952, 4972), 'json.dumps', 'json.dumps', (['respBody'], {}), '(respBody)\n', (4962, 4972), False, 'import json\n'), ((4992, 5051), 'flask.Response', 'Response', (['respBody'], {'status': '(200)', 'mimetype': '"""application/json"""'}), "(respBody, status=200, mimetype='application/json')\n", (5000, 5051), False, 'from flask import Flask, request, jsonify, Response\n'), ((5087, 5107), 'flask.Response', 'Response', ([], {'status': '(204)'}), '(status=204)\n', (5095, 5107), False, 'from flask import Flask, request, jsonify, Response\n'), ((5188, 5243), 'flask.Response', 'Response', ([], {'response': '"""Error 
processing image"""', 'status': '(500)'}), "(response='Error processing image', status=500)\n", (5196, 5243), False, 'from flask import Flask, request, jsonify, Response\n'), ((5474, 5492), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (5490, 5492), False, 'from flask import Flask, request, jsonify, Response\n'), ((6212, 6267), 'flask.Response', 'Response', ([], {'response': '"""Error processing image"""', 'status': '(500)'}), "(response='Error processing image', status=500)\n", (6220, 6267), False, 'from flask import Flask, request, jsonify, Response\n'), ((6494, 6512), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (6510, 6512), False, 'from flask import Flask, request, jsonify, Response\n'), ((7085, 7140), 'flask.Response', 'Response', ([], {'response': '"""Error processing image"""', 'status': '(500)'}), "(response='Error processing image', status=500)\n", (7093, 7140), False, 'from flask import Flask, request, jsonify, Response\n'), ((2713, 2767), 'numpy.array', 'np.array', (['[img.size[1], img.size[0]]'], {'dtype': 'np.float32'}), '([img.size[1], img.size[0]], dtype=np.float32)\n', (2721, 2767), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import numpy as np
# Build a 4x6 matrix holding the integers 1..24, row by row.
matrix = np.arange(1, 25).reshape(4, 6)
mat1 = matrix[1:3, :]   # the middle two rows (rows 1 and 2)
mat2 = matrix[:, 2:4]   # the middle two columns (columns 2 and 3)
mat3 = matrix[1:, 3:]   # bottom-right 3x3 corner
print("The middle two rows of the matrix are:\n{}".format(mat1))
print("The middle two columns of the matrix are:\n{}".format(mat2))
print("The bottom-right, square, 3x3 matrix is:\n{}".format(mat3))
| [
"numpy.array"
] | [((51, 161), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, 18], [19, \n 20, 21, 22, 23, 24]]'], {}), '([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12], [13, 14, 15, 16, 17, \n 18], [19, 20, 21, 22, 23, 24]])\n', (59, 161), True, 'import numpy as np\n')] |
from typing import List, Tuple, Union
import numpy as np
from numpy.core.fromnumeric import resize
from .base import Transformer
from .crop import RandomCenterCrop, RandomCrop
from .resize import Resize
from .padding import ZeroPadding
class _ResizeCroPad(Transformer):
    """Randomly rescale an input, then restore it to a fixed final size.

    A scale factor is drawn uniformly from ``rand_range``. The input is
    resized by that factor; if it grew (factor >= 1) it is cropped back to
    ``final_size`` by ``self.crop_fn`` (supplied by concrete subclasses),
    otherwise it is zero-padded up to ``final_size``.
    """

    def __init__(
        self,
        final_size: Union[List[int], Tuple[int, int], int],
        # Fixed annotation: the bounds are floats (default (0.5, 1.5)),
        # not ints as previously declared.
        rand_range: Union[List[float], Tuple[float, float]] = (0.5, 1.5)
    ) -> None:
        """
        :param final_size: target output size; an int means a square output.
        :param rand_range: (low, high) bounds of the uniform scale factor.
        """
        super().__init__()
        assert rand_range[1] >= rand_range[0]
        self.range = rand_range
        if isinstance(final_size, int):
            self.final_size = [final_size, final_size]
        else:
            self.final_size = final_size
        self.resize = Resize()
        self.pad = ZeroPadding(final_size)

    def get_rand(self) -> float:
        """Draw a scale factor uniformly from ``self.range``."""
        return np.random.uniform(*self.range)

    def __call__(self, inp: np.ndarray) -> np.ndarray:
        """Resize ``inp`` by a random factor, then crop or pad to final size."""
        # NOTE(review): indexing shape[1]/shape[2] assumes a 3D,
        # channel-first (C, H, W) input -- confirm against Resize.resize_by.
        inp_shape = inp.shape
        resize_scale = self.get_rand()
        resize_to = [int(inp_shape[1] * resize_scale), int(inp_shape[2] * resize_scale)]
        inp = self.resize.resize_by(inp, resize_to)
        if resize_scale >= 1.0:
            inp = self.crop_fn(inp)  # provided by subclasses
        else:
            inp = self.pad(inp)
        return inp
return inp
class ResizeRandomCroPad(_ResizeCroPad):
    """_ResizeCroPad variant that crops enlarged inputs at a random position."""

    def __init__(
        self,
        final_size: Union[List[int], Tuple[int, int], int],
        # Fixed annotation: bounds are floats (default (0.5, 1.5)), not ints.
        rand_range: Union[List[float], Tuple[float, float]] = (0.5, 1.5)
    ) -> None:
        super().__init__(final_size, rand_range)
        self.crop_fn = RandomCrop(final_size)
class ResizeRandomCenterCroPad(_ResizeCroPad):
    """_ResizeCroPad variant that crops enlarged inputs around a random center."""

    def __init__(
        self,
        final_size: Union[List[int], Tuple[int, int], int],
        # Fixed annotation: bounds are floats (default (0.5, 1.5)), not ints.
        rand_range: Union[List[float], Tuple[float, float]] = (0.5, 1.5)
    ) -> None:
        super().__init__(final_size, rand_range)
        self.crop_fn = RandomCenterCrop(final_size)
| [
"numpy.random.uniform"
] | [((824, 854), 'numpy.random.uniform', 'np.random.uniform', (['*self.range'], {}), '(*self.range)\n', (841, 854), True, 'import numpy as np\n')] |
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : <NAME> #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import unittest
import higra as hg
import numpy as np
class TestAlgorithmTree(unittest.TestCase):
    """Unit tests for Higra tree algorithms: leaf-data reconstruction,
    horizontal cuts, supervertex labelisation, tree isomorphism, tree
    filtering, marker-based segmentation, and hierarchy sorting."""

    def test_reconstruct_leaf_data(self):
        """reconstruct_leaf_data with a condition mask: flagged leaves take
        the value of their closest non-flagged ancestor."""
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        # One (value-pair) row per tree node, leaves first.
        input = np.asarray(((1, 8),
                            (2, 7),
                            (3, 6),
                            (4, 5),
                            (5, 4),
                            (6, 3),
                            (7, 2),
                            (8, 1)), dtype=np.int32)
        condition = np.asarray((True, False, True, False, True, True, False, False), np.bool_)
        output = hg.reconstruct_leaf_data(tree, input, condition)
        ref = np.asarray(((8, 1),
                          (2, 7),
                          (7, 2),
                          (4, 5),
                          (7, 2)), dtype=np.int32)
        self.assertTrue(np.all(ref == output))

    def test_reconstruct_leaf_data_component_tree(self):
        """reconstruct_leaf_data on a max-tree built from vertex values."""
        g = hg.get_4_adjacency_implicit_graph((1, 6))
        vertex_values = np.asarray((1, 5, 4, 3, 3, 6), dtype=np.int32)
        tree, altitudes = hg.component_tree_max_tree(g, vertex_values)
        condition = np.asarray((True, False, True, False, True, True, False, True, False, True, False), np.bool_)
        output = hg.reconstruct_leaf_data(tree, altitudes, condition)
        ref = np.asarray((1, 4, 4, 1, 1, 6), dtype=np.int32)
        self.assertTrue(np.all(ref == output))

    def test_reconstruct_leaf_data_default(self):
        """Without a condition, reconstruction returns the leaf values unchanged."""
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        input = np.asarray(((1, 8),
                            (2, 7),
                            (3, 6),
                            (4, 5),
                            (5, 4),
                            (6, 3),
                            (7, 2),
                            (8, 1)), dtype=np.int32)
        output = hg.reconstruct_leaf_data(tree, input)
        ref = np.asarray(((1, 8),
                          (2, 7),
                          (3, 6),
                          (4, 5),
                          (5, 4)), dtype=np.int32)
        self.assertTrue(np.all(ref == output))

    def test_reconstruct_leaf_data_component_tree_default(self):
        """Default reconstruction on a max-tree using node areas as values."""
        g = hg.get_4_adjacency_implicit_graph((1, 6))
        vertex_values = np.asarray((1, 5, 4, 3, 3, 6), dtype=np.int32)
        tree, altitudes = hg.component_tree_max_tree(g, vertex_values)
        area = hg.attribute_area(tree)
        output = hg.reconstruct_leaf_data(tree, area)
        ref = np.asarray((6, 1, 2, 5, 5, 1), dtype=np.int32)
        self.assertTrue(np.all(ref == output))

    def test_labelisation_horizontal_cut(self):
        """Horizontal cuts at increasing thresholds merge regions progressively."""
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0.5, 0, 0.7), dtype=np.double)
        ref_t0 = np.asarray((1, 2, 3, 3, 3), dtype=np.int32)
        ref_t1 = np.asarray((1, 1, 2, 2, 2), dtype=np.int32)
        ref_t2 = np.asarray((1, 1, 1, 1, 1), dtype=np.int32)
        output_t0 = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 0)
        output_t1 = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 0.5)
        output_t2 = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 0.7)
        # Labels are compared up to a bijective relabelling.
        self.assertTrue(hg.is_in_bijection(ref_t0, output_t0))
        self.assertTrue(hg.is_in_bijection(ref_t1, output_t1))
        self.assertTrue(hg.is_in_bijection(ref_t2, output_t2))

    def test_labelisation_horizontal_cut_num_regions(self):
        """Horizontal cuts selected by a requested number of regions, with
        'at_least' (default) and 'at_most' modes."""
        g = hg.get_4_adjacency_graph((1, 11))
        tree = hg.Tree((11, 11, 11, 12, 12, 16, 13, 13, 13, 14, 14, 17, 16, 15, 15, 18, 17, 18, 18))
        hg.CptHierarchy.link(tree, g)
        altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 3, 1, 2, 3))
        ref_labels = (
            np.asarray((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)),
            np.asarray((1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3)),
            np.asarray((0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3)),
            np.asarray((0, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8))
        )
        # exact region counts achievable on this hierarchy
        k_cuts = (1, 3, 4, 9)
        for i in range(4):
            labels = hg.labelisation_horizontal_cut_from_num_regions(tree, altitudes, k_cuts[i])
            self.assertTrue(hg.is_in_bijection(labels, ref_labels[i]))
        # cuts with at least the given number of regions
        k_cuts = (1, 2, 4, 5)
        for i in range(4):
            labels = hg.labelisation_horizontal_cut_from_num_regions(tree, altitudes, k_cuts[i])
            self.assertTrue(hg.is_in_bijection(labels, ref_labels[i]))
        # cuts with at most the given number of regions
        k_cuts = (2, 3, 8, 20)
        for i in range(4):
            labels = hg.labelisation_horizontal_cut_from_num_regions(tree, altitudes, k_cuts[i], "at_most")
            self.assertTrue(hg.is_in_bijection(labels, ref_labels[i]))

    def test_labelisation_hierarchy_supervertices(self):
        """Supervertex labelisation produces contiguous labels starting at 0."""
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0.5, 0, 0.7), dtype=np.double)
        ref = np.asarray((0, 1, 2, 2, 2), dtype=np.int32)
        output = hg.labelisation_hierarchy_supervertices(tree, altitudes)
        self.assertTrue(hg.is_in_bijection(ref, output))
        self.assertTrue(np.amax(output) == 2)
        self.assertTrue(np.amin(output) == 0)

    def test_tree_isomorphism(self):
        """t1, t2, t3 are pairwise isomorphic; t4 is isomorphic to none of them."""
        t1 = hg.Tree(np.asarray((5, 5, 6, 6, 7, 8, 7, 8, 8)))
        t2 = hg.Tree(np.asarray((6, 6, 5, 5, 7, 7, 8, 8, 8)))
        t3 = hg.Tree(np.asarray((7, 7, 5, 5, 6, 6, 8, 8, 8)))
        self.assertTrue(hg.test_tree_isomorphism(t1, t2))
        self.assertTrue(hg.test_tree_isomorphism(t2, t1))
        self.assertTrue(hg.test_tree_isomorphism(t1, t3))
        self.assertTrue(hg.test_tree_isomorphism(t3, t1))
        self.assertTrue(hg.test_tree_isomorphism(t2, t3))
        self.assertTrue(hg.test_tree_isomorphism(t3, t2))
        t4 = hg.Tree(np.asarray((5, 5, 7, 6, 6, 8, 7, 8, 8)))
        self.assertTrue(not hg.test_tree_isomorphism(t1, t4))
        self.assertTrue(not hg.test_tree_isomorphism(t2, t4))
        self.assertTrue(not hg.test_tree_isomorphism(t3, t4))
        self.assertTrue(not hg.test_tree_isomorphism(t4, t1))
        self.assertTrue(not hg.test_tree_isomorphism(t4, t2))
        self.assertTrue(not hg.test_tree_isomorphism(t4, t3))

    def test_filter_non_relevant_node_from_tree(self):
        """Filtering with a custom relevance functor based on child areas."""
        g = hg.get_4_adjacency_graph((1, 8))
        edge_weights = np.asarray((0, 2, 0, 0, 1, 0, 0))
        tree, altitudes = hg.bpt_canonical(g, edge_weights)
        def functor(tree, altitudes):
            # A node is non-relevant if its smallest child has area < 3.
            area = hg.attribute_area(tree)
            area_min_children = hg.accumulate_parallel(tree, area, hg.Accumulators.min)
            return area_min_children < 3
        res_tree, res_altitudes = hg.filter_non_relevant_node_from_tree(tree, altitudes, functor)
        sm = hg.saliency(res_tree, res_altitudes)
        sm_ref = np.asarray((0, 0, 0, 0, 1, 0, 0))
        self.assertTrue(np.all(sm == sm_ref))

    def test_filter_small_node_from_tree(self):
        """Removing nodes smaller than a minimum area from a hierarchy."""
        g = hg.get_4_adjacency_graph((1, 8))
        edge_weights = np.asarray((0, 2, 0, 0, 1, 0, 0))
        tree, altitudes = hg.quasi_flat_zone_hierarchy(g, edge_weights)
        res_tree, res_altitudes = hg.filter_small_nodes_from_tree(tree, altitudes, 3)
        sm = hg.saliency(res_tree, res_altitudes)
        sm_ref = np.asarray((0, 0, 0, 0, 1, 0, 0))
        self.assertTrue(np.all(sm == sm_ref))

    def test_filter_small_node_from_tree_on_rag(self):
        """Small-node filtering on a region adjacency graph; saliency is
        checked both on the RAG edges and mapped back to the base graph."""
        g = hg.get_4_adjacency_graph((2, 8))
        labels = np.asarray(((0, 1, 2, 3, 4, 5, 6, 7),
                             (0, 1, 2, 3, 4, 5, 6, 7)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, labels)
        edge_weights = np.asarray((0, 2, 0, 0, 1, 0, 0))
        tree, altitudes = hg.quasi_flat_zone_hierarchy(rag, edge_weights)
        res_tree, res_altitudes = hg.filter_small_nodes_from_tree(tree, altitudes, 5)
        sm = hg.saliency(res_tree, res_altitudes, handle_rag=False)
        sm_ref = np.asarray((0, 0, 0, 0, 1, 0, 0))
        self.assertTrue(np.all(sm == sm_ref))
        sm = hg.saliency(res_tree, res_altitudes, handle_rag=True)
        sm_ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))
        self.assertTrue(np.all(sm == sm_ref))

    def test_filter_weak_frontier_nodes_from_tree(self):
        """Removing nodes whose frontier edge weights are below a threshold."""
        g = hg.get_4_adjacency_graph((1, 8))
        edge_weights = np.asarray((0, 2, 0, 0, 1, 0, 0))
        tree, altitudes = hg.bpt_canonical(g, edge_weights)
        res_tree, res_altitudes = hg.filter_weak_frontier_nodes_from_tree(tree, altitudes, edge_weights, 2)
        sm = hg.saliency(res_tree, res_altitudes)
        sm_ref = np.asarray((0, 2, 0, 0, 0, 0, 0))
        self.assertTrue(np.all(sm == sm_ref))

    def test_binary_labelisation_from_markers(self):
        """Object/background markers propagate to a binary leaf labelisation."""
        tree = hg.Tree(np.asarray((9, 9, 9, 10, 10, 12, 13, 11, 11, 14, 12, 15, 13, 14, 15, 15)))
        object_marker = np.asarray((0, 1, 0, 1, 0, 0, 0, 0, 0), dtype=np.int8)
        background_marker = np.asarray((1, 0, 0, 0, 0, 0, 1, 0, 0), dtype=np.int8)
        labelisation = hg.binary_labelisation_from_markers(tree, object_marker, background_marker);
        ref_labelisation = np.asarray((0, 1, 0, 1, 1, 1, 0, 0, 0), dtype=np.int8)
        self.assertTrue(np.all(labelisation == ref_labelisation))

    def test_sort_hierarchy_with_altitudes(self):
        """Sorting reorders nodes by altitude; non-increasing altitudes raise."""
        tree = hg.Tree(np.asarray((8, 8, 9, 9, 10, 10, 11, 13, 12, 12, 11, 13, 14, 14, 14)))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 2, 4, 6, 5, 7))
        ntree, naltitudes, node_map = hg.sort_hierarchy_with_altitudes(tree, altitudes)
        ref_par = np.asarray((10, 10, 8, 8, 9, 9, 11, 12, 13, 11, 13, 12, 14, 14, 14))
        self.assertTrue(np.all(ref_par == ntree.parents()))
        # After sorting, internal-node altitudes are non-decreasing.
        ref_altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7))
        self.assertTrue(np.all(ref_altitudes == naltitudes))
        # A hierarchy with non-increasing altitudes must be rejected.
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 7, 7, 7, 7)))
        altitudes = np.asarray((0, 0, 1, 0, 0, 2, 1, 1))
        with self.assertRaises(ValueError):
            hg.sort_hierarchy_with_altitudes(tree, altitudes)

    def test_test_altitudes_increasingness(self):
        """test_altitudes_increasingness accepts increasing altitude maps and
        rejects ones where a child exceeds its parent."""
        tree = hg.Tree(np.asarray((5, 5, 6, 6, 7, 7, 7, 7)))
        altitudes = np.asarray((0, 0, 0, 0, 0, 2, 1, 3))
        self.assertTrue(hg.test_altitudes_increasingness(tree, altitudes))
        altitudes = np.asarray((0, 0, 0, 0, 0, 1, 2, 2))
        self.assertTrue(hg.test_altitudes_increasingness(tree, altitudes))
        altitudes = np.asarray((2, 0, 0, 1, 2, 2, 1, 3))
        self.assertTrue(hg.test_altitudes_increasingness(tree, altitudes))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0))
        self.assertTrue(hg.test_altitudes_increasingness(tree, altitudes))
        altitudes = np.asarray((0, 0, 2, 0, 0, 2, 1, 3))
        self.assertFalse(hg.test_altitudes_increasingness(tree, altitudes))
        altitudes = np.asarray((0, 0, 1, 0, 0, 2, 1, 1))
        self.assertFalse(hg.test_altitudes_increasingness(tree, altitudes))
if __name__ == '__main__':
    # Discover and run all tests in this module.
    unittest.main()
| [
"higra.labelisation_horizontal_cut_from_num_regions",
"numpy.amin",
"higra.accumulate_parallel",
"higra.saliency",
"higra.filter_non_relevant_node_from_tree",
"higra.get_4_adjacency_graph",
"higra.quasi_flat_zone_hierarchy",
"unittest.main",
"higra.test_altitudes_increasingness",
"higra.component_... | [((11872, 11887), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11885, 11887), False, 'import unittest\n'), ((906, 1002), 'numpy.asarray', 'np.asarray', (['((1, 8), (2, 7), (3, 6), (4, 5), (5, 4), (6, 3), (7, 2), (8, 1))'], {'dtype': 'np.int32'}), '(((1, 8), (2, 7), (3, 6), (4, 5), (5, 4), (6, 3), (7, 2), (8, 1)),\n dtype=np.int32)\n', (916, 1002), True, 'import numpy as np\n'), ((1216, 1290), 'numpy.asarray', 'np.asarray', (['(True, False, True, False, True, True, False, False)', 'np.bool_'], {}), '((True, False, True, False, True, True, False, False), np.bool_)\n', (1226, 1290), True, 'import numpy as np\n'), ((1309, 1357), 'higra.reconstruct_leaf_data', 'hg.reconstruct_leaf_data', (['tree', 'input', 'condition'], {}), '(tree, input, condition)\n', (1333, 1357), True, 'import higra as hg\n'), ((1372, 1440), 'numpy.asarray', 'np.asarray', (['((8, 1), (2, 7), (7, 2), (4, 5), (7, 2))'], {'dtype': 'np.int32'}), '(((8, 1), (2, 7), (7, 2), (4, 5), (7, 2)), dtype=np.int32)\n', (1382, 1440), True, 'import numpy as np\n'), ((1663, 1704), 'higra.get_4_adjacency_implicit_graph', 'hg.get_4_adjacency_implicit_graph', (['(1, 6)'], {}), '((1, 6))\n', (1696, 1704), True, 'import higra as hg\n'), ((1729, 1775), 'numpy.asarray', 'np.asarray', (['(1, 5, 4, 3, 3, 6)'], {'dtype': 'np.int32'}), '((1, 5, 4, 3, 3, 6), dtype=np.int32)\n', (1739, 1775), True, 'import numpy as np\n'), ((1802, 1846), 'higra.component_tree_max_tree', 'hg.component_tree_max_tree', (['g', 'vertex_values'], {}), '(g, vertex_values)\n', (1828, 1846), True, 'import higra as hg\n'), ((1868, 1965), 'numpy.asarray', 'np.asarray', (['(True, False, True, False, True, True, False, True, False, True, False)', 'np.bool_'], {}), '((True, False, True, False, True, True, False, True, False, True,\n False), np.bool_)\n', (1878, 1965), True, 'import numpy as np\n'), ((1980, 2032), 'higra.reconstruct_leaf_data', 'hg.reconstruct_leaf_data', (['tree', 'altitudes', 'condition'], {}), '(tree, 
altitudes, condition)\n', (2004, 2032), True, 'import higra as hg\n'), ((2047, 2093), 'numpy.asarray', 'np.asarray', (['(1, 4, 4, 1, 1, 6)'], {'dtype': 'np.int32'}), '((1, 4, 4, 1, 1, 6), dtype=np.int32)\n', (2057, 2093), True, 'import numpy as np\n'), ((2271, 2367), 'numpy.asarray', 'np.asarray', (['((1, 8), (2, 7), (3, 6), (4, 5), (5, 4), (6, 3), (7, 2), (8, 1))'], {'dtype': 'np.int32'}), '(((1, 8), (2, 7), (3, 6), (4, 5), (5, 4), (6, 3), (7, 2), (8, 1)),\n dtype=np.int32)\n', (2281, 2367), True, 'import numpy as np\n'), ((2578, 2615), 'higra.reconstruct_leaf_data', 'hg.reconstruct_leaf_data', (['tree', 'input'], {}), '(tree, input)\n', (2602, 2615), True, 'import higra as hg\n'), ((2630, 2698), 'numpy.asarray', 'np.asarray', (['((1, 8), (2, 7), (3, 6), (4, 5), (5, 4))'], {'dtype': 'np.int32'}), '(((1, 8), (2, 7), (3, 6), (4, 5), (5, 4)), dtype=np.int32)\n', (2640, 2698), True, 'import numpy as np\n'), ((2929, 2970), 'higra.get_4_adjacency_implicit_graph', 'hg.get_4_adjacency_implicit_graph', (['(1, 6)'], {}), '((1, 6))\n', (2962, 2970), True, 'import higra as hg\n'), ((2995, 3041), 'numpy.asarray', 'np.asarray', (['(1, 5, 4, 3, 3, 6)'], {'dtype': 'np.int32'}), '((1, 5, 4, 3, 3, 6), dtype=np.int32)\n', (3005, 3041), True, 'import numpy as np\n'), ((3068, 3112), 'higra.component_tree_max_tree', 'hg.component_tree_max_tree', (['g', 'vertex_values'], {}), '(g, vertex_values)\n', (3094, 3112), True, 'import higra as hg\n'), ((3129, 3152), 'higra.attribute_area', 'hg.attribute_area', (['tree'], {}), '(tree)\n', (3146, 3152), True, 'import higra as hg\n'), ((3170, 3206), 'higra.reconstruct_leaf_data', 'hg.reconstruct_leaf_data', (['tree', 'area'], {}), '(tree, area)\n', (3194, 3206), True, 'import higra as hg\n'), ((3221, 3267), 'numpy.asarray', 'np.asarray', (['(6, 1, 2, 5, 5, 1)'], {'dtype': 'np.int32'}), '((6, 1, 2, 5, 5, 1), dtype=np.int32)\n', (3231, 3267), True, 'import numpy as np\n'), ((3447, 3504), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0.5, 0, 
0.7)'], {'dtype': 'np.double'}), '((0, 0, 0, 0, 0, 0.5, 0, 0.7), dtype=np.double)\n', (3457, 3504), True, 'import numpy as np\n'), ((3523, 3566), 'numpy.asarray', 'np.asarray', (['(1, 2, 3, 3, 3)'], {'dtype': 'np.int32'}), '((1, 2, 3, 3, 3), dtype=np.int32)\n', (3533, 3566), True, 'import numpy as np\n'), ((3584, 3627), 'numpy.asarray', 'np.asarray', (['(1, 1, 2, 2, 2)'], {'dtype': 'np.int32'}), '((1, 1, 2, 2, 2), dtype=np.int32)\n', (3594, 3627), True, 'import numpy as np\n'), ((3645, 3688), 'numpy.asarray', 'np.asarray', (['(1, 1, 1, 1, 1)'], {'dtype': 'np.int32'}), '((1, 1, 1, 1, 1), dtype=np.int32)\n', (3655, 3688), True, 'import numpy as np\n'), ((3710, 3775), 'higra.labelisation_horizontal_cut_from_threshold', 'hg.labelisation_horizontal_cut_from_threshold', (['tree', 'altitudes', '(0)'], {}), '(tree, altitudes, 0)\n', (3755, 3775), True, 'import higra as hg\n'), ((3796, 3863), 'higra.labelisation_horizontal_cut_from_threshold', 'hg.labelisation_horizontal_cut_from_threshold', (['tree', 'altitudes', '(0.5)'], {}), '(tree, altitudes, 0.5)\n', (3841, 3863), True, 'import higra as hg\n'), ((3884, 3951), 'higra.labelisation_horizontal_cut_from_threshold', 'hg.labelisation_horizontal_cut_from_threshold', (['tree', 'altitudes', '(0.7)'], {}), '(tree, altitudes, 0.7)\n', (3929, 3951), True, 'import higra as hg\n'), ((4215, 4248), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(1, 11)'], {}), '((1, 11))\n', (4239, 4248), True, 'import higra as hg\n'), ((4264, 4353), 'higra.Tree', 'hg.Tree', (['(11, 11, 11, 12, 12, 16, 13, 13, 13, 14, 14, 17, 16, 15, 15, 18, 17, 18, 18)'], {}), '((11, 11, 11, 12, 12, 16, 13, 13, 13, 14, 14, 17, 16, 15, 15, 18, 17,\n 18, 18))\n', (4271, 4353), True, 'import higra as hg\n'), ((4358, 4387), 'higra.CptHierarchy.link', 'hg.CptHierarchy.link', (['tree', 'g'], {}), '(tree, g)\n', (4378, 4387), True, 'import higra as hg\n'), ((4408, 4477), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 3, 1, 2, 
3)'], {}), '((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 3, 1, 2, 3))\n', (4418, 4477), True, 'import numpy as np\n'), ((5690, 5747), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0.5, 0, 0.7)'], {'dtype': 'np.double'}), '((0, 0, 0, 0, 0, 0.5, 0, 0.7), dtype=np.double)\n', (5700, 5747), True, 'import numpy as np\n'), ((5763, 5806), 'numpy.asarray', 'np.asarray', (['(0, 1, 2, 2, 2)'], {'dtype': 'np.int32'}), '((0, 1, 2, 2, 2), dtype=np.int32)\n', (5773, 5806), True, 'import numpy as np\n'), ((5825, 5881), 'higra.labelisation_hierarchy_supervertices', 'hg.labelisation_hierarchy_supervertices', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (5864, 5881), True, 'import higra as hg\n'), ((7109, 7141), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(1, 8)'], {}), '((1, 8))\n', (7133, 7141), True, 'import higra as hg\n'), ((7165, 7198), 'numpy.asarray', 'np.asarray', (['(0, 2, 0, 0, 1, 0, 0)'], {}), '((0, 2, 0, 0, 1, 0, 0))\n', (7175, 7198), True, 'import numpy as np\n'), ((7225, 7258), 'higra.bpt_canonical', 'hg.bpt_canonical', (['g', 'edge_weights'], {}), '(g, edge_weights)\n', (7241, 7258), True, 'import higra as hg\n'), ((7505, 7568), 'higra.filter_non_relevant_node_from_tree', 'hg.filter_non_relevant_node_from_tree', (['tree', 'altitudes', 'functor'], {}), '(tree, altitudes, functor)\n', (7542, 7568), True, 'import higra as hg\n'), ((7583, 7619), 'higra.saliency', 'hg.saliency', (['res_tree', 'res_altitudes'], {}), '(res_tree, res_altitudes)\n', (7594, 7619), True, 'import higra as hg\n'), ((7637, 7670), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 1, 0, 0)'], {}), '((0, 0, 0, 0, 1, 0, 0))\n', (7647, 7670), True, 'import numpy as np\n'), ((7778, 7810), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(1, 8)'], {}), '((1, 8))\n', (7802, 7810), True, 'import higra as hg\n'), ((7834, 7867), 'numpy.asarray', 'np.asarray', (['(0, 2, 0, 0, 1, 0, 0)'], {}), '((0, 2, 0, 0, 1, 0, 0))\n', (7844, 7867), True, 'import numpy as np\n'), 
((7894, 7939), 'higra.quasi_flat_zone_hierarchy', 'hg.quasi_flat_zone_hierarchy', (['g', 'edge_weights'], {}), '(g, edge_weights)\n', (7922, 7939), True, 'import higra as hg\n'), ((7975, 8026), 'higra.filter_small_nodes_from_tree', 'hg.filter_small_nodes_from_tree', (['tree', 'altitudes', '(3)'], {}), '(tree, altitudes, 3)\n', (8006, 8026), True, 'import higra as hg\n'), ((8041, 8077), 'higra.saliency', 'hg.saliency', (['res_tree', 'res_altitudes'], {}), '(res_tree, res_altitudes)\n', (8052, 8077), True, 'import higra as hg\n'), ((8095, 8128), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 1, 0, 0)'], {}), '((0, 0, 0, 0, 1, 0, 0))\n', (8105, 8128), True, 'import numpy as np\n'), ((8243, 8275), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(2, 8)'], {}), '((2, 8))\n', (8267, 8275), True, 'import higra as hg\n'), ((8293, 8357), 'numpy.asarray', 'np.asarray', (['((0, 1, 2, 3, 4, 5, 6, 7), (0, 1, 2, 3, 4, 5, 6, 7))'], {}), '(((0, 1, 2, 3, 4, 5, 6, 7), (0, 1, 2, 3, 4, 5, 6, 7)))\n', (8303, 8357), True, 'import numpy as np\n'), ((8402, 8461), 'higra.make_region_adjacency_graph_from_labelisation', 'hg.make_region_adjacency_graph_from_labelisation', (['g', 'labels'], {}), '(g, labels)\n', (8450, 8461), True, 'import higra as hg\n'), ((8485, 8518), 'numpy.asarray', 'np.asarray', (['(0, 2, 0, 0, 1, 0, 0)'], {}), '((0, 2, 0, 0, 1, 0, 0))\n', (8495, 8518), True, 'import numpy as np\n'), ((8546, 8593), 'higra.quasi_flat_zone_hierarchy', 'hg.quasi_flat_zone_hierarchy', (['rag', 'edge_weights'], {}), '(rag, edge_weights)\n', (8574, 8593), True, 'import higra as hg\n'), ((8629, 8680), 'higra.filter_small_nodes_from_tree', 'hg.filter_small_nodes_from_tree', (['tree', 'altitudes', '(5)'], {}), '(tree, altitudes, 5)\n', (8660, 8680), True, 'import higra as hg\n'), ((8695, 8749), 'higra.saliency', 'hg.saliency', (['res_tree', 'res_altitudes'], {'handle_rag': '(False)'}), '(res_tree, res_altitudes, handle_rag=False)\n', (8706, 8749), True, 'import higra as hg\n'), 
((8767, 8800), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 1, 0, 0)'], {}), '((0, 0, 0, 0, 1, 0, 0))\n', (8777, 8800), True, 'import numpy as np\n'), ((8861, 8914), 'higra.saliency', 'hg.saliency', (['res_tree', 'res_altitudes'], {'handle_rag': '(True)'}), '(res_tree, res_altitudes, handle_rag=True)\n', (8872, 8914), True, 'import higra as hg\n'), ((8932, 9010), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0)'], {}), '((0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0))\n', (8942, 9010), True, 'import numpy as np\n'), ((9127, 9159), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(1, 8)'], {}), '((1, 8))\n', (9151, 9159), True, 'import higra as hg\n'), ((9183, 9216), 'numpy.asarray', 'np.asarray', (['(0, 2, 0, 0, 1, 0, 0)'], {}), '((0, 2, 0, 0, 1, 0, 0))\n', (9193, 9216), True, 'import numpy as np\n'), ((9243, 9276), 'higra.bpt_canonical', 'hg.bpt_canonical', (['g', 'edge_weights'], {}), '(g, edge_weights)\n', (9259, 9276), True, 'import higra as hg\n'), ((9312, 9385), 'higra.filter_weak_frontier_nodes_from_tree', 'hg.filter_weak_frontier_nodes_from_tree', (['tree', 'altitudes', 'edge_weights', '(2)'], {}), '(tree, altitudes, edge_weights, 2)\n', (9351, 9385), True, 'import higra as hg\n'), ((9400, 9436), 'higra.saliency', 'hg.saliency', (['res_tree', 'res_altitudes'], {}), '(res_tree, res_altitudes)\n', (9411, 9436), True, 'import higra as hg\n'), ((9454, 9487), 'numpy.asarray', 'np.asarray', (['(0, 2, 0, 0, 0, 0, 0)'], {}), '((0, 2, 0, 0, 0, 0, 0))\n', (9464, 9487), True, 'import numpy as np\n'), ((9710, 9764), 'numpy.asarray', 'np.asarray', (['(0, 1, 0, 1, 0, 0, 0, 0, 0)'], {'dtype': 'np.int8'}), '((0, 1, 0, 1, 0, 0, 0, 0, 0), dtype=np.int8)\n', (9720, 9764), True, 'import numpy as np\n'), ((9793, 9847), 'numpy.asarray', 'np.asarray', (['(1, 0, 0, 0, 0, 0, 1, 0, 0)'], {'dtype': 'np.int8'}), '((1, 0, 0, 0, 0, 0, 1, 0, 0), dtype=np.int8)\n', (9803, 9847), True, 'import numpy as 
np\n'), ((9872, 9947), 'higra.binary_labelisation_from_markers', 'hg.binary_labelisation_from_markers', (['tree', 'object_marker', 'background_marker'], {}), '(tree, object_marker, background_marker)\n', (9907, 9947), True, 'import higra as hg\n'), ((9977, 10031), 'numpy.asarray', 'np.asarray', (['(0, 1, 0, 1, 1, 1, 0, 0, 0)'], {'dtype': 'np.int8'}), '((0, 1, 0, 1, 1, 1, 0, 0, 0), dtype=np.int8)\n', (9987, 10031), True, 'import numpy as np\n'), ((10263, 10320), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 2, 4, 6, 5, 7)'], {}), '((0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 2, 4, 6, 5, 7))\n', (10273, 10320), True, 'import numpy as np\n'), ((10360, 10409), 'higra.sort_hierarchy_with_altitudes', 'hg.sort_hierarchy_with_altitudes', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (10392, 10409), True, 'import higra as hg\n'), ((10429, 10497), 'numpy.asarray', 'np.asarray', (['(10, 10, 8, 8, 9, 9, 11, 12, 13, 11, 13, 12, 14, 14, 14)'], {}), '((10, 10, 8, 8, 9, 9, 11, 12, 13, 11, 13, 12, 14, 14, 14))\n', (10439, 10497), True, 'import numpy as np\n'), ((10583, 10640), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7)'], {}), '((0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7))\n', (10593, 10640), True, 'import numpy as np\n'), ((10784, 10820), 'numpy.asarray', 'np.asarray', (['(0, 0, 1, 0, 0, 2, 1, 1)'], {}), '((0, 0, 1, 0, 0, 2, 1, 1))\n', (10794, 10820), True, 'import numpy as np\n'), ((11060, 11096), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 2, 1, 3)'], {}), '((0, 0, 0, 0, 0, 2, 1, 3))\n', (11070, 11096), True, 'import numpy as np\n'), ((11193, 11229), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 1, 2, 2)'], {}), '((0, 0, 0, 0, 0, 1, 2, 2))\n', (11203, 11229), True, 'import numpy as np\n'), ((11326, 11362), 'numpy.asarray', 'np.asarray', (['(2, 0, 0, 1, 2, 2, 1, 3)'], {}), '((2, 0, 0, 1, 2, 2, 1, 3))\n', (11336, 11362), True, 'import numpy as np\n'), ((11459, 11495), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 0, 0, 0, 
0, 0)'], {}), '((0, 0, 0, 0, 0, 0, 0, 0))\n', (11469, 11495), True, 'import numpy as np\n'), ((11592, 11628), 'numpy.asarray', 'np.asarray', (['(0, 0, 2, 0, 0, 2, 1, 3)'], {}), '((0, 0, 2, 0, 0, 2, 1, 3))\n', (11602, 11628), True, 'import numpy as np\n'), ((11726, 11762), 'numpy.asarray', 'np.asarray', (['(0, 0, 1, 0, 0, 2, 1, 1)'], {}), '((0, 0, 1, 0, 0, 2, 1, 1))\n', (11736, 11762), True, 'import numpy as np\n'), ((851, 887), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (861, 887), True, 'import numpy as np\n'), ((1570, 1591), 'numpy.all', 'np.all', (['(ref == output)'], {}), '(ref == output)\n', (1576, 1591), True, 'import numpy as np\n'), ((2119, 2140), 'numpy.all', 'np.all', (['(ref == output)'], {}), '(ref == output)\n', (2125, 2140), True, 'import numpy as np\n'), ((2216, 2252), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (2226, 2252), True, 'import numpy as np\n'), ((2828, 2849), 'numpy.all', 'np.all', (['(ref == output)'], {}), '(ref == output)\n', (2834, 2849), True, 'import numpy as np\n'), ((3293, 3314), 'numpy.all', 'np.all', (['(ref == output)'], {}), '(ref == output)\n', (3299, 3314), True, 'import numpy as np\n'), ((3388, 3424), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (3398, 3424), True, 'import numpy as np\n'), ((3977, 4014), 'higra.is_in_bijection', 'hg.is_in_bijection', (['ref_t0', 'output_t0'], {}), '(ref_t0, output_t0)\n', (3995, 4014), True, 'import higra as hg\n'), ((4040, 4077), 'higra.is_in_bijection', 'hg.is_in_bijection', (['ref_t1', 'output_t1'], {}), '(ref_t1, output_t1)\n', (4058, 4077), True, 'import higra as hg\n'), ((4103, 4140), 'higra.is_in_bijection', 'hg.is_in_bijection', (['ref_t2', 'output_t2'], {}), '(ref_t2, output_t2)\n', (4121, 4140), True, 'import higra as hg\n'), ((4514, 4559), 'numpy.asarray', 'np.asarray', (['(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)'], {}), 
'((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))\n', (4524, 4559), True, 'import numpy as np\n'), ((4573, 4618), 'numpy.asarray', 'np.asarray', (['(1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3)'], {}), '((1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3))\n', (4583, 4618), True, 'import numpy as np\n'), ((4632, 4677), 'numpy.asarray', 'np.asarray', (['(0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3)'], {}), '((0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3))\n', (4642, 4677), True, 'import numpy as np\n'), ((4691, 4736), 'numpy.asarray', 'np.asarray', (['(0, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8)'], {}), '((0, 1, 2, 3, 4, 5, 6, 6, 6, 7, 8))\n', (4701, 4736), True, 'import numpy as np\n'), ((4826, 4901), 'higra.labelisation_horizontal_cut_from_num_regions', 'hg.labelisation_horizontal_cut_from_num_regions', (['tree', 'altitudes', 'k_cuts[i]'], {}), '(tree, altitudes, k_cuts[i])\n', (4873, 4901), True, 'import higra as hg\n'), ((5109, 5184), 'higra.labelisation_horizontal_cut_from_num_regions', 'hg.labelisation_horizontal_cut_from_num_regions', (['tree', 'altitudes', 'k_cuts[i]'], {}), '(tree, altitudes, k_cuts[i])\n', (5156, 5184), True, 'import higra as hg\n'), ((5392, 5482), 'higra.labelisation_horizontal_cut_from_num_regions', 'hg.labelisation_horizontal_cut_from_num_regions', (['tree', 'altitudes', 'k_cuts[i]', '"""at_most"""'], {}), "(tree, altitudes, k_cuts[i],\n 'at_most')\n", (5439, 5482), True, 'import higra as hg\n'), ((5631, 5667), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (5641, 5667), True, 'import numpy as np\n'), ((5907, 5938), 'higra.is_in_bijection', 'hg.is_in_bijection', (['ref', 'output'], {}), '(ref, output)\n', (5925, 5938), True, 'import higra as hg\n'), ((6091, 6130), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 7, 8, 7, 8, 8)'], {}), '((5, 5, 6, 6, 7, 8, 7, 8, 8))\n', (6101, 6130), True, 'import numpy as np\n'), ((6153, 6192), 'numpy.asarray', 'np.asarray', (['(6, 6, 5, 5, 7, 7, 8, 8, 8)'], {}), '((6, 6, 5, 5, 7, 7, 8, 8, 8))\n', (6163, 6192), True, 'import numpy 
as np\n'), ((6215, 6254), 'numpy.asarray', 'np.asarray', (['(7, 7, 5, 5, 6, 6, 8, 8, 8)'], {}), '((7, 7, 5, 5, 6, 6, 8, 8, 8))\n', (6225, 6254), True, 'import numpy as np\n'), ((6281, 6313), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t1', 't2'], {}), '(t1, t2)\n', (6305, 6313), True, 'import higra as hg\n'), ((6339, 6371), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t2', 't1'], {}), '(t2, t1)\n', (6363, 6371), True, 'import higra as hg\n'), ((6397, 6429), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t1', 't3'], {}), '(t1, t3)\n', (6421, 6429), True, 'import higra as hg\n'), ((6455, 6487), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t3', 't1'], {}), '(t3, t1)\n', (6479, 6487), True, 'import higra as hg\n'), ((6513, 6545), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t2', 't3'], {}), '(t2, t3)\n', (6537, 6545), True, 'import higra as hg\n'), ((6571, 6603), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t3', 't2'], {}), '(t3, t2)\n', (6595, 6603), True, 'import higra as hg\n'), ((6627, 6666), 'numpy.asarray', 'np.asarray', (['(5, 5, 7, 6, 6, 8, 7, 8, 8)'], {}), '((5, 5, 7, 6, 6, 8, 7, 8, 8))\n', (6637, 6666), True, 'import numpy as np\n'), ((7317, 7340), 'higra.attribute_area', 'hg.attribute_area', (['tree'], {}), '(tree)\n', (7334, 7340), True, 'import higra as hg\n'), ((7373, 7428), 'higra.accumulate_parallel', 'hg.accumulate_parallel', (['tree', 'area', 'hg.Accumulators.min'], {}), '(tree, area, hg.Accumulators.min)\n', (7395, 7428), True, 'import higra as hg\n'), ((7695, 7715), 'numpy.all', 'np.all', (['(sm == sm_ref)'], {}), '(sm == sm_ref)\n', (7701, 7715), True, 'import numpy as np\n'), ((8153, 8173), 'numpy.all', 'np.all', (['(sm == sm_ref)'], {}), '(sm == sm_ref)\n', (8159, 8173), True, 'import numpy as np\n'), ((8825, 8845), 'numpy.all', 'np.all', (['(sm == sm_ref)'], {}), '(sm == sm_ref)\n', (8831, 8845), True, 'import numpy as np\n'), ((9035, 9055), 
'numpy.all', 'np.all', (['(sm == sm_ref)'], {}), '(sm == sm_ref)\n', (9041, 9055), True, 'import numpy as np\n'), ((9512, 9532), 'numpy.all', 'np.all', (['(sm == sm_ref)'], {}), '(sm == sm_ref)\n', (9518, 9532), True, 'import numpy as np\n'), ((9611, 9684), 'numpy.asarray', 'np.asarray', (['(9, 9, 9, 10, 10, 12, 13, 11, 11, 14, 12, 15, 13, 14, 15, 15)'], {}), '((9, 9, 9, 10, 10, 12, 13, 11, 11, 14, 12, 15, 13, 14, 15, 15))\n', (9621, 9684), True, 'import numpy as np\n'), ((10057, 10097), 'numpy.all', 'np.all', (['(labelisation == ref_labelisation)'], {}), '(labelisation == ref_labelisation)\n', (10063, 10097), True, 'import numpy as np\n'), ((10173, 10241), 'numpy.asarray', 'np.asarray', (['(8, 8, 9, 9, 10, 10, 11, 13, 12, 12, 11, 13, 14, 14, 14)'], {}), '((8, 8, 9, 9, 10, 10, 11, 13, 12, 12, 11, 13, 14, 14, 14))\n', (10183, 10241), True, 'import numpy as np\n'), ((10665, 10700), 'numpy.all', 'np.all', (['(ref_altitudes == naltitudes)'], {}), '(ref_altitudes == naltitudes)\n', (10671, 10700), True, 'import numpy as np\n'), ((10726, 10762), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 7, 7, 7, 7)'], {}), '((5, 5, 6, 6, 7, 7, 7, 7))\n', (10736, 10762), True, 'import numpy as np\n'), ((10877, 10926), 'higra.sort_hierarchy_with_altitudes', 'hg.sort_hierarchy_with_altitudes', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (10909, 10926), True, 'import higra as hg\n'), ((11001, 11037), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 7, 7, 7, 7)'], {}), '((5, 5, 6, 6, 7, 7, 7, 7))\n', (11011, 11037), True, 'import numpy as np\n'), ((11121, 11170), 'higra.test_altitudes_increasingness', 'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11153, 11170), True, 'import higra as hg\n'), ((11254, 11303), 'higra.test_altitudes_increasingness', 'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11286, 11303), True, 'import higra as hg\n'), ((11387, 11436), 'higra.test_altitudes_increasingness', 
'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11419, 11436), True, 'import higra as hg\n'), ((11520, 11569), 'higra.test_altitudes_increasingness', 'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11552, 11569), True, 'import higra as hg\n'), ((11654, 11703), 'higra.test_altitudes_increasingness', 'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11686, 11703), True, 'import higra as hg\n'), ((11788, 11837), 'higra.test_altitudes_increasingness', 'hg.test_altitudes_increasingness', (['tree', 'altitudes'], {}), '(tree, altitudes)\n', (11820, 11837), True, 'import higra as hg\n'), ((4930, 4971), 'higra.is_in_bijection', 'hg.is_in_bijection', (['labels', 'ref_labels[i]'], {}), '(labels, ref_labels[i])\n', (4948, 4971), True, 'import higra as hg\n'), ((5213, 5254), 'higra.is_in_bijection', 'hg.is_in_bijection', (['labels', 'ref_labels[i]'], {}), '(labels, ref_labels[i])\n', (5231, 5254), True, 'import higra as hg\n'), ((5507, 5548), 'higra.is_in_bijection', 'hg.is_in_bijection', (['labels', 'ref_labels[i]'], {}), '(labels, ref_labels[i])\n', (5525, 5548), True, 'import higra as hg\n'), ((5964, 5979), 'numpy.amax', 'np.amax', (['output'], {}), '(output)\n', (5971, 5979), True, 'import numpy as np\n'), ((6010, 6025), 'numpy.amin', 'np.amin', (['output'], {}), '(output)\n', (6017, 6025), True, 'import numpy as np\n'), ((6697, 6729), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t1', 't4'], {}), '(t1, t4)\n', (6721, 6729), True, 'import higra as hg\n'), ((6759, 6791), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t2', 't4'], {}), '(t2, t4)\n', (6783, 6791), True, 'import higra as hg\n'), ((6821, 6853), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t3', 't4'], {}), '(t3, t4)\n', (6845, 6853), True, 'import higra as hg\n'), ((6883, 6915), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t4', 
't1'], {}), '(t4, t1)\n', (6907, 6915), True, 'import higra as hg\n'), ((6945, 6977), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t4', 't2'], {}), '(t4, t2)\n', (6969, 6977), True, 'import higra as hg\n'), ((7007, 7039), 'higra.test_tree_isomorphism', 'hg.test_tree_isomorphism', (['t4', 't3'], {}), '(t4, t3)\n', (7031, 7039), True, 'import higra as hg\n')] |
import numpy as np
from skimage import img_as_float
from skimage.color import rgb2grey
from skimage.morphology import remove_small_holes, remove_small_objects, disk, skeletonize
from skimage.filters import threshold_sauvola, threshold_local, gaussian
from skimage.util import invert
from scipy.sparse import dok_matrix
from scipy.signal import convolve2d
from scipy.ndimage.morphology import distance_transform_edt
from ..utils.functions import laplacian_of_gaussian
class Preprocessor:
DEFAULT_CONFIG = {
"pen_diameter": 20,
"window_size": 99,
"dot_threshold": 1300,
}
def __init__(self, cfg = DEFAULT_CONFIG):
self.config = cfg
self.input = None
self.binary_layer = None
self.distance_map = None
self.ridges_layer = None
self.skeleton_layer = None
def convert_input(self):
# Convert the input into a greyscale Numpy array
self.input = img_as_float(rgb2grey(self.input))
self.input = gaussian(self.input, sigma = 4)
def binarize(self):
# Perform an adaptive thresholding and filter out the artifacts
# Return a binary as a boolean Numpy array
thresh_sauvola = threshold_sauvola(self.input, self.config["window_size"])
bin_image = self.input > thresh_sauvola
# Filter out the small artifacts
bin_image = remove_small_holes(bin_image)
bin_image = remove_small_objects(bin_image)
self.binary_layer = ~bin_image
def make_distance_map(self):
self.distance_map = distance_transform_edt(self.binary_layer)
def extract_ridges(self):
# Create the LoG kernel :
m = 9
sigma = 1.4
kernel = np.zeros((m,m), np.float)
#z = (x**2+y**2)/(2*sigma**2)
#val = -(1-z)/(np.pi*sigma**4)*np.exp(-z)
#return val
for i in range(m):
for j in range(m):
kernel[i,j] = -(m**2)*laplacian_of_gaussian(i-m/2, j-m/2, sigma)
# Perform the convolution
ridges = convolve2d(self.distance_map, kernel, mode='same', boundary='fill', fillvalue=False)
self.ridges_layer = skeletonize(ridges>40)
def slideshow(self):
return [self.input,
self.binary_layer,
self.distance_map,
self.ridges_layer
]
def run(self, inpt):
print('* PREPROCESSOR *')
self.input = inpt
print(' -> Converting input')
self.convert_input()
print(' -> Binarization')
self.binarize()
print(' -> Euclidean distance transformation')
self.make_distance_map()
print(' -> Ridges extraction')
self.extract_ridges()
| [
"scipy.signal.convolve2d",
"skimage.morphology.remove_small_holes",
"numpy.zeros",
"skimage.morphology.skeletonize",
"skimage.filters.threshold_sauvola",
"skimage.morphology.remove_small_objects",
"skimage.color.rgb2grey",
"scipy.ndimage.morphology.distance_transform_edt",
"skimage.filters.gaussian"... | [((1065, 1094), 'skimage.filters.gaussian', 'gaussian', (['self.input'], {'sigma': '(4)'}), '(self.input, sigma=4)\n', (1073, 1094), False, 'from skimage.filters import threshold_sauvola, threshold_local, gaussian\n'), ((1296, 1353), 'skimage.filters.threshold_sauvola', 'threshold_sauvola', (['self.input', "self.config['window_size']"], {}), "(self.input, self.config['window_size'])\n", (1313, 1353), False, 'from skimage.filters import threshold_sauvola, threshold_local, gaussian\n'), ((1470, 1499), 'skimage.morphology.remove_small_holes', 'remove_small_holes', (['bin_image'], {}), '(bin_image)\n', (1488, 1499), False, 'from skimage.morphology import remove_small_holes, remove_small_objects, disk, skeletonize\n'), ((1520, 1551), 'skimage.morphology.remove_small_objects', 'remove_small_objects', (['bin_image'], {}), '(bin_image)\n', (1540, 1551), False, 'from skimage.morphology import remove_small_holes, remove_small_objects, disk, skeletonize\n'), ((1679, 1720), 'scipy.ndimage.morphology.distance_transform_edt', 'distance_transform_edt', (['self.binary_layer'], {}), '(self.binary_layer)\n', (1701, 1720), False, 'from scipy.ndimage.morphology import distance_transform_edt\n'), ((1872, 1898), 'numpy.zeros', 'np.zeros', (['(m, m)', 'np.float'], {}), '((m, m), np.float)\n', (1880, 1898), True, 'import numpy as np\n'), ((2240, 2328), 'scipy.signal.convolve2d', 'convolve2d', (['self.distance_map', 'kernel'], {'mode': '"""same"""', 'boundary': '"""fill"""', 'fillvalue': '(False)'}), "(self.distance_map, kernel, mode='same', boundary='fill',\n fillvalue=False)\n", (2250, 2328), False, 'from scipy.signal import convolve2d\n'), ((2362, 2386), 'skimage.morphology.skeletonize', 'skeletonize', (['(ridges > 40)'], {}), '(ridges > 40)\n', (2373, 2386), False, 'from skimage.morphology import remove_small_holes, remove_small_objects, disk, skeletonize\n'), ((1022, 1042), 'skimage.color.rgb2grey', 'rgb2grey', (['self.input'], {}), '(self.input)\n', 
(1030, 1042), False, 'from skimage.color import rgb2grey\n')] |
# imu_moving_average.py
"""
Author: <NAME> (<EMAIL>), 2022
Brief: Code to load IMU data (ADXL327 3-axis accelerometer) from file
and plot raw and moving-average filtered pitch angle data
Course: ENPM809T - Autonomous Robotics [HW01]
Date: 3rd February, 2022
"""
# Required packages
import numpy as np
import matplotlib.pyplot as plt
# Loading the data from 'imudata.txt'
file_name = 'imudata.txt'
imu_raw_data = np.loadtxt(file_name, delimiter=' ', dtype=str) # Loaded data size: 371 by 7
print(f"Loaded IMU data from file: {file_name}.")
data_size = np.shape(imu_raw_data)[0]
imu_pitch_angle = [int(imu_raw_data[i][4]) for i in range(data_size)] # Pitch angle data
def plot_data(data, window_size, smooth_data):
"""
Plots the ADXL accelerometer pitch angle data (raw and filtered) and displays the
mean and standard deviation of the data on the plot for the chosen window-size of
the moving-average filter.
Parameters
----------
data : List
List of integers containing the raw IMU pitch angle data
window_size : int
Window-size of the moving-average filter
smooth_data : List
List of integers containing the smoothened IMU pitch angle data
Returns
-------
None
"""
imu_data_avg = np.round(np.average(smooth_data), decimals=4) # Mean of filtered data
imu_data_std = np.round(np.std(smooth_data), decimals=4) # Std. Dev of filtered data
print(f"\nMean of IMU data for {window_size}-pt filtered data: {imu_data_avg}.")
print(f"Standard Deviation of IMU data for {window_size}-pt filtered data: {imu_data_std}.")
plt.plot(range(data_size),data,'b', range(len(smooth_data)),smooth_data,'r')
plt.axis([0, data_size, 0, max(data)])
plt.xlabel('Sample')
plt.ylabel('Pitch Angle (deg)')
plt.title('ADXL327 Accel Pitch Angle Plot')
plt.legend(['Raw Data', f'{window_size}-pt Moving Average Data'])
# Placing Mean and Std dev of raw & filtered data in the plot
plt.text(100.0, 1.7, f"Mean: {imu_data_avg} deg\nStd Dev: {imu_data_std} deg",
verticalalignment='top', horizontalalignment='left', color='red', fontsize=9)
plt.text(220.0, 1.7, f"Mean: {np.round(np.average(data), decimals=4)} deg\nStd Dev: {np.round(np.std(data), decimals=4)} deg",
verticalalignment='top', horizontalalignment='left', color='blue', fontsize=9)
plt.show()
print(f"Plotting IMU data for {window_size}-pt moving average filter.")
def moving_average(data, window_size):
"""
Computes the window-size point moving-average value of the raw IMU data and calls the plot_data function to plot the data.
Parameters
----------
data : List
List of integers containing the raw IMU pitch angle data
window_size : int
Window-size of the moving-average filter
Returns
-------
None
"""
imu_pitch_angle_smooth = []
for i in range(0,data_size-window_size+1):
sub_data = data[i:i+window_size] # Slicing data into 'window-size' parts
avg = np.mean(sub_data)
imu_pitch_angle_smooth.append(avg)
plot_data(data, window_size, imu_pitch_angle_smooth)
if __name__ == "__main__":
# Moving-average for 2, 4, 8, 16, 64, 128 points
i = 2
while i <= 128:
if i is not 32:
moving_average(imu_pitch_angle, i)
i = i*2
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.average",
"numpy.std",
"matplotlib.pyplot.legend",
"numpy.shape",
"matplotlib.pyplot.text",
"numpy.mean",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((437, 484), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {'delimiter': '""" """', 'dtype': 'str'}), "(file_name, delimiter=' ', dtype=str)\n", (447, 484), True, 'import numpy as np\n'), ((584, 606), 'numpy.shape', 'np.shape', (['imu_raw_data'], {}), '(imu_raw_data)\n', (592, 606), True, 'import numpy as np\n'), ((1828, 1848), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Sample"""'], {}), "('Sample')\n", (1838, 1848), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1885), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pitch Angle (deg)"""'], {}), "('Pitch Angle (deg)')\n", (1864, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1934), 'matplotlib.pyplot.title', 'plt.title', (['"""ADXL327 Accel Pitch Angle Plot"""'], {}), "('ADXL327 Accel Pitch Angle Plot')\n", (1900, 1934), True, 'import matplotlib.pyplot as plt\n'), ((1940, 2005), 'matplotlib.pyplot.legend', 'plt.legend', (["['Raw Data', f'{window_size}-pt Moving Average Data']"], {}), "(['Raw Data', f'{window_size}-pt Moving Average Data'])\n", (1950, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2251), 'matplotlib.pyplot.text', 'plt.text', (['(100.0)', '(1.7)', 'f"""Mean: {imu_data_avg} deg\nStd Dev: {imu_data_std} deg"""'], {'verticalalignment': '"""top"""', 'horizontalalignment': '"""left"""', 'color': '"""red"""', 'fontsize': '(9)'}), '(100.0, 1.7,\n f"""Mean: {imu_data_avg} deg\nStd Dev: {imu_data_std} deg""",\n verticalalignment=\'top\', horizontalalignment=\'left\', color=\'red\',\n fontsize=9)\n', (2088, 2251), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2486), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2484, 2486), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1375), 'numpy.average', 'np.average', (['smooth_data'], {}), '(smooth_data)\n', (1362, 1375), True, 'import numpy as np\n'), ((1445, 1464), 'numpy.std', 'np.std', (['smooth_data'], {}), '(smooth_data)\n', (1451, 1464), True, 'import numpy as np\n'), ((3174, 3191), 'numpy.mean', 
'np.mean', (['sub_data'], {}), '(sub_data)\n', (3181, 3191), True, 'import numpy as np\n'), ((2294, 2310), 'numpy.average', 'np.average', (['data'], {}), '(data)\n', (2304, 2310), True, 'import numpy as np\n'), ((2349, 2361), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (2355, 2361), True, 'import numpy as np\n')] |
"""Testing visualization with fvtk."""
import os
import warnings
import numpy as np
from distutils.version import LooseVersion
from dipy.viz import fvtk
from dipy import data
import numpy.testing as npt
from dipy.testing.decorators import xvfb_it
from dipy.utils.optpkg import optional_package
use_xvfb = os.environ.get('TEST_WITH_XVFB', False)
if use_xvfb == 'skip':
skip_it = True
else:
skip_it = False
cm, have_matplotlib, _ = optional_package('matplotlib.cm')
if have_matplotlib:
import matplotlib
mpl_version = LooseVersion(matplotlib.__version__)
@npt.dec.skipif(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)
@xvfb_it
def test_fvtk_functions():
# This tests will fail if any of the given actors changed inputs or do
# not exist
# Create a renderer
r = fvtk.ren()
# Create 2 lines with 2 different colors
lines = [np.random.rand(10, 3), np.random.rand(20, 3)]
colors = np.random.rand(2, 3)
c = fvtk.line(lines, colors)
fvtk.add(r, c)
# create streamtubes of the same lines and shift them a bit
c2 = fvtk.streamtube(lines, colors)
c2.SetPosition(2, 0, 0)
fvtk.add(r, c2)
# Create a volume and return a volumetric actor using volumetric rendering
vol = 100 * np.random.rand(100, 100, 100)
vol = vol.astype('uint8')
r = fvtk.ren()
v = fvtk.volume(vol)
fvtk.add(r, v)
# Remove all objects
fvtk.rm_all(r)
# Put some text
l = fvtk.label(r, text='Yes Men')
fvtk.add(r, l)
# Slice the volume
slicer = fvtk.slicer(vol)
slicer.display(50, None, None)
fvtk.add(r, slicer)
# Change the position of the active camera
fvtk.camera(r, pos=(0.6, 0, 0), verbose=False)
fvtk.clear(r)
# Peak directions
p = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3))
fvtk.add(r, p)
p2 = fvtk.peaks(np.random.rand(3, 3, 3, 5, 3),
np.random.rand(3, 3, 3, 5),
colors=(0, 1, 0))
fvtk.add(r, p2)
@npt.dec.skipif(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)
@xvfb_it
def test_fvtk_ellipsoid():
evals = np.array([1.4, .35, .35]) * 10 ** (-3)
evecs = np.eye(3)
mevals = np.zeros((3, 2, 4, 3))
mevecs = np.zeros((3, 2, 4, 3, 3))
mevals[..., :] = evals
mevecs[..., :, :] = evecs
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(mevals, mevecs, sphere=sphere))
fvtk.add(ren, fvtk.tensor(mevals, mevecs, np.ones(mevals.shape),
sphere=sphere))
npt.assert_equal(ren.GetActors().GetNumberOfItems(), 2)
def test_colormap():
v = np.linspace(0., .5)
map1 = fvtk.create_colormap(v, 'bone', auto=True)
map2 = fvtk.create_colormap(v, 'bone', auto=False)
npt.assert_(not np.allclose(map1, map2))
npt.assert_raises(ValueError, fvtk.create_colormap, np.ones((2, 3)))
npt.assert_raises(ValueError, fvtk.create_colormap, v, 'no such map')
@npt.dec.skipif(not fvtk.have_matplotlib)
def test_colormaps_matplotlib():
v = np.random.random(1000)
# The "Accent" colormap is deprecated as of 0.12:
with warnings.catch_warnings(record=True) as w:
accent_cm = data.get_cmap("Accent")
# Test that the deprecation warning was raised:
npt.assert_(len(w) > 0)
names = ['jet', 'Blues', 'bone']
if have_matplotlib and mpl_version < "2":
names.append('Accent')
for name in names:
with warnings.catch_warnings(record=True) as w:
# Matplotlib version of get_cmap
rgba1 = fvtk.get_cmap(name)(v)
# Dipy version of get_cmap
rgba2 = data.get_cmap(name)(v)
# dipy's colormaps are close to matplotlibs colormaps, but not
# perfect:
npt.assert_array_almost_equal(rgba1, rgba2, 1)
npt.assert_(len(w) == (1 if name == 'Accent' else 0))
if __name__ == "__main__":
npt.run_module_suite()
| [
"dipy.data.get_cmap",
"dipy.viz.fvtk.streamtube",
"dipy.viz.fvtk.line",
"dipy.viz.fvtk.volume",
"dipy.data.get_sphere",
"numpy.ones",
"numpy.allclose",
"dipy.viz.fvtk.create_colormap",
"dipy.viz.fvtk.tensor",
"numpy.testing.assert_array_almost_equal",
"numpy.testing.dec.skipif",
"dipy.viz.fvtk... | [((308, 347), 'os.environ.get', 'os.environ.get', (['"""TEST_WITH_XVFB"""', '(False)'], {}), "('TEST_WITH_XVFB', False)\n", (322, 347), False, 'import os\n'), ((442, 475), 'dipy.utils.optpkg.optional_package', 'optional_package', (['"""matplotlib.cm"""'], {}), "('matplotlib.cm')\n", (458, 475), False, 'from dipy.utils.optpkg import optional_package\n'), ((577, 649), 'numpy.testing.dec.skipif', 'npt.dec.skipif', (['(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)'], {}), '(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)\n', (591, 649), True, 'import numpy.testing as npt\n'), ((1991, 2063), 'numpy.testing.dec.skipif', 'npt.dec.skipif', (['(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)'], {}), '(not fvtk.have_vtk or not fvtk.have_vtk_colors or skip_it)\n', (2005, 2063), True, 'import numpy.testing as npt\n'), ((2988, 3028), 'numpy.testing.dec.skipif', 'npt.dec.skipif', (['(not fvtk.have_matplotlib)'], {}), '(not fvtk.have_matplotlib)\n', (3002, 3028), True, 'import numpy.testing as npt\n'), ((537, 573), 'distutils.version.LooseVersion', 'LooseVersion', (['matplotlib.__version__'], {}), '(matplotlib.__version__)\n', (549, 573), False, 'from distutils.version import LooseVersion\n'), ((810, 820), 'dipy.viz.fvtk.ren', 'fvtk.ren', ([], {}), '()\n', (818, 820), False, 'from dipy.viz import fvtk\n'), ((939, 959), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)'], {}), '(2, 3)\n', (953, 959), True, 'import numpy as np\n'), ((968, 992), 'dipy.viz.fvtk.line', 'fvtk.line', (['lines', 'colors'], {}), '(lines, colors)\n', (977, 992), False, 'from dipy.viz import fvtk\n'), ((997, 1011), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'c'], {}), '(r, c)\n', (1005, 1011), False, 'from dipy.viz import fvtk\n'), ((1086, 1116), 'dipy.viz.fvtk.streamtube', 'fvtk.streamtube', (['lines', 'colors'], {}), '(lines, colors)\n', (1101, 1116), False, 'from dipy.viz import fvtk\n'), ((1149, 1164), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'c2'], 
{}), '(r, c2)\n', (1157, 1164), False, 'from dipy.viz import fvtk\n'), ((1329, 1339), 'dipy.viz.fvtk.ren', 'fvtk.ren', ([], {}), '()\n', (1337, 1339), False, 'from dipy.viz import fvtk\n'), ((1348, 1364), 'dipy.viz.fvtk.volume', 'fvtk.volume', (['vol'], {}), '(vol)\n', (1359, 1364), False, 'from dipy.viz import fvtk\n'), ((1369, 1383), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'v'], {}), '(r, v)\n', (1377, 1383), False, 'from dipy.viz import fvtk\n'), ((1414, 1428), 'dipy.viz.fvtk.rm_all', 'fvtk.rm_all', (['r'], {}), '(r)\n', (1425, 1428), False, 'from dipy.viz import fvtk\n'), ((1458, 1487), 'dipy.viz.fvtk.label', 'fvtk.label', (['r'], {'text': '"""Yes Men"""'}), "(r, text='Yes Men')\n", (1468, 1487), False, 'from dipy.viz import fvtk\n'), ((1492, 1506), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'l'], {}), '(r, l)\n', (1500, 1506), False, 'from dipy.viz import fvtk\n'), ((1544, 1560), 'dipy.viz.fvtk.slicer', 'fvtk.slicer', (['vol'], {}), '(vol)\n', (1555, 1560), False, 'from dipy.viz import fvtk\n'), ((1600, 1619), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'slicer'], {}), '(r, slicer)\n', (1608, 1619), False, 'from dipy.viz import fvtk\n'), ((1672, 1718), 'dipy.viz.fvtk.camera', 'fvtk.camera', (['r'], {'pos': '(0.6, 0, 0)', 'verbose': '(False)'}), '(r, pos=(0.6, 0, 0), verbose=False)\n', (1683, 1718), False, 'from dipy.viz import fvtk\n'), ((1724, 1737), 'dipy.viz.fvtk.clear', 'fvtk.clear', (['r'], {}), '(r)\n', (1734, 1737), False, 'from dipy.viz import fvtk\n'), ((1815, 1829), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'p'], {}), '(r, p)\n', (1823, 1829), False, 'from dipy.viz import fvtk\n'), ((1972, 1987), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'p2'], {}), '(r, p2)\n', (1980, 1987), False, 'from dipy.viz import fvtk\n'), ((2164, 2173), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2170, 2173), True, 'import numpy as np\n'), ((2188, 2210), 'numpy.zeros', 'np.zeros', (['(3, 2, 4, 3)'], {}), '((3, 2, 4, 3))\n', (2196, 2210), True, 'import numpy as np\n'), ((2224, 
2249), 'numpy.zeros', 'np.zeros', (['(3, 2, 4, 3, 3)'], {}), '((3, 2, 4, 3, 3))\n', (2232, 2249), True, 'import numpy as np\n'), ((2360, 2386), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (2370, 2386), False, 'from dipy.data import get_sphere\n'), ((2398, 2408), 'dipy.viz.fvtk.ren', 'fvtk.ren', ([], {}), '()\n', (2406, 2408), False, 'from dipy.viz import fvtk\n'), ((2663, 2684), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.5)'], {}), '(0.0, 0.5)\n', (2674, 2684), True, 'import numpy as np\n'), ((2694, 2736), 'dipy.viz.fvtk.create_colormap', 'fvtk.create_colormap', (['v', '"""bone"""'], {'auto': '(True)'}), "(v, 'bone', auto=True)\n", (2714, 2736), False, 'from dipy.viz import fvtk\n'), ((2748, 2791), 'dipy.viz.fvtk.create_colormap', 'fvtk.create_colormap', (['v', '"""bone"""'], {'auto': '(False)'}), "(v, 'bone', auto=False)\n", (2768, 2791), False, 'from dipy.viz import fvtk\n'), ((2915, 2984), 'numpy.testing.assert_raises', 'npt.assert_raises', (['ValueError', 'fvtk.create_colormap', 'v', '"""no such map"""'], {}), "(ValueError, fvtk.create_colormap, v, 'no such map')\n", (2932, 2984), True, 'import numpy.testing as npt\n'), ((3070, 3092), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (3086, 3092), True, 'import numpy as np\n'), ((3955, 3977), 'numpy.testing.run_module_suite', 'npt.run_module_suite', ([], {}), '()\n', (3975, 3977), True, 'import numpy.testing as npt\n'), ((880, 901), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)'], {}), '(10, 3)\n', (894, 901), True, 'import numpy as np\n'), ((903, 924), 'numpy.random.rand', 'np.random.rand', (['(20)', '(3)'], {}), '(20, 3)\n', (917, 924), True, 'import numpy as np\n'), ((1261, 1290), 'numpy.random.rand', 'np.random.rand', (['(100)', '(100)', '(100)'], {}), '(100, 100, 100)\n', (1275, 1290), True, 'import numpy as np\n'), ((1780, 1809), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(5)', '(3)'], {}), '(3, 3, 3, 5, 
3)\n', (1794, 1809), True, 'import numpy as np\n'), ((1851, 1880), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(5)', '(3)'], {}), '(3, 3, 3, 5, 3)\n', (1865, 1880), True, 'import numpy as np\n'), ((1902, 1928), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(5)'], {}), '(3, 3, 3, 5)\n', (1916, 1928), True, 'import numpy as np\n'), ((2113, 2140), 'numpy.array', 'np.array', (['[1.4, 0.35, 0.35]'], {}), '([1.4, 0.35, 0.35])\n', (2121, 2140), True, 'import numpy as np\n'), ((2428, 2470), 'dipy.viz.fvtk.tensor', 'fvtk.tensor', (['mevals', 'mevecs'], {'sphere': 'sphere'}), '(mevals, mevecs, sphere=sphere)\n', (2439, 2470), False, 'from dipy.viz import fvtk\n'), ((2894, 2909), 'numpy.ones', 'np.ones', (['(2, 3)'], {}), '((2, 3))\n', (2901, 2909), True, 'import numpy as np\n'), ((3156, 3192), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3179, 3192), False, 'import warnings\n'), ((3219, 3242), 'dipy.data.get_cmap', 'data.get_cmap', (['"""Accent"""'], {}), "('Accent')\n", (3232, 3242), False, 'from dipy import data\n'), ((2519, 2540), 'numpy.ones', 'np.ones', (['mevals.shape'], {}), '(mevals.shape)\n', (2526, 2540), True, 'import numpy as np\n'), ((2812, 2835), 'numpy.allclose', 'np.allclose', (['map1', 'map2'], {}), '(map1, map2)\n', (2823, 2835), True, 'import numpy as np\n'), ((3484, 3520), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3507, 3520), False, 'import warnings\n'), ((3807, 3853), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['rgba1', 'rgba2', '(1)'], {}), '(rgba1, rgba2, 1)\n', (3836, 3853), True, 'import numpy.testing as npt\n'), ((3592, 3611), 'dipy.viz.fvtk.get_cmap', 'fvtk.get_cmap', (['name'], {}), '(name)\n', (3605, 3611), False, 'from dipy.viz import fvtk\n'), ((3674, 3693), 'dipy.data.get_cmap', 'data.get_cmap', (['name'], {}), '(name)\n', (3687, 3693), False, 'from dipy 
import data\n')] |
# ***************************************************************
# Copyright (c) 2020 Jittor. Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
# All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import jittor as jt
import numpy as np
# Flag used by unittest.skipIf below: skip the whole module when torch
# cannot be imported alongside jittor.
skip_this_test = False
try:
    # Apply jittor's workaround for a pytorch runtime clash before importing torch.
    jt.dirty_fix_pytorch_runtime_error()
    import torch
except:
    skip_this_test = True
@unittest.skipIf(skip_this_test, "No Torch Found")
class TestSearchsorted(unittest.TestCase):
    """Compare jt.searchsorted against torch.searchsorted on random sorted data."""

    def test_searchsorted_cpu(self):
        for ndim in range(1, 3):
            shape = (10,) * ndim
            sorted_np = np.sort(np.random.rand(*shape), -1)
            values_np = np.random.rand(*shape)
            sorted_jt, values_jt = jt.array(sorted_np), jt.array(values_np)
            sorted_tc, values_tc = torch.from_numpy(sorted_np), torch.from_numpy(values_np)
            # Both insertion sides must agree with the torch reference.
            for right in (True, False):
                out_tc = torch.searchsorted(sorted_tc, values_tc, right=right)
                out_jt = jt.searchsorted(sorted_jt, values_jt, right=right)
                assert np.allclose(out_jt.numpy(), out_tc.data)

    @unittest.skipIf(not jt.compiler.has_cuda, "No CUDA found")
    @jt.flag_scope(use_cuda=1)
    def test_searchsorted_gpu(self):
        # Same checks, executed with CUDA enabled via the flag scope.
        self.test_searchsorted_cpu()
if __name__ == "__main__":
    # Discover and run the tests defined in this module.
    unittest.main()
"unittest.main",
"unittest.skipIf",
"jittor.array",
"jittor.dirty_fix_pytorch_runtime_error",
"torch.searchsorted",
"numpy.random.rand",
"jittor.flag_scope",
"jittor.searchsorted",
"torch.from_numpy"
] | [((548, 597), 'unittest.skipIf', 'unittest.skipIf', (['skip_this_test', '"""No Torch Found"""'], {}), "(skip_this_test, 'No Torch Found')\n", (563, 597), False, 'import unittest\n'), ((457, 493), 'jittor.dirty_fix_pytorch_runtime_error', 'jt.dirty_fix_pytorch_runtime_error', ([], {}), '()\n', (491, 493), True, 'import jittor as jt\n'), ((1308, 1366), 'unittest.skipIf', 'unittest.skipIf', (['(not jt.compiler.has_cuda)', '"""No CUDA found"""'], {}), "(not jt.compiler.has_cuda, 'No CUDA found')\n", (1323, 1366), False, 'import unittest\n'), ((1372, 1397), 'jittor.flag_scope', 'jt.flag_scope', ([], {'use_cuda': '(1)'}), '(use_cuda=1)\n', (1385, 1397), True, 'import jittor as jt\n'), ((1504, 1519), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1517, 1519), False, 'import unittest\n'), ((778, 806), 'numpy.random.rand', 'np.random.rand', (['*((10,) * i)'], {}), '(*((10,) * i))\n', (792, 806), True, 'import numpy as np\n'), ((824, 835), 'jittor.array', 'jt.array', (['s'], {}), '(s)\n', (832, 835), True, 'import jittor as jt\n'), ((855, 866), 'jittor.array', 'jt.array', (['v'], {}), '(v)\n', (863, 866), True, 'import jittor as jt\n'), ((886, 905), 'torch.from_numpy', 'torch.from_numpy', (['s'], {}), '(s)\n', (902, 905), False, 'import torch\n'), ((925, 944), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (941, 944), False, 'import torch\n'), ((965, 1007), 'torch.searchsorted', 'torch.searchsorted', (['s_tc', 'v_tc'], {'right': '(True)'}), '(s_tc, v_tc, right=True)\n', (983, 1007), False, 'import torch\n'), ((1027, 1066), 'jittor.searchsorted', 'jt.searchsorted', (['s_jt', 'v_jt'], {'right': '(True)'}), '(s_jt, v_jt, right=True)\n', (1042, 1066), True, 'import jittor as jt\n'), ((1142, 1182), 'jittor.searchsorted', 'jt.searchsorted', (['s_jt', 'v_jt'], {'right': '(False)'}), '(s_jt, v_jt, right=False)\n', (1157, 1182), True, 'import jittor as jt\n'), ((1202, 1245), 'torch.searchsorted', 'torch.searchsorted', (['s_tc', 'v_tc'], {'right': '(False)'}), 
'(s_tc, v_tc, right=False)\n', (1220, 1245), False, 'import torch\n'), ((731, 759), 'numpy.random.rand', 'np.random.rand', (['*((10,) * i)'], {}), '(*((10,) * i))\n', (745, 759), True, 'import numpy as np\n')] |
from embeddings import PositionalEncoding
from utils.pad import pad_masking, subsequent_masking
import torch
from torch import nn
import numpy as np
from collections import defaultdict
PAD_TOKEN_ID = 0
def build_model(config, source_vocabulary_size, target_vocabulary_size):
    """Assemble a Transformer from a config dict.

    Args:
        config: dict with keys 'positional_encoding', 'd_model',
            'layers_count', 'heads_count', 'd_ff', 'dropout_prob'.
        source_vocabulary_size: size of the source-side vocabulary.
        target_vocabulary_size: size of the target-side vocabulary.

    Returns:
        A Transformer wrapping a TransformerEncoder and TransformerDecoder.
    """
    d_model = config['d_model']
    if config['positional_encoding']:
        source_embedding = PositionalEncoding(
            num_embeddings=source_vocabulary_size,
            embedding_dim=d_model,
            dim=d_model)  # why dim?
        target_embedding = PositionalEncoding(
            num_embeddings=target_vocabulary_size,
            embedding_dim=d_model,
            dim=d_model)  # why dim?
    else:
        source_embedding = nn.Embedding(
            num_embeddings=source_vocabulary_size, embedding_dim=d_model)
        target_embedding = nn.Embedding(
            num_embeddings=target_vocabulary_size, embedding_dim=d_model)

    # Encoder and decoder share every hyperparameter except the embedding.
    shared_kwargs = dict(
        layers_count=config['layers_count'],
        d_model=d_model,
        heads_count=config['heads_count'],
        d_ff=config['d_ff'],
        dropout_prob=config['dropout_prob'],
    )
    encoder = TransformerEncoder(embedding=source_embedding, **shared_kwargs)
    decoder = TransformerDecoder(embedding=target_embedding, **shared_kwargs)
    return Transformer(encoder, decoder)
class Transformer(nn.Module):
    """Seq2seq model tying an encoder and a decoder together."""

    def __init__(self, encoder, decoder):
        super(Transformer, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, sources, inputs):
        """Encode `sources` and decode `inputs` against the encoder memory.

        Args:
            sources: (batch_size, sources_len) token ids.
            inputs: (batch_size, targets_len - 1) token ids.

        Returns:
            Decoder output logits.
        """
        _, sources_len = sources.size()
        _, inputs_len = inputs.size()

        sources_mask = pad_masking(sources, sources_len)
        memory_mask = pad_masking(sources, inputs_len)
        # Decoder self-attention may look neither at future tokens nor padding.
        inputs_mask = subsequent_masking(inputs) | pad_masking(inputs, inputs_len)

        memory = self.encoder(sources, sources_mask)  # (batch_size, seq_len, d_model)
        outputs, _ = self.decoder(inputs, memory, memory_mask, inputs_mask)
        return outputs
class TransformerEncoder(nn.Module):
    """Embedding followed by a stack of identical encoder layers."""

    def __init__(self, layers_count, d_model, heads_count, d_ff, dropout_prob, embedding):
        super(TransformerEncoder, self).__init__()
        self.d_model = d_model
        self.embedding = embedding
        stack = []
        for _ in range(layers_count):
            stack.append(TransformerEncoderLayer(d_model, heads_count, d_ff, dropout_prob))
        self.encoder_layers = nn.ModuleList(stack)

    def forward(self, sources, mask):
        """Embed `sources` and run them through every encoder layer.

        Args:
            sources: token ids to embed, then shaped (batch_size, seq_len, embed_size).
            mask: attention mask applied in every layer.
        """
        hidden = self.embedding(sources)
        for layer in self.encoder_layers:
            hidden = layer(hidden, mask)
        return hidden
class TransformerEncoderLayer(nn.Module):
    """One encoder layer: self-attention sublayer, then a feed-forward sublayer."""

    def __init__(self, d_model, heads_count, d_ff, dropout_prob):
        super(TransformerEncoderLayer, self).__init__()
        attention = MultiHeadAttention(heads_count, d_model, dropout_prob)
        feed_forward = PointwiseFeedForwardNetwork(d_ff, d_model, dropout_prob)
        self.self_attention_layer = Sublayer(attention, d_model)
        self.pointwise_feedforward_layer = Sublayer(feed_forward, d_model)
        self.dropout = nn.Dropout(dropout_prob)

    def forward(self, sources, sources_mask):
        """Transform `sources` of shape (batch_size, seq_len, d_model)."""
        attended = self.self_attention_layer(sources, sources, sources, sources_mask)
        attended = self.dropout(attended)
        return self.pointwise_feedforward_layer(attended)
class TransformerDecoder(nn.Module):
    """Stack of decoder layers plus an output projection.

    The projection weight is shared with the embedding matrix (weight tying).
    An optional ``state`` enables an incremental-decoding key/value cache.
    """

    def __init__(self, layers_count, d_model, heads_count, d_ff, dropout_prob, embedding):
        super(TransformerDecoder, self).__init__()
        self.d_model = d_model
        self.embedding = embedding
        self.decoder_layers = nn.ModuleList(
            [TransformerDecoderLayer(d_model, heads_count, d_ff, dropout_prob) for _ in range(layers_count)]
        )
        self.generator = nn.Linear(embedding.embedding_dim, embedding.num_embeddings)
        # Weight tying: the output projection reuses the embedding weights.
        self.generator.weight = self.embedding.weight

    def forward(self, inputs, memory, memory_mask, inputs_mask=None, state=None):
        # inputs: (batch_size, seq_len - 1, d_model)
        # memory: (batch_size, seq_len, d_model)

        inputs = self.embedding(inputs)
        # if state is not None:
        #     inputs = torch.cat([state.previous_inputs, inputs], dim=1)
        #
        #     state.previous_inputs = inputs

        for layer_index, decoder_layer in enumerate(self.decoder_layers):
            if state is None:
                inputs = decoder_layer(inputs, memory, memory_mask, inputs_mask)
            else:  # Use cache
                layer_cache = state.layer_caches[layer_index]
                # print('inputs_mask', inputs_mask)
                inputs = decoder_layer(inputs, memory, memory_mask, inputs_mask, layer_cache)

                # Persist the projections the layer just computed so the next
                # decoding step can reuse them instead of re-projecting.
                state.update_state(
                    layer_index=layer_index,
                    layer_mode='self-attention',
                    key_projected=decoder_layer.self_attention_layer.sublayer.key_projected,
                    value_projected=decoder_layer.self_attention_layer.sublayer.value_projected,
                )
                state.update_state(
                    layer_index=layer_index,
                    layer_mode='memory-attention',
                    key_projected=decoder_layer.memory_attention_layer.sublayer.key_projected,
                    value_projected=decoder_layer.memory_attention_layer.sublayer.value_projected,
                )

        generated = self.generator(inputs)  # (batch_size, seq_len, vocab_size)
        return generated, state

    def init_decoder_state(self, **args):
        # Fresh (empty) cache for a new decoding pass.
        return DecoderState()
class TransformerDecoderLayer(nn.Module):
    """One decoder layer: masked self-attention, memory attention, feed-forward."""

    def __init__(self, d_model, heads_count, d_ff, dropout_prob):
        super(TransformerDecoderLayer, self).__init__()
        self.self_attention_layer = Sublayer(
            MultiHeadAttention(heads_count, d_model, dropout_prob, mode='self-attention'),
            d_model)
        self.memory_attention_layer = Sublayer(
            MultiHeadAttention(heads_count, d_model, dropout_prob, mode='memory-attention'),
            d_model)
        self.pointwise_feedforward_layer = Sublayer(
            PointwiseFeedForwardNetwork(d_ff, d_model, dropout_prob),
            d_model)

    def forward(self, inputs, memory, memory_mask, inputs_mask, layer_cache=None):
        """Run the three sublayers in order, threading the optional cache through."""
        hidden = self.self_attention_layer(inputs, inputs, inputs, inputs_mask, layer_cache)
        hidden = self.memory_attention_layer(hidden, memory, memory, memory_mask, layer_cache)
        return self.pointwise_feedforward_layer(hidden)
class Sublayer(nn.Module):
    """Wrap a sublayer with a residual connection followed by layer normalization."""

    def __init__(self, sublayer, d_model):
        super(Sublayer, self).__init__()
        self.sublayer = sublayer
        self.layer_normalization = LayerNormalization(d_model)

    def forward(self, *args):
        # The first positional argument carries the residual stream.
        residual = args[0]
        combined = self.sublayer(*args) + residual
        return self.layer_normalization(combined)
class LayerNormalization(nn.Module):
    """Normalize the last dimension to zero mean / unit std, then scale and shift."""

    def __init__(self, features_count, epsilon=1e-6):
        super(LayerNormalization, self).__init__()
        # Learnable elementwise scale and offset.
        self.gain = nn.Parameter(torch.ones(features_count))
        self.bias = nn.Parameter(torch.zeros(features_count))
        self.epsilon = epsilon  # guards against division by a zero std

    def forward(self, x):
        centered = x - x.mean(dim=-1, keepdim=True)
        scale = x.std(dim=-1, keepdim=True) + self.epsilon
        return self.gain * centered / scale + self.bias
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention with an optional key/value cache.

    ``mode`` selects how an incremental-decoding cache is used:
    'self-attention' appends new projections to the cached ones, while
    'memory-attention' reuses the cached projections verbatim.
    """

    def __init__(self, heads_count, d_model, dropout_prob, mode='self-attention'):
        super(MultiHeadAttention, self).__init__()

        assert d_model % heads_count == 0
        assert mode in ('self-attention', 'memory-attention')

        self.d_head = d_model // heads_count
        self.heads_count = heads_count
        self.mode = mode
        self.query_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.key_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.value_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.final_projection = nn.Linear(d_model, heads_count * self.d_head)
        self.dropout = nn.Dropout(dropout_prob)
        self.softmax = nn.Softmax(dim=3)  # normalize over the key dimension

        self.attention = None  # last attention weights, kept for inspection
        # For cache
        self.key_projected = None
        self.value_projected = None

    def forward(self, query, key, value, mask=None, layer_cache=None):
        """

        Args:
            query: (batch_size, query_len, model_dim)
            key: (batch_size, key_len, model_dim)
            value: (batch_size, value_len, model_dim)
            mask: (batch_size, query_len, key_len)
            state: DecoderState
        """
        # print('attention mask', mask)
        batch_size, query_len, d_model = query.size()

        d_head = d_model // self.heads_count

        query_projected = self.query_projection(query)
        # print('query_projected', query_projected.shape)
        if layer_cache is None or layer_cache[self.mode] is None:  # Don't use cache
            key_projected = self.key_projection(key)
            value_projected = self.value_projection(value)
        else:  # Use cache
            if self.mode == 'self-attention':
                # New step's projections are appended to the cached history.
                key_projected = self.key_projection(key)
                value_projected = self.value_projection(value)

                key_projected = torch.cat([key_projected, layer_cache[self.mode]['key_projected']], dim=1)
                value_projected = torch.cat([value_projected, layer_cache[self.mode]['value_projected']], dim=1)
            elif self.mode == 'memory-attention':
                # Encoder memory never changes during decoding: reuse as-is.
                key_projected = layer_cache[self.mode]['key_projected']
                value_projected = layer_cache[self.mode]['value_projected']

        # For cache
        self.key_projected = key_projected
        self.value_projected = value_projected

        batch_size, key_len, d_model = key_projected.size()
        batch_size, value_len, d_model = value_projected.size()

        # Split the model dimension into (heads_count, d_head) per position.
        query_heads = query_projected.view(batch_size, query_len, self.heads_count, d_head).transpose(1, 2)  # (batch_size, heads_count, query_len, d_head)
        # print('query_heads', query_heads.shape)
        # print(batch_size, key_len, self.heads_count, d_head)
        # print(key_projected.shape)
        key_heads = key_projected.view(batch_size, key_len, self.heads_count, d_head).transpose(1, 2)  # (batch_size, heads_count, key_len, d_head)
        value_heads = value_projected.view(batch_size, value_len, self.heads_count, d_head).transpose(1, 2)  # (batch_size, heads_count, value_len, d_head)

        attention_weights = self.scaled_dot_product(query_heads, key_heads)  # (batch_size, heads_count, query_len, key_len)

        if mask is not None:
            # print('mode', self.mode)
            # print('mask', mask.shape)
            # print('attention_weights', attention_weights.shape)
            # Masked positions get a huge negative score so softmax zeroes them.
            mask_expanded = mask.unsqueeze(1).expand_as(attention_weights)
            attention_weights = attention_weights.masked_fill(mask_expanded, -1e18)

        self.attention = self.softmax(attention_weights)  # Save attention to the object
        # print('attention_weights', attention_weights.shape)
        attention_dropped = self.dropout(self.attention)
        context_heads = torch.matmul(attention_dropped, value_heads)  # (batch_size, heads_count, query_len, d_head)
        # print('context_heads', context_heads.shape)
        # Merge the heads back into a single model dimension.
        context_sequence = context_heads.transpose(1, 2).contiguous()  # (batch_size, query_len, heads_count, d_head)
        context = context_sequence.view(batch_size, query_len, d_model)  # (batch_size, query_len, d_model)

        final_output = self.final_projection(context)
        # print('final_output', final_output.shape)

        return final_output

    def scaled_dot_product(self, query_heads, key_heads):
        """

        Args:
             query_heads: (batch_size, heads_count, query_len, d_head)
             key_heads: (batch_size, heads_count, key_len, d_head)
        """
        key_heads_transposed = key_heads.transpose(2, 3)
        dot_product = torch.matmul(query_heads, key_heads_transposed)  # (batch_size, heads_count, query_len, key_len)

        # Scale by sqrt(d_head) to keep the softmax in a stable range.
        attention_weights = dot_product / np.sqrt(self.d_head)

        return attention_weights
class PointwiseFeedForwardNetwork(nn.Module):
    """Two-layer MLP applied independently at every sequence position."""

    def __init__(self, d_ff, d_model, dropout_prob):
        super(PointwiseFeedForwardNetwork, self).__init__()
        stages = [
            nn.Linear(d_model, d_ff),
            nn.Dropout(dropout_prob),
            nn.ReLU(),
            nn.Linear(d_ff, d_model),
            nn.Dropout(dropout_prob),
        ]
        self.feed_forward = nn.Sequential(*stages)

    def forward(self, x):
        """Apply the network to `x` of shape (batch_size, seq_len, d_model)."""
        return self.feed_forward(x)
class DecoderState:
    """Per-decoding-pass cache of projected keys/values for each decoder layer."""

    _MODES = ('self-attention', 'memory-attention')

    def __init__(self):
        self.previous_inputs = torch.tensor([])
        # layer_index -> {'self-attention': {...} | None, 'memory-attention': {...} | None}
        self.layer_caches = defaultdict(lambda: {'self-attention': None, 'memory-attention': None})

    def update_state(self, layer_index, layer_mode, key_projected, value_projected):
        """Store the projected key/value tensors for one layer and mode."""
        self.layer_caches[layer_index][layer_mode] = {
            'key_projected': key_projected,
            'value_projected': value_projected,
        }

    def beam_update(self, positions):
        """Reorder every cached tensor along dim 0 according to `positions`."""
        for layer_index in self.layer_caches:
            for mode in self._MODES:
                mode_cache = self.layer_caches[layer_index][mode]
                if mode_cache is None:
                    continue
                for projection in mode_cache:
                    cached = mode_cache[projection]
                    if cached is not None:
                        cached.data.copy_(cached.data.index_select(0, positions))
| [
"torch.nn.Dropout",
"utils.pad.subsequent_masking",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.Embedding",
"embeddings.PositionalEncoding",
"torch.cat",
"utils.pad.pad_masking",
"collections.defaultdict",
"torch.nn.Softmax",
"torch.nn.Linear",
"torch.zeros",
"torch.matmul",
"torch.tensor",
... | [((344, 462), 'embeddings.PositionalEncoding', 'PositionalEncoding', ([], {'num_embeddings': 'source_vocabulary_size', 'embedding_dim': "config['d_model']", 'dim': "config['d_model']"}), "(num_embeddings=source_vocabulary_size, embedding_dim=\n config['d_model'], dim=config['d_model'])\n", (362, 462), False, 'from embeddings import PositionalEncoding\n'), ((534, 652), 'embeddings.PositionalEncoding', 'PositionalEncoding', ([], {'num_embeddings': 'target_vocabulary_size', 'embedding_dim': "config['d_model']", 'dim': "config['d_model']"}), "(num_embeddings=target_vocabulary_size, embedding_dim=\n config['d_model'], dim=config['d_model'])\n", (552, 652), False, 'from embeddings import PositionalEncoding\n'), ((734, 823), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'source_vocabulary_size', 'embedding_dim': "config['d_model']"}), "(num_embeddings=source_vocabulary_size, embedding_dim=config[\n 'd_model'])\n", (746, 823), False, 'from torch import nn\n'), ((871, 960), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'target_vocabulary_size', 'embedding_dim': "config['d_model']"}), "(num_embeddings=target_vocabulary_size, embedding_dim=config[\n 'd_model'])\n", (883, 960), False, 'from torch import nn\n'), ((2016, 2049), 'utils.pad.pad_masking', 'pad_masking', (['sources', 'sources_len'], {}), '(sources, sources_len)\n', (2027, 2049), False, 'from utils.pad import pad_masking, subsequent_masking\n'), ((2072, 2104), 'utils.pad.pad_masking', 'pad_masking', (['sources', 'inputs_len'], {}), '(sources, inputs_len)\n', (2083, 2104), False, 'from utils.pad import pad_masking, subsequent_masking\n'), ((3565, 3589), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (3575, 3589), False, 'from torch import nn\n'), ((4329, 4389), 'torch.nn.Linear', 'nn.Linear', (['embedding.embedding_dim', 'embedding.num_embeddings'], {}), '(embedding.embedding_dim, embedding.num_embeddings)\n', (4338, 4389), False, 'from torch import 
nn\n'), ((8396, 8441), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(heads_count * self.d_head)'], {}), '(d_model, heads_count * self.d_head)\n', (8405, 8441), False, 'from torch import nn\n'), ((8472, 8517), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(heads_count * self.d_head)'], {}), '(d_model, heads_count * self.d_head)\n', (8481, 8517), False, 'from torch import nn\n'), ((8550, 8595), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(heads_count * self.d_head)'], {}), '(d_model, heads_count * self.d_head)\n', (8559, 8595), False, 'from torch import nn\n'), ((8628, 8673), 'torch.nn.Linear', 'nn.Linear', (['d_model', '(heads_count * self.d_head)'], {}), '(d_model, heads_count * self.d_head)\n', (8637, 8673), False, 'from torch import nn\n'), ((8697, 8721), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (8707, 8721), False, 'from torch import nn\n'), ((8745, 8762), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(3)'}), '(dim=3)\n', (8755, 8762), False, 'from torch import nn\n'), ((11839, 11883), 'torch.matmul', 'torch.matmul', (['attention_dropped', 'value_heads'], {}), '(attention_dropped, value_heads)\n', (11851, 11883), False, 'import torch\n'), ((12662, 12709), 'torch.matmul', 'torch.matmul', (['query_heads', 'key_heads_transposed'], {}), '(query_heads, key_heads_transposed)\n', (12674, 12709), False, 'import torch\n'), ((13473, 13489), 'torch.tensor', 'torch.tensor', (['[]'], {}), '([])\n', (13485, 13489), False, 'import torch\n'), ((13518, 13590), 'collections.defaultdict', 'defaultdict', (["(lambda : {'self-attention': None, 'memory-attention': None})"], {}), "(lambda : {'self-attention': None, 'memory-attention': None})\n", (13529, 13590), False, 'from collections import defaultdict\n'), ((2127, 2153), 'utils.pad.subsequent_masking', 'subsequent_masking', (['inputs'], {}), '(inputs)\n', (2145, 2153), False, 'from utils.pad import pad_masking, subsequent_masking\n'), ((2156, 2187), 'utils.pad.pad_masking', 'pad_masking', 
(['inputs', 'inputs_len'], {}), '(inputs, inputs_len)\n', (2167, 2187), False, 'from utils.pad import pad_masking, subsequent_masking\n'), ((7666, 7692), 'torch.ones', 'torch.ones', (['features_count'], {}), '(features_count)\n', (7676, 7692), False, 'import torch\n'), ((7727, 7754), 'torch.zeros', 'torch.zeros', (['features_count'], {}), '(features_count)\n', (7738, 7754), False, 'import torch\n'), ((12801, 12821), 'numpy.sqrt', 'np.sqrt', (['self.d_head'], {}), '(self.d_head)\n', (12808, 12821), True, 'import numpy as np\n'), ((13073, 13097), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (13082, 13097), False, 'from torch import nn\n'), ((13111, 13135), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (13121, 13135), False, 'from torch import nn\n'), ((13149, 13158), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (13156, 13158), False, 'from torch import nn\n'), ((13172, 13196), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (13181, 13196), False, 'from torch import nn\n'), ((13210, 13234), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (13220, 13234), False, 'from torch import nn\n'), ((9913, 9987), 'torch.cat', 'torch.cat', (["[key_projected, layer_cache[self.mode]['key_projected']]"], {'dim': '(1)'}), "([key_projected, layer_cache[self.mode]['key_projected']], dim=1)\n", (9922, 9987), False, 'import torch\n'), ((10022, 10100), 'torch.cat', 'torch.cat', (["[value_projected, layer_cache[self.mode]['value_projected']]"], {'dim': '(1)'}), "([value_projected, layer_cache[self.mode]['value_projected']], dim=1)\n", (10031, 10100), False, 'import torch\n')] |
#!/usr/bin/env python
# Policy gradient algorithm and agent with neural network policy
# Chapter 2, TensorFlow 2 Reinforcement Learning Cookbook | <NAME>
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import gym
class PolicyNet(keras.Model):
    """Feed-forward policy network emitting a softmax over actions."""

    def __init__(self, action_dim=1):
        super(PolicyNet, self).__init__()
        self.fc1 = layers.Dense(24, activation="relu")
        self.fc2 = layers.Dense(36, activation="relu")
        self.fc3 = layers.Dense(action_dim, activation="softmax")

    def call(self, x):
        """Forward pass: two hidden ReLU layers, then a softmax head."""
        hidden = self.fc2(self.fc1(x))
        return self.fc3(hidden)

    def process(self, observations):
        """Return action probabilities for a batch (uses `call` via predict_on_batch)."""
        return self.predict_on_batch(observations)
class Agent(object):
    def __init__(self, action_dim=1):
        """Agent with a neural-network brain powered policy

        Args:
            action_dim (int): Action dimension
        """
        self.policy_net = PolicyNet(action_dim=action_dim)
        self.optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
        self.gamma = 0.99  # discount factor for future rewards

    def policy(self, observation):
        """Return a sampled action tensor for a single observation."""
        observation = observation.reshape(1, -1)
        observation = tf.convert_to_tensor(observation, dtype=tf.float32)
        action_logits = self.policy_net(observation)
        # Sample from the categorical distribution defined by the policy output.
        action = tf.random.categorical(tf.math.log(action_logits), num_samples=1)
        return action

    def get_action(self, observation):
        """Return a sampled action as a plain numpy scalar."""
        action = self.policy(observation).numpy()
        return action.squeeze()

    def learn(self, states, rewards, actions):
        """Run one REINFORCE update over a completed episode.

        Fix: the previous implementation called ``rewards.reverse()``,
        mutating the caller's list in place; we now iterate ``reversed(rewards)``
        and leave the argument untouched.
        """
        # Accumulate discounted returns from the end of the episode backwards,
        # then flip them into chronological order.
        discounted_rewards = []
        discounted_reward = 0
        for r in reversed(rewards):
            discounted_reward = r + self.gamma * discounted_reward
            discounted_rewards.append(discounted_reward)
        discounted_rewards.reverse()

        for state, reward, action in zip(states, discounted_rewards, actions):
            with tf.GradientTape() as tape:
                action_probabilities = self.policy_net(np.array([state]), training=True)
                loss = self.loss(action_probabilities, action, reward)
            grads = tape.gradient(loss, self.policy_net.trainable_variables)
            self.optimizer.apply_gradients(
                zip(grads, self.policy_net.trainable_variables)
            )

    def loss(self, action_probabilities, action, reward):
        """Policy-gradient loss: -log pi(a|s) * discounted return."""
        dist = tfp.distributions.Categorical(
            probs=action_probabilities, dtype=tf.float32
        )
        log_prob = dist.log_prob(action)
        loss = -log_prob * reward
        return loss
def train(agent: Agent, env: gym.Env, episodes: int, render=True):
    """Train `agent` in `env` for `episodes`

    Args:
        agent (Agent): Agent to train
        env (gym.Env): Environment to train the agent
        episodes (int): Number of episodes to train
        render (bool): True=Enable/False=Disable rendering; Default=True
    """

    for episode in range(episodes):
        state = env.reset()
        total_reward = 0
        states, actions, rewards = [], [], []
        done = False
        while not done:
            action = agent.get_action(state)
            next_state, reward, done, _ = env.step(action)
            # Record the full transition for the end-of-episode update.
            rewards.append(reward)
            states.append(state)
            actions.append(action)
            state = next_state
            total_reward += reward
            if render:
                env.render()
            if done:
                # Episode finished: run one policy-gradient update.
                agent.learn(states, rewards, actions)
                print("\n")
        print(f"Episode#:{episode} ep_reward:{total_reward}", end="\r")
# Script entry point: train a fresh agent on MountainCar.
if __name__ == "__main__":
    agent = Agent()
    episodes = 2  # Increase number of episodes to train
    env = gym.make("MountainCar-v0")
    # Set render=True to visualize Agent's actions in the env
    train(agent, env, episodes, render=False)
    env.close()
| [
"tensorflow.math.log",
"gym.make",
"tensorflow.keras.layers.Dense",
"tensorflow.convert_to_tensor",
"tensorflow_probability.distributions.Categorical",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.GradientTape"
] | [((3953, 3979), 'gym.make', 'gym.make', (['"""MountainCar-v0"""'], {}), "('MountainCar-v0')\n", (3961, 3979), False, 'import gym\n'), ((442, 477), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (454, 477), False, 'from tensorflow.keras import layers\n'), ((497, 532), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(36)'], {'activation': '"""relu"""'}), "(36, activation='relu')\n", (509, 532), False, 'from tensorflow.keras import layers\n'), ((552, 598), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['action_dim'], {'activation': '"""softmax"""'}), "(action_dim, activation='softmax')\n", (564, 598), False, 'from tensorflow.keras import layers\n'), ((1203, 1248), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1227, 1248), True, 'import tensorflow as tf\n'), ((1381, 1432), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['observation'], {'dtype': 'tf.float32'}), '(observation, dtype=tf.float32)\n', (1401, 1432), True, 'import tensorflow as tf\n'), ((2592, 2667), 'tensorflow_probability.distributions.Categorical', 'tfp.distributions.Categorical', ([], {'probs': 'action_probabilities', 'dtype': 'tf.float32'}), '(probs=action_probabilities, dtype=tf.float32)\n', (2621, 2667), True, 'import tensorflow_probability as tfp\n'), ((1525, 1551), 'tensorflow.math.log', 'tf.math.log', (['action_logits'], {}), '(action_logits)\n', (1536, 1551), True, 'import tensorflow as tf\n'), ((2132, 2149), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2147, 2149), True, 'import tensorflow as tf\n'), ((2214, 2231), 'numpy.array', 'np.array', (['[state]'], {}), '([state])\n', (2222, 2231), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import itbl._itbl as _itbl
def uniform_sample_maze(maze_dims, hall_width, wall_width):
    """Sample a maze via loop-erased random walks and return its collision scene.

    Starting from tile (0, 0), repeatedly performs a loop-erased random walk
    from an unvisited tile until it reaches the maze, removing the walls the
    walk crosses (Wilson-style sampling).

    Args:
        maze_dims: (rows, cols) grid size.
        hall_width: width of each corridor.
        wall_width: thickness of each wall rectangle.

    Returns:
        An _itbl.CollisionManager2D holding one Rectangle per remaining wall.
    """
    # Create set of all possible walls.
    # A wall is keyed by the sorted pair of tiles it separates, so each
    # shared wall is stored exactly once.
    walls = set()
    for i in range(maze_dims[0]):
        for j in range(maze_dims[1]):
            p = (i, j)
            q = (i, j - 1)
            walls.add((min(p, q), max(p, q)))
            q = (i - 1, j)
            walls.add((min(p, q), max(p, q)))
            q = (i, j + 1)
            walls.add((min(p, q), max(p, q)))
            q = (i + 1, j)
            walls.add((min(p, q), max(p, q)))
    # Set of tiles to added to the maze.
    maze = set()
    maze.add((0, 0))
    tiles = set()
    for i in range(maze_dims[0]):
        for j in range(maze_dims[1]):
            if not (i, j) in maze:
                tiles.add((i, j))
    # Do loop-erased random walk until all tiles are added to the maze.
    while tiles:
        # Sample random tile.
        r = np.random.choice(len(tiles))
        tile = list(tiles)[r]
        # Random walk until we hit a tile in the maze.
        # `stack` records the walk; loop erasure keeps its entries distinct,
        # so np.prod(maze_dims) slots always suffice.
        stack = np.prod(maze_dims) * [None]
        offset = 0
        stack[offset] = tile
        while True:
            # Sample next tile.
            t = stack[offset]
            d = np.random.randint(0, 4)  # 0:+x, 1:+y, 2:-x, 3:-y
            if d == 0:
                n = (t[0] + 1, t[1])
            if d == 1:
                n = (t[0], t[1] + 1)
            if d == 2:
                n = (t[0] - 1, t[1])
            if d == 3:
                n = (t[0], t[1] - 1)
            # Reject if out of bounds.
            if (n[0] < 0) or (maze_dims[0] <= n[0]):
                continue
            if (n[1] < 0) or (maze_dims[1] <= n[1]):
                continue
            # Add tile to stack.
            offset += 1
            stack[offset] = n
            if n in maze:
                # Add walk to maze.
                # Every wall crossed by the (loop-free) walk is opened up.
                for i in range(offset):
                    maze.add(stack[i])
                    tiles.remove(stack[i])
                    p = stack[i]
                    q = stack[i + 1]
                    walls.remove((min(p, q), max(p, q)))
                break
            else:
                # Erase loops.
                # If the walk revisits stack[i], truncate it back to i.
                for i in range(offset):
                    if n == stack[i]:
                        offset = i
                        break
    # Construct maze.
    manager = _itbl.CollisionManager2D()
    for e in walls:
        v0 = np.array(e[0])
        v1 = np.array(e[1])
        wall = _itbl.Rectangle(hall_width + 2 * wall_width, wall_width, 2, 0.05)
        # Place the wall midway between the two tiles it separates.
        wall.transform()[0:2, 3] = (hall_width + wall_width) * (v0 + v1) / 2.0
        wall.transform()[2, 3] = 0
        dy = v0[1] - v1[1]
        if dy == 0:
            # Tiles differ along x: rotate the wall 90 degrees.
            wall.transform()[0:2, 0:2] = np.array([[0, -1], [1, 0]])
        manager.add(wall)
    return manager
def create_hallway(hall_width, block_width, block_height, center_x):
    """Two stacked blocks of equal size separated vertically by `hall_width`."""
    scene = _itbl.CollisionManager2D()
    lower = _itbl.Rectangle(block_width, block_height, 2, 0.05)
    upper = _itbl.Rectangle(block_width, block_height, 2, 0.05)
    slot_shape = lower.transform()[0:3, 3].shape
    cx = center_x + block_width / 2
    lower.transform()[0:3, 3] = np.array([cx, block_height / 2, 0]).reshape(slot_shape)
    upper.transform()[0:3, 3] = np.array(
        [cx, block_height / 2 + block_height + hall_width, 0]).reshape(slot_shape)
    scene.add(lower)
    scene.add(upper)
    return scene
def corner():
    """Corner scene: a 5x1 horizontal wall and a 1x4 vertical wall."""
    scene = _itbl.CollisionManager2D()
    for width, height, (cx, cy) in ((5, 1, (1.5, -0.5)), (1, 4, (-0.5, 2))):
        block = _itbl.Rectangle(width, height, 2, 0.05)
        block.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(block.transform()[0:3, 3].shape)
        scene.add(block)
    return scene
def table():
    """Single 5x1 block centered at (1.5, -0.5).

    NOTE(review): a later `table` definition in this file shadows this one;
    consider renaming one of them.
    """
    scene = _itbl.CollisionManager2D()
    top = _itbl.Rectangle(5, 1, 2, 0.05)
    top.transform()[0:3, 3] = np.array([1.5, -0.5, 0]).reshape(top.transform()[0:3, 3].shape)
    scene.add(top)
    return scene
def wall():
    """Scene with an 8x3 block at (4, -1.5) and an 8x10 block at (-4, 2)."""
    scene = _itbl.CollisionManager2D()
    for width, height, (cx, cy) in ((8, 3, (4, -1.5)), (8, 10, (-4, 2))):
        block = _itbl.Rectangle(width, height, 2, 0.05)
        block.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(block.transform()[0:3, 3].shape)
        scene.add(block)
    return scene
def table():
    """Single 20x3 block centered at (0, -1.5).

    NOTE(review): this redefines the earlier, smaller `table` in this file.
    """
    scene = _itbl.CollisionManager2D()
    top = _itbl.Rectangle(20, 3, 2, 0.05)
    top.transform()[0:3, 3] = np.array([0, -1.5, 0]).reshape(top.transform()[0:3, 3].shape)
    scene.add(top)
    return scene
def obstacle_course():
    """Scene with an 8x2 base block and a small 1x0.5 obstacle above it."""
    scene = _itbl.CollisionManager2D()
    for width, height, (cx, cy) in ((8, 2, (0, 0)), (1, 0.5, (0, 1.25))):
        block = _itbl.Rectangle(width, height, 2, 0.05)
        block.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(block.transform()[0:3, 3].shape)
        scene.add(block)
    return scene
def peg_in_hole_v():
    """Peg-in-hole scene: two 3x4 blocks flanking a 1x2 block below."""
    scene = _itbl.CollisionManager2D()
    specs = (
        (3, 4, (-2, -2)),
        (3, 4, (2, -2)),
        (1, 2, (0, -3)),
    )
    for width, height, (cx, cy) in specs:
        block = _itbl.Rectangle(width, height, 2, 0.05)
        block.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(block.transform()[0:3, 3].shape)
        scene.add(block)
    return scene
def peg_in_hole_p():
    """Peg-in-hole scene: 5x3, 3x1 and 10x3 blocks at fixed offsets."""
    scene = _itbl.CollisionManager2D()
    specs = (
        (5, 3, (-2.5, 2.5)),
        (3, 1, (-3.5, 0.5)),
        (10, 3, (0, -1.5)),
    )
    for width, height, (cx, cy) in specs:
        block = _itbl.Rectangle(width, height, 2, 0.05)
        block.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(block.transform()[0:3, 3].shape)
        scene.add(block)
    return scene
def book():
    """Build a 2-D collision scene with a single 6x4 block at the origin.

    Returns
    -------
    _itbl.CollisionManager2D
        Manager containing one 6x4 rectangle centered at (0, 0).
    """
    manager = _itbl.CollisionManager2D()
    slab = _itbl.Rectangle(6, 4, 2, 0.05)
    pose = slab.transform()
    pose[0:3, 3] = np.array([0, 0, 0]).reshape(pose[0:3, 3].shape)
    manager.add(slab)
    return manager
def unpacking():
    """Build an "unpacking" collision scene from three boxes.

    Returns
    -------
    _itbl.CollisionManager2D
        Manager containing a 4x1 box at (0, -1.5), a 1x2 box at
        (-1.5, 0), and a 2x2 box at (1, 0), added in that order.
    """
    manager = _itbl.CollisionManager2D()
    # (width, height, center_x, center_y) for each box, in add-order.
    for width, height, cx, cy in ((4, 1, 0, -1.5), (1, 2, -1.5, 0), (2, 2, 1, 0)):
        box = _itbl.Rectangle(width, height, 2, 0.05)
        slot = box.transform()[0:3, 3]
        box.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(slot.shape)
        manager.add(box)
    return manager
def pushing():
    """Build a "pushing" collision scene enclosed by six rectangles.

    Returns
    -------
    _itbl.CollisionManager2D
        Manager containing six walls forming the pushing arena, added
        in a fixed order.
    """
    manager = _itbl.CollisionManager2D()
    # Bug fix: walls 2-6 previously reshaped against wall1's transform
    # (copy-paste slip); each wall now uses its own transform. The shapes
    # happen to match, so the resulting poses are unchanged.
    # (width, height, center_x, center_y) for each wall, in add-order.
    specs = (
        (9, 1.5, 0, -3.25),
        (1.5, 6.5, -3.75, 0.75),
        (4, 2.5, 1, -1.25),
        (1.5, 6.5, 3.75, 0.75),
        (2.4, 2.7, 0, 2.65),
        (9, 1.5, 0, 4.75),
    )
    for width, height, cx, cy in specs:
        wall = _itbl.Rectangle(width, height, 2, 0.05)
        wall.transform()[0:3, 3] = np.array([cx, cy, 0]).reshape(wall.transform()[0:3, 3].shape)
        manager.add(wall)
    return manager
"itbl._itbl.CollisionManager2D",
"numpy.prod",
"numpy.random.randint",
"numpy.array",
"itbl._itbl.Rectangle"
] | [((2373, 2399), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (2397, 2399), True, 'import itbl._itbl as _itbl\n'), ((2943, 2969), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (2967, 2969), True, 'import itbl._itbl as _itbl\n'), ((2982, 3033), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['block_width', 'block_height', '(2)', '(0.05)'], {}), '(block_width, block_height, 2, 0.05)\n', (2997, 3033), True, 'import itbl._itbl as _itbl\n'), ((3046, 3097), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['block_width', 'block_height', '(2)', '(0.05)'], {}), '(block_width, block_height, 2, 0.05)\n', (3061, 3097), True, 'import itbl._itbl as _itbl\n'), ((3517, 3543), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (3541, 3543), True, 'import itbl._itbl as _itbl\n'), ((3557, 3587), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(5)', '(1)', '(2)', '(0.05)'], {}), '(5, 1, 2, 0.05)\n', (3572, 3587), True, 'import itbl._itbl as _itbl\n'), ((3600, 3630), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1)', '(4)', '(2)', '(0.05)'], {}), '(1, 4, 2, 0.05)\n', (3615, 3630), True, 'import itbl._itbl as _itbl\n'), ((3921, 3947), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (3945, 3947), True, 'import itbl._itbl as _itbl\n'), ((3961, 3991), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(5)', '(1)', '(2)', '(0.05)'], {}), '(5, 1, 2, 0.05)\n', (3976, 3991), True, 'import itbl._itbl as _itbl\n'), ((4162, 4188), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (4186, 4188), True, 'import itbl._itbl as _itbl\n'), ((4202, 4232), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(8)', '(3)', '(2)', '(0.05)'], {}), '(8, 3, 2, 0.05)\n', (4217, 4232), True, 'import itbl._itbl as _itbl\n'), ((4245, 4276), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(8)', '(10)', '(2)', '(0.05)'], {}), '(8, 10, 2, 0.05)\n', (4260, 4276), True, 'import 
itbl._itbl as _itbl\n'), ((4564, 4590), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (4588, 4590), True, 'import itbl._itbl as _itbl\n'), ((4604, 4635), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(20)', '(3)', '(2)', '(0.05)'], {}), '(20, 3, 2, 0.05)\n', (4619, 4635), True, 'import itbl._itbl as _itbl\n'), ((4814, 4840), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (4838, 4840), True, 'import itbl._itbl as _itbl\n'), ((4854, 4884), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(8)', '(2)', '(2)', '(0.05)'], {}), '(8, 2, 2, 0.05)\n', (4869, 4884), True, 'import itbl._itbl as _itbl\n'), ((4897, 4929), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1)', '(0.5)', '(2)', '(0.05)'], {}), '(1, 0.5, 2, 0.05)\n', (4912, 4929), True, 'import itbl._itbl as _itbl\n'), ((5224, 5250), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (5248, 5250), True, 'import itbl._itbl as _itbl\n'), ((5264, 5294), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(3)', '(4)', '(2)', '(0.05)'], {}), '(3, 4, 2, 0.05)\n', (5279, 5294), True, 'import itbl._itbl as _itbl\n'), ((5307, 5337), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(3)', '(4)', '(2)', '(0.05)'], {}), '(3, 4, 2, 0.05)\n', (5322, 5337), True, 'import itbl._itbl as _itbl\n'), ((5350, 5380), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1)', '(2)', '(2)', '(0.05)'], {}), '(1, 2, 2, 0.05)\n', (5365, 5380), True, 'import itbl._itbl as _itbl\n'), ((5793, 5819), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (5817, 5819), True, 'import itbl._itbl as _itbl\n'), ((5833, 5863), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(5)', '(3)', '(2)', '(0.05)'], {}), '(5, 3, 2, 0.05)\n', (5848, 5863), True, 'import itbl._itbl as _itbl\n'), ((5876, 5906), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(3)', '(1)', '(2)', '(0.05)'], {}), '(3, 1, 2, 0.05)\n', (5891, 5906), True, 'import itbl._itbl as _itbl\n'), ((5919, 
5950), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(10)', '(3)', '(2)', '(0.05)'], {}), '(10, 3, 2, 0.05)\n', (5934, 5950), True, 'import itbl._itbl as _itbl\n'), ((6362, 6388), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (6386, 6388), True, 'import itbl._itbl as _itbl\n'), ((6401, 6431), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(6)', '(4)', '(2)', '(0.05)'], {}), '(6, 4, 2, 0.05)\n', (6416, 6431), True, 'import itbl._itbl as _itbl\n'), ((6601, 6627), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (6625, 6627), True, 'import itbl._itbl as _itbl\n'), ((6641, 6671), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(4)', '(1)', '(2)', '(0.05)'], {}), '(4, 1, 2, 0.05)\n', (6656, 6671), True, 'import itbl._itbl as _itbl\n'), ((6684, 6714), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1)', '(2)', '(2)', '(0.05)'], {}), '(1, 2, 2, 0.05)\n', (6699, 6714), True, 'import itbl._itbl as _itbl\n'), ((6727, 6757), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(2)', '(2)', '(2)', '(0.05)'], {}), '(2, 2, 2, 0.05)\n', (6742, 6757), True, 'import itbl._itbl as _itbl\n'), ((7165, 7191), 'itbl._itbl.CollisionManager2D', '_itbl.CollisionManager2D', ([], {}), '()\n', (7189, 7191), True, 'import itbl._itbl as _itbl\n'), ((7204, 7236), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(9)', '(1.5)', '(2)', '(0.05)'], {}), '(9, 1.5, 2, 0.05)\n', (7219, 7236), True, 'import itbl._itbl as _itbl\n'), ((7369, 7403), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1.5)', '(6.5)', '(2)', '(0.05)'], {}), '(1.5, 6.5, 2, 0.05)\n', (7384, 7403), True, 'import itbl._itbl as _itbl\n'), ((7539, 7571), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(4)', '(2.5)', '(2)', '(0.05)'], {}), '(4, 2.5, 2, 0.05)\n', (7554, 7571), True, 'import itbl._itbl as _itbl\n'), ((7704, 7738), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(1.5)', '(6.5)', '(2)', '(0.05)'], {}), '(1.5, 6.5, 2, 0.05)\n', (7719, 7738), True, 'import itbl._itbl as _itbl\n'), 
((7873, 7907), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(2.4)', '(2.7)', '(2)', '(0.05)'], {}), '(2.4, 2.7, 2, 0.05)\n', (7888, 7907), True, 'import itbl._itbl as _itbl\n'), ((8039, 8071), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(9)', '(1.5)', '(2)', '(0.05)'], {}), '(9, 1.5, 2, 0.05)\n', (8054, 8071), True, 'import itbl._itbl as _itbl\n'), ((2433, 2447), 'numpy.array', 'np.array', (['e[0]'], {}), '(e[0])\n', (2441, 2447), True, 'import numpy as np\n'), ((2461, 2475), 'numpy.array', 'np.array', (['e[1]'], {}), '(e[1])\n', (2469, 2475), True, 'import numpy as np\n'), ((2492, 2557), 'itbl._itbl.Rectangle', '_itbl.Rectangle', (['(hall_width + 2 * wall_width)', 'wall_width', '(2)', '(0.05)'], {}), '(hall_width + 2 * wall_width, wall_width, 2, 0.05)\n', (2507, 2557), True, 'import itbl._itbl as _itbl\n'), ((1088, 1106), 'numpy.prod', 'np.prod', (['maze_dims'], {}), '(maze_dims)\n', (1095, 1106), True, 'import numpy as np\n'), ((1262, 1285), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1279, 1285), True, 'import numpy as np\n'), ((2761, 2788), 'numpy.array', 'np.array', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (2769, 2788), True, 'import numpy as np\n'), ((3131, 3190), 'numpy.array', 'np.array', (['[center_x + block_width / 2, block_height / 2, 0]'], {}), '([center_x + block_width / 2, block_height / 2, 0])\n', (3139, 3190), True, 'import numpy as np\n'), ((3273, 3364), 'numpy.array', 'np.array', (['[center_x + block_width / 2, block_height / 2 + block_height + hall_width, 0]'], {}), '([center_x + block_width / 2, block_height / 2 + block_height +\n hall_width, 0])\n', (3281, 3364), True, 'import numpy as np\n'), ((3664, 3688), 'numpy.array', 'np.array', (['[1.5, -0.5, 0]'], {}), '([1.5, -0.5, 0])\n', (3672, 3688), True, 'import numpy as np\n'), ((3762, 3784), 'numpy.array', 'np.array', (['[-0.5, 2, 0]'], {}), '([-0.5, 2, 0])\n', (3770, 3784), True, 'import numpy as np\n'), ((4024, 4048), 'numpy.array', 
'np.array', (['[1.5, -0.5, 0]'], {}), '([1.5, -0.5, 0])\n', (4032, 4048), True, 'import numpy as np\n'), ((4310, 4332), 'numpy.array', 'np.array', (['[4, -1.5, 0]'], {}), '([4, -1.5, 0])\n', (4318, 4332), True, 'import numpy as np\n'), ((4406, 4426), 'numpy.array', 'np.array', (['[-4, 2, 0]'], {}), '([-4, 2, 0])\n', (4414, 4426), True, 'import numpy as np\n'), ((4668, 4690), 'numpy.array', 'np.array', (['[0, -1.5, 0]'], {}), '([0, -1.5, 0])\n', (4676, 4690), True, 'import numpy as np\n'), ((4963, 4982), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4971, 4982), True, 'import numpy as np\n'), ((5056, 5078), 'numpy.array', 'np.array', (['[0, 1.25, 0]'], {}), '([0, 1.25, 0])\n', (5064, 5078), True, 'import numpy as np\n'), ((5414, 5435), 'numpy.array', 'np.array', (['[-2, -2, 0]'], {}), '([-2, -2, 0])\n', (5422, 5435), True, 'import numpy as np\n'), ((5510, 5530), 'numpy.array', 'np.array', (['[2, -2, 0]'], {}), '([2, -2, 0])\n', (5518, 5530), True, 'import numpy as np\n'), ((5604, 5624), 'numpy.array', 'np.array', (['[0, -3, 0]'], {}), '([0, -3, 0])\n', (5612, 5624), True, 'import numpy as np\n'), ((5984, 6008), 'numpy.array', 'np.array', (['[-2.5, 2.5, 0]'], {}), '([-2.5, 2.5, 0])\n', (5992, 6008), True, 'import numpy as np\n'), ((6083, 6107), 'numpy.array', 'np.array', (['[-3.5, 0.5, 0]'], {}), '([-3.5, 0.5, 0])\n', (6091, 6107), True, 'import numpy as np\n'), ((6181, 6203), 'numpy.array', 'np.array', (['[0, -1.5, 0]'], {}), '([0, -1.5, 0])\n', (6189, 6203), True, 'import numpy as np\n'), ((6464, 6483), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (6472, 6483), True, 'import numpy as np\n'), ((6791, 6813), 'numpy.array', 'np.array', (['[0, -1.5, 0]'], {}), '([0, -1.5, 0])\n', (6799, 6813), True, 'import numpy as np\n'), ((6888, 6910), 'numpy.array', 'np.array', (['[-1.5, 0, 0]'], {}), '([-1.5, 0, 0])\n', (6896, 6910), True, 'import numpy as np\n'), ((6984, 7003), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 
0])\n', (6992, 7003), True, 'import numpy as np\n'), ((7269, 7292), 'numpy.array', 'np.array', (['[0, -3.25, 0]'], {}), '([0, -3.25, 0])\n', (7277, 7292), True, 'import numpy as np\n'), ((7436, 7462), 'numpy.array', 'np.array', (['[-3.75, 0.75, 0]'], {}), '([-3.75, 0.75, 0])\n', (7444, 7462), True, 'import numpy as np\n'), ((7604, 7627), 'numpy.array', 'np.array', (['[1, -1.25, 0]'], {}), '([1, -1.25, 0])\n', (7612, 7627), True, 'import numpy as np\n'), ((7771, 7796), 'numpy.array', 'np.array', (['[3.75, 0.75, 0]'], {}), '([3.75, 0.75, 0])\n', (7779, 7796), True, 'import numpy as np\n'), ((7940, 7962), 'numpy.array', 'np.array', (['[0, 2.65, 0]'], {}), '([0, 2.65, 0])\n', (7948, 7962), True, 'import numpy as np\n'), ((8104, 8126), 'numpy.array', 'np.array', (['[0, 4.75, 0]'], {}), '([0, 4.75, 0])\n', (8112, 8126), True, 'import numpy as np\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' from wikipedia: dx/dt = sigma*(y-x) ; dy/dt = x*(rho-z)-y ; dz/dt = x*y-beta*z '''
import numpy as np
def initialize(self, runInfoDict, inputFiles):
    """Attach the canonical Lorenz-63 parameters to the model object.

    Sets self.sigma = 10, self.rho = 28, self.beta = 8/3. The
    runInfoDict and inputFiles arguments are part of the external-model
    hook signature and are not used here.
    """
    self.sigma = 10.0
    self.rho = 28.0
    self.beta = 8.0 / 3.0
def run(self, Input):
    """Integrate the Lorenz system with forward Euler for 1 time unit.

    Input must provide keys 'x0', 'y0', 'z0' (initial conditions).
    Fills self.x, self.y, self.z and self.time with 1000 samples at a
    fixed step of 0.001; self.sigma, self.rho, self.beta must already
    be set (see initialize).
    """
    horizon = 1.0
    dt = 0.001
    n_steps = int(horizon / dt)
    self.x = np.zeros(n_steps)
    self.y = np.zeros(n_steps)
    self.z = np.zeros(n_steps)
    self.time = np.zeros(n_steps)
    self.x0 = Input['x0']
    self.y0 = Input['y0']
    self.z0 = Input['z0']
    self.x[0] = self.x0
    self.y[0] = self.y0
    self.z[0] = self.z0
    self.time[0] = 0.0
    for k in range(n_steps - 1):
        xk, yk, zk = self.x[k], self.y[k], self.z[k]
        self.time[k + 1] = self.time[k] + dt
        # Explicit Euler update of the three coupled ODEs.
        self.x[k + 1] = xk + self.sigma * (yk - xk) * dt
        self.y[k + 1] = yk + (xk * (self.rho - zk) - yk) * dt
        self.z[k + 1] = zk + (xk * yk - self.beta * zk) * dt
| [
"numpy.zeros"
] | [((931, 956), 'numpy.zeros', 'np.zeros', (['numberTimeSteps'], {}), '(numberTimeSteps)\n', (939, 956), True, 'import numpy as np\n'), ((971, 996), 'numpy.zeros', 'np.zeros', (['numberTimeSteps'], {}), '(numberTimeSteps)\n', (979, 996), True, 'import numpy as np\n'), ((1011, 1036), 'numpy.zeros', 'np.zeros', (['numberTimeSteps'], {}), '(numberTimeSteps)\n', (1019, 1036), True, 'import numpy as np\n'), ((1051, 1076), 'numpy.zeros', 'np.zeros', (['numberTimeSteps'], {}), '(numberTimeSteps)\n', (1059, 1076), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import DeepSparseCoding.tf1x.utils.plot_functions as pf
import DeepSparseCoding.tf1x.utils.data_processing as dp
import DeepSparseCoding.tf1x.utils.entropy_functions as ef
from DeepSparseCoding.tf1x.models.ae_model import AeModel
from DeepSparseCoding.tf1x.modules.vae_module import VaeModule
from DeepSparseCoding.tf1x.modules.activations import activation_picker
class VaeModel(AeModel):
  def __init__(self):
    """
    Variational Autoencoder using Mixture of Gaussians
    <NAME> and <NAME> (2014) - "Auto-encoding variational bayes."
    https://arxiv.org/pdf/1312.6114.pdf
    Variational sparse coding
    <NAME>, <NAME>, <NAME> (2018) - Variational Sparse Coding
    https://openreview.net/pdf?id=SkeJ6iR9Km
    Sparse Coding Variational Autoencoders
    <NAME>, <NAME>, <NAME> (2018) - Sparse-Coding Variational Auto-Encoders
    https://www.biorxiv.org/content/biorxiv/early/2018/08/29/399246.full.pdf
    """
    super(VaeModel, self).__init__() # will call super.load_params() and super.ae_load_params()
  def load_params(self, params):
    """Load all model parameters: AE base params first, then VAE-specific ones."""
    super(VaeModel, self).load_params(params)
    self.vae_load_params(params) # only vae-specific params
  def vae_load_params(self, params):
    """Resolve the mean/variance branch activation functions and latent size.

    Reads vae_mean_activation_functions, vae_var_activation_functions and
    vae_mean_channels from the params object; the latent dimensionality is
    taken as the last mean-branch channel count.
    """
    self.vae_mean_act_funcs = [activation_picker(act_func_str)
      for act_func_str in self.params.vae_mean_activation_functions]
    self.vae_var_act_funcs = [activation_picker(act_func_str)
      for act_func_str in self.params.vae_var_activation_functions]
    self.num_latent = self.params.vae_mean_channels[-1]
  def build_module(self, input_node):
    """Construct and return the VaeModule wired to input_node.

    All architecture hyperparameters are forwarded from self.params; the
    module is created under the "vae" variable scope.
    """
    module = VaeModule(input_node, self.params.ae_layer_types, self.params.ae_enc_channels,
      self.params.ae_dec_channels, self.params.ae_patch_size, self.params.ae_conv_strides,
      self.vae_mean_act_funcs, self.params.vae_mean_layer_types,
      self.params.vae_mean_channels, self.params.vae_mean_patch_size,
      self.params.vae_mean_conv_strides, self.params.vae_mean_dropout,
      self.vae_var_act_funcs, self.params.vae_var_layer_types, self.params.vae_var_channels,
      self.params.vae_var_patch_size, self.params.vae_var_conv_strides, self.params.vae_var_dropout,
      self.w_decay_mult, self.w_norm_mult, self.kld_mult, self.act_funcs,
      self.ae_dropout_keep_probs, self.params.tie_dec_weights, self.params.noise_level,
      self.params.prior_params, self.params.w_init_type,
      variable_scope="vae")
    return module
  def build_graph_from_input(self, input_node):
    """Build the TensorFlow graph object"""
    # Add the KLD-weight placeholder before delegating graph construction
    # to the AeModel base class.
    with tf.device(self.params.device):
      with self.graph.as_default():
        with tf.compat.v1.variable_scope("auto_placeholders") as scope:
          self.kld_mult = tf.compat.v1.placeholder(tf.float32, shape=(), name="kld_mult")
    super(VaeModel, self).build_graph_from_input(input_node)
  def get_encodings(self):
    """Return the latent activation tensor of the VAE module."""
    return self.module.act
  def get_total_loss(self):
    """Return the module's combined training loss tensor."""
    return self.module.total_loss
  def generate_update_dict(self, input_data, input_labels=None, batch_step=0):
    """
    Inputs:
      input_data: data object containing the current image batch
      input_labels: data object containing the current label batch
      batch_step: current batch number within the schedule
    """
    update_dict = super(VaeModel, self).generate_update_dict(input_data, input_labels, batch_step)
    feed_dict = self.get_feed_dict(input_data, input_labels)
    # Evaluate the KL-divergence ("latent") loss term and append it to the
    # base-class statistics.
    latent_loss = tf.compat.v1.get_default_session().run(self.module.loss_dict["latent_loss"], feed_dict)
    stat_dict = {"latent_loss":latent_loss}
    update_dict.update(stat_dict)
    return update_dict
  def generate_plots(self, input_data, input_labels=None):
    """
    Plot weights, reconstruction, and gradients
    Inputs:
      input_data: data object containing the current image batch
      input_labels: data object containing the current label batch
    """
    super(VaeModel, self).generate_plots(input_data, input_labels)
    feed_dict = self.get_feed_dict(input_data, input_labels)
    eval_list = [self.global_step, self.module.act]
    if self.params.noise_level > 0.0:
      eval_list += [self.module.corrupt_data]
    eval_out = tf.compat.v1.get_default_session().run(eval_list, feed_dict)
    current_step = str(eval_out[0])
    latent_code = eval_out[1]
    if self.params.noise_level > 0.0:
      corrupt_data = eval_out[2]
    filename_suffix = "_v"+self.params.version+"_"+current_step.zfill(5)+".png"
    #TODO histogram with large bins is broken
    #fig = pf.plot_activity_hist(b_enc_std, title="Encoding Bias Std Histogram",
    #  save_filename=(self.disp_dir+"b_enc_std_hist_v"+self.version+"-"
    #  +current_step.zfill(5)+".png"))
    latent_layer = self.module.num_enc_layers-1
    if self.params.noise_level > 0.0:
      corrupt_data = dp.reshape_data(corrupt_data, flatten=False)[0]
      fig = pf.plot_data_tiled(corrupt_data, normalize=False,
        title="Corrupted Images at step "+current_step,
        save_filename=self.params.disp_dir+"corrupt_images"+filename_suffix)
    # Plot generated digits
    # Sample standard-normal latent codes and decode them into images.
    # NOTE(review): self.latent_input and compute_recon_from_placeholder are
    # not defined in this class — presumably provided by AeModel; confirm.
    latent_shape = latent_code.shape[1:]
    randoms = [np.random.normal(0, 1, latent_shape) for _ in range(self.params.batch_size)]
    feed_dict[self.latent_input] = np.stack(randoms, axis=0)
    feed_dict[self.ae_dropout_keep_probs] = [1.0] * len(self.params.ae_dropout)
    imgs = tf.compat.v1.get_default_session().run(self.compute_recon_from_placeholder(), feed_dict)
    imgs = imgs.reshape(imgs.shape[0], self.params.num_edge_pixels, self.params.num_edge_pixels,
      self.params.num_data_channels)
    #if imgs.ndim == 2:
    #  imgs = np.stack([np.reshape(imgs[i], [28, 28, 1]) for i in range(len(imgs))], axis=0)
    # Min-max normalize to [0, 1] for display.
    imgs = (imgs - np.min(imgs)) / (np.max(imgs) - np.min(imgs))
    gen_imgs_fig = pf.plot_data_tiled(imgs, normalize=False,
      title="Generated images", vmin=0, vmax=1,
      save_filename=(self.params.disp_dir+"generated_images"
      +"_v"+self.params.version+"_"+str(current_step).zfill(5)+".png"))
    if self.params.ae_layer_types[-1] == "fc":
      # display a 30x30 2D manifold of digits
      # Only the first two latent dimensions are swept; the rest are zero.
      n = 30
      digit_size = int(np.sqrt(self.params.num_pixels))
      figure_img = np.zeros((digit_size * n, digit_size * n))
      # linearly spaced coordinates corresponding to the 2D plot
      # of digit classes in the latent space
      grid_x = np.linspace(-4, 4, n)
      grid_y = np.linspace(-4, 4, n)[::-1]
      num_z = self.num_latent
      for i, yi in enumerate(grid_y):
        for j, xi in enumerate(grid_x):
          z_sample = np.array([[xi, yi]+[0.0]*(num_z-2)])
          feed_dict[self.latent_input] = z_sample
          x_decoded = tf.compat.v1.get_default_session().run(self.compute_recon_from_placeholder(),
            feed_dict)
          digit = x_decoded[0].reshape(digit_size, digit_size)
          figure_img[i * digit_size: (i + 1) * digit_size,
            j * digit_size: (j + 1) * digit_size] = digit
      fig, ax = plt.subplots(1, figsize=(10, 10))
      start_range = digit_size // 2
      end_range = n * digit_size + start_range + 1
      pixel_range = np.arange(start_range, end_range, digit_size)
      sample_range_x = np.round(grid_x, 1)
      sample_range_y = np.round(grid_y, 1)
      ax.set_xticks(pixel_range)
      ax.set_xticklabels(sample_range_x)
      ax.set_yticks(pixel_range)
      ax.set_yticklabels(sample_range_y)
      ax.set_xlabel("latent[0]", fontsize=16)
      ax.set_ylabel("latent[1]", fontsize=16)
      ax.set_title("Generated Images from Latent Interpolation", fontsize=16)
      ax.imshow(figure_img, cmap='Greys_r')
      fig.savefig(self.params.disp_dir+"generated_latent_interpolation"+filename_suffix)
      plt.close(fig)
    if input_labels is not None and self.params.ae_layer_types[-1] == "fc":
      # Scatter the first two latent dimensions, colored by label.
      z_mean = tf.compat.v1.get_default_session().run(self.get_encodings(), feed_dict)
      fig, ax = plt.subplots(1, figsize=(12, 10))
      sc = ax.scatter(z_mean[:, 0], z_mean[:, 1], c=dp.one_hot_to_dense(input_labels))
      fig.colorbar(sc)
      ax.set_xlabel("latent[0]", fontsize=16)
      ax.set_ylabel("latent[1]", fontsize=16)
      ax.set_title("Latent Encoding of Labeled Examples", fontsize=16)
      fig.savefig(self.params.disp_dir+"latent_enc"+filename_suffix)
      plt.close(fig)
| [
"DeepSparseCoding.tf1x.modules.vae_module.VaeModule",
"numpy.arange",
"numpy.random.normal",
"numpy.round",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.placeholder",
"matplotlib.pyplot.close",
"DeepSparseCoding.tf1x.utils.plot_functions.plot_data_tiled",
"numpy.max",
"DeepSparseCo... | [((1645, 2453), 'DeepSparseCoding.tf1x.modules.vae_module.VaeModule', 'VaeModule', (['input_node', 'self.params.ae_layer_types', 'self.params.ae_enc_channels', 'self.params.ae_dec_channels', 'self.params.ae_patch_size', 'self.params.ae_conv_strides', 'self.vae_mean_act_funcs', 'self.params.vae_mean_layer_types', 'self.params.vae_mean_channels', 'self.params.vae_mean_patch_size', 'self.params.vae_mean_conv_strides', 'self.params.vae_mean_dropout', 'self.vae_var_act_funcs', 'self.params.vae_var_layer_types', 'self.params.vae_var_channels', 'self.params.vae_var_patch_size', 'self.params.vae_var_conv_strides', 'self.params.vae_var_dropout', 'self.w_decay_mult', 'self.w_norm_mult', 'self.kld_mult', 'self.act_funcs', 'self.ae_dropout_keep_probs', 'self.params.tie_dec_weights', 'self.params.noise_level', 'self.params.prior_params', 'self.params.w_init_type'], {'variable_scope': '"""vae"""'}), "(input_node, self.params.ae_layer_types, self.params.\n ae_enc_channels, self.params.ae_dec_channels, self.params.ae_patch_size,\n self.params.ae_conv_strides, self.vae_mean_act_funcs, self.params.\n vae_mean_layer_types, self.params.vae_mean_channels, self.params.\n vae_mean_patch_size, self.params.vae_mean_conv_strides, self.params.\n vae_mean_dropout, self.vae_var_act_funcs, self.params.\n vae_var_layer_types, self.params.vae_var_channels, self.params.\n vae_var_patch_size, self.params.vae_var_conv_strides, self.params.\n vae_var_dropout, self.w_decay_mult, self.w_norm_mult, self.kld_mult,\n self.act_funcs, self.ae_dropout_keep_probs, self.params.tie_dec_weights,\n self.params.noise_level, self.params.prior_params, self.params.\n w_init_type, variable_scope='vae')\n", (1654, 2453), False, 'from DeepSparseCoding.tf1x.modules.vae_module import VaeModule\n'), ((5267, 5292), 'numpy.stack', 'np.stack', (['randoms'], {'axis': '(0)'}), '(randoms, axis=0)\n', (5275, 5292), True, 'import numpy as np\n'), ((1306, 1337), 
'DeepSparseCoding.tf1x.modules.activations.activation_picker', 'activation_picker', (['act_func_str'], {}), '(act_func_str)\n', (1323, 1337), False, 'from DeepSparseCoding.tf1x.modules.activations import activation_picker\n'), ((1437, 1468), 'DeepSparseCoding.tf1x.modules.activations.activation_picker', 'activation_picker', (['act_func_str'], {}), '(act_func_str)\n', (1454, 1468), False, 'from DeepSparseCoding.tf1x.modules.activations import activation_picker\n'), ((2582, 2611), 'tensorflow.device', 'tf.device', (['self.params.device'], {}), '(self.params.device)\n', (2591, 2611), True, 'import tensorflow as tf\n'), ((4888, 5070), 'DeepSparseCoding.tf1x.utils.plot_functions.plot_data_tiled', 'pf.plot_data_tiled', (['corrupt_data'], {'normalize': '(False)', 'title': "('Corrupted Images at step ' + current_step)", 'save_filename': "(self.params.disp_dir + 'corrupt_images' + filename_suffix)"}), "(corrupt_data, normalize=False, title=\n 'Corrupted Images at step ' + current_step, save_filename=self.params.\n disp_dir + 'corrupt_images' + filename_suffix)\n", (4906, 5070), True, 'import DeepSparseCoding.tf1x.utils.plot_functions as pf\n'), ((5155, 5191), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'latent_shape'], {}), '(0, 1, latent_shape)\n', (5171, 5191), True, 'import numpy as np\n'), ((6212, 6254), 'numpy.zeros', 'np.zeros', (['(digit_size * n, digit_size * n)'], {}), '((digit_size * n, digit_size * n))\n', (6220, 6254), True, 'import numpy as np\n'), ((6380, 6401), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', 'n'], {}), '(-4, 4, n)\n', (6391, 6401), True, 'import numpy as np\n'), ((6980, 7013), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 10)'}), '(1, figsize=(10, 10))\n', (6992, 7013), True, 'import matplotlib.pyplot as plt\n'), ((7121, 7166), 'numpy.arange', 'np.arange', (['start_range', 'end_range', 'digit_size'], {}), '(start_range, end_range, digit_size)\n', (7130, 7166), True, 'import numpy as np\n'), ((7190, 
7209), 'numpy.round', 'np.round', (['grid_x', '(1)'], {}), '(grid_x, 1)\n', (7198, 7209), True, 'import numpy as np\n'), ((7233, 7252), 'numpy.round', 'np.round', (['grid_y', '(1)'], {}), '(grid_y, 1)\n', (7241, 7252), True, 'import numpy as np\n'), ((7710, 7724), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (7719, 7724), True, 'import matplotlib.pyplot as plt\n'), ((7904, 7937), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(12, 10)'}), '(1, figsize=(12, 10))\n', (7916, 7937), True, 'import matplotlib.pyplot as plt\n'), ((8286, 8300), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8295, 8300), True, 'import matplotlib.pyplot as plt\n'), ((3468, 3502), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (3500, 3502), True, 'import tensorflow as tf\n'), ((4204, 4238), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (4236, 4238), True, 'import tensorflow as tf\n'), ((4828, 4872), 'DeepSparseCoding.tf1x.utils.data_processing.reshape_data', 'dp.reshape_data', (['corrupt_data'], {'flatten': '(False)'}), '(corrupt_data, flatten=False)\n', (4843, 4872), True, 'import DeepSparseCoding.tf1x.utils.data_processing as dp\n'), ((5384, 5418), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (5416, 5418), True, 'import tensorflow as tf\n'), ((5743, 5755), 'numpy.min', 'np.min', (['imgs'], {}), '(imgs)\n', (5749, 5755), True, 'import numpy as np\n'), ((5760, 5772), 'numpy.max', 'np.max', (['imgs'], {}), '(imgs)\n', (5766, 5772), True, 'import numpy as np\n'), ((5775, 5787), 'numpy.min', 'np.min', (['imgs'], {}), '(imgs)\n', (5781, 5787), True, 'import numpy as np\n'), ((6160, 6191), 'numpy.sqrt', 'np.sqrt', (['self.params.num_pixels'], {}), '(self.params.num_pixels)\n', (6167, 6191), True, 'import numpy as np\n'), ((6417, 6438), 'numpy.linspace', 'np.linspace', (['(-4)', 
'(4)', 'n'], {}), '(-4, 4, n)\n', (6428, 6438), True, 'import numpy as np\n'), ((2662, 2710), 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['"""auto_placeholders"""'], {}), "('auto_placeholders')\n", (2689, 2710), True, 'import tensorflow as tf\n'), ((2747, 2810), 'tensorflow.compat.v1.placeholder', 'tf.compat.v1.placeholder', (['tf.float32'], {'shape': '()', 'name': '"""kld_mult"""'}), "(tf.float32, shape=(), name='kld_mult')\n", (2771, 2810), True, 'import tensorflow as tf\n'), ((6574, 6616), 'numpy.array', 'np.array', (['[[xi, yi] + [0.0] * (num_z - 2)]'], {}), '([[xi, yi] + [0.0] * (num_z - 2)])\n', (6582, 6616), True, 'import numpy as np\n'), ((7816, 7850), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (7848, 7850), True, 'import tensorflow as tf\n'), ((7990, 8023), 'DeepSparseCoding.tf1x.utils.data_processing.one_hot_to_dense', 'dp.one_hot_to_dense', (['input_labels'], {}), '(input_labels)\n', (8009, 8023), True, 'import DeepSparseCoding.tf1x.utils.data_processing as dp\n'), ((6683, 6717), 'tensorflow.compat.v1.get_default_session', 'tf.compat.v1.get_default_session', ([], {}), '()\n', (6715, 6717), True, 'import tensorflow as tf\n')] |
#!/usr/bin/python
import numpy as np
import math
# import fppy
class Cell:
    """Container for a periodic crystal cell.

    Stores the lattice matrix (rows are lattice vectors), per-species atom
    counts (``typt``), expanded per-atom type indices (``types``), and atom
    positions in both fractional (``positions``) and Cartesian
    (``cart_positions``) coordinates.
    """

    def __init__(self,
                 name=None,
                 symbols=None,
                 typt=None,
                 types=None,
                 lattice=None,
                 positions=None,
                 cart_positions=None,
                 znucl=None,
                 latv=None,
                 atomv=None,
                 stress=None,
                 e=None,
                 sfp=None,
                 lfp=None):
        # Bug fix: the constructor previously accepted symbols, types,
        # cart_positions, znucl, latv, atomv, stress, e, sfp and lfp but
        # silently discarded them. Every argument is now stored, using the
        # same conversions as the corresponding setter methods.
        self.name = None if name is None else str(name)
        self.symbols = symbols
        self.typt = None if typt is None else np.array(typt)
        self.types = None if types is None else np.array(types, int)
        self.lattice = None if lattice is None else np.array(lattice)
        self.positions = None if positions is None else np.array(positions)
        self.cart_positions = None if cart_positions is None else np.array(cart_positions)
        self.znucl = None if znucl is None else np.array(znucl)
        self.latv = None if latv is None else np.array(latv)
        self.atomv = None if atomv is None else np.array(atomv)
        self.stress = None if stress is None else np.array(stress)
        self.e = None if e is None else float(e)
        self.sfp = sfp
        self.lfp = lfp

    def set_name(self, name):
        """Set the cell label (stored as str)."""
        self.name = str(name)

    def get_name(self):
        return self.name

    def set_lattice(self, lat):
        """Set the 3x3 lattice matrix (rows are lattice vectors)."""
        self.lattice = np.array(lat)

    def get_lattice(self):
        return self.lattice

    def set_znucl(self, znucl):
        """Set the atomic numbers, one per species."""
        self.znucl = np.array(znucl)

    def set_types(self):
        """Expand per-species counts ``typt`` into per-atom 1-based type indices."""
        types = []
        for i in range(len(self.typt)):
            types += [i + 1] * self.typt[i]
        self.types = np.array(types, int)

    def get_types(self):
        return self.types

    def set_e(self, e):
        """Set the total energy (stored as float)."""
        self.e = float(e)

    def get_e(self):
        return self.e

    def set_positions(self, pos):
        """Set fractional positions and keep Cartesian positions in sync."""
        # Wrapping coordinates into [0, 1) was considered but left disabled:
        # for ia in range(len(pos)):
        #     for ib in range(3):
        #         if pos[ia][ib] >= 1: pos[ia][ib] -= int(pos[ia][ib])
        #         if pos[ia][ib] < 0: pos[ia][ib] -= (int(pos[ia][ib]) - 1)
        self.positions = np.array(pos)
        self.cart_positions = np.dot(pos, self.lattice)

    def set_cart_positions(self, rxyz):
        """Set Cartesian positions and keep fractional positions in sync."""
        self.cart_positions = np.array(rxyz)
        self.positions = np.dot(rxyz, np.linalg.inv(self.lattice))

    def get_positions(self):
        return self.positions

    def get_cart_positions(self):
        return self.cart_positions

    def set_typt(self, typ):
        """Set the per-species atom counts."""
        self.typt = np.array(typ)

    def get_typt(self):
        return self.typt

    def set_symbols(self, symb):
        self.symbols = symb

    def get_symbols(self):
        return self.symbols

    def set_latv(self, v):
        # latv: lattice-related vector quantity (presumably lattice
        # velocities) — stored as an ndarray.
        self.latv = np.array(v)

    def get_latv(self):
        return self.latv

    def set_atomv(self, v):
        # atomv: per-atom vector quantity (presumably velocities).
        self.atomv = np.array(v)

    def get_atomv(self):
        return self.atomv

    def get_volume(self):
        """Return the (signed) cell volume, det of the lattice matrix."""
        return np.linalg.det(self.lattice)

    def set_stress(self, stres):
        self.stress = np.array(stres)

    def get_stress(self):
        return self.stress

    # Fingerprint support requires the optional fppy package (import is
    # commented out at the top of the file); kept for reference:
    # def cal_fp(self, cutoff, lmax, natx=300):
    #     lat = self.lattice
    #     rxyz = self.get_cart_positions()
    #     types = self.types
    #     znucl = self.znucl
    #     (sfp, lfp) = fppy.fp_periodic(lat, rxyz, types, znucl, lmax, natx,
    #                                   cutoff)
    #     self.sfp = sfp
    #     self.lfp = lfp

    # def get_sfp(self):
    #     return self.sfp

    # def get_lfp(self):
    #     return self.lfp
def lat2vec(lat):
return np.array([lat[0][0], lat[1][1], lat[2][2],
lat[1][0], lat[2][0], lat[2][1]], float)
def vec2lat(vec):
return np.array([[vec[0], 0., 0.],
[vec[3], vec[1], 0.],
[vec[4], vec[5], vec[2]]], float)
def lat2lcons(lat):
ra = math.sqrt(lat[0][0]**2 + lat[0][1]**2 + lat[0][2]**2)
rb = math.sqrt(lat[1][0]**2 + lat[1][1]**2 + lat[1][2]**2)
rc = math.sqrt(lat[2][0]**2 + lat[2][1]**2 + lat[2][2]**2)
cosa = (lat[1][0]*lat[2][0] + lat[1][1]*lat[2][1] +
lat[1][2]*lat[2][2])/rb/rc
cosb = (lat[0][0]*lat[2][0] + lat[0][1]*lat[2][1] +
lat[0][2]*lat[2][2])/ra/rc
cosc = (lat[0][0]*lat[1][0] + lat[0][1]*lat[1][1] +
lat[0][2]*lat[1][2])/rb/ra
alpha = math.acos(cosa)
beta = math.acos(cosb)
gamma = math.acos(cosc)
return np.array([ra, rb, rc, alpha, beta, gamma], float)
def lcons2lat(cons):
(a, b, c, alpha, beta, gamma) = cons
bc2 = b**2 + c**2 - 2*b*c*math.cos(alpha)
h1 = a
h2 = b * math.cos(gamma)
h3 = b * math.sin(gamma)
h4 = c * math.cos(beta)
h5 = ((h2 - h4)**2 + h3**2 + c**2 - h4**2 - bc2)/(2 * h3)
h6 = math.sqrt(c**2 - h4**2 - h5**2)
lattice = [[h1, 0., 0.], [h2, h3, 0.], [h4, h5, h6]]
return lattice
def get_cutoff(lat):
volume = np.linalg.det(lat)
(a, b, c, alpha, beta, gamma) = lat2lcons(lat)
area_ab = a * b * np.sin(gamma)
area_ac = a * c * np.sin(beta)
area_bc = b * c * np.sin(alpha)
h_ab = volume / area_ab
h_ac = volume / area_ac
h_bc = volume / area_bc
h = np.array([h_ab, h_ac, h_bc], float)
return h.min() * 0.75 / 2.
| [
"math.sqrt",
"math.sin",
"math.acos",
"numpy.sin",
"numpy.array",
"math.cos",
"numpy.linalg.inv",
"numpy.dot",
"numpy.linalg.det"
] | [((3370, 3457), 'numpy.array', 'np.array', (['[lat[0][0], lat[1][1], lat[2][2], lat[1][0], lat[2][0], lat[2][1]]', 'float'], {}), '([lat[0][0], lat[1][1], lat[2][2], lat[1][0], lat[2][0], lat[2][1]],\n float)\n', (3378, 3457), True, 'import numpy as np\n'), ((3506, 3597), 'numpy.array', 'np.array', (['[[vec[0], 0.0, 0.0], [vec[3], vec[1], 0.0], [vec[4], vec[5], vec[2]]]', 'float'], {}), '([[vec[0], 0.0, 0.0], [vec[3], vec[1], 0.0], [vec[4], vec[5], vec[2\n ]]], float)\n', (3514, 3597), True, 'import numpy as np\n'), ((3663, 3722), 'math.sqrt', 'math.sqrt', (['(lat[0][0] ** 2 + lat[0][1] ** 2 + lat[0][2] ** 2)'], {}), '(lat[0][0] ** 2 + lat[0][1] ** 2 + lat[0][2] ** 2)\n', (3672, 3722), False, 'import math\n'), ((3726, 3785), 'math.sqrt', 'math.sqrt', (['(lat[1][0] ** 2 + lat[1][1] ** 2 + lat[1][2] ** 2)'], {}), '(lat[1][0] ** 2 + lat[1][1] ** 2 + lat[1][2] ** 2)\n', (3735, 3785), False, 'import math\n'), ((3789, 3848), 'math.sqrt', 'math.sqrt', (['(lat[2][0] ** 2 + lat[2][1] ** 2 + lat[2][2] ** 2)'], {}), '(lat[2][0] ** 2 + lat[2][1] ** 2 + lat[2][2] ** 2)\n', (3798, 3848), False, 'import math\n'), ((4142, 4157), 'math.acos', 'math.acos', (['cosa'], {}), '(cosa)\n', (4151, 4157), False, 'import math\n'), ((4169, 4184), 'math.acos', 'math.acos', (['cosb'], {}), '(cosb)\n', (4178, 4184), False, 'import math\n'), ((4197, 4212), 'math.acos', 'math.acos', (['cosc'], {}), '(cosc)\n', (4206, 4212), False, 'import math\n'), ((4225, 4274), 'numpy.array', 'np.array', (['[ra, rb, rc, alpha, beta, gamma]', 'float'], {}), '([ra, rb, rc, alpha, beta, gamma], float)\n', (4233, 4274), True, 'import numpy as np\n'), ((4555, 4592), 'math.sqrt', 'math.sqrt', (['(c ** 2 - h4 ** 2 - h5 ** 2)'], {}), '(c ** 2 - h4 ** 2 - h5 ** 2)\n', (4564, 4592), False, 'import math\n'), ((4700, 4718), 'numpy.linalg.det', 'np.linalg.det', (['lat'], {}), '(lat)\n', (4713, 4718), True, 'import numpy as np\n'), ((4969, 5004), 'numpy.array', 'np.array', (['[h_ab, h_ac, h_bc]', 'float'], {}), '([h_ab, 
h_ac, h_bc], float)\n', (4977, 5004), True, 'import numpy as np\n'), ((1141, 1154), 'numpy.array', 'np.array', (['lat'], {}), '(lat)\n', (1149, 1154), True, 'import numpy as np\n'), ((1265, 1280), 'numpy.array', 'np.array', (['znucl'], {}), '(znucl)\n', (1273, 1280), True, 'import numpy as np\n'), ((1429, 1449), 'numpy.array', 'np.array', (['types', 'int'], {}), '(types, int)\n', (1437, 1449), True, 'import numpy as np\n'), ((1876, 1889), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (1884, 1889), True, 'import numpy as np\n'), ((1920, 1945), 'numpy.dot', 'np.dot', (['pos', 'self.lattice'], {}), '(pos, self.lattice)\n', (1926, 1945), True, 'import numpy as np\n'), ((2017, 2031), 'numpy.array', 'np.array', (['rxyz'], {}), '(rxyz)\n', (2025, 2031), True, 'import numpy as np\n'), ((2279, 2292), 'numpy.array', 'np.array', (['typ'], {}), '(typ)\n', (2287, 2292), True, 'import numpy as np\n'), ((2509, 2520), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2517, 2520), True, 'import numpy as np\n'), ((2621, 2632), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2629, 2632), True, 'import numpy as np\n'), ((2727, 2754), 'numpy.linalg.det', 'np.linalg.det', (['self.lattice'], {}), '(self.lattice)\n', (2740, 2754), True, 'import numpy as np\n'), ((2811, 2826), 'numpy.array', 'np.array', (['stres'], {}), '(stres)\n', (2819, 2826), True, 'import numpy as np\n'), ((4411, 4426), 'math.cos', 'math.cos', (['gamma'], {}), '(gamma)\n', (4419, 4426), False, 'import math\n'), ((4440, 4455), 'math.sin', 'math.sin', (['gamma'], {}), '(gamma)\n', (4448, 4455), False, 'import math\n'), ((4469, 4483), 'math.cos', 'math.cos', (['beta'], {}), '(beta)\n', (4477, 4483), False, 'import math\n'), ((4792, 4805), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (4798, 4805), True, 'import numpy as np\n'), ((4828, 4840), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (4834, 4840), True, 'import numpy as np\n'), ((4863, 4876), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', 
(4869, 4876), True, 'import numpy as np\n'), ((711, 725), 'numpy.array', 'np.array', (['typt'], {}), '(typt)\n', (719, 725), True, 'import numpy as np\n'), ((828, 845), 'numpy.array', 'np.array', (['lattice'], {}), '(lattice)\n', (836, 845), True, 'import numpy as np\n'), ((954, 973), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (962, 973), True, 'import numpy as np\n'), ((2070, 2097), 'numpy.linalg.inv', 'np.linalg.inv', (['self.lattice'], {}), '(self.lattice)\n', (2083, 2097), True, 'import numpy as np\n'), ((4370, 4385), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (4378, 4385), False, 'import math\n')] |
import os
import sys
import time
import numpy as np
from tqdm import tqdm
sys.path.append('../../')
from utils import *
from NeuralNet import NeuralNet
import torch
import torch.optim as optim
from .CheckersNNet import CheckersNNet as chnet
from ..CheckersGame import CheckersGame
# shared training hyper-parameters; dotdict gives attribute-style access (args.cuda, ...)
args = dotdict({
    'lr': 0.01,  # learning rate (not passed to the Adam optimizer in train(); may be read elsewhere -- confirm)
    'dropout': 0.3,  # presumably consumed by CheckersNNet -- confirm
    'epochs': 10,  # passes over sampled mini-batches per train() call
    'batch_size': 256,  # examples sampled (with replacement) per step
    'cuda': torch.cuda.is_available(),  # place model/tensors on GPU when available
    'num_channels': 512,  # presumably conv width inside CheckersNNet -- confirm
})
class NNetWrapper(NeuralNet):
    """Training / inference wrapper around the CheckersNNet torch model.

    Handles device placement, mini-batch optimisation, prediction and
    checkpoint (de)serialisation.  Hyper-parameters come from the
    module-level ``args`` dotdict.
    """

    def __init__(self, game, state_dict=None, gpu_num=0):
        """
        game       : provides getBoardSize() and getActionSize()
        state_dict : optional torch state_dict to initialise the net with
        gpu_num    : CUDA device index used when args.cuda is True
        """
        self.nnet = chnet(game, args)
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()

        if args.cuda:
            torch.cuda.set_device(torch.device(f'cuda:{gpu_num}'))
            self.nnet.cuda()

        if state_dict is not None:  # `is not None` instead of `!= None`
            self.nnet.load_state_dict(state_dict)

    def train(self, examples):
        """
        examples: list of examples, each example is of form (board, pi, v)
        """
        optimizer = optim.Adam(self.nnet.parameters(), weight_decay=1e-4)

        for epoch in range(args.epochs):
            print('EPOCH ::: ' + str(epoch + 1))
            self.nnet.train()
            pi_losses = AverageMeter()
            v_losses = AverageMeter()
            batch_count = int(len(examples) / args.batch_size)

            t = tqdm(range(batch_count), desc='Training Net')
            for _ in t:
                # sample a mini-batch with replacement
                sample_ids = np.random.randint(len(examples), size=args.batch_size)
                boards, pis, vs = list(zip(*[examples[i] for i in sample_ids]))
                boards = torch.FloatTensor(np.array(boards).astype(np.float64))
                target_pis = torch.FloatTensor(np.array(pis))
                target_vs = torch.FloatTensor(np.array(vs).astype(np.float64))

                # predict
                if args.cuda:
                    boards, target_pis, target_vs = boards.contiguous().cuda(), target_pis.contiguous().cuda(), target_vs.contiguous().cuda()

                # compute output
                out_pi, out_v = self.nnet(boards)
                l_pi = self.loss_pi(target_pis, out_pi)
                l_v = self.loss_v(target_vs, out_v)
                total_loss = l_pi + l_v

                # record loss
                pi_losses.update(l_pi.item(), boards.size(0))
                v_losses.update(l_v.item(), boards.size(0))
                t.set_postfix(Loss_pi=pi_losses, Loss_v=v_losses)

                # compute gradient and do SGD step
                optimizer.zero_grad()
                total_loss.backward()
                optimizer.step()

    def predict(self, board):
        """
        board: np array with board

        Returns (policy, value) as numpy arrays for a single board.
        """
        # timing hook (the matching print below is commented out)
        start = time.time()

        # preparing input
        board = CheckersGame.encodeBoard(board)
        board = torch.FloatTensor(board.astype(np.float64))
        if args.cuda:
            board = board.contiguous().cuda()
        board = board.view(5, self.board_x, self.board_y)
        self.nnet.eval()
        with torch.no_grad():
            pi, v = self.nnet(board)

        # print('PREDICTION TIME TAKEN : {0:03f}'.format(time.time()-start))
        return torch.exp(pi).data.cpu().numpy()[0], v.data.cpu().numpy()[0]

    def loss_pi(self, targets, outputs):
        """Cross-entropy between target policy and predicted log-policy."""
        return -torch.sum(targets * outputs) / targets.size()[0]

    def loss_v(self, targets, outputs):
        """Mean squared error of the value head."""
        return torch.sum((targets - outputs.view(-1)) ** 2) / targets.size()[0]

    def save_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Serialise the network's state_dict to folder/filename,
        creating the folder if needed."""
        filepath = os.path.join(folder, filename)
        if not os.path.exists(folder):
            print("Checkpoint Directory does not exist! Making directory {}".format(folder))
            os.mkdir(folder)
        else:
            print("Checkpoint Directory exists! ")
        torch.save({
            'state_dict': self.nnet.state_dict(),
        }, filepath)

    def load_checkpoint(self, folder='checkpoint', filename='checkpoint.pth.tar'):
        """Load a state_dict previously written by save_checkpoint.

        Raises FileNotFoundError if the checkpoint file does not exist.
        """
        # https://github.com/pytorch/examples/blob/master/imagenet/main.py#L98
        filepath = os.path.join(folder, filename)
        if not os.path.exists(filepath):
            # bug fix: the original `raise ("...")` raised a TypeError at
            # runtime (strings are not exceptions); raise a real exception.
            raise FileNotFoundError("No model in path {}".format(filepath))
        map_location = None if args.cuda else 'cpu'
        checkpoint = torch.load(filepath, map_location=map_location)
        self.nnet.load_state_dict(checkpoint['state_dict'])
| [
"sys.path.append",
"os.mkdir",
"torch.sum",
"torch.load",
"os.path.exists",
"time.time",
"torch.exp",
"torch.cuda.is_available",
"numpy.array",
"torch.device",
"torch.no_grad",
"os.path.join"
] | [((76, 101), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (91, 101), False, 'import sys\n'), ((392, 417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (415, 417), False, 'import torch\n'), ((2760, 2771), 'time.time', 'time.time', ([], {}), '()\n', (2769, 2771), False, 'import time\n'), ((3611, 3641), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (3623, 3641), False, 'import os\n'), ((4142, 4172), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (4154, 4172), False, 'import os\n'), ((4346, 4393), 'torch.load', 'torch.load', (['filepath'], {'map_location': 'map_location'}), '(filepath, map_location=map_location)\n', (4356, 4393), False, 'import torch\n'), ((3072, 3087), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3085, 3087), False, 'import torch\n'), ((3657, 3679), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (3671, 3679), False, 'import os\n'), ((3786, 3802), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (3794, 3802), False, 'import os\n'), ((4188, 4212), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (4202, 4212), False, 'import os\n'), ((741, 772), 'torch.device', 'torch.device', (['f"""cuda:{gpu_num}"""'], {}), "(f'cuda:{gpu_num}')\n", (753, 772), False, 'import torch\n'), ((3338, 3366), 'torch.sum', 'torch.sum', (['(targets * outputs)'], {}), '(targets * outputs)\n', (3347, 3366), False, 'import torch\n'), ((1732, 1745), 'numpy.array', 'np.array', (['pis'], {}), '(pis)\n', (1740, 1745), True, 'import numpy as np\n'), ((1648, 1664), 'numpy.array', 'np.array', (['boards'], {}), '(boards)\n', (1656, 1664), True, 'import numpy as np\n'), ((1793, 1805), 'numpy.array', 'np.array', (['vs'], {}), '(vs)\n', (1801, 1805), True, 'import numpy as np\n'), ((3219, 3232), 'torch.exp', 'torch.exp', (['pi'], {}), '(pi)\n', (3228, 3232), False, 'import torch\n')] |
import numpy as np
import warnings
from agents.agent import Agent
from agents.basis import SimpleBasis, ScaledBasis, PolynomialBasis
from agents.sarsa_lambda import SarsaLambdaAgent
class QPAMDPAgent(Agent):
    """
    Defines an agent to optimize H(theta) using the episodic natural actor critic (eNAC) algorithm for continuous
    action spaces.
    Uses Gaussian policy for continuous actions.
    N.B. assumes same state variables used for all actions, and separately same for all parameters
    """
    name = "Q-PAMDP"

    def __init__(self, observation_space, action_space,
                 alpha=0.01,
                 initial_action_learning_episodes=10000,
                 action_relearn_episodes=1000,
                 parameter_updates=180,
                 parameter_rollouts=50,
                 action_obs_index=None,
                 parameter_obs_index=None,
                 discrete_agent=None,
                 norm_grad=False,
                 variances=None,  # list of variances per continuous action parameter (one entry per action)
                 seed=None,
                 phi0_func=None,
                 phi0_size=None,
                 poly_basis=False,
                 print_freq=1):
        super().__init__(observation_space, action_space)
        # split the action space into the discrete actions and continuous parameters
        self.discrete_action_space = action_space.spaces[0]
        self.parameter_space = action_space.spaces[1]
        self.num_actions = self.discrete_action_space.n
        nvars = self.observation_space.shape[0]
        self.alpha = alpha
        # broadcast a scalar variance to one entry per discrete action
        if isinstance(variances, (list, np.ndarray)):
            assert len(variances) == self.num_actions
        else:
            variances = variances*np.ones((self.num_actions,))
        self.variances = variances
        self.initial_action_learning_episodes = initial_action_learning_episodes
        self.action_relearn_episodes = action_relearn_episodes
        self.parameter_updates = parameter_updates
        self.parameter_rollouts = parameter_rollouts
        self.episodes_per_cycle = self.action_relearn_episodes + self.parameter_updates * self.parameter_rollouts
        self.parameter_obs_index = parameter_obs_index
        self.norm_grad = norm_grad
        self.phi0_func = phi0_func
        self.phi0_size = phi0_size
        if self.phi0_size is None: assert self.phi0_func is None # raise error? Need to specify size of custom phi0_func
        self.print_freq = print_freq
        self.R = 0.  # running total of episode rewards (for progress printing)
        self._total_episodes = 0
        # initialise discrete action learner
        self.discrete_agent = discrete_agent
        if self.discrete_agent is None:
            self.discrete_agent = SarsaLambdaAgent(self.observation_space, self.discrete_action_space, alpha=1.0,
                                                  gamma=0.999, temperature=1.0, cooling=0.995, lmbda=0.5, order=6,
                                                  scale_alpha=True, use_softmax=True, seed=seed,
                                                  observation_index=action_obs_index)
        self.np_random = None
        # NOTE(review): __seed stays 0 regardless of the supplied seed
        # (_seed never updates it), so __str__ always reports "Seed: 0" -- confirm intended.
        self.__seed = 0
        self._seed(seed)
        # initialise basis for each action-parameter (one per action)
        if self.parameter_obs_index is not None:
            self.basis = []
            if isinstance(self.parameter_obs_index[0], (list, np.ndarray)):
                if len(self.parameter_obs_index) == 1:
                    self.parameter_obs_index = np.tile(self.parameter_obs_index, (self.num_actions, 1))
                else:
                    # different observation variables for each action-parameter
                    assert len(self.parameter_obs_index) == self.num_actions
            else:
                assert isinstance(self.parameter_obs_index[0], int)
                # same observation variables for all action-parameters, duplicate them for convenience
                self.parameter_obs_index = np.tile(self.parameter_obs_index,(self.num_actions,1))
            for a in range(self.num_actions):
                nvars = len(self.parameter_obs_index[a])
                low = self.observation_space.low[self.parameter_obs_index[a]]
                high = self.observation_space.high[self.parameter_obs_index[a]]
                # self.basis.append(ScaledBasis(nvars, low, high, bias_unit=True))
                if poly_basis is True:
                    self.basis.append(PolynomialBasis(nvars, order=2, bias_unit=True))
                else:
                    self.basis.append(SimpleBasis(nvars, bias_unit=True))
                # self.basis.append(SimpleBasis(nvars, bias_unit=True))
        else:
            # use simple basis with bias unit (for parameter initialisation)
            # self.basis = [ScaledBasis(nvars, low, high, bias_unit=True) for _ in range(self.num_actions)]
            # if poly_basis is True:
            #     self.basis = [PolynomialBasis(nvars, order=2, bias_unit=True) for _ in range(self.num_actions)]
            # else:
            #     self.basis = [SimpleBasis(nvars, bias_unit=True) for _ in range(self.num_actions)]
            self.basis = [SimpleBasis(nvars, bias_unit=True) for _ in range(self.num_actions)]
        self.num_basis_functions = [self.basis[a].get_num_basis_functions() for a in range(self.num_actions)]
        # self.poly_basis = poly_basis
        # self.parameter_weights = np.zeros((self.num_actions, self.num_basis_functions))  # TODO: randomly init weights?
        # for multidimensional parameters
        self.parameter_weights = []
        for a in range(self.num_actions):
            shape = (self.num_basis_functions[a],)
            param_shape = self.parameter_space.spaces[a].shape
            assert len(param_shape) <= 1
            if len(param_shape) == 1 and param_shape[0] > 0:
                shape = (param_shape[0], self.num_basis_functions[a])
            self.parameter_weights.append(np.zeros(shape))
            # self.parameter_weights.append(self.np_random.normal(loc=0.,scale=0.0001,size=shape))
        # self.parameter_weights = self.np_random.random_sample((self.num_actions, self.num_basis_functions))

    def act(self, state):
        """ Select a full parameterised action (discrete act + its continuous parameter). """
        act = self._action_policy(state)
        param = self._parameter_policy(state, act)
        return self._pad_action(act, param)

    def learn(self, env, max_episodes=100000, max_steps_per_episode=None):
        """ Learn for a given number of episodes.

        Alternates Q-PAMDP phases: an initial discrete-action learning
        stage, then repeated cycles of parameter-policy updates followed
        by discrete-action relearning. """
        self.e = 0
        if max_episodes < self.initial_action_learning_episodes:
            warnings.warn("Too few episodes to initialise agent!", UserWarning)
        print("Initial discrete action learning for %d episodes..." % self.initial_action_learning_episodes)
        for _ in range(self.initial_action_learning_episodes):
            self._rollout(env, update_actions=True, max_steps=max_steps_per_episode)
            self.e += 1
            if self.e > max_episodes: break
        while True:
            # greedy discrete actions while the parameter policy is updated
            self.discrete_agent.temperature = 0.0
            self.discrete_agent.epsilon = 0.0
            # update parameter policy
            print(self.e, "Updating parameter selection...")
            for _ in range(self.parameter_updates):
                self._parameter_update(env, max_steps_per_episode)
                self.e += self.parameter_rollouts
                if self.e > max_episodes: break
            if self.e > max_episodes: break
            self.discrete_agent.temperature = 1.0
            self.discrete_agent.epsilon = 1.0
            # update discrete action policy
            print(self.e, "Updating action selection...")
            for _ in range(self.action_relearn_episodes):
                self._rollout(env, update_actions=True, max_steps=max_steps_per_episode)
                self.e += 1
                if self.e > max_episodes: break
            if self.e > max_episodes: break
        # no stochastic actions for evaluation?
        self.discrete_agent.temperature = 0.0
        self.discrete_agent.epsilon = 0.0

    def start_episode(self):
        """ Forward episode start to the discrete-action learner. """
        self.discrete_agent.start_episode()

    def end_episode(self):
        """ Forward episode end to the discrete-action learner. """
        self.discrete_agent.end_episode()

    def _seed(self, seed=None):
        """
        NOTE: this will not reset the randomly initialised weights; use the seed parameter in the constructor instead.
        :param seed:
        :return:
        """
        self.np_random = np.random.RandomState(seed=seed)

    def _get_parameters(self):
        """ Returns all the parameters in a vector. """
        # parameters = []
        # # for non-uniform parameter weights shapes (ragged array)
        # for a in range(self.num_actions):
        #     parameters.append(self.parameter_weights[a])
        # return np.ravel(self.parameter_weights)  # np.array(parameters)
        return np.concatenate([self.parameter_weights[i].flat for i in range(len(self.parameter_weights))])

    def _set_parameters(self, parameters):
        """ Set the parameters using a vector. """
        index = 0
        for action in range(self.num_actions):
            rows = self.parameter_weights[action].size
            self.parameter_weights[action] = parameters[index: index + rows].reshape(self.parameter_weights[action].shape)
            index += rows

    def _log_parameter_gradient(self, state, act, param):
        """ Returns the log gradient for the parameter,
        given the state and the value. """
        features = self._compute_features(state, act)
        mean = self.parameter_weights[act].dot(features)
        grad = np.outer((param - mean),features / self.variances[act])
        return grad.ravel()

    def log_gradient(self, state, action, param):
        """ Returns the log gradient for the entire policy. """
        grad = np.zeros((0,))
        for i in range(self.num_actions):
            elems = self.parameter_weights[i].size
            if i == action:
                parameter_grad = self._log_parameter_gradient(state, i, param)
                grad = np.append(grad, parameter_grad)
            else:
                # weights of the non-selected actions contribute zero gradient
                grad = np.append(grad, np.zeros((elems,)))
        return grad

    def _pad_action(self, act, param):
        """ Wrap (act, param) into the environment's compound action format. """
        # Box for each parameter wrapped in a Compound
        action = [np.zeros(self.parameter_space.spaces[a].shape) for a in range(self.num_actions)]
        action[act] = param
        action = (act, action)
        return action

    def _rollout(self, env, update_actions=False, max_steps=None):
        """ Run a single episode for a maximum number of steps. """
        state, _ = env.reset()
        states = [state]
        rewards = []
        actions = []
        terminal = False
        act = self._action_policy(state)
        acts = [act]
        steps = 0
        if update_actions:
            self.discrete_agent.start_episode()
        while not terminal and not (max_steps is not None and steps > max_steps):
            param = self._parameter_policy(state, act)
            # print (act,param)
            (new_state, time_steps), reward, terminal, _ = env.step(self._pad_action(act, param))
            new_act = self._action_policy(new_state)
            if update_actions:
                self.discrete_agent.step(state, act, reward, new_state, new_act, terminal, time_steps)
            state = new_state
            states.append(state)
            actions.append((act, param))
            rewards.append(reward)
            act = new_act
            acts.append(act)
            steps += 1
        if update_actions:
            self.discrete_agent.end_episode()
        self.R += sum(rewards)
        self._total_episodes += 1
        if self.print_freq > 0 and self._total_episodes % self.print_freq == 0:
            if self.print_freq == 1:
                print("{0:5s} R: {1:.4f} r: {2:.4f}".format(str(self._total_episodes), self.R/self._total_episodes,sum(rewards)))
            else:
                # print("{0:5s} R: {1:.4f}".format(str(self._total_episodes), self.R/self._total_episodes))
                # P(S) below counts episodes with return exactly 50. -- presumably
                # this environment's goal reward; confirm if reused elsewhere
                returns = np.array(env.get_episode_rewards())
                print('{0:5s} R:{1:.5f} P(S):{2:.4f}'.format(str(self._total_episodes), sum(returns) / (self._total_episodes),
                                                             (np.array(returns) == 50.).sum() / len(returns)))
        return states, actions, rewards, acts

    def _enac_gradient(self, env, max_steps=None): #, phi0_func=None, phi0_size=None):
        """
        Compute the episodic NAC gradient.
        phi0_func : lambda function giving the state features of s_0, the initial state in a trajectory
                    defaults to [1.] if None
        phi0_size : number of features returned by phi0_func
        """
        if self.phi0_size is None: assert self.phi0_func is None # raise error? Need to specify size of custom phi0_fun
        # side effect: installs the default phi0 on self the first time through
        if self.phi0_func is None:
            self.phi0_func = lambda state: np.array([1,])
            self.phi0_size = 1
        returns = np.zeros((self.parameter_rollouts, 1))
        param_size = self._get_parameters().size
        psi = np.zeros((self.parameter_rollouts, param_size + self.phi0_size))
        for run in range(self.parameter_rollouts):
            states, actions, rewards, acts = self._rollout(env, False, max_steps)
            returns[run, 0] = sum(rewards)
            log_grad = np.zeros((param_size,))
            for state, act, action in zip(states, acts, actions):
                log_grad += self.log_gradient(state, act, action[1])
            psi[run, :] = np.append(log_grad, self.phi0_func(states[0]))
        # least-squares fit of returns against trajectory log-gradients (eNAC)
        grad = np.linalg.pinv(psi).dot(returns)[0:param_size, 0]
        return grad

    def _parameter_update(self, env, max_steps=None):
        """ Perform a single gradient update. """
        grad = self._enac_gradient(env, max_steps)
        if np.linalg.norm(grad) > 0 and self.norm_grad:
            grad /= np.linalg.norm(grad)
        self._set_parameters(self._get_parameters() + self.alpha * grad)

    def _action_update(self, state, action, reward, next_state, next_action, terminal, time_steps=1):
        """ Forward one SARSA transition (discrete component only) to the discrete agent. """
        self.discrete_agent.step(state, action[0], reward, next_state, next_action[0], terminal, time_steps)

    def _action_policy(self, state):
        """ Discrete action choice, delegated to the discrete agent. """
        return self.discrete_agent.act(state)

    def _parameter_policy(self, state, act):
        """ Continuous parameter choice for the selected discrete action. """
        return self._gaussian_policy(state, act)

    def _gaussian_policy(self, state, act):
        """ Gaussian action policy for continuous actions. """
        mean = np.dot(self.parameter_weights[act], self._compute_features(state, act))
        variance = 0.
        if self.variances is not None:
            if isinstance(self.variances, (list, np.ndarray)):
                variance = self.variances[act]
            else:
                variance = self.variances
        if variance == 0.:
            return mean
        else:
            # TODO: multivariate_normal expects variance, normal expects stdev? may be important...
            # this may be incorrect / unnecessary but trying to be consistent with Warwick's source code for now
            if isinstance(mean, np.ndarray) and len(mean) > 1:
                return self.np_random.multivariate_normal(mean, variance*np.eye(len(mean)))
            return self.np_random.normal(mean, variance)

    def _compute_features(self, state, act):
        """ Returns phi: the features after the function approximation basis has been applied. """
        if self.parameter_obs_index is not None:
            state = state[self.parameter_obs_index[act]]
        return self.basis[act].compute_features(state)

    def __str__(self):
        desc = ("Q-PAMDP Agent\n"+
                "Alpha: {}\n".format(self.alpha)+
                "Initial Action Episodes: {}\n".format(self.initial_action_learning_episodes)+
                "Action Relearn Episodes: {}\n".format(self.action_relearn_episodes)+
                "Parameter Updates: {}\n".format(self.parameter_updates) +
                "Parameter Rollouts: {}\n".format(self.parameter_rollouts) +
                "Observation Index: {}\n".format(self.parameter_obs_index) +
                "Variances: {}\n".format(self.variances) +
                "Norm Grad: {}\n".format(self.norm_grad) +
                "Phi0 func.: {}\n".format(self.phi0_func) +
                "Phi0 size: {}\n".format(self.phi0_size) +
                "Discrete Agent: {}\n".format(self.discrete_agent) +
                "Seed: {}\n".format(self.__seed))
        return desc
| [
"agents.basis.PolynomialBasis",
"numpy.outer",
"agents.sarsa_lambda.SarsaLambdaAgent",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.ones",
"numpy.append",
"numpy.linalg.norm",
"numpy.tile",
"numpy.array",
"warnings.warn",
"numpy.linalg.pinv",
"agents.basis.SimpleBasis"
] | [((8449, 8481), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'seed'}), '(seed=seed)\n', (8470, 8481), True, 'import numpy as np\n'), ((9600, 9654), 'numpy.outer', 'np.outer', (['(param - mean)', '(features / self.variances[act])'], {}), '(param - mean, features / self.variances[act])\n', (9608, 9654), True, 'import numpy as np\n'), ((9814, 9828), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (9822, 9828), True, 'import numpy as np\n'), ((13033, 13071), 'numpy.zeros', 'np.zeros', (['(self.parameter_rollouts, 1)'], {}), '((self.parameter_rollouts, 1))\n', (13041, 13071), True, 'import numpy as np\n'), ((13135, 13199), 'numpy.zeros', 'np.zeros', (['(self.parameter_rollouts, param_size + self.phi0_size)'], {}), '((self.parameter_rollouts, param_size + self.phi0_size))\n', (13143, 13199), True, 'import numpy as np\n'), ((2737, 2978), 'agents.sarsa_lambda.SarsaLambdaAgent', 'SarsaLambdaAgent', (['self.observation_space', 'self.discrete_action_space'], {'alpha': '(1.0)', 'gamma': '(0.999)', 'temperature': '(1.0)', 'cooling': '(0.995)', 'lmbda': '(0.5)', 'order': '(6)', 'scale_alpha': '(True)', 'use_softmax': '(True)', 'seed': 'seed', 'observation_index': 'action_obs_index'}), '(self.observation_space, self.discrete_action_space, alpha=\n 1.0, gamma=0.999, temperature=1.0, cooling=0.995, lmbda=0.5, order=6,\n scale_alpha=True, use_softmax=True, seed=seed, observation_index=\n action_obs_index)\n', (2753, 2978), False, 'from agents.sarsa_lambda import SarsaLambdaAgent\n'), ((6589, 6656), 'warnings.warn', 'warnings.warn', (['"""Too few episodes to initialise agent!"""', 'UserWarning'], {}), "('Too few episodes to initialise agent!', UserWarning)\n", (6602, 6656), False, 'import warnings\n'), ((10294, 10340), 'numpy.zeros', 'np.zeros', (['self.parameter_space.spaces[a].shape'], {}), '(self.parameter_space.spaces[a].shape)\n', (10302, 10340), True, 'import numpy as np\n'), ((13399, 13422), 'numpy.zeros', 'np.zeros', (['(param_size,)'], {}), 
'((param_size,))\n', (13407, 13422), True, 'import numpy as np\n'), ((13948, 13968), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (13962, 13968), True, 'import numpy as np\n'), ((1772, 1800), 'numpy.ones', 'np.ones', (['(self.num_actions,)'], {}), '((self.num_actions,))\n', (1779, 1800), True, 'import numpy as np\n'), ((3993, 4049), 'numpy.tile', 'np.tile', (['self.parameter_obs_index', '(self.num_actions, 1)'], {}), '(self.parameter_obs_index, (self.num_actions, 1))\n', (4000, 4049), True, 'import numpy as np\n'), ((5185, 5219), 'agents.basis.SimpleBasis', 'SimpleBasis', (['nvars'], {'bias_unit': '(True)'}), '(nvars, bias_unit=True)\n', (5196, 5219), False, 'from agents.basis import SimpleBasis, ScaledBasis, PolynomialBasis\n'), ((5974, 5989), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (5982, 5989), True, 'import numpy as np\n'), ((10052, 10083), 'numpy.append', 'np.append', (['grad', 'parameter_grad'], {}), '(grad, parameter_grad)\n', (10061, 10083), True, 'import numpy as np\n'), ((12969, 12982), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (12977, 12982), True, 'import numpy as np\n'), ((13883, 13903), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (13897, 13903), True, 'import numpy as np\n'), ((3524, 3580), 'numpy.tile', 'np.tile', (['self.parameter_obs_index', '(self.num_actions, 1)'], {}), '(self.parameter_obs_index, (self.num_actions, 1))\n', (3531, 3580), True, 'import numpy as np\n'), ((10141, 10159), 'numpy.zeros', 'np.zeros', (['(elems,)'], {}), '((elems,))\n', (10149, 10159), True, 'import numpy as np\n'), ((13646, 13665), 'numpy.linalg.pinv', 'np.linalg.pinv', (['psi'], {}), '(psi)\n', (13660, 13665), True, 'import numpy as np\n'), ((4471, 4518), 'agents.basis.PolynomialBasis', 'PolynomialBasis', (['nvars'], {'order': '(2)', 'bias_unit': '(True)'}), '(nvars, order=2, bias_unit=True)\n', (4486, 4518), False, 'from agents.basis import SimpleBasis, ScaledBasis, PolynomialBasis\n'), ((4580, 
4614), 'agents.basis.SimpleBasis', 'SimpleBasis', (['nvars'], {'bias_unit': '(True)'}), '(nvars, bias_unit=True)\n', (4591, 4614), False, 'from agents.basis import SimpleBasis, ScaledBasis, PolynomialBasis\n'), ((12307, 12324), 'numpy.array', 'np.array', (['returns'], {}), '(returns)\n', (12315, 12324), True, 'import numpy as np\n')] |
'''
submodules to handle Catalogs used in the project
'''
import os
import numpy as np
import h5py
from astropy.io import fits
from astropy.table import Table as aTable
from astropy.cosmology import FlatLambdaCDM
# -- local --
from . import util as UT
class Catalog(object):
    ''' Base object for the catalogs in this module.  Carries no real
    functionality of its own yet; subclasses populate self.catalog.
    '''
    def __init__(self):
        # populated by subclasses once a catalog is read or built
        self.catalog = None

    def _h5py_create_dataset(self, grp, key, data):
        ''' Write *data* to HDF5 group *grp* under the lower-cased *key*,
        working around fits-derived dtypes that the newer h5py / python3
        reject: string arrays are re-cast with a variable-length str
        dtype, and boolean arrays are rebuilt as plain bool arrays.
        '''
        name = key.lower()
        if isinstance(data, np.chararray) or isinstance(data[0], np.str_):
            as_str = np.array(data, dtype=h5py.special_dtype(vlen=str))
            grp.create_dataset(name, data=as_str)
        elif isinstance(data[0], np.bool_):
            as_bool = np.zeros(len(data)).astype(bool)
            as_bool[data] = True
            grp.create_dataset(name, data=as_bool)
        else:
            grp.create_dataset(name, data=data)
        return None

    def flux_to_mag(self, flux):
        ''' Convert flux to magnitude with a 22.5 zero point
        (presumably nanomaggies / AB convention -- confirm). '''
        return 22.5 - 2.5 * np.log10(flux)
class GAMA(Catalog):
''' class to build/read in photometric and spectroscopic overlap
of the GAMA DR2/DR3 data.
The GAMA DR2 data contains photometry and
spectroscopy from GAMA I, which covers three regions of 48 deg^2
area for a total of 144 deg^2.
The GAMA DR3 data contains photometry and spectroscopy from GAMA II,
which covers the 14x6.5 GAMA regions in NGP (G02 region is EXCLUDED).
'''
    def __init__(self):
        # no state needed at construction; data is read lazily via Read()
        pass
    def Read(self, field, data_release=3, silent=True):
        ''' Read in spherematched photometric and spectroscopic
        data from GAMA DR2 (constructed using _Build).

        Parameters
        ----------
        field : str
            'all' or one of the GAMA fields ('g09', 'g12', 'g15'); see _File
        data_release : int
            GAMA data release (2 or 3)
        silent : bool
            if False, print the available columns and object count

        Returns
        -------
        dict with keys 'photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1',
        each a dict of column-name -> numpy array.
        '''
        _file = self._File(field, data_release=data_release)
        if not os.path.isfile(_file): # if file is not constructed
            if not silent: print('Building %s' % _file)
            # NOTE(review): _fieldSplit is defined elsewhere in this class
            if field == 'all': self._Build(data_release=data_release, silent=silent)
            else: self._fieldSplit(data_release=data_release, silent=silent)
        # read in data and compile onto a dictionary
        f = h5py.File(_file, 'r')
        grp_p = f['photo'] # photo data
        grp_s = f['spec'] # spec data
        grp_k0 = f['kcorr_z0.0']
        grp_k1 = f['kcorr_z0.1']
        if not silent:
            print('colums in GAMA photometry')
            print(sorted(grp_p.keys()))
            print('========================')
            print('colums in GAMA spectroscopy')
            print(sorted(grp_s.keys()))
            print('========================')
            print('colums in GAMA kcorrects')
            print(sorted(grp_k0.keys()))
            print('========================')
            print('%i objects' % len(grp_p['ra'][...]))
            print('========================')
        data = {}
        for dkey, grp in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'], [grp_p, grp_s, grp_k0, grp_k1]):
            data[dkey] = {}
            for key in grp.keys():
                data[dkey][key] = grp[key][...]
        return data
def _File(self, field, data_release=3):
''' hdf5 file name of spherematched photometric and spectroscopic
data from GAMA DR3.
notes
-----
* v2 flag was added when photometry catalog was changed from InputCatA.fits
to TilingCat.fits
'''
if field == 'all':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.v2.hdf5']) # output file
elif field == 'g09':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G09.v2.hdf5']) # output file
elif field == 'g12':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G12.v2.hdf5']) # output file
elif field == 'g15':
return ''.join([UT.dat_dir(), 'GAMA_photo_spec.DR', str(data_release), '.G15.v2.hdf5']) # output file
def _Build(self, data_release=3, silent=True):
''' Read in the photometric data and the spectroscopic data,
spherematch them and write the intersecting data to hdf5 file.
'''
if data_release == 3:
# this includes *three* of the four gama fields G02 field has its own data
# read in photometry (GAMA`s tiling catalog; http://www.gama-survey.org/dr3/schema/table.php?id=3)
gama_p = fits.open(UT.dat_dir()+'gama/dr3/TilingCat.fits')[1].data
# read in emission line measurements (http://www.gama-survey.org/dr3/schema/table.php?id=40)
gama_s = fits.open(UT.dat_dir()+'gama/dr3/GaussFitSimple.fits')[1].data
# read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z00.fits')
# read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/dr3/kcorr_model_z01.fits')
elif data_release == 2: # Data Release 2 (what I had before)
# read in photometry (GAMA`s master input catalogue; http://www.gama-survey.org/dr2/schema/table.php?id=156)
gama_p = fits.open(UT.dat_dir()+'gama/InputCatA.fits')[1].data
# read in spectroscopy (http://www.gama-survey.org/dr2/schema/table.php?id=197)
gama_s = fits.open(UT.dat_dir()+'gama/SpecLines.fits')[1].data
# read in kcorrect z = 0.0 (http://www.gama-survey.org/dr2/schema/table.php?id=177)
gama_k0 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z00.fits')
# read in kcorrect z = 0.1 (http://www.gama-survey.org/dr2/schema/table.php?id=178)
gama_k1 = self._readKcorrect(UT.dat_dir()+'gama/kcorr_z01.fits')
if not silent:
#print('colums in GAMA photometry')
#print(sorted(gama_p.__dict__.keys()))
print('%i GAMA photometry objects' % len(gama_p['ra']))
print('========================')
#print('colums in GAMA spectroscopy')
#print(sorted(gama_s.__dict__.keys()))
print('%i GAMA spectroscopy (emission line) objects' % len(gama_s['ra']))
print('========================')
#print('colums in GAMA k-correct')
#print(sorted(gama_k0.__dict__.keys()))
print('%i GAMA k-correct objects' % len(gama_k0['mass']))
print('========================')
# impose some common sense cuts to make sure there's SDSS photometry
# these magnitudes are extinction corrected!
has_sdss_photo = (
(gama_p['u_model'] > -9999.) &
(gama_p['g_model'] > -9999.) &
(gama_p['r_model'] > -9999.) &
(gama_p['i_model'] > -9999.) &
(gama_p['z_model'] > -9999.))
# impose science catalog cuts
# sc >= 4: r < 19.8, GAMA II main survey
# sc >= 5: r < 19.8 and satisfies r-band star-galaxy separation
# sc = 6: r < 19.4 and satisfies r-band star-galaxy separation
# r = r_petro
sciencecut = (gama_p['survey_class'] > 3)
# match cataid with spectroscopic data
has_spec = np.in1d(gama_p['cataid'], gama_s['cataid'])
# match cataid with k-correct data
assert np.array_equal(gama_k0['cataid'], gama_k1['cataid'])
has_kcorr = np.in1d(gama_p['cataid'], gama_k0['cataid'])
# combined sample cut
sample_cut = (has_spec & sciencecut & has_kcorr & has_sdss_photo)
if not silent:
print('of %i GAMA photometry objects' % len(gama_p['cataid']))
print('========================')
print('%i have SDSS photometry data' % np.sum(has_sdss_photo))
print('========================')
print('%i have spectroscopic data' % np.sum(has_spec))
print('========================')
print('%i have k-correct data' % np.sum(has_kcorr))
print('========================')
print('%i have all of the above' % np.sum(sample_cut))
print('========================')
# match up with spectroscopic data
s_match = np.searchsorted(gama_s['cataid'], gama_p['cataid'][sample_cut])
assert np.array_equal(gama_s['cataid'][s_match], gama_p['cataid'][sample_cut])
# match up with k-correct data
k_match = np.searchsorted(gama_k0['cataid'], gama_p['cataid'][sample_cut])
assert np.array_equal(gama_k0['cataid'][k_match], gama_p['cataid'][sample_cut])
# write everything into a hdf5 file
f = h5py.File(self._File('all', data_release=data_release), 'w')
# store photometry data in photometry group
grp_p = f.create_group('photo')
for key in gama_p.names:
self._h5py_create_dataset(grp_p, key, gama_p[key][sample_cut])
# store spectroscopic data in spectroscopic group
grp_s = f.create_group('spec')
for key in gama_s.names:
self._h5py_create_dataset(grp_s, key, gama_s[key][s_match])
# store kcorrect data in kcorrect groups
grp_k0 = f.create_group('kcorr_z0.0')
for key in gama_k0.names:
self._h5py_create_dataset(grp_k0, key, gama_k0[key][k_match])
grp_k1 = f.create_group('kcorr_z0.1')
for key in gama_k1.names:
self._h5py_create_dataset(grp_k1, key, gama_k1[key][k_match])
f.close()
return None
def _fieldSplit(self, data_release=3, silent=True):
''' Split the GAMA photo-spectroscopic data into the differnt
GAMA regions. Different regions have different r-mag limits and
etc so treating them separately is the most sensible!
'''
all_gama = self.Read('all', data_release=data_release, silent=True)
fields = ['g09', 'g12', 'g15']
ra_min = [129.0, 174.0, 211.5]
ra_max = [141.0, 186.0, 223.5]
for i_f, field in enumerate(fields):
in_ra = ((all_gama['photo']['ra'] >= ra_min[i_f]) & (all_gama['photo']['ra'] <= ra_max[i_f]))
if not silent: print('%i objects in %s field' % (np.sum(in_ra), field.upper()))
# write each field into hdf5 files
f = h5py.File(self._File(field, data_release=data_release), 'w')
for k_grp in all_gama.keys(): # photo, spec, kcorr_z0.0, kcorr_z0.1
grp = f.create_group(k_grp)
for key in all_gama[k_grp].keys():
grp.create_dataset(key, data=all_gama[k_grp][key][in_ra])
f.close()
return None
def _readKcorrect(self, fitsfile):
''' GAMA Kcorrect raises VerifyError if read in the usual fashion.
'''
f = fits.open(fitsfile)
f.verify('fix')
return f[1].data
class GamaLegacy(Catalog):
    ''' class to append imaging data from the Legacy survey DR7 for the objects
    in the GAMA DR3 photo+spec data (.GAMA object). The objects in the final
    catalog has GAMA photometry, GAMA spectroscopy, and Legacy-survey photometry
    '''
    def AbsMag(self, data, kcorr=0.1, H0=70, Om0=0.3, galext=False):
        ''' Calculate absolute magnitude in SDSS u, g, r, i, z bands with kcorrect
        at z=`kcorr` given the data dictionary from the `GamaLegacy.Read` method.
        H0 and Om0 specifies the cosmology for the distance modulus.

        Parameters
        ----------
        data : dict
            output of GamaLegacy.Read; must contain the 'gama-photo',
            'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1' groups
        kcorr : float
            k-correct reference redshift; only 0.0 or 0.1 supported
        H0, Om0 : float
            flat LCDM cosmology parameters for the distance modulus
        galext : bool
            if True, use magnitudes taken from the kcorr-z0.1 group
            (presumably galactic-extinction corrected -- TODO confirm)

        Returns
        -------
        numpy.ndarray
            (5, N) array of absolute magnitudes in u, g, r, i, z
        '''
        # check data's structure
        for k in ['gama-photo', 'gama-spec','gama-kcorr-z0.0', 'gama-kcorr-z0.1']:
            if k not in data.keys():
                raise ValueError('input data does not have the approprite keys')
        # check kcorr
        if kcorr not in [0.0, 0.1]: raise ValueError('kcorr = 0.0, 0.1 only')
        bands_sdss = ['u','g','r','i','z']
        # apparent magnitude from GAMA photometry
        if not galext:
            mag_ugriz = np.array([data['gama-photo'][b+'_model'] for b in bands_sdss])
        else:
            mag_ugriz =np.array([data['gama-kcorr-z0.1'][b+'_model'] for b in bands_sdss])
        redshift = data['gama-spec']['z'] # redshift
        # distance modulus
        cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
        D_L = cosmo.luminosity_distance(redshift).value # Mpc
        # DM = 5 log10(D_L / 10pc); 1e5 converts Mpc to units of 10 pc
        DM = 5. * np.log10(1e5*D_L)
        # k-correct (rebinds `kcorr` from the reference redshift to the
        # per-object, per-band k-correction array)
        if kcorr == 0.0:
            kcorr = np.array([data['gama-kcorr-z0.0']['kcorr_'+b] for b in bands_sdss])
        elif kcorr == 0.1:
            kcorr = np.array([data['gama-kcorr-z0.1']['kcorr_'+b] for b in bands_sdss])
        absmag_ugriz = mag_ugriz - DM - kcorr
        return absmag_ugriz
    def Read(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Read in objects from legacy survey DR 5 that overlap with the
        GAMA photo+spectra objects.

        Returns a nested dictionary keyed by group ('gama-photo',
        'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1',
        'legacy-photo'), and also stores it on `self.catalog` for use by
        `select`.

        NOTE(review): the h5py file handle `f` is never closed.
        '''
        fgleg = self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy)
        if not os.path.isfile(fgleg): # if file is not constructed
            if not silent: print('Building %s' % fgleg)
            self._Build(field, dr_gama=dr_gama, dr_legacy=dr_legacy, silent=silent)
        # read in data and compile onto a dictionary
        f = h5py.File(fgleg, 'r')
        grp_gp = f['gama-photo']
        grp_gs = f['gama-spec']
        grp_k0 = f['gama-kcorr-z0.0']
        grp_k1 = f['gama-kcorr-z0.1']
        grp_lp = f['legacy-photo']
        if not silent:
            print('colums in GAMA Photo Data:')
            print(sorted(grp_gp.keys()))
            print('colums in GAMA Spec Data:')
            print(sorted(grp_gs.keys()))
            print('colums in Legacy Data:')
            print(sorted(grp_lp.keys()))
            print('========================')
            print('%i objects' % len(grp_gp['ra'][...]))
        data = {}
        for dk, grp in zip(['gama-photo', 'gama-spec', 'gama-kcorr-z0.0', 'gama-kcorr-z0.1', 'legacy-photo'],
                [grp_gp, grp_gs, grp_k0, grp_k1, grp_lp]):
            data[dk] = {}
            for key in grp.keys():
                data[dk][key] = grp[key][...]
        # shallow copy: group sub-dictionaries are shared with `data`
        self.catalog = data.copy()
        return data
    def select(self, index=None):
        ''' select objects in the catalog by their index

        Requires `Read` to have been called first (reads `self.catalog`).
        Returns a new nested dictionary with every column indexed by
        `index`; `self.catalog` itself is left unchanged.
        '''
        if index is not None:
            if isinstance(index, list):
                index = np.array(index)
            elif isinstance(index, np.ndarray):
                pass
            else:
                raise ValueError("index can only be a list of array")
        select_data = {}
        for grp in self.catalog.keys():
            select_data[grp] = {}
            for key in self.catalog[grp].keys():
                # NOTE(review): if index is None this indexes with None,
                # which adds a leading axis rather than selecting all rows
                select_data[grp][key] = self.catalog[grp][key][index]
        return select_data
    def write(self, catalog, fname):
        ''' Given dictionary with same structure as self.catalog
        write to hdf5 file
        '''
        f = h5py.File(fname, 'w')
        for g in catalog.keys():
            grp = f.create_group(g)
            for k in catalog[g].keys():
                grp.create_dataset(k, data=catalog[g][k])
        f.close()
        return None
    def _File(self, field, dr_gama=3, dr_legacy=7):
        # hdf5 file name for the combined GAMA + Legacy catalog of `field`
        return ''.join([UT.dat_dir(), 'GAMAdr', str(dr_gama), '.', field, '.LEGACYdr', str(dr_legacy), '.v2.hdf5'])
    def _Build(self, field, dr_gama=3, dr_legacy=7, silent=True):
        ''' Get Legacy Survey photometry for objects in the GAMA DR`dr_gama`
        photo+spec objects from the sweep files. This is meant to run on nersc
        but you can also manually download the sweep files and specify the dir
        where the sweep files are located in.
        '''
        from pydl.pydlutils.spheregroup import spherematch
        if dr_legacy == 5:
            # NOTE(review): tractor_s_dir is NOT defined in this branch; if
            # the `except ValueError` fallback below is ever taken for DR5,
            # it fails with NameError
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr5/sweep/5.0/'
            tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/'
        elif dr_legacy == 7:
            sweep_n_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            sweep_s_dir = '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/sweep/7.1/'
            tractor_n_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
            tractor_s_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/'
        elif dr_legacy == 8:
            sweep_n_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/sweep/8.0/'
            sweep_s_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/south/sweep/8.0/'
            tractor_n_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr8/north/tractor/'
            # NOTE(review): this south tractor path points at dr7, not dr8 --
            # looks like a copy-paste typo; confirm before relying on DR8
            tractor_s_dir = \
                    '/global/project/projectdirs/cosmo/data/legacysurvey/dr7/south/tractor/'
        # read in the names of the sweep files
        fsweep = ''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat'])
        if not os.path.isfile(fsweep): _ = self._getSweeps(field, silent=silent)
        sweep_files = np.loadtxt(fsweep, unpack=True, usecols=[0], dtype='S')
        if not silent: print("there are %i sweep files in the %s GAMA region" % (len(sweep_files), field))
        # read in GAMA objects
        gama = GAMA()
        gama_data = gama.Read(field, data_release=dr_gama, silent=silent)
        sweep_dict = {}
        gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict = {}, {}, {}, {}
        # loop through the files and only keep ones that spherematch with GAMA objects
        for i_f, f in enumerate(sweep_files):
            # read in sweep object; try the north dir first, fall back to south
            for sweep_dir in [sweep_n_dir, sweep_s_dir]:
                fsweep = os.path.join(sweep_dir, f.decode('unicode_escape'))
                if os.path.isfile(fsweep): break
            sweep = fits.open(fsweep)[1].data
            if not silent: print('matching %s' % fsweep)
            # spherematch the sweep objects with GAMA objects
            # (match the smaller catalog against the larger; 0.000277778 deg = 1")
            if len(sweep['ra']) > len(gama_data['photo']['ra']):
                match = spherematch(sweep['ra'], sweep['dec'],
                        gama_data['photo']['ra'], gama_data['photo']['dec'], 0.000277778)
            else:
                match_inv = spherematch(gama_data['photo']['ra'], gama_data['photo']['dec'],
                        sweep['ra'], sweep['dec'], 0.000277778)
                # swap index arrays so match[0] always indexes the sweep
                match = [match_inv[1], match_inv[0], match_inv[2]]
            if not silent:
                print('%i matches from the %s sweep file' % (len(match[0]), f))
            # save sweep photometry to `sweep_dict`
            for key in sweep.names:
                if i_f == 0:
                    sweep_dict[key.lower()] = sweep[key][match[0]]
                else:
                    sweep_dict[key.lower()] = np.concatenate([sweep_dict[key.lower()], sweep[key][match[0]]])
            # save matching GAMA data ('photo', 'spec', and kcorrects)
            for gkey, gdict in zip(['photo', 'spec', 'kcorr_z0.0', 'kcorr_z0.1'],
                    [gama_photo_dict, gama_spec_dict, gama_kcorr0_dict, gama_kcorr1_dict]):
                for key in gama_data[gkey].keys():
                    if i_f == 0:
                        gdict[key] = gama_data[gkey][key][match[1]]
                    else:
                        gdict[key] = np.concatenate([gdict[key], gama_data[gkey][key][match[1]]])
            del sweep # free memory? (apparently not really)
        if not silent:
            print('========================')
            print('%i objects out of %i GAMA objects mached' % (len(sweep_dict['ra']), len(gama_data['photo']['dec'])) )
        # sanity check: all output groups have the same number of rows
        assert len(sweep_dict['ra']) == len(gama_photo_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_spec_dict['ra'])
        assert len(sweep_dict['ra']) == len(gama_kcorr0_dict['mass'])
        assert len(sweep_dict['ra']) == len(gama_kcorr1_dict['mass'])
        # writeout all the GAMA objects without sweep objects
        if not silent:
            nosweep = ~np.in1d(gama_data['photo']['objid'], gama_photo_dict['objid'])
            f_nosweep = ''.join([UT.dat_dir(),
                'GAMAdr', str(dr_gama), '.', field, '.LEGACYdr', str(dr_legacy), '.nosweep_match.fits'])
            print('========================')
            print('Writing out RA, Dec of %i GAMA objects without Legacy sweep objects to %s' %
                    (np.sum(nosweep), f_nosweep))
            tb = aTable([gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][nosweep]],
                    names=('ra', 'dec'))
            tb.meta['COMMENTS'] = 'RA, Dec of GAMA objects without matches in Legacy DR5 sweep'
            tb.write(f_nosweep, format='fits', overwrite=True)
            #np.savetxt(f_nosweep, np.array([gama_data['photo']['ra'], gama_data['photo']['dec']]).T, header='RA, Dec')
        # read apfluxes from tractor catalogs
        try:
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                    sweep_dict['objid'], tractor_dir=tractor_n_dir)
        except ValueError:
            # fall back to the south tractor dir when a brick is missing
            apflux_dict = self._getTractorApflux(sweep_dict['brickname'],
                    sweep_dict['objid'], tractor_dir=tractor_s_dir)
        assert apflux_dict['apflux_g'].shape[0] == len(sweep_dict['brickname'])
        # save data to hdf5 file
        if not silent: print('writing to %s' % self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy))
        f = h5py.File(self._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy), 'w')
        grp_gp = f.create_group('gama-photo')
        grp_gs = f.create_group('gama-spec')
        grp_k0 = f.create_group('gama-kcorr-z0.0')
        grp_k1 = f.create_group('gama-kcorr-z0.1')
        grp_lp = f.create_group('legacy-photo')
        for key in sweep_dict.keys():
            self._h5py_create_dataset(grp_lp, key, sweep_dict[key])
        for key in apflux_dict.keys(): # additional apflux data.
            self._h5py_create_dataset(grp_lp, key, apflux_dict[key])
        for key in gama_photo_dict.keys():
            grp_gp.create_dataset(key, data=gama_photo_dict[key])
        for key in gama_spec_dict.keys():
            grp_gs.create_dataset(key, data=gama_spec_dict[key])
        for key in gama_kcorr0_dict.keys():
            grp_k0.create_dataset(key, data=gama_kcorr0_dict[key])
        for key in gama_kcorr1_dict.keys():
            grp_k1.create_dataset(key, data=gama_kcorr1_dict[key])
        f.close()
        return None
    def _getSweeps(self, field, silent=True):
        ''' Construct list of sweep files given GAMA object.

        Sweep files are named sweep-<brickmin>-<brickmax>.fits where the
        brick strings encode an RA range in 10 deg steps and a Dec range
        in 5 deg steps; this enumerates every such tile covering the
        field's RA/Dec footprint and writes the list to
        <dat_dir>/legacy/<field>.sweep_list.dat.
        '''
        # read in GAMA objects in field
        gama = GAMA()
        if field == 'all': raise ValueError("only select specific GAMA fields; not the entire data release")
        gama_data = gama.Read(field, silent=silent)
        # get brickmin and brickmax of sweep files
        ra_mins = 10.*np.arange(gama_data['photo']['ra'].min() // 10., (gama_data['photo']['ra'].max() // 10.) + 1)
        ra_maxs = ra_mins + 10.
        dec_mins = 5.*np.arange(gama_data['photo']['dec'].min() // 5., (gama_data['photo']['dec'].max() // 5.) + 1)
        dec_maxs = dec_mins + 5.
        legacy_gama_sweep = []
        for i in range(len(ra_mins)):
            for j in range(len(dec_mins)):
                # 'p'/'m' encodes the sign of the Dec boundary
                if dec_mins[j] < 0: pm_sign = 'm'
                else: pm_sign = 'p'
                brickmin = ''.join([str(int(ra_mins[i])).zfill(3), pm_sign,
                    str(int(np.abs(dec_mins[j]))).zfill(3)])
                if dec_maxs[j] < 0: pm_sign = 'm'
                else: pm_sign = 'p'
                brickmax = ''.join([str(int(ra_maxs[i])).zfill(3), pm_sign,
                    str(int(np.abs(dec_maxs[j]))).zfill(3)])
                f_sweep = ''.join(['sweep-', brickmin, '-', brickmax, '.fits'])
                legacy_gama_sweep.append(f_sweep)
                if not silent: print('... %s' % f_sweep)
        np.savetxt(''.join([UT.dat_dir(), 'legacy/', field, '.sweep_list.dat']),
                legacy_gama_sweep, fmt='%s')
        return ra_mins, dec_mins
    def _getTractorApflux(self, brickname, objids,
            tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr7/tractor/', silent=True):
        ''' The catalog is constructed from the sweep catalog and the
        GAMA DR3 photo+spec data. The sweep catalog does not include
        all the photometric data from the legacy survey. This methods
        appends 'apflux_g', 'apflux_r', 'apflux_z' and relevant columsn
        from the tractor files.
        This can (and probably should) be extended to other columns

        NOTE(review): `tractor.field(...)[objids[inbrick]]` indexes tractor
        rows directly by objid -- this assumes row i of a brick's tractor
        table has objid i; confirm against the tractor file layout.
        '''
        bricks_uniq = np.unique(brickname) # unique bricks
        # AAA = first three characters of the brick name = tractor subdirectory
        AAAs = np.array([brick[:3] for brick in bricks_uniq])
        # apfluxes in 'g', 'r', and 'z' bands; 8 aperture radii per object
        bands = ['g', 'r', 'z']
        apfluxes = np.zeros((3, len(brickname), 8))
        apflux_ivars = np.zeros((3, len(brickname), 8))
        apflux_resids = np.zeros((3, len(brickname), 8))
        n_brick = 0
        for ii, AAA, brick in zip(range(len(AAAs)), AAAs, bricks_uniq):
            name = ''.join([tractor_dir, AAA, '/tractor-', brick, '.fits'])
            if not silent: print('%i of %i unique bricks -- %s' % (ii, len(AAAs), brick))
            if not os.path.isfile(name): raise ValueError('%s tractor file not available' % name)
            f_tractor = fits.open(name)
            tractor = f_tractor[1].data
            inbrick = (brickname == brick)
            for i_k, key in enumerate(bands):
                apfluxes[i_k, inbrick, :] = tractor.field('apflux_'+key)[objids[inbrick]]
                apflux_ivars[i_k, inbrick, :] = tractor.field('apflux_ivar_'+key)[objids[inbrick]]
                apflux_resids[i_k, inbrick, :] = tractor.field('apflux_resid_'+key)[objids[inbrick]]
            n_brick += np.sum(inbrick)
        # every object must have been covered by exactly one brick
        assert n_brick == len(brickname)
        # return dictionary with appropriate keys
        apflux_dict = {}
        for i_k, key in enumerate(bands):
            apflux_dict['apflux_'+key] = apfluxes[i_k,:,:]
            apflux_dict['apflux_ivar_'+key] = apflux_ivars[i_k,:,:]
            apflux_dict['apflux_resid_'+key] = apflux_resids[i_k,:,:]
        return apflux_dict
class Legacy(Catalog):
    ''' class for reading and selecting objects from the Legacy Survey
    sweep catalogs within a hardcoded ~1400 deg^2 test region
    (160 < RA < 230 deg, -2 < Dec < 18 deg).
    '''
    def _1400deg2_test(self, dr=8, rlimit=None):
        ''' Apply spatial masking, star-galaxy separation, and quality cuts
        to the sweep objects in the 1400 deg^2 test region (collected by
        _collect_1400deg2_test) and write the selected sample to file.

        Parameters
        ----------
        dr : int
            legacy survey data release (currently unused here; kept for
            interface consistency with _collect_1400deg2_test)
        rlimit : float, optional
            r-band magnitude limit used when the input sweep file was
            collected; selects which input/output file names are used
        '''
        # hardcoded patch of sky
        ra_min, ra_max = 160., 230.
        dec_min, dec_max = -2., 18.
        area = self._1400deg2_area()
        # read legacy sweeps data in 1400 deg^2 region
        if rlimit is None:
            _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
        else:
            _fsweep = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
        fsweep = h5py.File(_fsweep, 'r')
        sweep = {}
        for k in fsweep.keys(): sweep[k] = fsweep[k][...]
        print('%i sweep objects' % len(sweep['flux_r']))
        # spatial masking
        _spatial_mask = self.spatial_mask(sweep['maskbits'], [sweep['nobs_g'], sweep['nobs_r'], sweep['nobs_z']])
        print('%i spatial mask' % np.sum(_spatial_mask))
        # star-galaxy separation
        _star_galaxy = self.star_galaxy(sweep['gaia_phot_g_mean_mag'], sweep['flux_r'])
        print('%i star-galaxy separation' % np.sum(_star_galaxy))
        # quality cut on Milky-Way-transmission-corrected magnitudes
        gmag = self.flux_to_mag(sweep['flux_g']/sweep['mw_transmission_g'])
        rmag = self.flux_to_mag(sweep['flux_r']/sweep['mw_transmission_r'])
        zmag = self.flux_to_mag(sweep['flux_z']/sweep['mw_transmission_z'])
        _quality_cut = self.quality_cut(
                np.array([sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']]),
                np.array([sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']]),
                np.array([sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']]),
                gmag - rmag,
                rmag - zmag)
        print('%i quality cut' % np.sum(_quality_cut))
        sample_select = (_spatial_mask & _star_galaxy & _quality_cut)
        print('%i (spatial mask) & (star-galaxy sep.) & (quality cut)' % (np.sum(sample_select)))
        # bug fix: the original formatted rlimit with %.1f unconditionally,
        # which raised TypeError when rlimit is None
        if rlimit is None:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.hdf5')
        else:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'bgs.1400deg2.rlim%.1f.hdf5' % rlimit)
        f = h5py.File(fout, 'w')
        for k in sweep.keys():
            self._h5py_create_dataset(f, k, sweep[k][sample_select])
        f.close()
        return None
    def _1400deg2_area(self):
        ''' area of 1400 deg^2 test region
        '''
        # hardcoded patch of sky
        ra_min, ra_max = 160., 230.
        dec_min, dec_max = -2., 18.
        # spherical area element: dRA (rad) x d(sin Dec), converted to deg^2
        area = (np.radians(ra_max) - np.radians(ra_min))*(np.sin(np.radians(dec_max)) - np.sin(np.radians(dec_min)))
        area *= (180/np.pi)**2
        print('%.f deg^2 test region' % area)
        return area
    def quality_cut(self, frac_flux, fracmasked, fracin, g_r, r_z):
        ''' apply baseline quality cut
        * frac_flux_[g,r,z]<5 Not overwhelmed by neighbouring source (any band)
        * fracmasked_[g,r,z]<0.4 Model not dominated by masked pixels in any band
        * fracin_[g,r,z]>0.3 Most of the model flux not outside the region of the data used to fit the model
        * -1< g-r < 4 Not an absolutely bizarre colour
        * -1< r-z < 4 Not an absolutely bizarre colour

        Each of frac_flux, fracmasked, fracin is a (3, N) array with the
        g, r, z bands along axis 0; returns a boolean selection of length N.
        '''
        assert frac_flux.shape[0] == 3
        assert fracmasked.shape[0] == 3
        assert fracin.shape[0] == 3
        # Not overwhelmed by neighbouring source (any band)
        _frac_flux = ((frac_flux[0] < 5.) & (frac_flux[1] < 5.) & (frac_flux[2] < 5.))
        # Model not dominated by masked pixels in any band
        _fracmasked = ((fracmasked[0] < 0.4) & (fracmasked[1] < 0.4) & (fracmasked[2] < 0.4))
        # Most of the model flux not outside the region of the data used to fit the model
        _fracin = ((fracin[0] > 0.3) & (fracin[1] > 0.3) & (fracin[2] > 0.3))
        # color cut
        _colorcut = ((g_r > -1.) & (g_r < 4.) & (r_z > -1.) & (r_z < 4.))
        cut = (_frac_flux & _fracmasked & _fracin & _colorcut)
        return cut
    def star_galaxy(self, gaia_G, r_flux):
        ''' star-galaxy separation using GAIA and tractor photometry
        (gaia G mag) - (raw r mag) > 0.6 or (gaia G mag) == 0
        '''
        G_rr = gaia_G - self.flux_to_mag(r_flux)
        isgalaxy = (G_rr > 0.6) | (gaia_G == 0)
        return isgalaxy
    def spatial_mask(self, maskbits, nobs):
        ''' spatial masking around
        * bright stars
        * medium bright stars
        * clusters
        * large galaxies

        Parameters
        ----------
        maskbits : array
            MASKBITS column of the sweep catalog
        nobs : sequence of three arrays
            (nobs_g, nobs_r, nobs_z) observation counts per band

        Returns
        -------
        boolean array, True for objects that survive all masks
        '''
        nobs_g, nobs_r, nobs_z = nobs
        BS = (np.uint64(maskbits) & np.uint64(2**1))!=0 # bright stars
        MS = (np.uint64(maskbits) & np.uint64(2**11))!=0 # medium bright stars
        GC = (np.uint64(maskbits) & np.uint64(2**13))!=0 # clusters
        LG = (np.uint64(maskbits) & np.uint64(2**12))!=0 # large galaxies
        # bits 5, 6, 7 -- per-band allmask flags
        allmask = ((maskbits & 2**6) != 0) | ((maskbits & 2**5) != 0) | ((maskbits & 2**7) != 0)
        # require at least one observation in each of g, r, z
        nobs = ((nobs_g < 1) | (nobs_r < 1) | (nobs_z < 1))
        mask = ~(BS | MS | GC | LG | allmask | nobs)
        return mask
    def _collect_1400deg2_test(self, dr=8, rlimit=None):
        ''' collect sweeps data within the same 1400 deg2 test region that Omar used for dr7
        and save to file.

        Parameters
        ----------
        dr : int
            legacy survey data release; only 8 is implemented
        rlimit : float, optional
            if given, keep only objects brighter than this r-band limit
            (MW-transmission corrected) and tag the output file name with it
        '''
        import glob
        if dr != 8: raise NotImplementedError
        if os.environ['NERSC_HOST'] != 'cori': raise ValueError('this script is meant to run on cori only')
        # hardcoded patch of sky
        ra_min, ra_max = 160., 230.
        dec_min, dec_max = -2., 18.
        dir_legacy = '/project/projectdirs/cosmo/data/legacysurvey/'
        dir_north = os.path.join(dir_legacy, 'dr8/north/sweep/8.0')
        dir_south = os.path.join(dir_legacy, 'dr8/south/sweep/8.0')
        fsweeps_N = glob.glob('%s/*.fits' % dir_north)
        print('%i North sweep files' % len(fsweeps_N))
        fsweeps_S = glob.glob('%s/*.fits' % dir_south)
        print('%i South sweep files' % len(fsweeps_S))
        fsweeps = sorted([os.path.join(dir_north, _fs) for _fs in fsweeps_N] + [os.path.join(dir_south, _fs) for _fs in fsweeps_S])
        sweeps = {}
        for _fsweep in fsweeps:
            # get sweep RA and Dec range
            sweep_ra_min, sweep_ra_max, sweep_dec_min, sweep_dec_max = self._parse_brickname(_fsweep)
            # check whether it's in the region or not
            not_in_region = (
                    (sweep_ra_max < ra_min) |
                    (sweep_ra_min > ra_max) |
                    (sweep_dec_max < dec_min) |
                    (sweep_dec_min > dec_max)
                    )
            if not_in_region: continue
            # read sweep file
            sweep = fits.open(_fsweep)[1].data
            # area that's within the test region
            mask_region = (
                    (sweep['RA'] >= ra_min) &
                    (sweep['RA'] <= ra_max) &
                    (sweep['DEC'] >= dec_min) &
                    (sweep['DEC'] <= dec_max))
            if np.sum(mask_region) == 0: continue
            if rlimit is None:
                # bug fix: np.ones expects a shape; the original passed the
                # RA array itself, which raises TypeError
                rcut = np.ones(len(sweep['RA'])).astype(bool)
            else:
                rflux = sweep['FLUX_R'] / sweep['MW_TRANSMISSION_R']
                # flux limit equivalent to r < rlimit (22.5 zero-point)
                rcut = (rflux > 10**((22.5-rlimit)/2.5))
            print('%i obj in %s' % (np.sum(mask_region), os.path.basename(_fsweep)))
            if len(sweeps.keys()) == 0:
                for k in sweep.names:
                    sweeps[k] = sweep[k][mask_region & rcut]
            else:
                for k in sweep.names:
                    sweeps[k] = np.concatenate([sweeps[k], sweep[k][mask_region & rcut]], axis=0)
        if rlimit is None:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.hdf5')
        else:
            fout = os.path.join(UT.dat_dir(), 'survey_validation', 'legacy_sweeps.1400deg2.rlim%.1f.hdf5' % rlimit)
        f = h5py.File(fout, 'w')
        for k in sweeps.keys():
            self._h5py_create_dataset(f, k, sweeps[k])
        f.close()
        return None
    def _parse_brickname(self, brickname):
        ''' parse ra and dec range from brick name

        Sweep files are named sweep-<RRR><p|m><DDD>-<RRR><p|m><DDD>.fits;
        'p'/'m' encode the sign of the Dec value.
        '''
        name = os.path.basename(brickname).replace('.fits', '') # get rid of directory and ext
        radec1 = name.split('-')[1]
        radec2 = name.split('-')[2]
        if 'p' in radec1: _c = 'p'
        elif 'm' in radec1: _c = 'm'
        ra_min = float(radec1.split(_c)[0])
        dec_min = float(radec1.split(_c)[1])
        if 'p' in radec2: _c = 'p'
        elif 'm' in radec2: _c = 'm'
        ra_max = float(radec2.split(_c)[0])
        dec_max = float(radec2.split(_c)[1])
        return ra_min, ra_max, dec_min, dec_max
    def _Tycho(self, ra_lim=None, dec_lim=None):
        ''' read in tycho2 catalog within RA and Dec range

        ra_lim, dec_lim : optional (min, max) tuples in degrees; when
        omitted no cut is applied on that coordinate.
        '''
        _tycho = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'tycho2.fits'))[1].data
        mask_region = np.ones(len(_tycho['RA'])).astype(bool)
        if ra_lim is not None:
            mask_region = mask_region & (_tycho['RA'] >= ra_lim[0]) & (_tycho['RA'] <= ra_lim[1])
        if dec_lim is not None:
            mask_region = mask_region & (_tycho['DEC'] >= dec_lim[0]) & (_tycho['DEC'] <= dec_lim[1])
        tycho = {}
        for k in _tycho.names:
            tycho[k] = _tycho[k][mask_region]
        return tycho
    def _LSLGA(self, ra_lim=None, dec_lim=None):
        ''' read in Legacy Survey Large Galaxy Atlas

        ra_lim, dec_lim : optional (min, max) tuples in degrees; when
        omitted no cut is applied on that coordinate.
        '''
        _lslga = fits.open(os.path.join(UT.dat_dir(), 'survey_validation', 'LSLGA-v2.0.fits'))[1].data
        mask_region = np.ones(len(_lslga['RA'])).astype(bool)
        if ra_lim is not None:
            mask_region = mask_region & (_lslga['RA'] >= ra_lim[0]) & (_lslga['RA'] <= ra_lim[1])
        if dec_lim is not None:
            mask_region = mask_region & (_lslga['DEC'] >= dec_lim[0]) & (_lslga['DEC'] <= dec_lim[1])
        lslga = {}
        for k in _lslga.names:
            lslga[k] = _lslga[k][mask_region]
        return lslga
def _GamaLegacy_TractorAPFLUX(field='g15', dr_gama=3, dr_legacy=5):
    ''' Retroactively add apflux columns from the tractor catalogs
    to the GamaLegacy catalog constructed and saved to file. This is a
    hack.

    Parameters
    ----------
    field : str
        GAMA field of the saved GamaLegacy catalog ('g09', 'g12', 'g15').
        Bug fix: the original called `gleg._File()` with no arguments, but
        `GamaLegacy._File` requires a field, so the zero-argument call
        always raised TypeError. Default 'g15' is a guess -- TODO confirm
        against actual usage.
    dr_gama, dr_legacy : int
        data releases used to locate the saved catalog file; dr_legacy
        defaults to 5 to match the hardcoded DR5 tractor directory below.

    Raises
    ------
    ValueError
        if the apflux columns are already present in the catalog
    '''
    gleg = GamaLegacy()
    # open saved gama-legacy catalog for appending
    f_gleg = h5py.File(gleg._File(field, dr_gama=dr_gama, dr_legacy=dr_legacy), 'r+')
    # legacy photometry group
    grp_lp = f_gleg['legacy-photo']
    if 'apflux_g' in grp_lp.keys():
        # check that the columns don't already exist; close the handle
        # before raising so the file isn't left open in r+ mode
        f_gleg.close()
        raise ValueError('apfluxes already in the catalog')
    # read apfluxes from tractor catalogs
    # bug fix: the original passed the keyword `dir=`, but _getTractorApflux
    # takes `tractor_dir=` (the call raised TypeError)
    apflux_dict = gleg._getTractorApflux(grp_lp['brickname'].value, grp_lp['objid'].value,
            tractor_dir='/global/project/projectdirs/cosmo/data/legacysurvey/dr5/tractor/')
    assert apflux_dict['apflux_g'].shape[0] == len(grp_lp['brickname'].value)
    # save fluxes to the dataset
    for key in apflux_dict.keys():
        grp_lp.create_dataset(key, data=apflux_dict[key])
    f_gleg.close()
    return None
| [
"numpy.uint64",
"numpy.sum",
"numpy.abs",
"h5py.special_dtype",
"numpy.ones",
"os.path.isfile",
"glob.glob",
"os.path.join",
"numpy.unique",
"numpy.loadtxt",
"pydl.pydlutils.spheregroup.spherematch",
"numpy.log10",
"astropy.cosmology.FlatLambdaCDM",
"h5py.File",
"numpy.radians",
"os.pa... | [((2259, 2280), 'h5py.File', 'h5py.File', (['_file', '"""r"""'], {}), "(_file, 'r')\n", (2268, 2280), False, 'import h5py\n'), ((7417, 7460), 'numpy.in1d', 'np.in1d', (["gama_p['cataid']", "gama_s['cataid']"], {}), "(gama_p['cataid'], gama_s['cataid'])\n", (7424, 7460), True, 'import numpy as np\n'), ((7521, 7573), 'numpy.array_equal', 'np.array_equal', (["gama_k0['cataid']", "gama_k1['cataid']"], {}), "(gama_k0['cataid'], gama_k1['cataid'])\n", (7535, 7573), True, 'import numpy as np\n'), ((7595, 7639), 'numpy.in1d', 'np.in1d', (["gama_p['cataid']", "gama_k0['cataid']"], {}), "(gama_p['cataid'], gama_k0['cataid'])\n", (7602, 7639), True, 'import numpy as np\n'), ((8410, 8473), 'numpy.searchsorted', 'np.searchsorted', (["gama_s['cataid']", "gama_p['cataid'][sample_cut]"], {}), "(gama_s['cataid'], gama_p['cataid'][sample_cut])\n", (8425, 8473), True, 'import numpy as np\n'), ((8490, 8561), 'numpy.array_equal', 'np.array_equal', (["gama_s['cataid'][s_match]", "gama_p['cataid'][sample_cut]"], {}), "(gama_s['cataid'][s_match], gama_p['cataid'][sample_cut])\n", (8504, 8561), True, 'import numpy as np\n'), ((8621, 8685), 'numpy.searchsorted', 'np.searchsorted', (["gama_k0['cataid']", "gama_p['cataid'][sample_cut]"], {}), "(gama_k0['cataid'], gama_p['cataid'][sample_cut])\n", (8636, 8685), True, 'import numpy as np\n'), ((8701, 8773), 'numpy.array_equal', 'np.array_equal', (["gama_k0['cataid'][k_match]", "gama_p['cataid'][sample_cut]"], {}), "(gama_k0['cataid'][k_match], gama_p['cataid'][sample_cut])\n", (8715, 8773), True, 'import numpy as np\n'), ((11006, 11025), 'astropy.io.fits.open', 'fits.open', (['fitsfile'], {}), '(fitsfile)\n', (11015, 11025), False, 'from astropy.io import fits\n'), ((12430, 12459), 'astropy.cosmology.FlatLambdaCDM', 'FlatLambdaCDM', ([], {'H0': 'H0', 'Om0': 'Om0'}), '(H0=H0, Om0=Om0)\n', (12443, 12459), False, 'from astropy.cosmology import FlatLambdaCDM\n'), ((13434, 13455), 'h5py.File', 'h5py.File', (['fgleg', '"""r"""'], {}), 
"(fgleg, 'r')\n", (13443, 13455), False, 'import h5py\n'), ((15202, 15223), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (15211, 15223), False, 'import h5py\n'), ((17496, 17551), 'numpy.loadtxt', 'np.loadtxt', (['fsweep'], {'unpack': '(True)', 'usecols': '[0]', 'dtype': '"""S"""'}), "(fsweep, unpack=True, usecols=[0], dtype='S')\n", (17506, 17551), True, 'import numpy as np\n'), ((25248, 25268), 'numpy.unique', 'np.unique', (['brickname'], {}), '(brickname)\n', (25257, 25268), True, 'import numpy as np\n'), ((25301, 25347), 'numpy.array', 'np.array', (['[brick[:3] for brick in bricks_uniq]'], {}), '([brick[:3] for brick in bricks_uniq])\n', (25309, 25347), True, 'import numpy as np\n'), ((27473, 27496), 'h5py.File', 'h5py.File', (['_fsweep', '"""r"""'], {}), "(_fsweep, 'r')\n", (27482, 27496), False, 'import h5py\n'), ((28998, 29018), 'h5py.File', 'h5py.File', (['fout', '"""w"""'], {}), "(fout, 'w')\n", (29007, 29018), False, 'import h5py\n'), ((32537, 32584), 'os.path.join', 'os.path.join', (['dir_legacy', '"""dr8/north/sweep/8.0"""'], {}), "(dir_legacy, 'dr8/north/sweep/8.0')\n", (32549, 32584), False, 'import os\n'), ((32605, 32652), 'os.path.join', 'os.path.join', (['dir_legacy', '"""dr8/south/sweep/8.0"""'], {}), "(dir_legacy, 'dr8/south/sweep/8.0')\n", (32617, 32652), False, 'import os\n'), ((32682, 32716), 'glob.glob', 'glob.glob', (["('%s/*.fits' % dir_north)"], {}), "('%s/*.fits' % dir_north)\n", (32691, 32716), False, 'import glob\n'), ((32793, 32827), 'glob.glob', 'glob.glob', (["('%s/*.fits' % dir_south)"], {}), "('%s/*.fits' % dir_south)\n", (32802, 32827), False, 'import glob\n'), ((34869, 34889), 'h5py.File', 'h5py.File', (['fout', '"""w"""'], {}), "(fout, 'w')\n", (34878, 34889), False, 'import h5py\n'), ((1918, 1939), 'os.path.isfile', 'os.path.isfile', (['_file'], {}), '(_file)\n', (1932, 1939), False, 'import os\n'), ((12160, 12224), 'numpy.array', 'np.array', (["[data['gama-photo'][b + '_model'] for b in bands_sdss]"], 
{}), "([data['gama-photo'][b + '_model'] for b in bands_sdss])\n", (12168, 12224), True, 'import numpy as np\n'), ((12262, 12331), 'numpy.array', 'np.array', (["[data['gama-kcorr-z0.1'][b + '_model'] for b in bands_sdss]"], {}), "([data['gama-kcorr-z0.1'][b + '_model'] for b in bands_sdss])\n", (12270, 12331), True, 'import numpy as np\n'), ((12541, 12565), 'numpy.log10', 'np.log10', (['(100000.0 * D_L)'], {}), '(100000.0 * D_L)\n', (12549, 12565), True, 'import numpy as np\n'), ((12626, 12695), 'numpy.array', 'np.array', (["[data['gama-kcorr-z0.0']['kcorr_' + b] for b in bands_sdss]"], {}), "([data['gama-kcorr-z0.0']['kcorr_' + b] for b in bands_sdss])\n", (12634, 12695), True, 'import numpy as np\n'), ((13172, 13193), 'os.path.isfile', 'os.path.isfile', (['fgleg'], {}), '(fgleg)\n', (13186, 13193), False, 'import os\n'), ((17408, 17430), 'os.path.isfile', 'os.path.isfile', (['fsweep'], {}), '(fsweep)\n', (17422, 17430), False, 'import os\n'), ((20950, 21055), 'astropy.table.Table', 'aTable', (["[gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][nosweep]]"], {'names': "('ra', 'dec')"}), "([gama_data['photo']['ra'][nosweep], gama_data['photo']['dec'][\n nosweep]], names=('ra', 'dec'))\n", (20956, 21055), True, 'from astropy.table import Table as aTable\n'), ((25989, 26004), 'astropy.io.fits.open', 'fits.open', (['name'], {}), '(name)\n', (25998, 26004), False, 'from astropy.io import fits\n'), ((26450, 26465), 'numpy.sum', 'np.sum', (['inbrick'], {}), '(inbrick)\n', (26456, 26465), True, 'import numpy as np\n'), ((28339, 28412), 'numpy.array', 'np.array', (["[sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']]"], {}), "([sweep['fracflux_g'], sweep['fracflux_r'], sweep['fracflux_z']])\n", (28347, 28412), True, 'import numpy as np\n'), ((28431, 28510), 'numpy.array', 'np.array', (["[sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']]"], {}), "([sweep['fracmasked_g'], sweep['fracmasked_r'], sweep['fracmasked_z']])\n", (28439, 
28510), True, 'import numpy as np\n'), ((28528, 28595), 'numpy.array', 'np.array', (["[sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']]"], {}), "([sweep['fracin_g'], sweep['fracin_r'], sweep['fracin_z']])\n", (28536, 28595), True, 'import numpy as np\n'), ((1155, 1169), 'numpy.log10', 'np.log10', (['flux'], {}), '(flux)\n', (1163, 1169), True, 'import numpy as np\n'), ((12743, 12812), 'numpy.array', 'np.array', (["[data['gama-kcorr-z0.1']['kcorr_' + b] for b in bands_sdss]"], {}), "([data['gama-kcorr-z0.1']['kcorr_' + b] for b in bands_sdss])\n", (12751, 12812), True, 'import numpy as np\n'), ((14585, 14600), 'numpy.array', 'np.array', (['index'], {}), '(index)\n', (14593, 14600), True, 'import numpy as np\n'), ((18238, 18260), 'os.path.isfile', 'os.path.isfile', (['fsweep'], {}), '(fsweep)\n', (18252, 18260), False, 'import os\n'), ((18534, 18643), 'pydl.pydlutils.spheregroup.spherematch', 'spherematch', (["sweep['ra']", "sweep['dec']", "gama_data['photo']['ra']", "gama_data['photo']['dec']", '(0.000277778)'], {}), "(sweep['ra'], sweep['dec'], gama_data['photo']['ra'], gama_data[\n 'photo']['dec'], 0.000277778)\n", (18545, 18643), False, 'from pydl.pydlutils.spheregroup import spherematch\n'), ((18711, 18820), 'pydl.pydlutils.spheregroup.spherematch', 'spherematch', (["gama_data['photo']['ra']", "gama_data['photo']['dec']", "sweep['ra']", "sweep['dec']", '(0.000277778)'], {}), "(gama_data['photo']['ra'], gama_data['photo']['dec'], sweep['ra'\n ], sweep['dec'], 0.000277778)\n", (18722, 18820), False, 'from pydl.pydlutils.spheregroup import spherematch\n'), ((20523, 20585), 'numpy.in1d', 'np.in1d', (["gama_data['photo']['objid']", "gama_photo_dict['objid']"], {}), "(gama_data['photo']['objid'], gama_photo_dict['objid'])\n", (20530, 20585), True, 'import numpy as np\n'), ((25886, 25906), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (25900, 25906), False, 'import os\n'), ((27809, 27830), 'numpy.sum', 'np.sum', (['_spatial_mask'], {}), 
'(_spatial_mask)\n', (27815, 27830), True, 'import numpy as np\n'), ((28007, 28027), 'numpy.sum', 'np.sum', (['_star_galaxy'], {}), '(_star_galaxy)\n', (28013, 28027), True, 'import numpy as np\n'), ((28691, 28711), 'numpy.sum', 'np.sum', (['_quality_cut'], {}), '(_quality_cut)\n', (28697, 28711), True, 'import numpy as np\n'), ((28860, 28881), 'numpy.sum', 'np.sum', (['sample_select'], {}), '(sample_select)\n', (28866, 28881), True, 'import numpy as np\n'), ((29402, 29420), 'numpy.radians', 'np.radians', (['ra_max'], {}), '(ra_max)\n', (29412, 29420), True, 'import numpy as np\n'), ((29423, 29441), 'numpy.radians', 'np.radians', (['ra_min'], {}), '(ra_min)\n', (29433, 29441), True, 'import numpy as np\n'), ((31438, 31457), 'numpy.uint64', 'np.uint64', (['maskbits'], {}), '(maskbits)\n', (31447, 31457), True, 'import numpy as np\n'), ((31460, 31477), 'numpy.uint64', 'np.uint64', (['(2 ** 1)'], {}), '(2 ** 1)\n', (31469, 31477), True, 'import numpy as np\n'), ((31512, 31531), 'numpy.uint64', 'np.uint64', (['maskbits'], {}), '(maskbits)\n', (31521, 31531), True, 'import numpy as np\n'), ((31534, 31552), 'numpy.uint64', 'np.uint64', (['(2 ** 11)'], {}), '(2 ** 11)\n', (31543, 31552), True, 'import numpy as np\n'), ((31594, 31613), 'numpy.uint64', 'np.uint64', (['maskbits'], {}), '(maskbits)\n', (31603, 31613), True, 'import numpy as np\n'), ((31616, 31634), 'numpy.uint64', 'np.uint64', (['(2 ** 13)'], {}), '(2 ** 13)\n', (31625, 31634), True, 'import numpy as np\n'), ((31665, 31684), 'numpy.uint64', 'np.uint64', (['maskbits'], {}), '(maskbits)\n', (31674, 31684), True, 'import numpy as np\n'), ((31687, 31705), 'numpy.uint64', 'np.uint64', (['(2 ** 12)'], {}), '(2 ** 12)\n', (31696, 31705), True, 'import numpy as np\n'), ((33927, 33946), 'numpy.sum', 'np.sum', (['mask_region'], {}), '(mask_region)\n', (33933, 33946), True, 'import numpy as np\n'), ((35150, 35177), 'os.path.basename', 'os.path.basename', (['brickname'], {}), '(brickname)\n', (35166, 35177), False, 
'import os\n'), ((725, 753), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (743, 753), False, 'import h5py\n'), ((7942, 7964), 'numpy.sum', 'np.sum', (['has_sdss_photo'], {}), '(has_sdss_photo)\n', (7948, 7964), True, 'import numpy as np\n'), ((8061, 8077), 'numpy.sum', 'np.sum', (['has_spec'], {}), '(has_spec)\n', (8067, 8077), True, 'import numpy as np\n'), ((8170, 8187), 'numpy.sum', 'np.sum', (['has_kcorr'], {}), '(has_kcorr)\n', (8176, 8187), True, 'import numpy as np\n'), ((8282, 8300), 'numpy.sum', 'np.sum', (['sample_cut'], {}), '(sample_cut)\n', (8288, 8300), True, 'import numpy as np\n'), ((18290, 18307), 'astropy.io.fits.open', 'fits.open', (['fsweep'], {}), '(fsweep)\n', (18299, 18307), False, 'from astropy.io import fits\n'), ((29451, 29470), 'numpy.radians', 'np.radians', (['dec_max'], {}), '(dec_max)\n', (29461, 29470), True, 'import numpy as np\n'), ((29481, 29500), 'numpy.radians', 'np.radians', (['dec_min'], {}), '(dec_min)\n', (29491, 29500), True, 'import numpy as np\n'), ((32910, 32938), 'os.path.join', 'os.path.join', (['dir_north', '_fs'], {}), '(dir_north, _fs)\n', (32922, 32938), False, 'import os\n'), ((32964, 32992), 'os.path.join', 'os.path.join', (['dir_south', '_fs'], {}), '(dir_south, _fs)\n', (32976, 32992), False, 'import os\n'), ((33618, 33636), 'astropy.io.fits.open', 'fits.open', (['_fsweep'], {}), '(_fsweep)\n', (33627, 33636), False, 'from astropy.io import fits\n'), ((34531, 34596), 'numpy.concatenate', 'np.concatenate', (['[sweeps[k], sweep[k][mask_region & rcut]]'], {'axis': '(0)'}), '([sweeps[k], sweep[k][mask_region & rcut]], axis=0)\n', (34545, 34596), True, 'import numpy as np\n'), ((19818, 19878), 'numpy.concatenate', 'np.concatenate', (['[gdict[key], gama_data[gkey][key][match[1]]]'], {}), '([gdict[key], gama_data[gkey][key][match[1]]])\n', (19832, 19878), True, 'import numpy as np\n'), ((20904, 20919), 'numpy.sum', 'np.sum', (['nosweep'], {}), '(nosweep)\n', (20910, 20919), True, 
'import numpy as np\n'), ((34019, 34039), 'numpy.ones', 'np.ones', (["sweep['RA']"], {}), "(sweep['RA'])\n", (34026, 34039), True, 'import numpy as np\n'), ((34250, 34269), 'numpy.sum', 'np.sum', (['mask_region'], {}), '(mask_region)\n', (34256, 34269), True, 'import numpy as np\n'), ((34271, 34296), 'os.path.basename', 'os.path.basename', (['_fsweep'], {}), '(_fsweep)\n', (34287, 34296), False, 'import os\n'), ((10401, 10414), 'numpy.sum', 'np.sum', (['in_ra'], {}), '(in_ra)\n', (10407, 10414), True, 'import numpy as np\n'), ((24031, 24050), 'numpy.abs', 'np.abs', (['dec_mins[j]'], {}), '(dec_mins[j])\n', (24037, 24050), True, 'import numpy as np\n'), ((24256, 24275), 'numpy.abs', 'np.abs', (['dec_maxs[j]'], {}), '(dec_maxs[j])\n', (24262, 24275), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import pyranges as pr
from pyranges.multithreaded import pyrange_apply
from pyranges.methods.statistics import _relative_distance
def simes(df, groupby, pcol):
    """Compute the Simes combined p-value for each group.

    Within each group the p-values are sorted ascending and transformed as
    ``p_i * m / i`` (m = group size, i = 1-based rank); the group's Simes
    statistic is the minimum of these transformed values.

    Parameters
    ----------
    df : pandas.DataFrame
        Table containing the grouping column(s) and the p-value column.
    groupby : str or list of str
        Column name(s) defining the groups.
    pcol : str
        Name of the p-value column.

    Returns
    -------
    pandas.DataFrame
        One row per group, with the group key column(s) and a "Simes" column.
    """
    keys = [groupby] if isinstance(groupby, str) else groupby
    order = keys + [pcol]
    ordered = df[order].sort_values(order)
    grouped = ordered.groupby(keys)
    # 1-based rank of each p-value within its (sorted) group.
    within_rank = grouped.cumcount().values + 1
    counts = grouped.size().values
    # Broadcast each group's size onto its rows.
    group_sizes = np.repeat(counts, counts)
    adjusted = (ordered[pcol].values * group_sizes) / within_rank
    ordered.insert(ordered.shape[1], "Simes", adjusted)
    return ordered.groupby(keys).Simes.min().reset_index()
def rowbased_spearman(x, y):
    """Row-wise Spearman correlation: the Pearson correlation of the ranks."""
    ranked_x = rowbased_rankdata(np.asarray(x))
    ranked_y = rowbased_rankdata(np.asarray(y))
    return rowbased_pearson(ranked_x, ranked_y)
def rowbased_pearson(x, y):
    """Row-wise Pearson correlation between corresponding rows of x and y.

    Rows with zero variance produce NaN (the division-by-zero warning is
    silenced).  Thanks to https://github.com/dengemann
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # Center each row around its own mean.
    xc = x - x.mean(axis=-1)[..., None]
    yc = y - y.mean(axis=-1)[..., None]
    numerator = np.add.reduce(xc * yc, axis=-1)
    denominator = np.sqrt(np.sum(xc * xc, axis=-1) * np.sum(yc * yc, axis=-1))
    with np.errstate(divide="ignore", invalid="ignore"):
        return numerator / denominator
def rowbased_rankdata(data):
    """Row-based rankdata using method=mean

    Vectorized equivalent of applying ``scipy.stats.rankdata(...,
    method="average")`` to each row: tied values receive the mean of the
    ranks they span.  Assumes ``data`` is a 2D numpy array
    (``data.shape`` is read directly) -- TODO confirm for other inputs.
    Returns a pandas DataFrame of ranks aligned with the input rows.
    """
    dc = np.asarray(data).copy()
    # argsort each row; ``inv`` becomes the inverse permutation, i.e. the
    # sorted position of every original element.
    sorter = np.apply_along_axis(np.argsort, 1, data)
    inv = np.empty(data.shape, np.intp)
    ranks = np.tile(np.arange(data.shape[1]), (len(data), 1))
    np.put_along_axis(inv, sorter, ranks, axis=1)
    dc = np.take_along_axis(dc, sorter, 1)
    # Mark positions where consecutive sorted values differ; prepending True
    # makes every row start a new run of equal values.
    res = np.apply_along_axis(lambda r: r[1:] != r[:-1], 1, dc)
    obs = np.column_stack([np.ones(len(res), dtype=bool), res])
    # Dense (tie-collapsed) ranks, mapped back to original positions.
    dense = np.take_along_axis(np.apply_along_axis(np.cumsum, 1, obs), inv, 1)
    len_r = obs.shape[1]
    # Number of distinct values per row; rows with the same count are
    # batched together so the arithmetic below stays fully vectorized.
    nonzero = np.count_nonzero(obs, axis=1)
    obs = pd.DataFrame(obs)
    nonzero = pd.Series(nonzero)
    dense = pd.DataFrame(dense)
    ranks = []
    for _nonzero, nzdf in obs.groupby(nonzero, sort=False):
        # nz: start index of each run of equal values; len_r acts as a
        # sentinel end boundary for the final run.
        nz = np.apply_along_axis(lambda r: np.nonzero(r)[0], 1, nzdf)
        _count = np.column_stack([nz, np.ones(len(nz)) * len_r])
        _dense = dense.reindex(nzdf.index).values
        # Average rank of a run = (run_start + run_end + 1) / 2 (1-based).
        _result = 0.5 * (np.take_along_axis(_count, _dense, 1) + np.take_along_axis(_count, _dense - 1, 1) + 1)
        result = pd.DataFrame(_result, index=nzdf.index)
        ranks.append(result)
    # Restore original row order (stable mergesort keeps ties in place).
    final = pd.concat(ranks).sort_index(kind="mergesort")
    return final
def fdr(p_vals):
    """Benjamini-Hochberg false-discovery-rate adjustment of p-values.

    Parameters
    ----------
    p_vals : array-like of float
        Raw p-values.

    Returns
    -------
    numpy.ndarray
        Adjusted p-values (p * n / rank), capped at 1.

    Notes
    -----
    The input is first converted with ``np.asarray``: previously a plain
    Python list would be *repeated* by ``list * int`` instead of scaled
    element-wise, producing a wrong result for list input.
    """
    from scipy.stats import rankdata

    p_vals = np.asarray(p_vals)  # element-wise arithmetic even for list input
    ranked_p_values = rankdata(p_vals)
    adjusted = p_vals * len(p_vals) / ranked_p_values
    adjusted[adjusted > 1] = 1
    return adjusted
def fisher_exact(n1, d1, n2, d2, **kwargs):
    """Vectorized Fisher exact tests plus odds ratios.

    Parameters
    ----------
    n1, d1, n2, d2 : array-like of int
        The four entries of each 2x2 contingency table, element-wise.
    **kwargs
        pseudocount : number, default 0
            Added to every count when computing the odds ratio only.
        alternative : str, default "twosided"
            One of "twosided", "left" or "right".

    Returns
    -------
    pandas.DataFrame
        Columns "OR" (odds ratio) and "P" (p-value), one row per table.

    Raises
    ------
    Exception
        If ``alternative`` is not one of the three accepted values.
    """
    try:
        from fisher import pvalue_npy
    except ImportError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and unrelated errors.
        import sys
        print("fisher needs to be installed to use fisher exact. pip install fisher or conda install -c bioconda fisher.")
        sys.exit(-1)
    pseudocount = kwargs.get("pseudocount", 0)
    fe_type = kwargs.get("alternative", "twosided")
    n1 = np.array(n1, dtype=np.uint)
    n2 = np.array(n2, dtype=np.uint)
    d1 = np.array(d1, dtype=np.uint)
    d2 = np.array(d2, dtype=np.uint)
    left, right, twosided = pvalue_npy(n1, d1, n2, d2)
    if fe_type == "twosided":
        p_vals = twosided
    elif fe_type == "left":
        p_vals = left
    elif fe_type == "right":
        p_vals = right
    else:
        raise Exception("alternative must be twosided, left or right")
    # NOTE(review): the odds ratio pairs n1 with d2 and n2 with d1 --
    # confirm this matches the intended table orientation.
    OR = ((n1 + pseudocount) / (d2 + pseudocount)) / ((n2 + pseudocount) / (d1 + pseudocount))
    df = pd.DataFrame({"OR": OR, "P": p_vals})
    return df
def chromsizes_as_int(chromsizes):
    """Normalize a chromsizes specification to a single total length.

    Accepts an int (returned unchanged), a dict of name -> length (values
    summed), or a DataFrame/PyRanges with an ``End`` column (``End``
    summed).  Any other type is passed through unchanged.
    """
    if isinstance(chromsizes, int):
        return chromsizes
    if isinstance(chromsizes, dict):
        return sum(chromsizes.values())
    if isinstance(chromsizes, (pd.DataFrame, pr.PyRanges)):
        return chromsizes.End.sum()
    return chromsizes
class StatisticsMethods():
    """Namespace of statistical comparisons exposed on a PyRanges object.

    An instance wraps the owning PyRanges (stored in ``self.pr``); each
    method compares it against another PyRanges.
    """
    pr = None
    def __init__(self, pr):
        # Keep a reference to the PyRanges this namespace belongs to.
        self.pr = pr
    # def __tetrachoric(self, other, chromsizes, **kwargs):
    #     self = self.pr
    #     chromsizes = chromsizes_as_int(chromsizes)
    #     kwargs["new_pos"] = "intersection"
    #     strand = True if kwargs.get("strandedness") else False
    #     ss = self.merge(strand=strand)
    #     so = other.merge(strand=strand)
    #     a = ss.intersect(so, **kwargs).length
    #     b = ss.subtract(so, **kwargs).length
    #     c = so.subtract(ss, **kwargs).length
    #     m = pr.concat([ss, so]).merge(strand=strand).length
    #     d = chromsizes - m
    #     from math import cos, sqrt
    #     _tetrachoric = cos(180/(1 + sqrt((b * c) / (a * d))))
    #     return _tetrachoric
    def forbes(self, other, chromsizes, **kwargs):
        """Forbes coefficient of association with ``other``.

        ``chromsizes`` may be an int, dict or DataFrame/PyRanges (see
        ``chromsizes_as_int``).  Values above 1 indicate more overlap
        than expected by chance.
        """
        chromsizes = chromsizes_as_int(chromsizes)
        self = self.pr
        kwargs["sparse"] = {"self": True, "other": True}
        kwargs = pr.pyranges.fill_kwargs(kwargs)
        strand = True if kwargs.get("strandedness") else False
        kwargs["new_pos"] = "intersection"
        reference_length = self.merge(strand=strand).length
        query_length = other.merge(strand=strand).length
        # Total number of overlapping bases between the two range sets.
        intersection_sum = sum(
            v.sum()
            for v in self.set_intersect(other, **kwargs).lengths(as_dict=True).values())
        forbes = chromsizes * intersection_sum / (reference_length * query_length)
        return forbes
    def jaccard(self, other, **kwargs):
        """Jaccard index (overlap bases / union bases) with ``other``.

        Returns 1 in the degenerate case where the union equals the
        intersection (denominator of zero).
        """
        self = self.pr
        kwargs["sparse"] = {"self": True, "other": True}
        kwargs = pr.pyranges.fill_kwargs(kwargs)
        strand = True if kwargs.get("strandedness") else False
        kwargs["new_pos"] = "intersection"
        intersection_sum = sum(
            v.sum()
            for v in self.set_intersect(other, **kwargs).lengths(as_dict=True).values())
        # Union = |A| + |B| - |A intersect B|, with each side merged first.
        union_sum = 0
        for gr in [self, other]:
            union_sum += sum(
                v.sum() for v in gr.merge(strand=strand).lengths(as_dict=True).values())
        denominator = (union_sum - intersection_sum)
        if denominator == 0:
            return 1
        else:
            jc = intersection_sum / denominator
            return jc
    def relative_distance(self, other, **kwargs):
        """Distribution of relative distances between this PyRanges and ``other``.

        Returns a DataFrame with one row per relative-distance bin
        (floored to 2 decimals): reldist, count, total and fraction.
        """
        self = self.pr
        kwargs["sparse"] = {"self": True, "other": True}
        kwargs = pr.pyranges.fill_kwargs(kwargs)
        result = pyrange_apply(_relative_distance, self, other, **kwargs)  # pylint: disable=E1132
        result = pd.Series(np.concatenate(list(result.values())))
        not_nan = ~np.isnan(result)
        # Bin distances to two decimals before counting.
        result.loc[not_nan] = np.floor(result[not_nan] * 100) / 100
        vc = result.value_counts(dropna=False).to_frame().reset_index()
        vc.columns = "reldist count".split()
        vc.insert(vc.shape[1], "total", len(result))
        vc.insert(vc.shape[1], "fraction", vc["count"] / len(result))
        vc = vc.sort_values("reldist", ascending=True)
        vc = vc.reset_index(drop=True)
        return vc
from math import sqrt
def _mcc(tp, fp, tn, fn):
# https://stackoverflow.com/a/56875660/992687
x = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
return ((tp * tn) - (fp * fn)) / sqrt(x)
def mcc(grs, genome=None, labels=None, strand=False, verbose=False):
    """Pairwise Matthews correlation coefficients between PyRanges.

    For every pair of inputs the universe is partitioned into TP/FP/TN/FN
    base counts by overlap and the MCC is computed from those counts.
    ``genome`` may be an int (total length) or a PyRanges/DataFrame with an
    ``End`` column; if omitted, the merged union of all inputs is used as
    the universe (so two inputs yield no true negatives).  Returns a
    DataFrame with columns T, F, (Strand,) TP, FP, TN, FN, MCC -- one row
    per ordered pair.
    """
    import sys
    from itertools import combinations_with_replacement, chain
    if labels is None:
        _labels = list(range(len(grs)))
        _labels = combinations_with_replacement(_labels, r=2)
    else:
        assert len(labels) == len(grs)
        _labels = combinations_with_replacement(labels, r=2)
    # remove all non-loc columns before computation
    grs = [gr.merge(strand=strand) for gr in grs]
    if genome is not None:
        try:
            # genome supplied as a single integer length
            genome_length = int(genome)
        except (TypeError, ValueError):
            # genome supplied as a PyRanges/DataFrame: sum the End column
            genome_length = int(genome.End.sum())
        if verbose:
            # check that genome definition does not have many more
            # chromosomes than datafiles
            gr_cs = set(chain(*[gr.chromosomes for gr in grs]))
            g_cs = set(genome.chromosomes)
            surplus = g_cs - gr_cs
            if len(surplus):
                print("The following chromosomes are in the genome, but not the PyRanges:", ", ".join(surplus), file=sys.stderr)
        if strand:
            # Duplicate every genome interval onto both strands so the
            # stranded universe is twice the unstranded one.
            def make_stranded(df):
                df = df.copy()
                df2 = df.copy()
                df.insert(df.shape[1], "Strand", "+")
                df2.insert(df2.shape[1], "Strand", "-")
                return pd.concat([df, df2])
            genome = genome.apply(make_stranded)
            genome_length = genome.length
    else:
        if len(grs) == 2:
            print("If you do not have a genome and the number of compared pyranges is two, mcc will not give any true negatives.", file=sys.stderr)
        genome_length = pr.concat(grs).merge().length
    strandedness = "same" if strand else None
    rowdicts = []
    for (lt, lf), (t, f) in zip(_labels, combinations_with_replacement(grs, r=2)):
        if verbose:
            print(lt, lf, file=sys.stderr)
        if lt == lf:
            # Self-comparison: MCC is 1 by definition.
            if not strand:
                tp = t.length
                fn = 0
                tn = genome_length - tp
                fp = 0
                rowdicts.append({"T": lt, "F": lf, "TP": tp, "FP": fp, "TN": tn, "FN": fn, "MCC": 1})
            else:
                # NOTE(review): this loop rebinds the `strand` parameter to
                # the strings "+"/"-" (both truthy), which later iterations
                # then pass to merge()/strand checks -- confirm this
                # shadowing is intentional.
                for strand in "+ -".split():
                    tp = t[strand].length
                    fn = 0
                    tn = genome_length - tp
                    fp = 0
                    rowdicts.append({"T": lt, "F": lf, "Strand": strand, "TP": tp, "FP": fp, "TN": tn, "FN": fn, "MCC": 1})
            continue
        else:
            # Overlap (join) then intersection gives the TP intervals.
            j = t.join(f, strandedness=strandedness)
            tp_gr = j.new_position("intersection").merge(strand=strand)
            if strand:
                for strand in "+ -".split():
                    tp = tp_gr[strand].length
                    fp = f[strand].length - tp
                    fn = t[strand].length - tp
                    tn = genome_length - (tp + fp + fn)
                    mcc = _mcc(tp, fp, tn, fn)
                    # Emit both (T, F) orderings; FP and FN swap roles.
                    rowdicts.append({"T": lt, "F": lf, "Strand": strand, "TP": tp, "FP": fp, "TN": tn, "FN": fn, "MCC": mcc})
                    rowdicts.append({"T": lf, "F": lt, "Strand": strand, "TP": tp, "FP": fn, "TN": tn, "FN": fp, "MCC": mcc})
            else:
                tp = tp_gr.length
                fp = f.length - tp
                fn = t.length - tp
                tn = genome_length - (tp + fp + fn)
                mcc = _mcc(tp, fp, tn, fn)
                # Emit both (T, F) orderings; FP and FN swap roles.
                rowdicts.append({"T": lt, "F": lf, "TP": tp, "FP": fp, "TN": tn, "FN": fn, "MCC": mcc})
                rowdicts.append({"T": lf, "F": lt, "TP": tp, "FP": fn, "TN": tn, "FN": fp, "MCC": mcc})
    df = pd.DataFrame.from_dict(rowdicts).sort_values(["T", "F"])
    return df
| [
"numpy.sum",
"numpy.empty",
"numpy.floor",
"numpy.isnan",
"pyranges.pyranges.fill_kwargs",
"numpy.arange",
"pandas.DataFrame",
"numpy.add.reduce",
"scipy.stats.rankdata",
"numpy.apply_along_axis",
"itertools.chain",
"pandas.concat",
"numpy.repeat",
"pyranges.concat",
"pandas.DataFrame.fr... | [((442, 463), 'numpy.repeat', 'np.repeat', (['size', 'size'], {}), '(size, size)\n', (451, 463), True, 'import numpy as np\n'), ((703, 716), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (713, 716), True, 'import numpy as np\n'), ((725, 738), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (735, 738), True, 'import numpy as np\n'), ((983, 996), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (993, 996), True, 'import numpy as np\n'), ((1005, 1018), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1015, 1018), True, 'import numpy as np\n'), ((1134, 1165), 'numpy.add.reduce', 'np.add.reduce', (['(xm * ym)'], {'axis': '(-1)'}), '(xm * ym, axis=-1)\n', (1147, 1165), True, 'import numpy as np\n'), ((1446, 1486), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argsort', '(1)', 'data'], {}), '(np.argsort, 1, data)\n', (1465, 1486), True, 'import numpy as np\n'), ((1498, 1527), 'numpy.empty', 'np.empty', (['data.shape', 'np.intp'], {}), '(data.shape, np.intp)\n', (1506, 1527), True, 'import numpy as np\n'), ((1596, 1641), 'numpy.put_along_axis', 'np.put_along_axis', (['inv', 'sorter', 'ranks'], {'axis': '(1)'}), '(inv, sorter, ranks, axis=1)\n', (1613, 1641), True, 'import numpy as np\n'), ((1652, 1685), 'numpy.take_along_axis', 'np.take_along_axis', (['dc', 'sorter', '(1)'], {}), '(dc, sorter, 1)\n', (1670, 1685), True, 'import numpy as np\n'), ((1697, 1750), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda r: r[1:] != r[:-1])', '(1)', 'dc'], {}), '(lambda r: r[1:] != r[:-1], 1, dc)\n', (1716, 1750), True, 'import numpy as np\n'), ((1937, 1966), 'numpy.count_nonzero', 'np.count_nonzero', (['obs'], {'axis': '(1)'}), '(obs, axis=1)\n', (1953, 1966), True, 'import numpy as np\n'), ((1977, 1994), 'pandas.DataFrame', 'pd.DataFrame', (['obs'], {}), '(obs)\n', (1989, 1994), True, 'import pandas as pd\n'), ((2009, 2027), 'pandas.Series', 'pd.Series', (['nonzero'], {}), '(nonzero)\n', (2018, 2027), True, 'import 
pandas as pd\n'), ((2040, 2059), 'pandas.DataFrame', 'pd.DataFrame', (['dense'], {}), '(dense)\n', (2052, 2059), True, 'import pandas as pd\n'), ((2679, 2695), 'scipy.stats.rankdata', 'rankdata', (['p_vals'], {}), '(p_vals)\n', (2687, 2695), False, 'from scipy.stats import rankdata\n'), ((3160, 3187), 'numpy.array', 'np.array', (['n1'], {'dtype': 'np.uint'}), '(n1, dtype=np.uint)\n', (3168, 3187), True, 'import numpy as np\n'), ((3197, 3224), 'numpy.array', 'np.array', (['n2'], {'dtype': 'np.uint'}), '(n2, dtype=np.uint)\n', (3205, 3224), True, 'import numpy as np\n'), ((3234, 3261), 'numpy.array', 'np.array', (['d1'], {'dtype': 'np.uint'}), '(d1, dtype=np.uint)\n', (3242, 3261), True, 'import numpy as np\n'), ((3271, 3298), 'numpy.array', 'np.array', (['d2'], {'dtype': 'np.uint'}), '(d2, dtype=np.uint)\n', (3279, 3298), True, 'import numpy as np\n'), ((3328, 3354), 'fisher.pvalue_npy', 'pvalue_npy', (['n1', 'd1', 'n2', 'd2'], {}), '(n1, d1, n2, d2)\n', (3338, 3354), False, 'from fisher import pvalue_npy\n'), ((3702, 3739), 'pandas.DataFrame', 'pd.DataFrame', (["{'OR': OR, 'P': p_vals}"], {}), "({'OR': OR, 'P': p_vals})\n", (3714, 3739), True, 'import pandas as pd\n'), ((949, 973), 'numpy.sum', 'np.sum', (['(a * a)'], {'axis': 'axis'}), '(a * a, axis=axis)\n', (955, 973), True, 'import numpy as np\n'), ((1231, 1277), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1242, 1277), True, 'import numpy as np\n'), ((1549, 1573), 'numpy.arange', 'np.arange', (['data.shape[1]'], {}), '(data.shape[1])\n', (1558, 1573), True, 'import numpy as np\n'), ((1848, 1886), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.cumsum', '(1)', 'obs'], {}), '(np.cumsum, 1, obs)\n', (1867, 1886), True, 'import numpy as np\n'), ((2454, 2493), 'pandas.DataFrame', 'pd.DataFrame', (['_result'], {'index': 'nzdf.index'}), '(_result, index=nzdf.index)\n', (2466, 2493), True, 'import pandas as pd\n'), 
((5086, 5117), 'pyranges.pyranges.fill_kwargs', 'pr.pyranges.fill_kwargs', (['kwargs'], {}), '(kwargs)\n', (5109, 5117), True, 'import pyranges as pr\n'), ((5732, 5763), 'pyranges.pyranges.fill_kwargs', 'pr.pyranges.fill_kwargs', (['kwargs'], {}), '(kwargs)\n', (5755, 5763), True, 'import pyranges as pr\n'), ((6522, 6553), 'pyranges.pyranges.fill_kwargs', 'pr.pyranges.fill_kwargs', (['kwargs'], {}), '(kwargs)\n', (6545, 6553), True, 'import pyranges as pr\n'), ((6572, 6628), 'pyranges.multithreaded.pyrange_apply', 'pyrange_apply', (['_relative_distance', 'self', 'other'], {}), '(_relative_distance, self, other, **kwargs)\n', (6585, 6628), False, 'from pyranges.multithreaded import pyrange_apply\n'), ((7370, 7377), 'math.sqrt', 'sqrt', (['x'], {}), '(x)\n', (7374, 7377), False, 'from math import sqrt\n'), ((7609, 7652), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['_labels'], {'r': '(2)'}), '(_labels, r=2)\n', (7638, 7652), False, 'from itertools import combinations_with_replacement, chain\n'), ((7720, 7762), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['labels'], {'r': '(2)'}), '(labels, r=2)\n', (7749, 7762), False, 'from itertools import combinations_with_replacement, chain\n'), ((9175, 9214), 'itertools.combinations_with_replacement', 'combinations_with_replacement', (['grs'], {'r': '(2)'}), '(grs, r=2)\n', (9204, 9214), False, 'from itertools import combinations_with_replacement, chain\n'), ((1409, 1425), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1419, 1425), True, 'import numpy as np\n'), ((2536, 2552), 'pandas.concat', 'pd.concat', (['ranks'], {}), '(ranks)\n', (2545, 2552), True, 'import pandas as pd\n'), ((3037, 3049), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (3045, 3049), False, 'import sys\n'), ((6741, 6757), 'numpy.isnan', 'np.isnan', (['result'], {}), '(result)\n', (6749, 6757), True, 'import numpy as np\n'), ((6788, 6819), 'numpy.floor', 'np.floor', 
(['(result[not_nan] * 100)'], {}), '(result[not_nan] * 100)\n', (6796, 6819), True, 'import numpy as np\n'), ((11035, 11067), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['rowdicts'], {}), '(rowdicts)\n', (11057, 11067), True, 'import pandas as pd\n'), ((8190, 8228), 'itertools.chain', 'chain', (['*[gr.chromosomes for gr in grs]'], {}), '(*[gr.chromosomes for gr in grs])\n', (8195, 8228), False, 'from itertools import combinations_with_replacement, chain\n'), ((8719, 8739), 'pandas.concat', 'pd.concat', (['[df, df2]'], {}), '([df, df2])\n', (8728, 8739), True, 'import pandas as pd\n'), ((2180, 2193), 'numpy.nonzero', 'np.nonzero', (['r'], {}), '(r)\n', (2190, 2193), True, 'import numpy as np\n'), ((2349, 2386), 'numpy.take_along_axis', 'np.take_along_axis', (['_count', '_dense', '(1)'], {}), '(_count, _dense, 1)\n', (2367, 2386), True, 'import numpy as np\n'), ((2389, 2430), 'numpy.take_along_axis', 'np.take_along_axis', (['_count', '(_dense - 1)', '(1)'], {}), '(_count, _dense - 1, 1)\n', (2407, 2430), True, 'import numpy as np\n'), ((9037, 9051), 'pyranges.concat', 'pr.concat', (['grs'], {}), '(grs)\n', (9046, 9051), True, 'import pyranges as pr\n')] |
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from time import time
from include.data import get_data_set
# from include.model import model
from include.model import model
from utils import progress_bar
# Build the graph: placeholders, logits and bookkeeping tensors come from
# the model definition.
x, y, output, global_step, y_pred_cls, keep_prob = model()
# Dataset / training constants (CIFAR-10).
_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_ITERATION = 20000
_EPOCH = 161
# _SAVE_PATH = "./tensorboard/cifar-10/"
_SAVE_PATH = "./tensorboard/aug-decay-RMS2/"
# The training set defines the normalization (mu/std) reused for the test set.
train_x, train_y, train_l, mu, std = get_data_set(cifar=10, whitten=False)
test_x, test_y, test_l, mu, std = get_data_set(name="test", mu=mu, std=std, cifar=10, whitten=False)
print (train_x)
print (test_x)
# Cross-entropy loss over the softmax logits.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
# Step-wise learning-rate schedule: 0.1 until epoch 81, 0.01 until 122, then 0.001.
steps_per_epoch = len(train_x) / _BATCH_SIZE
boundaries = [steps_per_epoch * _epoch for _epoch in [81, 122]]
values = [0.1, 0.01, 0.001]
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
# L2 weight decay over every trainable variable.
l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])
weight_decay = 0.0001
# NOTE(review): the optimizer is built with a fixed 1e-4 rate, so the
# `learning_rate` schedule above is only ever fetched for logging --
# confirm this is intentional.
optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(loss + l2 * weight_decay, global_step=global_step)
# optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(loss, global_step=global_step)
# optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9, name='Momentum', use_nesterov=True).minimize(loss + l2 * weight_decay, global_step=global_step)
# NOTE: `dimension=` is the deprecated TF1 spelling of `axis=`.
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, dimension=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("Accuracy/train", accuracy)
tf.summary.scalar("Loss", loss)
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
train_writer = tf.summary.FileWriter(_SAVE_PATH, sess.graph)
# Resume from the latest checkpoint when one exists; otherwise initialize.
try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
# NOTE(review): the bare `except` also hides unrelated failures (e.g.
# corrupt checkpoints, disk errors); a narrower exception would be safer.
except:
    print("Failed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())
def train(num_epoch):
    """Train the CNN for ``num_epoch`` epochs over the shuffled training set.

    Side effects: advances ``global_step``, writes summaries and test
    accuracy via ``train_writer``, and saves a checkpoint after every epoch.

    Parameters
    ----------
    num_epoch : int
        Number of passes over the full training set.
    """
    global train_x
    global train_y
    epoch_size = len(train_x)
    for i in range(num_epoch):
        print('Epoch: %d' % i)
        # Reshuffle the training data in place each epoch.
        randidx = np.arange(epoch_size)
        np.random.shuffle(randidx)
        print(epoch_size)
        train_x = train_x[randidx]
        train_y = train_y[randidx]
        # BUG FIX: `/` is true division in Python 3 and returns a float,
        # which makes `range(num_iterations)` below raise TypeError.
        # Use floor division instead.
        if epoch_size % _BATCH_SIZE == 0:
            num_iterations = epoch_size // _BATCH_SIZE
        else:
            num_iterations = epoch_size // _BATCH_SIZE + 1
        train_loss = 0
        for j in range(num_iterations):
            # The last batch may be smaller than _BATCH_SIZE.
            if j < num_iterations - 1:
                batch_xs = train_x[j * _BATCH_SIZE:(j + 1) * _BATCH_SIZE]
                batch_ys = train_y[j * _BATCH_SIZE:(j + 1) * _BATCH_SIZE]
            else:
                batch_xs = train_x[j * _BATCH_SIZE:epoch_size]
                batch_ys = train_y[j * _BATCH_SIZE:epoch_size]
            start_time = time()
            i_global, _ = sess.run([global_step, optimizer], feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
            duration = time() - start_time
            if (i_global % 10 == 0) or (j == num_iterations - 1):
                # NOTE(review): metrics are evaluated with keep_prob=0.5
                # (dropout active); confirm whether 1.0 was intended here.
                _loss, batch_acc, _learning_rate = sess.run([loss, accuracy, learning_rate], feed_dict={x: batch_xs, y: batch_ys, keep_prob: 0.5})
                train_loss = train_loss + _loss
                progress_bar(j, num_iterations, 'Loss: %.3f | Acc: %.3f%% '
                             % (train_loss / (j + 1), batch_acc))
            if (j == num_iterations - 1):
                # End of epoch: log summaries + test accuracy and checkpoint.
                data_merged, global_1 = sess.run([merged, global_step], feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
                acc = predict_test()
                summary = tf.Summary(value=[
                    tf.Summary.Value(tag="Accuracy/test", simple_value=acc),
                ])
                train_writer.add_summary(data_merged, global_1)
                train_writer.add_summary(summary, global_1)
                saver.save(sess, save_path=_SAVE_PATH, global_step=global_step)
                print("Saved checkpoint.")
def predict_test(show_confusion_matrix=False):
    """Run the network over the whole test set and return accuracy in percent.

    Parameters
    ----------
    show_confusion_matrix : bool
        If True, additionally print a per-class confusion matrix.

    Returns
    -------
    float
        Test-set accuracy in percent.
    """
    i = 0
    # BUG FIX: `np.int` was removed in NumPy 1.24 (AttributeError);
    # the builtin `int` dtype is the drop-in replacement.
    predicted_class = np.zeros(shape=len(test_x), dtype=int)
    # Predict in batches to bound memory usage.
    while i < len(test_x):
        j = min(i + _BATCH_SIZE, len(test_x))
        batch_xs = test_x[i:j, :]
        batch_ys = test_y[i:j, :]
        predicted_class[i:j] = sess.run(y_pred_cls, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.0})
        i = j
    correct = (np.argmax(test_y, axis=1) == predicted_class)
    acc = correct.mean() * 100
    correct_numbers = correct.sum()
    print("Accuracy on Test-Set: {0:.2f}% ({1} / {2})".format(acc, correct_numbers, len(test_x)))
    if show_confusion_matrix is True:
        cm = confusion_matrix(y_true=np.argmax(test_y, axis=1), y_pred=predicted_class)
        for i in range(_CLASS_SIZE):
            class_name = "({}) {}".format(i, test_l[i])
            print(cm[i, :], class_name)
        class_numbers = [" ({0})".format(i) for i in range(_CLASS_SIZE)]
        print("".join(class_numbers))
    return acc
# Entry point: train for _EPOCH epochs when an iteration budget is set,
# then release the TensorFlow session.
if _ITERATION != 0:
    # train(_ITERATION)
    train(_EPOCH)
sess.close()
| [
"tensorflow.trainable_variables",
"numpy.argmax",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.train.latest_checkpoint",
"numpy.arange",
"include.data.get_data_set",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"tensorflow.summary.FileWriter",
"tensorflow.train.piecewi... | [((300, 307), 'include.model.model', 'model', ([], {}), '()\n', (305, 307), False, 'from include.model import model\n'), ((533, 570), 'include.data.get_data_set', 'get_data_set', ([], {'cifar': '(10)', 'whitten': '(False)'}), '(cifar=10, whitten=False)\n', (545, 570), False, 'from include.data import get_data_set\n'), ((605, 671), 'include.data.get_data_set', 'get_data_set', ([], {'name': '"""test"""', 'mu': 'mu', 'std': 'std', 'cifar': '(10)', 'whitten': '(False)'}), "(name='test', mu=mu, std=std, cifar=10, whitten=False)\n", (617, 671), False, 'from include.data import get_data_set\n'), ((945, 1005), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', 'boundaries', 'values'], {}), '(global_step, boundaries, values)\n', (972, 1005), True, 'import tensorflow as tf\n'), ((1612, 1657), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Accuracy/train"""', 'accuracy'], {}), "('Accuracy/train', accuracy)\n", (1629, 1657), True, 'import tensorflow as tf\n'), ((1658, 1689), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Loss"""', 'loss'], {}), "('Loss', loss)\n", (1675, 1689), True, 'import tensorflow as tf\n'), ((1701, 1723), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1721, 1723), True, 'import tensorflow as tf\n'), ((1732, 1748), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1746, 1748), True, 'import tensorflow as tf\n'), ((1756, 1768), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1766, 1768), True, 'import tensorflow as tf\n'), ((1784, 1829), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['_SAVE_PATH', 'sess.graph'], {}), '(_SAVE_PATH, sess.graph)\n', (1805, 1829), True, 'import tensorflow as tf\n'), ((726, 790), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'output', 'labels': 'y'}), '(logits=output, labels=y)\n', (765, 790), True, 'import tensorflow as 
tf\n'), ((1518, 1543), 'tensorflow.argmax', 'tf.argmax', (['y'], {'dimension': '(1)'}), '(y, dimension=1)\n', (1527, 1543), True, 'import tensorflow as tf\n'), ((1571, 1610), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (1578, 1610), True, 'import tensorflow as tf\n'), ((1908, 1961), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': '_SAVE_PATH'}), '(checkpoint_dir=_SAVE_PATH)\n', (1934, 1961), True, 'import tensorflow as tf\n'), ((1021, 1039), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (1034, 1039), True, 'import tensorflow as tf\n'), ((1112, 1159), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (1137, 1159), True, 'import tensorflow as tf\n'), ((2404, 2425), 'numpy.arange', 'np.arange', (['epoch_size'], {}), '(epoch_size)\n', (2413, 2425), True, 'import numpy as np\n'), ((2434, 2460), 'numpy.random.shuffle', 'np.random.shuffle', (['randidx'], {}), '(randidx)\n', (2451, 2460), True, 'import numpy as np\n'), ((5080, 5105), 'numpy.argmax', 'np.argmax', (['test_y'], {'axis': '(1)'}), '(test_y, axis=1)\n', (5089, 5105), True, 'import numpy as np\n'), ((1051, 1075), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (1073, 1075), True, 'import tensorflow as tf\n'), ((2161, 2194), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2192, 2194), True, 'import tensorflow as tf\n'), ((3158, 3164), 'time.time', 'time', ([], {}), '()\n', (3162, 3164), False, 'from time import time\n'), ((3303, 3309), 'time.time', 'time', ([], {}), '()\n', (3307, 3309), False, 'from time import time\n'), ((3824, 3924), 'utils.progress_bar', 'progress_bar', (['j', 'num_iterations', "('Loss: %.3f | Acc: %.3f%% ' % (train_loss / (j + 1), batch_acc))"], {}), "(j, num_iterations, 'Loss: %.3f | Acc: %.3f%% ' % 
(train_loss /\n (j + 1), batch_acc))\n", (3836, 3924), False, 'from utils import progress_bar\n'), ((5367, 5392), 'numpy.argmax', 'np.argmax', (['test_y'], {'axis': '(1)'}), '(test_y, axis=1)\n', (5376, 5392), True, 'import numpy as np\n'), ((4291, 4346), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': '"""Accuracy/test"""', 'simple_value': 'acc'}), "(tag='Accuracy/test', simple_value=acc)\n", (4307, 4346), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
import re
import collections
import numpy as np
import sklearn
from tabulate import tabulate
BaseAtom = collections.namedtuple("Atom", ["predicate", "arguments"])


class Atom(BaseAtom):
    """A logical atom: a predicate applied to a list of argument terms.

    Hash/equality are defined over (predicate, arguments) so structurally
    identical atoms collapse in sets and dict keys.
    """

    def __hash__(self):
        # Hash predicate and arguments together. Tuple hashing avoids the
        # zero-absorbing collisions of the original multiplicative scheme
        # (one argument hashing to 0 forced the whole hash to 0).
        return hash((self.predicate, tuple(self.arguments)))

    def __eq__(self, other):
        return (self.predicate == other.predicate) and (self.arguments == other.arguments)
def trim(string: str) -> str:
    """
    Remove leading and trailing whitespace from a string.

    :param string: an input string
    :return: the string without leading/trailing whitespace
    """
    # Raw string fixes the invalid escape sequences (\A, \s, \Z) that the
    # original non-raw pattern relied on (DeprecationWarning in Python 3).
    return re.sub(r"\A\s+|\s+\Z", "", string)
def parse_rules(rules, delimiter="#####", rule_template=False):
    """
    Parse textual rules of the form ``head(args) :- body1(args), body2(args)``
    into lists of :class:`Atom` objects.

    :param rules: iterable of rule strings
    :param delimiter: internal separator used while splitting atoms
    :param rule_template: if True, each rule starts with an integer id and
        the result contains ``(atoms, num)`` pairs instead of plain atom lists
    :return: the parsed knowledge base (list of rules)
    """
    kb = []
    for rule in rules:
        if rule_template:
            # A match at the start of the string makes re.split emit an empty
            # first element, so the captured number is at index 1 and the
            # remainder of the rule at index 2.
            splits = re.split(r"\A\n?([0-9]?[0-9]+)", rule)
            num = int(splits[1])
            rule = splits[2]
        # Rewrite ':-' and the ')' before each comma into a common delimiter,
        # then split the rule into individual atom strings.
        rule = re.sub(":-", delimiter, rule)
        rule = re.sub(r"\),", ")" + delimiter, rule)
        rule = [trim(x) for x in rule.split(delimiter)]
        rule = [x for x in rule if x != ""]
        if len(rule) > 0:
            atoms = []
            for atom in rule:
                splits = atom.split("(")
                predicate = splits[0]
                args = [x for x in re.split(r"\s?,\s?|\)", splits[1]) if x != ""]
                atoms.append(Atom(predicate, args))
            if rule_template:
                kb.append((atoms, num))
            else:
                kb.append(atoms)
    return kb
def load_from_file(path, rule_template=False):
    """
    Read a rule file, drop comment ('%') and blank lines, and parse the
    remaining '.'-terminated rules into a knowledge base.
    """
    with open(path, "r") as handle:
        raw_lines = handle.readlines()
    content = "".join(
        line for line in raw_lines
        if not line.startswith("%") and line.strip() != ""
    )
    rules = []
    for chunk in re.split("\.\n|\.\Z", content):
        if chunk != "" and chunk != "\n" and not chunk.startswith("%"):
            rules.append(chunk)
    return parse_rules(rules, rule_template=rule_template)
def evaluate_on_countries(test_set, entity_to_index, predicate_to_index,
                          scoring_function, verbose=False):
    """
    Evaluate a link-prediction scoring function on the Countries benchmark.

    For each test country, scores the 'locatedIn' relation against every
    candidate region and reports the average precision over all pairs.

    NOTE(review): assumes ./data/countries/{test_set}.txt, regions.txt and
    countries.nl exist relative to the current working directory -- confirm
    before use.

    :param test_set: name of the test split file (without extension)
    :param entity_to_index: mapping from entity name to embedding index
    :param predicate_to_index: mapping from predicate name to index
    :param scoring_function: callable (Xp, Xs, Xo) -> array of scores
    :param verbose: if True, print a per-country score table
    :return: average precision score over all (country, region) pairs
    """
    test_countries = []
    with open("./data/countries/{}.txt".format(test_set), "r") as f:
        for line in f.readlines():
            # line[:-1] strips the trailing newline
            test_countries.append(line[:-1])
    regions = []
    with open("./data/countries/regions.txt", "r") as f:
        for line in f.readlines():
            regions.append(line[:-1])
    ground_truth = load_from_file("./data/countries/countries.nl")
    # Map each country to its ground-truth region from 'locatedIn' facts.
    country2region = {}
    for atom in ground_truth:
        atom = atom[0]
        if atom.predicate == "locatedIn":
            country, region = atom.arguments
            if region in regions:
                country2region[country] = region
    located_in_ids = [predicate_to_index['locatedIn']] * len(regions)
    region_ids = [entity_to_index[region] for region in regions]
    def predict(country):
        # Score (locatedIn, country, region) for every candidate region.
        country_ids = [entity_to_index[country]] * len(regions)
        Xp = np.array(located_in_ids)
        Xs = np.array(country_ids)
        Xo = np.array(region_ids)
        scores = scoring_function(Xp, Xs, Xo)
        return scores
    table = []
    scores_all = []
    target_all = []
    for country in test_countries:
        known_kb = country2region[country]
        region_idx = regions.index(known_kb)
        scores = predict(country)
        table += [[country] + list(scores)]
        # One-hot target vector: 1 at the true region, 0 elsewhere.
        target = np.zeros(len(regions), np.int32)
        target[region_idx] = 1
        scores_all += list(scores)
        target_all += list(target)
    if verbose:
        print(tabulate(table, headers=["country"] + regions))
    auc_val = sklearn.metrics.average_precision_score(target_all, scores_all)
    return auc_val
| [
"re.split",
"numpy.array",
"collections.namedtuple",
"tabulate.tabulate",
"sklearn.metrics.average_precision_score",
"re.sub"
] | [((132, 190), 'collections.namedtuple', 'collections.namedtuple', (['"""Atom"""', "['predicate', 'arguments']"], {}), "('Atom', ['predicate', 'arguments'])\n", (154, 190), False, 'import collections\n'), ((642, 679), 're.sub', 're.sub', (['"""\\\\A\\\\s+|\\\\s+\\\\Z"""', '""""""', 'string'], {}), "('\\\\A\\\\s+|\\\\s+\\\\Z', '', string)\n", (648, 679), False, 'import re\n'), ((3697, 3760), 'sklearn.metrics.average_precision_score', 'sklearn.metrics.average_precision_score', (['target_all', 'scores_all'], {}), '(target_all, scores_all)\n', (3736, 3760), False, 'import sklearn\n'), ((991, 1020), 're.sub', 're.sub', (['""":-"""', 'delimiter', 'rule'], {}), "(':-', delimiter, rule)\n", (997, 1020), False, 'import re\n'), ((1036, 1073), 're.sub', 're.sub', (['"""\\\\),"""', "(')' + delimiter)", 'rule'], {}), "('\\\\),', ')' + delimiter, rule)\n", (1042, 1073), False, 'import re\n'), ((3028, 3052), 'numpy.array', 'np.array', (['located_in_ids'], {}), '(located_in_ids)\n', (3036, 3052), True, 'import numpy as np\n'), ((3066, 3087), 'numpy.array', 'np.array', (['country_ids'], {}), '(country_ids)\n', (3074, 3087), True, 'import numpy as np\n'), ((3101, 3121), 'numpy.array', 'np.array', (['region_ids'], {}), '(region_ids)\n', (3109, 3121), True, 'import numpy as np\n'), ((824, 865), 're.split', 're.split', (['"""\\\\A\n?([0-9]?[0-9]+)"""', 'rule'], {}), '("""\\\\A\n?([0-9]?[0-9]+)""", rule)\n', (832, 865), False, 'import re\n'), ((3634, 3680), 'tabulate.tabulate', 'tabulate', (['table'], {'headers': "(['country'] + regions)"}), "(table, headers=['country'] + regions)\n", (3642, 3680), False, 'from tabulate import tabulate\n'), ((1841, 1871), 're.split', 're.split', (['"""\\\\.\n|\\\\.\\\\Z"""', 'text'], {}), "('\\\\.\\n|\\\\.\\\\Z', text)\n", (1849, 1871), False, 'import re\n'), ((1364, 1400), 're.split', 're.split', (['"""\\\\s?,\\\\s?|\\\\)"""', 'splits[1]'], {}), "('\\\\s?,\\\\s?|\\\\)', splits[1])\n", (1372, 1400), False, 'import re\n')] |
import numpy as np
import scipy
from scipy import sparse
import pandas as pd
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
import scanpy as sc
import scanpy.external as sce
import sys
import scrublet as scr
from typing import Union
from anndata import AnnData
from inspect import signature
from .utils import scanpy_adata_loader
from .utils import score_doublets
from .utils import score_cc_genes
# For Numba
import warnings
warnings.filterwarnings('ignore')
def bcs_by_group(
    obs: pd.DataFrame,
    group: str = 'percent_mito',
    key: str = 'louvain',
    thresh: float = 2,
    verbose: bool = False
    ) -> list:
    """
    Barcodes by Group
    ----------------------------
    Select barcodes whose `group` metric falls below a per-cluster cutoff of
    mean + thresh * std, computed separately for every value of `key`.
    Inputs:
        - obs: observations dataframe from AnnData object
        - group: metric used to filter barcodes
        - key: sub-groups considered separately during filtering
        - thresh: std. err threshold cutoff for removing barcodes
        - verbose: print out filtering
    Outputs:
        - bcs: selected barcodes
    """
    selected = []
    for subgroup in set(obs[key]):
        members = obs[obs[key] == subgroup]
        values = members[group].values
        cutoff = np.mean(values) + thresh * np.std(values)
        kept = members[members[group] < cutoff]
        if verbose:
            print("\tFiltering {} group {} - {} < {}".format(key, subgroup, group, cutoff))
        selected.extend(kept.index)
    if verbose:
        print("Filtered {} / {} barcodes by {}.".format(obs.shape[0] - len(selected), obs.shape[0], group))
    return selected
def filter_upper(
    adata: AnnData,
    groups: list,
    verbose: bool,
    **kwargs
    ) -> list:
    """
    Filter Upper
    ----------------------------
    Performs downstream clustering on unfiltered data to estimate thresholds
    for filtering out diseased cells.
    Inputs:
        - adata: AnnData object
        - groups: groups to do separate filtering on
        - verbose: passed through to bcs_by_group to print filtering details
        - **kwargs: key-word args for bcs_by_group
    Outputs:
        - final list of selected barcodes
    """
    # Preprocessing order matters: normalize, log-transform, then select
    # highly variable genes before PCA / neighbors / Louvain clustering.
    sc.pp.normalize_per_cell(adata, counts_per_cell_after=10000)
    sc.pp.log1p(adata)
    sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)
    adata = adata[:,adata.var['highly_variable']]
    sc.tl.pca(adata, svd_solver='arpack')
    sc.pp.neighbors(adata, knn=True, n_pcs=40)
    sc.tl.louvain(adata, resolution=1)
    # Keep only barcodes that pass the per-cluster threshold for EVERY group.
    return list(
        set.intersection(*[set(bcs_by_group(adata.obs, group=g, verbose=verbose, **kwargs)) for g in groups])
    )
def recipe(
    file_name: Union[AnnData, str, list], \
    min_genes: int = 200, \
    min_cells: int = 3, \
    thresh: float = 1.25, \
    mito_thresh: Union[None,float] = None, \
    groups: Union[None, str] = None, \
    genome: Union[None, str] = None, \
    norm: str = "library", \
    scran_key: Union[None, str] = None, \
    scran_res: float = 0.5, \
    regress_vars: Union[None, str] = None, \
    regress_jobs: int = 1, \
    compute_doublets: bool = True, \
    remove_doublets: bool = False, \
    scrublet_key: str = 'batch', \
    hvg: Union[None, dict] = None, \
    qc: bool = False,
    downstream: bool = True,
    bbknn: Union[None, str] = None,
    issparse: bool = False,
    verbose: bool = False,
    make_sparse: bool = True,
    **kwargs
    ) -> AnnData:
    """
    Recipe for single-cell processing.
    ----------------------------
    Inputs:
        - min_genes: minimum number of genes required for a cell
        - min_cells: minimum number of cells for expression of a gene
        - thresh: estimated threshold for qc-filtering
        - mito_thresh: hard mitochondrial-fraction cutoff; 'auto' derives one
          as mean + 1.5 * std of percent_mito
        - groups: qc-metrics to threshold over (default is percent_mito)
        - genome: genome build for loading scanpy object (usually not needed)
        - norm: normalization method
            * library: default
            * scran: scran
        - scran_key: scran key to normalize on - if not provided, will do coarse clustering
        - scran_res: scran resolution for coarse clustering
        - regress_vars: variables to regress over
            *** Note: this will subset adata.X to highly variable genes
            *** Note: raw data will still be stored in adata.raw and adata.layers['counts']
        - regress_jobs: n_jobs to use for regression if specified
        - compute_doublets: run scrublet
        - remove_doublets: remove doublets before downstream processing
        - scrublet_key: obs column used to batch the scrublet runs
        - hvg: dictionary of keys specifying highly variable gene selection
        - qc: if True, returns adata object pre-filtering before downstream processing
            *** Note: good for easily computing qc-metrics
        - downstream: if True, continues with downstream processing
        - bbknn: if specified, performs light-weight batch correction on the provided
                 variable
        - issparse: if provided an AnnData object, whether the X variable is already in sparse format
        - verbose: print progress / filtering details
        - make_sparse: if True, converts the X variable of the AnnData to be returned to sparse format
    Outputs:
        - adata: AnnData Object
    """
    # ---------------------------------
    # Argument Helpers
    # ---------------------------------
    if qc:
        # QC mode disables cell/gene filtering so metrics cover everything.
        min_cells = 0
        min_genes = 0
    if hvg is None:
        hvg = {'min_mean':0.0125, 'max_mean':3, 'min_disp':0.5}
    # HVG-related keyword args must be passed through `hvg`, not **kwargs.
    for key in signature(sc.pp.highly_variable_genes).parameters:
        if key in kwargs:
            raise KeyError("Please place {} in hvg.".format(key))
    if groups is None:
        groups = ['percent_mito']
    if remove_doublets:
        assert compute_doublets, 'Doublet removal specified but doublets are not being computed.'
    # ---------------------------------
    # Pipeline
    # ---------------------------------
    if isinstance(file_name, AnnData):
        adata = file_name
        if not issparse:
            adata.X = sparse.csr_matrix(adata.X)
    else:
        adata = scanpy_adata_loader(file_name, genome=genome)
    adata.var_names_make_unique()
    # General QC
    sc.pp.calculate_qc_metrics(adata, inplace=True)
    sc.pp.filter_cells(adata, min_genes=min_genes)
    sc.pp.filter_genes(adata, min_cells=min_cells)
    if 'batch' not in list(adata.obs):
        adata.obs['batch'] = '1'
    # ---------------------------------
    # Compute Doublets
    # ---------------------------------
    if compute_doublets:
        score_doublets(adata, key=scrublet_key)
        if remove_doublets:
            print("Dropping {} doublets".format(sum(adata.obs['doublet'])))
            adata = adata[~adata.obs['doublet']]
    # ---------------------------------
    # Compute Mitochondrial + Ribo Genes
    # ---------------------------------
    mito_genes = adata.var_names.str.startswith('MT-')
    ribo_genes = adata.var_names.str.startswith('RPS') + adata.var_names.str.startswith('RPL')
    # Filter by grouped mito-thresholds
    adata.obs['percent_mito'] = np.sum(adata[:, mito_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
    adata.obs['percent_ribo'] = np.sum(adata[:, ribo_genes].X, axis=1).A1 / np.sum(adata.X, axis=1).A1
    # Saved before filtering; used below for the 'auto' mito threshold.
    m = adata.obs['percent_mito']
    if qc:
        return adata
    # ---------------------------------
    # Filter out barcodes
    # ---------------------------------
    if thresh is not None:
        adata = adata[adata.obs.index.isin(filter_upper(adata.copy(), groups, thresh=thresh, verbose=verbose, **kwargs))]
    # Mito Threshold
    if mito_thresh is not None:
        if mito_thresh == 'auto':
            mito_thresh = np.std(m)*1.5 + np.mean(m)
        print("\tFiltering mitochondiral genes over {}.".format(mito_thresh))
        prev_cells = adata.shape[0]
        adata = adata[adata.obs['percent_mito'] < mito_thresh]
        print("Filtered {} / {} barcodes.".format(prev_cells-adata.shape[0], prev_cells))
    adata.layers['counts'] = adata.X.copy()
    # ---------------------------------
    # Normalization
    # ---------------------------------
    sc.pp.normalize_per_cell(adata, counts_per_cell_after=10000)
    sc.pp.log1p(adata)
    # Save Raw
    adata.raw = adata.copy()
    if norm=='library':
        pass
    elif norm=='scran':
        from .norm import pyscran
        adata = pyscran(adata, resolution=scran_res, scran_key=scran_key, hvg=hvg, log_norm=True)
    # ---------------------------------
    # Score cell-cycle genes
    # ---------------------------------
    try:
        score_cc_genes(adata)
    except:
        # NOTE(review): bare except deliberately keeps this best-effort; it
        # swallows all errors from the cell-cycle scoring.
        if verbose: print("Unable to compute cell-cycle genes.")
        pass
    # ---------------------------------
    # Downstream processing
    # ---------------------------------
    if downstream:
        sc.pp.highly_variable_genes(adata, **hvg)
        # Regress out vars if provided
        if regress_vars is not None:
            adata = adata[:, adata.var['highly_variable']]
            sc.pp.regress_out(adata, regress_vars, n_jobs=regress_jobs)
        sc.pp.scale(adata, max_value=10)
        sc.tl.pca(adata, svd_solver='arpack', use_highly_variable=True)
        if bbknn is not None:
            # Light-weight batch correction
            sce.pp.bbknn(adata, batch_key=bbknn)
        else:
            sc.pp.neighbors(adata)
        sc.tl.louvain(adata, resolution=1)
        sc.tl.umap(adata)
    # Convert back to sparse if need-be
    if make_sparse:
        try:
            adata.X = sparse.csr_matrix(adata.X)
        except:
            pass
    return adata
| [
"numpy.sum",
"scanpy.pp.highly_variable_genes",
"scanpy.pp.neighbors",
"numpy.mean",
"scanpy.tl.louvain",
"scanpy.external.pp.bbknn",
"numpy.std",
"scanpy.pp.filter_genes",
"inspect.signature",
"scanpy.pp.scale",
"scanpy.pp.normalize_per_cell",
"scanpy.tl.pca",
"scanpy.pp.calculate_qc_metric... | [((445, 478), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (468, 478), False, 'import warnings\n'), ((2125, 2185), 'scanpy.pp.normalize_per_cell', 'sc.pp.normalize_per_cell', (['adata'], {'counts_per_cell_after': '(10000)'}), '(adata, counts_per_cell_after=10000)\n', (2149, 2185), True, 'import scanpy as sc\n'), ((2190, 2208), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {}), '(adata)\n', (2201, 2208), True, 'import scanpy as sc\n'), ((2214, 2291), 'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata'], {'min_mean': '(0.0125)', 'max_mean': '(3)', 'min_disp': '(0.5)'}), '(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)\n', (2241, 2291), True, 'import scanpy as sc\n'), ((2347, 2384), 'scanpy.tl.pca', 'sc.tl.pca', (['adata'], {'svd_solver': '"""arpack"""'}), "(adata, svd_solver='arpack')\n", (2356, 2384), True, 'import scanpy as sc\n'), ((2389, 2431), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {'knn': '(True)', 'n_pcs': '(40)'}), '(adata, knn=True, n_pcs=40)\n', (2404, 2431), True, 'import scanpy as sc\n'), ((2436, 2470), 'scanpy.tl.louvain', 'sc.tl.louvain', (['adata'], {'resolution': '(1)'}), '(adata, resolution=1)\n', (2449, 2470), True, 'import scanpy as sc\n'), ((6070, 6117), 'scanpy.pp.calculate_qc_metrics', 'sc.pp.calculate_qc_metrics', (['adata'], {'inplace': '(True)'}), '(adata, inplace=True)\n', (6096, 6117), True, 'import scanpy as sc\n'), ((6122, 6168), 'scanpy.pp.filter_cells', 'sc.pp.filter_cells', (['adata'], {'min_genes': 'min_genes'}), '(adata, min_genes=min_genes)\n', (6140, 6168), True, 'import scanpy as sc\n'), ((6173, 6219), 'scanpy.pp.filter_genes', 'sc.pp.filter_genes', (['adata'], {'min_cells': 'min_cells'}), '(adata, min_cells=min_cells)\n', (6191, 6219), True, 'import scanpy as sc\n'), ((8024, 8084), 'scanpy.pp.normalize_per_cell', 'sc.pp.normalize_per_cell', (['adata'], {'counts_per_cell_after': '(10000)'}), '(adata, 
counts_per_cell_after=10000)\n', (8048, 8084), True, 'import scanpy as sc\n'), ((8089, 8107), 'scanpy.pp.log1p', 'sc.pp.log1p', (['adata'], {}), '(adata)\n', (8100, 8107), True, 'import scanpy as sc\n'), ((5382, 5420), 'inspect.signature', 'signature', (['sc.pp.highly_variable_genes'], {}), '(sc.pp.highly_variable_genes)\n', (5391, 5420), False, 'from inspect import signature\n'), ((8723, 8764), 'scanpy.pp.highly_variable_genes', 'sc.pp.highly_variable_genes', (['adata'], {}), '(adata, **hvg)\n', (8750, 8764), True, 'import scanpy as sc\n'), ((8982, 9014), 'scanpy.pp.scale', 'sc.pp.scale', (['adata'], {'max_value': '(10)'}), '(adata, max_value=10)\n', (8993, 9014), True, 'import scanpy as sc\n'), ((9023, 9086), 'scanpy.tl.pca', 'sc.tl.pca', (['adata'], {'svd_solver': '"""arpack"""', 'use_highly_variable': '(True)'}), "(adata, svd_solver='arpack', use_highly_variable=True)\n", (9032, 9086), True, 'import scanpy as sc\n'), ((9269, 9303), 'scanpy.tl.louvain', 'sc.tl.louvain', (['adata'], {'resolution': '(1)'}), '(adata, resolution=1)\n', (9282, 9303), True, 'import scanpy as sc\n'), ((9312, 9329), 'scanpy.tl.umap', 'sc.tl.umap', (['adata'], {}), '(adata)\n', (9322, 9329), True, 'import scanpy as sc\n'), ((1268, 1278), 'numpy.mean', 'np.mean', (['d'], {}), '(d)\n', (1275, 1278), True, 'import numpy as np\n'), ((5914, 5940), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['adata.X'], {}), '(adata.X)\n', (5931, 5940), False, 'from scipy import sparse\n'), ((6969, 7007), 'numpy.sum', 'np.sum', (['adata[:, mito_genes].X'], {'axis': '(1)'}), '(adata[:, mito_genes].X, axis=1)\n', (6975, 7007), True, 'import numpy as np\n'), ((7013, 7036), 'numpy.sum', 'np.sum', (['adata.X'], {'axis': '(1)'}), '(adata.X, axis=1)\n', (7019, 7036), True, 'import numpy as np\n'), ((7072, 7110), 'numpy.sum', 'np.sum', (['adata[:, ribo_genes].X'], {'axis': '(1)'}), '(adata[:, ribo_genes].X, axis=1)\n', (7078, 7110), True, 'import numpy as np\n'), ((7116, 7139), 'numpy.sum', 'np.sum', 
(['adata.X'], {'axis': '(1)'}), '(adata.X, axis=1)\n', (7122, 7139), True, 'import numpy as np\n'), ((8913, 8972), 'scanpy.pp.regress_out', 'sc.pp.regress_out', (['adata', 'regress_vars'], {'n_jobs': 'regress_jobs'}), '(adata, regress_vars, n_jobs=regress_jobs)\n', (8930, 8972), True, 'import scanpy as sc\n'), ((9174, 9210), 'scanpy.external.pp.bbknn', 'sce.pp.bbknn', (['adata'], {'batch_key': 'bbknn'}), '(adata, batch_key=bbknn)\n', (9186, 9210), True, 'import scanpy.external as sce\n'), ((9237, 9259), 'scanpy.pp.neighbors', 'sc.pp.neighbors', (['adata'], {}), '(adata)\n', (9252, 9259), True, 'import scanpy as sc\n'), ((9426, 9452), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['adata.X'], {}), '(adata.X)\n', (9443, 9452), False, 'from scipy import sparse\n'), ((1290, 1299), 'numpy.std', 'np.std', (['d'], {}), '(d)\n', (1296, 1299), True, 'import numpy as np\n'), ((7596, 7606), 'numpy.mean', 'np.mean', (['m'], {}), '(m)\n', (7603, 7606), True, 'import numpy as np\n'), ((7580, 7589), 'numpy.std', 'np.std', (['m'], {}), '(m)\n', (7586, 7589), True, 'import numpy as np\n')] |
"""
This file is part of the rgf_grape python package.
Copyright (C) 2017-2018 <NAME>
For details of the rgf_grape algorithm and applications see:
<NAME>, <NAME>, and <NAME>, Majorana bound state engineering
via efficient real-space parameter optimization, ArXiv 1804.03170 (2018).
"""
import subprocess
import os
import pickle
import numpy as np
from datetime import datetime
def git_version():
    '''
    Return the short git revision of the repository containing this file,
    or "Unknown" if it cannot be determined.

    Adapted from numpy/setup.py.
    '''
    def _minimal_ext_cmd(cmd):
        # Construct a minimal, locale-neutral environment so git output is
        # predictable; SYSTEMROOT/PATH are kept so the binary can be found.
        env = {}
        for k in ['SYSTEMROOT', 'PATH']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               env=env).communicate()[0]
        return out
    cwd = os.getcwd()
    try:
        os.chdir(os.path.dirname(os.path.realpath(__file__)))
        out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except OSError:
        # git missing or not runnable; report a placeholder instead of raising.
        GIT_REVISION = "Unknown"
    finally:
        # Bug fix: always restore the working directory -- the original left
        # the process chdir'ed into this file's directory when the git call
        # raised OSError.
        os.chdir(cwd)
    return GIT_REVISION
def print_git_version():
    """Print the program start time and the current git revision."""
    started = datetime.now()
    revision = git_version()
    print('Program started on {}'.format(started))
    print('Current git version {}'.format(revision))
def get_path(file):
    '''
    Return the directory containing *file*; calling get_path(__file__)
    yields the directory of the calling module.
    '''
    absolute = os.path.abspath(file)
    return os.path.dirname(absolute)
def save_to_file(fname, obj, append=False):
    """Pickle *obj* into *fname*, appending to the file when *append* is True."""
    mode = 'ab' if append else 'wb'
    with open(fname, mode) as output:
        pickle.dump(obj, output)
def load_file(fname):
    """Unpickle every object stored in *fname*.

    Returns the single object when the file holds exactly one, otherwise the
    list of all objects in file order.
    """
    objs = []
    with open(fname, 'rb') as infile:
        while True:
            try:
                objs.append(pickle.load(infile))
            except EOFError:
                break
    if len(objs) == 1:
        return objs[0]
    return objs
def init_numpy_seed(seed=None, save_to_file=False):
    """
    Seed numpy's global random number generator.

    :param seed: integer seed; if None, a random seed is drawn and printed.
    :param save_to_file: if True, write the seed to 'seed.dat' in the cwd.
        NOTE(review): this parameter shadows the module-level save_to_file
        helper inside this function; name kept for backward compatibility.
    :return: the seed that was used.
    """
    if seed is None:
        seed = int(np.floor(np.random.rand(1)[0]*1e8))
        print('Randomly chosen seed = ', seed)
    if save_to_file:
        np.savetxt('seed.dat', [[seed]])
    # np.random.seed returns None; the original bound it to a dead variable.
    np.random.seed(seed)
    return seed
"os.path.abspath",
"pickle.dump",
"numpy.random.seed",
"subprocess.Popen",
"os.getcwd",
"os.path.realpath",
"numpy.savetxt",
"os.environ.get",
"pickle.load",
"numpy.random.rand",
"datetime.datetime.now",
"os.chdir"
] | [((2281, 2301), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2295, 2301), True, 'import numpy as np\n'), ((946, 957), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (955, 957), False, 'import os\n'), ((1151, 1164), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (1159, 1164), False, 'import os\n'), ((1550, 1571), 'os.path.abspath', 'os.path.abspath', (['file'], {}), '(file)\n', (1565, 1571), False, 'import os\n'), ((1727, 1751), 'pickle.dump', 'pickle.dump', (['obj', 'output'], {}), '(obj, output)\n', (1738, 1751), False, 'import pickle\n'), ((2240, 2272), 'numpy.savetxt', 'np.savetxt', (['"""seed.dat"""', '[[seed]]'], {}), "('seed.dat', [[seed]])\n", (2250, 2272), True, 'import numpy as np\n'), ((591, 608), 'os.environ.get', 'os.environ.get', (['k'], {}), '(k)\n', (605, 608), False, 'import os\n'), ((1311, 1325), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1323, 1325), False, 'from datetime import datetime\n'), ((991, 1017), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1007, 1017), False, 'import os\n'), ((800, 854), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'env': 'env'}), '(cmd, stdout=subprocess.PIPE, env=env)\n', (816, 854), False, 'import subprocess\n'), ((1894, 1913), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1905, 1913), False, 'import pickle\n'), ((2137, 2154), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2151, 2154), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
sample script of preparing txt for autodis infer
"""
import os
import argparse
import numpy as np
def parse_args():
    """Define the command-line interface and return the known parsed arguments."""
    parser = argparse.ArgumentParser(description="prepare txt")
    options = [
        ('--train_line_count', int, 45840617),
        ('--test_size', float, 0.1),
        ('--seed', int, 2020),
        ('--data_dir', str, '../data/input/origin_data'),
        ('--dst_dir', str, '../data/input/origin_data'),
        ('--data_input', str, "train.txt"),
        ('--data_output', str, "test.txt"),
    ]
    for flag, kind, default in options:
        parser.add_argument(flag, type=kind, default=default)
    known, _unknown = parser.parse_known_args()
    return known
def run():
    """
    Split the input file into a held-out test file.

    Draws ``test_size * train_line_count`` random line indices (seeded for
    reproducibility) and copies exactly those lines from the input file to
    the output file.
    """
    args = parse_args()
    test_size = int(args.train_line_count * args.test_size)
    all_indices = [i for i in range(args.train_line_count)]
    np.random.seed(args.seed)
    np.random.shuffle(all_indices)
    print("all_indices.size:{}".format(len(all_indices)))
    # Set membership gives O(1) lookups while scanning the input file.
    test_indices_set = set(all_indices[:test_size])
    print("test_indices_set.size:{}".format(len(test_indices_set)))
    # Bug fix: use context managers so both files are closed even when an
    # error occurs (the original leaked the output handle on exceptions).
    with open(os.path.join(args.data_dir, args.data_input), "r") as f, \
            open(os.path.join(args.dst_dir, args.data_output), "w") as fo:
        for i, line in enumerate(f):
            if i in test_indices_set:
                fo.write(line)


if __name__ == '__main__':
    run()
| [
"os.path.join",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.random.shuffle"
] | [((836, 886), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""prepare txt"""'}), "(description='prepare txt')\n", (859, 886), False, 'import argparse\n'), ((1647, 1672), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1661, 1672), True, 'import numpy as np\n'), ((1677, 1707), 'numpy.random.shuffle', 'np.random.shuffle', (['all_indices'], {}), '(all_indices)\n', (1694, 1707), True, 'import numpy as np\n'), ((1900, 1944), 'os.path.join', 'os.path.join', (['args.data_dir', 'args.data_input'], {}), '(args.data_dir, args.data_input)\n', (1912, 1944), False, 'import os\n'), ((1975, 2019), 'os.path.join', 'os.path.join', (['args.dst_dir', 'args.data_output'], {}), '(args.dst_dir, args.data_output)\n', (1987, 2019), False, 'import os\n')] |
'''
convert to Tusimple json/txt format.
'''
import cv2
import json
import numpy as np
import os
'''
datasets name:vil-100
paper link: https://arxiv.org/abs/2108.08482
reference: https://github.com/yujun0-0/MMA-Net/tree/main/dataset
datasets structure:
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
|----train.json
*********** A sample of one json-file ***********
{
"camera_id": 8272,
"info": {
"height": 1080 ,
"width": 1920,
"date": "2020-11-24",
"image_path": "0_Road014_Trim005_frames/XXXXXX.jpg"
},
"annotations": {
"lane": [{
"id": 1,
"lane_id": 1,
"attribute": 1,
"occlusion": 0,
"points": [[412.6, 720],[423.7, 709.9], ...]
}, {...}, {...}, {...}]
}
}
'''
import os
import cv2
import numpy as np
import json
def get_mask(mask, label, instance_gap):
    """
    Rasterize the lane annotations of one VIL-100 json label onto *mask*.

    Each lane is drawn as a thick polyline whose pixel value is
    ``instance_gap * (lane_index + 1)`` so individual instances stay
    separable in the resulting image.

    :param mask: HxWx3 uint8 image to draw into
    :param label: path to the per-frame json annotation file
    :param instance_gap: pixel-value gap between consecutive lane instances
    :return: (mask, max_val) where max_val is the highest instance value drawn
    """
    # Bug fix: close the label file (the original leaked the file handle).
    with open(label) as label_content:
        label_info = json.load(label_content)['annotations']
    lanes_num = 0
    for index, line in enumerate(label_info['lane']):
        lanes_num += 1
        # Collect integer pixel coordinates of the annotated points.
        points = [(int(float(p[0])), int(float(p[1]))) for p in line['points']]
        # Sort along y (then x) so consecutive points form a vertical polyline.
        points = sorted(points, key=lambda k: (k[1], k[0]))
        color = [instance_gap * (index + 1)] * 3
        # Draw each consecutive point pair as a segment of width 4.
        for pt_start in range(len(points) - 1):
            mask = cv2.line(mask, points[pt_start], points[pt_start + 1],
                            color, 4, lineType=8)
    max_val = lanes_num * instance_gap
    return mask, max_val
def lane_instance(label_gray, pix_value, hstart, hend, hdis):
    """
    Sample one lane instance along horizontal scan lines.

    For every row in ``range(hstart, hend, hdis)`` the truncated mean
    x-coordinate of pixels equal to *pix_value* is recorded; rows without
    any matching pixel yield -2 (the Tusimple "no point" marker).
    """
    samples = []
    for row in range(hstart, hend, hdis):
        xs = np.where(label_gray[row][:] == pix_value)[0]
        if len(xs) == 0:
            samples.append(-2)
        else:
            # truncated mean x over all matching columns in this row
            samples.append(int(sum(xs) / len(xs)))
    return samples
if __name__ == '__main__':
    # Convert VIL-100 annotations into Tusimple-style json, one record per
    # image, appended to <dataset>/data/<category>_converted.json.
    # choose datasets category from:'train','test'
    datasets_category = 'test'
    dataset_dir = '/mnt/h/lane_datasets/VIL-100'
    # datasets dir
    # dataset_dir = '{}/{}/'.format(path_to_datasets, datasets_category)
    # write ground truth in json or txt.
    save_gt = dataset_dir + '/data/{}_converted.json'.format(datasets_category)
    # read file from txt
    txt_file = '{}/data/{}.txt'.format(dataset_dir, datasets_category)
    # NOTE(review): file_list is never closed; acceptable for a one-shot
    # script but a `with` block would be cleaner.
    file_list = open(txt_file)
    for file in file_list:
        file = file.strip()
        full_img_path = dataset_dir + file
        if not os.path.exists(full_img_path):
            continue
        print("Now dealing with:", file)
        file_name = os.path.splitext(file.strip().split('/')[-1])[0]
        json_file = dataset_dir + file.replace('JPEGImages', 'Json') + '.json'
        # if os.path.exists(full_img_path):
        img = cv2.imread(full_img_path)
        h = img.shape[0]
        w = img.shape[1]
        # set param.
        points_num = 56*3
        instance_gap = 20
        hstart = 0
        hend = h
        hdis = h // points_num
        img_dict = {}
        h_samples = [] # height
        lanes = []
        mask = np.zeros([h,w,3],dtype=np.uint8)
        # parse label
        label_mask, max_value = get_mask(mask, json_file,instance_gap)
        # convert to grayscale.
        label_gray = label_mask[:,:,1]
        for hstep in range(hstart, hend, hdis):
            h_samples.append(hstep)
        # neg samples.
        if max_value == 0:
            lanes.append([-2]*points_num)
        # value:pix_value
        else:
            # One pass per lane instance drawn at value = instance_gap * k.
            for value in range(instance_gap, max_value + 1, instance_gap):
                # print("value", value)
                lane = lane_instance(label_gray,value, hstart, hend, hdis)
                if max(lane) == -2:
                    lanes.append([-2]*points_num)
                else:
                    lanes.append(lane)
        img_dict["lanes"] = lanes
        img_dict["h_samples"] = h_samples
        img_dict["raw_file"] = f'{file}' # img_path
        img_dict_str = str(img_dict)
        # print(img_dict_str)
        # NOTE(review): this str()/eval() round trip is redundant (img_dict is
        # unchanged) and eval is unsafe in general; kept as-is here.
        img_dict = eval(img_dict_str)
        # write to txt
        # with open("save_gt","a+") as f:
        #     f.writelines(img_dict_str + '\n')
        #     f.close()
        # write to json
        with open(save_gt,"a+") as out:
            string = json.dumps(img_dict)
            string += '\n'
            out.write(string)
            out.close()
        # cv2.imencode('.png',label_mask)[1].tofile('{}\{}.png'.format(save_mask_dir,file_name))
    print("finished~~")
| [
"cv2.line",
"json.load",
"numpy.zeros",
"os.path.exists",
"json.dumps",
"cv2.imread",
"numpy.where"
] | [((1091, 1115), 'json.load', 'json.load', (['label_content'], {}), '(label_content)\n', (1100, 1115), False, 'import json\n'), ((2167, 2210), 'numpy.where', 'np.where', (['(label_gray[hstep][:] == pix_value)'], {}), '(label_gray[hstep][:] == pix_value)\n', (2175, 2210), True, 'import numpy as np\n'), ((3462, 3487), 'cv2.imread', 'cv2.imread', (['full_img_path'], {}), '(full_img_path)\n', (3472, 3487), False, 'import cv2\n'), ((3822, 3857), 'numpy.zeros', 'np.zeros', (['[h, w, 3]'], {'dtype': 'np.uint8'}), '([h, w, 3], dtype=np.uint8)\n', (3830, 3857), True, 'import numpy as np\n'), ((1754, 1853), 'cv2.line', 'cv2.line', (['mask', 'points[ptStart]', 'points[ptEnd]', '([instance_gap * (index + 1)] * 3)', '(4)'], {'lineType': '(8)'}), '(mask, points[ptStart], points[ptEnd], [instance_gap * (index + 1)] *\n 3, 4, lineType=8)\n', (1762, 1853), False, 'import cv2\n'), ((3142, 3171), 'os.path.exists', 'os.path.exists', (['full_img_path'], {}), '(full_img_path)\n', (3156, 3171), False, 'import os\n'), ((5162, 5182), 'json.dumps', 'json.dumps', (['img_dict'], {}), '(img_dict)\n', (5172, 5182), False, 'import json\n')] |
from context import ROOT_DIR, nnUtils
import numpy as np
import tensorflow as tf
import ast
import CAME
import csv
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
def get_save_path(antipattern, history_length, net_number):
    """Return the checkpoint path of one network in a trained ensemble."""
    hist_dir = 'hist_' + str(history_length)
    models_dir = os.path.join(ROOT_DIR, 'approaches', 'came', 'trained_models', antipattern, hist_dir)
    return os.path.join(models_dir, 'network' + str(net_number))
def get_optimal_parameters(antipattern, history_length):
    """Return the first tuning-result row with a valid F-measure, each field
    parsed via ast.literal_eval; implicitly None when no such row exists."""
    csv_name = 'came_' + antipattern + '_' + str(history_length) + '.csv'
    tuning_file = os.path.join(ROOT_DIR, 'experiments', 'tuning', 'results', csv_name)
    with open(tuning_file, 'r') as handle:
        for row in csv.DictReader(handle, delimiter=';'):
            if row['F-measure'] == 'nan':
                continue
            return {key: ast.literal_eval(row[key]) for key in row}
def getSmells(systemName, history_length):
    """Score every instance of `systemName` for the god-class anti-pattern.

    Rebuilds the CAME model with the best tuned hyper-parameters, restores
    each of the 10 trained networks in turn, and returns the element-wise
    mean of their forward-pass outputs (ensemble averaging).
    """
    params = get_optimal_parameters('god_class', history_length)
    x = nnUtils.get_came_instances(systemName, 'god_class', history_length)
    # New graph: discard any previously built TF (1.x) graph so repeated
    # calls do not accumulate duplicate variables.
    tf.reset_default_graph()
    # Create model with the tuned hyper-parameters; the trained weights are
    # loaded from checkpoints below.
    model = CAME.CAME(
        nb_metrics=x.shape[-1],
        history_length=history_length,
        filters=params['Filters'],
        kernel_sizes=params['Kernel'],
        pool_sizes=params['Pool'],
        dense_sizes=params['Dense'])
    # To restore a trained model
    saver = tf.train.Saver(max_to_keep=10)
    with tf.Session() as session:
        # Ensemble Prediction: average the 10 independently trained networks.
        predictions = []
        for i in range(10):
            # Reload the variables into the TensorFlow graph.
            saver.restore(sess=session, save_path=get_save_path('god_class', history_length, i))
            # Perform forward calculation
            pred = session.run(model.inference, feed_dict={model.input_x: x})
            predictions.append(pred)
        return np.mean(np.array(predictions), axis=0) | [
"tensorflow.train.Saver",
"csv.DictReader",
"context.nnUtils.get_came_instances",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"numpy.array",
"CAME.CAME",
"ast.literal_eval"
] | [((929, 996), 'context.nnUtils.get_came_instances', 'nnUtils.get_came_instances', (['systemName', '"""god_class"""', 'history_length'], {}), "(systemName, 'god_class', history_length)\n", (955, 996), False, 'from context import ROOT_DIR, nnUtils\n'), ((1012, 1036), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1034, 1036), True, 'import tensorflow as tf\n'), ((1063, 1251), 'CAME.CAME', 'CAME.CAME', ([], {'nb_metrics': 'x.shape[-1]', 'history_length': 'history_length', 'filters': "params['Filters']", 'kernel_sizes': "params['Kernel']", 'pool_sizes': "params['Pool']", 'dense_sizes': "params['Dense']"}), "(nb_metrics=x.shape[-1], history_length=history_length, filters=\n params['Filters'], kernel_sizes=params['Kernel'], pool_sizes=params[\n 'Pool'], dense_sizes=params['Dense'])\n", (1072, 1251), False, 'import CAME\n'), ((1295, 1325), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': '(10)'}), '(max_to_keep=10)\n', (1309, 1325), True, 'import tensorflow as tf\n'), ((668, 703), 'csv.DictReader', 'csv.DictReader', (['file'], {'delimiter': '""";"""'}), "(file, delimiter=';')\n", (682, 703), False, 'import csv\n'), ((1333, 1345), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1343, 1345), True, 'import tensorflow as tf\n'), ((1717, 1738), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (1725, 1738), True, 'import numpy as np\n'), ((775, 801), 'ast.literal_eval', 'ast.literal_eval', (['row[key]'], {}), '(row[key])\n', (791, 801), False, 'import ast\n')] |
#
# This file is part of qa_explorer.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module implements a task to match visit sources to coadd sources and save a table with that info.
Table is saved as `visitMatchTable` dataset.
"""
import pandas as pd
import numpy as np
from lsst.pex.config import Config, Field
from lsst.pipe.base import CmdLineTask, ArgumentParser
from lsst.daf.persistence import NoResults
from lsst.qa.explorer.match import match_lists
from lsst.pipe.drivers.utils import TractDataIdContainer
from lsst.pipe.tasks.parquetTable import ParquetTable
__all__ = ["MatchVisitsConfig", "MatchVisitsTask"]
class MatchVisitsConfig(Config):
    """Configuration for `MatchVisitsTask`."""
    # Coadd dataset name prefix (e.g. "deep" -> deepCoadd_calexp).
    coaddName = Field(dtype=str, default="deep", doc="Name of coadd")
    # Maximum accepted coadd-to-visit source separation, in arcseconds.
    matchRadius = Field(dtype=float, default=0.2, doc="match radius in arcseconds")
class MatchVisitsTask(CmdLineTask):
    """Write a tract-level table of closest-match visit IDs
    Run this task on a tract, and it writes a full-tract `visitMatchTable` (a ParquetTable)
    of coadd -> visit match ids and match distances, organized with a multi-level index.
    Example usage:
    matchVisits.py <repo> --output <output_repo> --id tract=9615 filter=HSC-I
    """

    _DefaultName = "matchVisits"
    ConfigClass = MatchVisitsConfig
    # Dataset read for coadd source positions.
    inputDataset = "analysisCoaddTable_forced"
    # Dataset written with the per-visit match results.
    outputDataset = "visitMatchTable"

    @classmethod
    def _makeArgumentParser(cls):
        # Tract-level data IDs: --id is expanded into per-patch data references.
        parser = ArgumentParser(name=cls._DefaultName)
        parser.add_id_argument(
            "--id",
            cls.inputDataset,
            help="data ID, e.g. --id tract=12345",
            ContainerClass=TractDataIdContainer,
        )
        return parser

    def matchCats(self, df1, df2, raColumn="coord_ra", decColumn="coord_dec"):
        """Match two catalogs, represented as dataframes
        This uses the `match_lists` function, that uses a KDTree for matching.
        Parameters
        ----------
        df1, df2 : pandas.DataFrame
            Catalogs to match
        raColumn, decColumn : str
            Names of ra and dec columns
        Returns
        -------
        good : `numpy.ndarray` (bool)
            Boolean array indicating which indices of df1 have valid matches
        matchId : `numpy.ndarray` (dtype int)
            Index of closest source in df2 to each source in df1.
        distance : `numpy.ndarray` (dtype float)
            Distance of match
        """
        ra1, dec1 = df1[raColumn].values, df1[decColumn].values
        ra2, dec2 = df2[raColumn].values, df2[decColumn].values
        # matchRadius is configured in arcsec and divided by 3600 here —
        # presumably the coordinates are in degrees (TODO confirm).
        dist, inds = match_lists(ra1, dec1, ra2, dec2, self.config.matchRadius / 3600)
        good = np.isfinite(dist)  # sometimes dist is inf, sometimes nan.
        id2 = df2.index
        # Scale the match distances back up by 3600 for the returned table.
        return good, id2[inds[good]], dist[good] * 3600.0

    def runDataRef(self, patchRefList):
        """Matches visits to coadd and writes output
        Visits to match are chosen by taking all input coadd patches (collected from
        requested tract), and querying for all visits used to construct that coadd.
        The set of total visits to put in the match table is union of all
        Parameters
        ----------
        patchRefList : `list`
            List of patch datarefs from which visits will be selected.
        Returns
        -------
        matchDf : `pandas.DataFrame`
            Dataframe of match data. Column index is multi-level, with the first
            level being visit number, and second level being `['matchId', 'distance']`.
        """
        butler = patchRefList[0].getButler()
        tract = patchRefList[0].dataId["tract"]
        filt = patchRefList[0].dataId["filter"]
        # Collect all visits that overlap any part of the requested tract
        allVisits = set()
        for patchRef in patchRefList:
            try:
                exp = butler.get("deepCoadd_calexp", dataId=patchRef.dataId)
                allVisits = allVisits.union(set(exp.getInfo().getCoaddInputs().visits["id"]))
            except NoResults:
                # Patch has no coadd; simply skip it.
                pass
        self.log.info("matching {} visits to tract {}: {}".format(len(allVisits), tract, allVisits))
        # Match
        columns = ["coord_ra", "coord_dec"]
        coaddDf = (butler.get(self.inputDataset, tract=tract, filter=filt, subdir="").
                   toDataFrame(columns=columns))
        # Two top-level column groups ('matchId', 'distance'), one sub-column per visit.
        column_index = pd.MultiIndex.from_product([["matchId", "distance"], allVisits])
        matchDf = pd.DataFrame(columns=column_index, index=coaddDf.index)
        for i, visit in enumerate(allVisits):
            try:
                visitDf = (butler.get("analysisVisitTable", tract=tract, filter=filt, visit=visit, subdir="").
                           toDataFrame(columns=columns))
            except NoResults:
                self.log.info(f"({i+1} of {len(allVisits)}) visit {visit}: analysisVisitTable not available")
                continue
            # Rows without a match within the radius stay NaN in matchDf.
            good, ids, distance = self.matchCats(coaddDf, visitDf)
            matchDf.loc[good, ("matchId", visit)] = ids
            matchDf.loc[good, ("distance", visit)] = distance
            self.log.info(
                "({} of {}) visit {}: {} sources matched.".format(i + 1, len(allVisits), visit, good.sum())
            )
        # Persist the full-tract match table as the `visitMatchTable` dataset.
        butler.put(ParquetTable(dataFrame=matchDf), self.outputDataset, tract=tract, filter=filt)
        return matchDf

    def writeMetadata(self, dataRef):
        """No metadata to write.
        """
        pass
| [
"pandas.DataFrame",
"lsst.pipe.tasks.parquetTable.ParquetTable",
"numpy.isfinite",
"pandas.MultiIndex.from_product",
"lsst.pex.config.Field",
"lsst.qa.explorer.match.match_lists",
"lsst.pipe.base.ArgumentParser"
] | [((1522, 1575), 'lsst.pex.config.Field', 'Field', ([], {'dtype': 'str', 'default': '"""deep"""', 'doc': '"""Name of coadd"""'}), "(dtype=str, default='deep', doc='Name of coadd')\n", (1527, 1575), False, 'from lsst.pex.config import Config, Field\n'), ((1594, 1659), 'lsst.pex.config.Field', 'Field', ([], {'dtype': 'float', 'default': '(0.2)', 'doc': '"""match radius in arcseconds"""'}), "(dtype=float, default=0.2, doc='match radius in arcseconds')\n", (1599, 1659), False, 'from lsst.pex.config import Config, Field\n'), ((2277, 2314), 'lsst.pipe.base.ArgumentParser', 'ArgumentParser', ([], {'name': 'cls._DefaultName'}), '(name=cls._DefaultName)\n', (2291, 2314), False, 'from lsst.pipe.base import CmdLineTask, ArgumentParser\n'), ((3430, 3495), 'lsst.qa.explorer.match.match_lists', 'match_lists', (['ra1', 'dec1', 'ra2', 'dec2', '(self.config.matchRadius / 3600)'], {}), '(ra1, dec1, ra2, dec2, self.config.matchRadius / 3600)\n', (3441, 3495), False, 'from lsst.qa.explorer.match import match_lists\n'), ((3512, 3529), 'numpy.isfinite', 'np.isfinite', (['dist'], {}), '(dist)\n', (3523, 3529), True, 'import numpy as np\n'), ((5227, 5291), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[['matchId', 'distance'], allVisits]"], {}), "([['matchId', 'distance'], allVisits])\n", (5253, 5291), True, 'import pandas as pd\n'), ((5310, 5365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_index', 'index': 'coaddDf.index'}), '(columns=column_index, index=coaddDf.index)\n', (5322, 5365), True, 'import pandas as pd\n'), ((6118, 6149), 'lsst.pipe.tasks.parquetTable.ParquetTable', 'ParquetTable', ([], {'dataFrame': 'matchDf'}), '(dataFrame=matchDf)\n', (6130, 6149), False, 'from lsst.pipe.tasks.parquetTable import ParquetTable\n')] |
#import random
from random import uniform
import numpy as np
#random.seed(100)
#np.random.seed(100)
def randi(N):
  """ get random integer in range [0, N) """
  # random.uniform(0, N) may, per the stdlib docs, return the end point N
  # itself because of floating-point rounding, in which case int() would
  # yield N and violate the documented half-open range; clamp to N - 1.
  return min(int(uniform(0, N)), N - 1)
def merge_init_structs(s0, s1):
  """Merge initialization struct s1 into s0 in place.

  Model parameters from s1 are added to s0 by reference (no copies), and
  s1's 'update' and 'regularize' name lists are appended to s0's. A
  parameter name present in both models is an error.
  """
  for name, param in s1['model'].items():
    assert name not in s0['model'], 'Error: looks like parameter %s is trying to be initialized twice!' % (name, )
    s0['model'][name] = param  # shared reference, not a copy
  for list_key in ('update', 'regularize'):
    s0[list_key].extend(s1[list_key])
def initw(n,d): # initialize matrix of this size
  """Sample an (n, d) weight matrix uniformly from [-0.1, 0.1)."""
  scale = 0.1
  uniform_01 = np.random.rand(n, d)
  # Map U[0, 1) onto U[-scale, scale).
  return (uniform_01 * 2 - 1) * scale
def accumNpDicts(d0, d1):
  """ forall k in d0, d0 += d1 . d's are dictionaries of key -> numpy array """
  for k in d1:
    if k in d0:
      # In-place accumulation: mutates the array already stored in d0.
      d0[k] += d1[k]
    else:
      # NOTE(review): this stores a reference to d1's array, not a copy, so a
      # later in-place `+=` on d0[k] would also mutate d1[k] — confirm that
      # callers never reuse d1 after this call.
      d0[k] = d1[k] | [
"numpy.random.rand",
"random.uniform"
] | [((174, 187), 'random.uniform', 'uniform', (['(0)', 'N'], {}), '(0, N)\n', (181, 187), False, 'from random import uniform\n'), ((612, 632), 'numpy.random.rand', 'np.random.rand', (['n', 'd'], {}), '(n, d)\n', (626, 632), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 4 08:56:58 2021
@author: Thore
"""
import numpy as np
from numpy.random import exponential
from random import randint, uniform
class GeneticAlgorithm:
    def __init__(self, cost_func, bounds, N = 8000, mutation_rate = 0.05, 
                 survivor_fraction = 0.1, num_children = 2, beta = 0.1, seed = []):
        """
        Create model for genetic algorithm solver
        Parameters
        ----------
        cost_func : function
            takes in population and computes cost.
        bounds : list or array
            upper bounds for population.
        N : int, optional
            population size. The default is 8000.
        mutation_rate : float, optional
            chance each gene mutates. The default is 0.05.
        survivor_fraction : TYPE, optional
            fraction of carry-over of fittest from previous gen. The default is 0.1.
        num_children : int, optional
            number of children each parent pair generates. The default is 2.
        beta: float, optional
            exp(-1)% of parents are chosen in top fraction of this size. The default is 0.1.
        seed : Array, optional
            initial population. Random if left empty. The default is [].
        """
        self.f = cost_func
        self.bounds = bounds
        self.N = N #population size
        self.mutation_rate = mutation_rate #chance a feature mutates randomly
        self.survivor_fraction = survivor_fraction #fraction of fittest old gen carry-over to new gen
        self.num_children = num_children #number of children each selected pair generates
        self.beta = beta #exp(-1)% of parents are chosen in top fraction of this size
        # NOTE: `seed = []` is a mutable default argument; harmless here
        # because it is only read (len), never mutated.
        if len(seed) == 0:
            print('randomly generating seed.')
            self.population = self.generate_random(N)
        else:
            self.population = seed
        assert len(self.population) == N, str(len(self.population))

    def generate_random(self, N):
        """generate random population of size N"""
        population = []
        for i in range(N):
            # randint is inclusive of both end points, so each gene lies in
            # [0, upper_limit] for the corresponding entry of self.bounds.
            ensemble = [randint(0, upper_limit) for upper_limit in self.bounds]
            population.append(ensemble)
        return np.array(population)

    def get_fitness(self):
        """compute fitness of population (cost per individual; lower is fitter)"""
        return np.array([self.f(p) for p in self.population])

    def get_diversity(self):
        """compute how varied the population is in each feature (count of unique values per gene)"""
        return np.array([len(np.unique(p)) for p in self.population.T])

    def __iter__(self):
        """make iterable"""
        return self

    def __next__(self):
        """Next step in optimisation: Update population by one generation"""
        #calculate fitness
        fitness = self.get_fitness()
        #calculate diversity
        diversity = self.get_diversity()
        #Order population (ascending cost: fittest individuals first)
        order = np.argsort(fitness)
        population_sorted = self.population[order]
        #create new generation
        population_newgen = []
        b = self.N * self.beta
        newsize = int(self.N * (1 - self.survivor_fraction))
        oldsize = int(self.N - newsize)
        # NOTE(review): each pass appends num_children children, so the new
        # generation can overshoot newsize when num_children does not divide
        # it, slightly growing the population — confirm this is intended.
        while len(population_newgen) < newsize:
            #get random indeces to select parents; the exponential draw
            #biases selection towards low indices, i.e. the fittest.
            pairs_idx = np.array(exponential(b, 2)).astype(int)
            if max(pairs_idx) >= (self.N - 1): #index too high
                continue #try again (rejection sampling)
            pair = population_sorted[pairs_idx]
            #cross over: randomly select features from 2 parents
            #(the comprehension's `b` is scoped to the comprehension in
            #Python 3 and does not clobber the outer `b`)
            children = []
            for i in range(self.num_children):
                children.append([b[randint(0,1)] for b in pair.T])
            #mutate
            for child in children:
                for i, feature in enumerate(child):
                    #mutate this gene with a chance of mutation_rate
                    if uniform(0, 1) < self.mutation_rate:
                        child[i] = randint(0, self.bounds[i])
            #add to population
            for child in children:
                population_newgen.append(child)
        #finished creating new population, turn to np.array
        population_newgen = np.array(population_newgen)
        #carry-over fittest from the old gen
        population_oldgen = population_sorted[0:oldsize,:]
        #update population
        self.population = np.concatenate((population_newgen,population_oldgen))
        return (min(fitness), diversity)

    def get_solution(self):
        """return fittest sample"""
        fitness = self.get_fitness()
        order = np.argsort(fitness)
        population_sorted = self.population[order]
        return population_sorted[0]
| [
"random.randint",
"numpy.concatenate",
"random.uniform",
"numpy.random.exponential",
"numpy.argsort",
"numpy.array",
"numpy.unique"
] | [((2308, 2328), 'numpy.array', 'np.array', (['population'], {}), '(population)\n', (2316, 2328), True, 'import numpy as np\n'), ((3045, 3064), 'numpy.argsort', 'np.argsort', (['fitness'], {}), '(fitness)\n', (3055, 3064), True, 'import numpy as np\n'), ((4397, 4424), 'numpy.array', 'np.array', (['population_newgen'], {}), '(population_newgen)\n', (4405, 4424), True, 'import numpy as np\n'), ((4582, 4636), 'numpy.concatenate', 'np.concatenate', (['(population_newgen, population_oldgen)'], {}), '((population_newgen, population_oldgen))\n', (4596, 4636), True, 'import numpy as np\n'), ((4808, 4827), 'numpy.argsort', 'np.argsort', (['fitness'], {}), '(fitness)\n', (4818, 4827), True, 'import numpy as np\n'), ((2188, 2211), 'random.randint', 'randint', (['(0)', 'upper_limit'], {}), '(0, upper_limit)\n', (2195, 2211), False, 'from random import randint, uniform\n'), ((2632, 2644), 'numpy.unique', 'np.unique', (['p'], {}), '(p)\n', (2641, 2644), True, 'import numpy as np\n'), ((3459, 3476), 'numpy.random.exponential', 'exponential', (['b', '(2)'], {}), '(b, 2)\n', (3470, 3476), False, 'from numpy.random import exponential\n'), ((4067, 4080), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4074, 4080), False, 'from random import randint, uniform\n'), ((4138, 4164), 'random.randint', 'randint', (['(0)', 'self.bounds[i]'], {}), '(0, self.bounds[i])\n', (4145, 4164), False, 'from random import randint, uniform\n'), ((3823, 3836), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3830, 3836), False, 'from random import randint, uniform\n')] |
"""
Constraints class used to specify the density constraints of the topology
optimisation problem. It contains functions for minimum and maximum element
density in the upcomming iteration and the magnitude of the volume constraint
function itself of the current design. This version of the code is used for the
global compliance minimization.
<NAME>
Aerospace Structures and Materials Department TU Delft
2018
"""
import numpy as np
class DensityConstraint(object):
    """
    Density-constraint helper for the MMA update scheme used in global
    compliance minimization.

    Stores the mesh dimensions, the per-iteration move limit and the
    volume fraction, and exposes the admissible density bounds for the
    next iteration together with the value of the volume-constraint
    function for the current design. The object itself is never modified
    by the iterations.

    Parameters
    ----------
    nelx : int
        Number of elements in x direction.
    nely : int
        Number of elements in y direction.
    move : float
        Maximum change in density of an element over one iteration.
    volume_frac : float
        Maximum volume that can be filled with material.
    density_min : float, optional
        Minimum density, 0.0 if not specified.
    density_max : float, optional
        Maximum density, 1.0 if not specified.

    Attributes
    ----------
    nelx, nely, move, volume_frac, density_min, density_max
        As the parameters above.
    volume_derivative : 2D array size(1, nelx*nely)
        Sensitivity of the volume constraint to each element density.
    """
    def __init__(self, nelx, nely, move, volume_frac, density_min=0.0, density_max=1.0):
        self.nelx = nelx
        self.nely = nely
        self.move = move
        self.volume_frac = volume_frac
        # The volume constraint is linear in the densities, so its gradient
        # is the same constant for every element.
        self.volume_derivative = np.full((1, nely*nelx), 1/(nelx*nely*volume_frac))
        self.density_min = density_min
        self.density_max = density_max

    def xmin(self, x):
        """
        Lower bound on each element density for the coming iteration: the
        current density minus the move limit, clipped from below at
        ``density_min``.

        Parameters
        ----------
        x : 2D array size(nely, nelx)
            Density distribution of this iteration.

        Returns
        -------
        2D array size(nely, nelx)
            Minimum admissible density values for the update scheme.
        """
        lower = x - self.move
        return np.maximum(lower, self.density_min)

    def xmax(self, x):
        """
        Upper bound on each element density for the coming iteration: the
        current density plus the move limit, clipped from above at
        ``density_max``.

        Parameters
        ----------
        x : 2D array size(nely, nelx)
            Density distribution of this iteration.

        Returns
        -------
        2D array size(nely, nelx)
            Maximum admissible density values after updating.
        """
        upper = x + self.move
        return np.minimum(upper, self.density_max)

    def current_volconstrain(self, x):
        """
        Current magnitude of the volume constraint function:

        .. math::
            V_{\\text{constraint}} = \\frac{\\sum v_e X_e}{ V_{\\max}}-1

        Parameters
        ----------
        x : 2D array size(nely, nelx)
            Density distribution of this iteration.

        Returns
        -------
        float
            Current value of the density constraint function (negative
            while the design is below the volume budget).
        """
        volume_cap = self.nelx*self.nely*self.volume_frac
        return np.sum(x)/volume_cap - 1
| [
"numpy.sum",
"numpy.minimum",
"numpy.maximum",
"numpy.ones"
] | [((2849, 2880), 'numpy.maximum', 'np.maximum', (['xmin', '(x - self.move)'], {}), '(xmin, x - self.move)\n', (2859, 2880), True, 'import numpy as np\n'), ((3406, 3437), 'numpy.minimum', 'np.minimum', (['xmax', '(x + self.move)'], {}), '(xmax, x + self.move)\n', (3416, 3437), True, 'import numpy as np\n'), ((2235, 2260), 'numpy.ones', 'np.ones', (['(1, nely * nelx)'], {}), '((1, nely * nelx))\n', (2242, 2260), True, 'import numpy as np\n'), ((2802, 2833), 'numpy.ones', 'np.ones', (['(self.nely, self.nelx)'], {}), '((self.nely, self.nelx))\n', (2809, 2833), True, 'import numpy as np\n'), ((3359, 3390), 'numpy.ones', 'np.ones', (['(self.nely, self.nelx)'], {}), '((self.nely, self.nelx))\n', (3366, 3390), True, 'import numpy as np\n'), ((3949, 3958), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (3955, 3958), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from primitives_ubc.regCCFS.src.utils.commonUtils import sVT
from primitives_ubc.regCCFS.src.utils.commonUtils import is_numeric
from primitives_ubc.regCCFS.src.utils.commonUtils import makeSureString
from primitives_ubc.regCCFS.src.prediction_utils.replicate_input_process import replicateInputProcess
def processInputData(XTrainRC, bOrdinal=None, XTestRC=None, bNaNtoMean=False, FNormalize=True):
    """
    Process input features, expanding categoricals and converting to zScores.
    Parameters
    ----------
    XTrainRC: Unprocessed input features, can be a numerical array, a cell
              array or a table. Each row is a seperate data point.
    bOrdinal: Logical array indicating if the corresponding feature is
              ordinal (i.e. numerical or order categorical). The default
              treatment is that numerical inputs are ordinal and strings
              are not. If a feature contains for numerical features and
              strings, it is presumed to not be ordinal unless there is
              only a single string category, in which case this is
              presumed to indicate missing data.
    XTestRC: Additional data to be transformed. This is seperate to the training
             data for the purpose of Z-scores and to avoid using any features /
             categories that do not appear in the training data.
    bNaNtoMean: Replace NaNs with the mean, default false.
    FNormalize: Normalize the processed features, default true.
    Returns
    -------
    XTrain: Processed input features
    iFeatureNum: Array idenitifying groups of expanded features.
                 Expanded features with the same iFeatureNum value come
                 from the same original non-ordinal feature.
    inputProcessDetails: Details required to convert new data in the same way
    XTest: Additional processed input features (only returned when XTestRC
           is supplied)
    featureNames: Names of the expanded features. Variable names are
                  taken from the table properties if in the input is a
                  cell. For expanded categorical features the name of the
                  category is also included.
    """
    D = XTrainRC.shape[1]
    XCat_exist = False
    if isinstance(XTrainRC, pd.DataFrame):
        featureNamesOrig = np.array(list(XTrainRC.columns.values))
        # Rename pandas column for indexing convenience.
        # NOTE: this mutates the caller's DataFrame (columns are renamed, and
        # single-string cells may be set to NaN further down).
        new_col_names = [idx for idx in range(len(featureNamesOrig))]
        XTrainRC.columns = new_col_names
    else:
        featureNamesOrig = np.array([f'Var_{idx}' for idx in range(XTrainRC.shape[1])])
    if bOrdinal is None:  # FIX: was `== None`, which misbehaves for array inputs
        if isinstance(XTrainRC, np.ndarray):
            # Default is that if input is all numeric, everything is treated as
            # ordinal
            bOrdinal = np.array([True] * D)
        else:
            bNumeric = is_numeric(XTrainRC, compress=False)
            if np.all(bNumeric):
                # Numeric features treated as ordinal
                bOrdinal = np.array([True] * D)
                XCat_exist = False
            else:
                # Features with more than one unique string taken as non-ordinal
                iContainsString = (np.sum(~bNumeric, axis=0) > 0).ravel().nonzero()[0]
                nStr = np.zeros((XTrainRC.shape[1]), dtype=int)
                for n in iContainsString.flatten(order='F'):
                    x_unique = np.unique(XTrainRC.loc[~bNumeric[:, n], n])
                    nStr[n] = len(x_unique)
                bOrdinal = nStr < 2
                # Features with only a single unqiue string and otherwise
                # numeric treated also treated as ordinal with the string
                # taken to give a missing value
                iSingleStr = (nStr == 1).ravel().nonzero()[0]
                for n in iSingleStr:
                    XTrainRC.loc[~bNumeric[:, n], n] = np.nan
                XCat_exist = True
    elif len(bOrdinal) != XTrainRC.shape[1]:
        # FIX: this previously read `assert (True), '...'`, which can never
        # fire; a size mismatch now actually raises.
        raise ValueError('bOrdinal must match size of XTrainRC!')
    # Accept plain lists/tuples as masks as well as ndarrays: `~bOrdinal`
    # below requires a boolean array. No-op when already a boolean ndarray.
    bOrdinal = np.asarray(bOrdinal, dtype=bool)
    # Numerical Features
    if isinstance(XTrainRC, pd.DataFrame):
        # Anything not numeric in the ordinal features taken to be missing
        # values
        XTrain = XTrainRC.loc[:, bOrdinal]
        bNumeric = is_numeric(XTrain, compress=False)
        bNumeric = pd.DataFrame(bNumeric, dtype=type(True))
        XTrain[~bNumeric] = np.nan
        XTrain = XTrain.to_numpy(dtype=float)
    else:
        XTrain = XTrainRC[:, bOrdinal]
    # Categorical Features
    if isinstance(XTrainRC, pd.DataFrame) and XCat_exist:
        XCat = XTrainRC.loc[:, ~bOrdinal]
        XCat = makeSureString(XCat, nSigFigTol=10)
        # Previous properties
        iFeatureNum = list(range(XTrain.shape[1]))
        featureNames = featureNamesOrig[bOrdinal]
        featureBaseNames = featureNamesOrig[~bOrdinal]
        # Collect Categorical features
        Cats = {}
        iFeatureNum = np.array([iFeatureNum], dtype=int)
        # Expand the categorical features (one-hot encoding); all expanded
        # columns of one original feature share the same iFeatureNum value.
        for n in range(XCat.shape[1]):
            cats_unique = np.unique(XCat.iloc[:, n])
            Cats[n] = cats_unique
            newNames = np.array([f'Cat_{name}' for name in cats_unique])
            featureNames = np.concatenate((featureNames, newNames))
            nCats = len(cats_unique)
            # This is setup so that any trivial features are not included
            if nCats==1:
                continue
            sizeSoFar = iFeatureNum.shape[1]
            if len(iFeatureNum) == 0:
                iFeatureNum = np.ones((1,nCats))
            else:
                iFeatureNum = np.concatenate((iFeatureNum, (iFeatureNum[:, -1] + 1) * np.ones((1,nCats))), axis=1).astype(float)
            XTrain = np.concatenate((XTrain, np.zeros((XTrain.shape[0], nCats))), axis=1)
            for c in range(nCats):
                XTrain[XCat.iloc[:, n] == cats_unique[c], (sizeSoFar+c)] = 1
        # Remove single dimension if any
        iFeatureNum = np.squeeze(iFeatureNum)
    else:
        # NOTE(review): when a user-supplied bOrdinal marks some DataFrame
        # columns as non-ordinal, XCat_exist stays False and those columns
        # are silently dropped here — confirm this is intended.
        Cats = {}
        iFeatureNum = np.arange(XTrain.shape[1]) * 1.0
        featureNames = featureNamesOrig[bOrdinal]
        featureBaseNames = featureNamesOrig[~bOrdinal]
    if FNormalize:
        # Convert to Z-scores, Normalize feature vectors
        mu_XTrain = np.nanmean(XTrain, axis=0)
        std_XTrain = np.nanstd(XTrain, axis=0, ddof=1)
        # Guard against division by (near) zero for constant features.
        std_XTrain[abs(std_XTrain)<1e-10] = 1.0
        XTrain = np.divide(np.subtract(XTrain, mu_XTrain), std_XTrain)
    else:
        mu_XTrain = 0.0
        std_XTrain = 1.0
    if bNaNtoMean:
        # With FNormalize on, 0 is the column mean in z-score space; without
        # it this is plain zero imputation.
        XTrain[np.isnan(XTrain)] = 0.0
    # If required, generate function for converting additional data and
    # calculate conversion for any test data provided.
    inputProcessDetails = {}
    inputProcessDetails["Cats"] = Cats # {0: array(['False', 'True'], dtype=object), 1: array(['f', 't'], dtype=object)}
    inputProcessDetails['XCat_exist'] = XCat_exist
    inputProcessDetails['bOrdinal'] = bOrdinal
    inputProcessDetails['mu_XTrain'] = mu_XTrain
    inputProcessDetails['std_XTrain'] = std_XTrain
    inputProcessDetails['bNaNtoMean'] = bNaNtoMean
    if XTestRC is None:  # FIX: `== None` raises for DataFrame inputs
        return XTrain, iFeatureNum, inputProcessDetails, featureNames
    XTest = replicateInputProcess(Xraw=XTestRC, InputProcessDetails=inputProcessDetails)
    return XTrain, iFeatureNum, inputProcessDetails, XTest, featureNames
| [
"numpy.sum",
"numpy.concatenate",
"primitives_ubc.regCCFS.src.utils.commonUtils.is_numeric",
"numpy.subtract",
"numpy.nanstd",
"numpy.unique",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"primitives_ubc.regCCFS.src.utils.commonUtils.makeSureString",
"numpy.array",
"numpy.arange",
"primitives... | [((7270, 7346), 'primitives_ubc.regCCFS.src.prediction_utils.replicate_input_process.replicateInputProcess', 'replicateInputProcess', ([], {'Xraw': 'XTestRC', 'InputProcessDetails': 'inputProcessDetails'}), '(Xraw=XTestRC, InputProcessDetails=inputProcessDetails)\n', (7291, 7346), False, 'from primitives_ubc.regCCFS.src.prediction_utils.replicate_input_process import replicateInputProcess\n'), ((4261, 4295), 'primitives_ubc.regCCFS.src.utils.commonUtils.is_numeric', 'is_numeric', (['XTrain'], {'compress': '(False)'}), '(XTrain, compress=False)\n', (4271, 4295), False, 'from primitives_ubc.regCCFS.src.utils.commonUtils import is_numeric\n'), ((4629, 4664), 'primitives_ubc.regCCFS.src.utils.commonUtils.makeSureString', 'makeSureString', (['XCat'], {'nSigFigTol': '(10)'}), '(XCat, nSigFigTol=10)\n', (4643, 4664), False, 'from primitives_ubc.regCCFS.src.utils.commonUtils import makeSureString\n'), ((4931, 4965), 'numpy.array', 'np.array', (['[iFeatureNum]'], {'dtype': 'int'}), '([iFeatureNum], dtype=int)\n', (4939, 4965), True, 'import numpy as np\n'), ((5993, 6016), 'numpy.squeeze', 'np.squeeze', (['iFeatureNum'], {}), '(iFeatureNum)\n', (6003, 6016), True, 'import numpy as np\n'), ((6305, 6331), 'numpy.nanmean', 'np.nanmean', (['XTrain'], {'axis': '(0)'}), '(XTrain, axis=0)\n', (6315, 6331), True, 'import numpy as np\n'), ((6353, 6386), 'numpy.nanstd', 'np.nanstd', (['XTrain'], {'axis': '(0)', 'ddof': '(1)'}), '(XTrain, axis=0, ddof=1)\n', (6362, 6386), True, 'import numpy as np\n'), ((2804, 2824), 'numpy.array', 'np.array', (['([True] * D)'], {}), '([True] * D)\n', (2812, 2824), True, 'import numpy as np\n'), ((2862, 2898), 'primitives_ubc.regCCFS.src.utils.commonUtils.is_numeric', 'is_numeric', (['XTrainRC'], {'compress': '(False)'}), '(XTrainRC, compress=False)\n', (2872, 2898), False, 'from primitives_ubc.regCCFS.src.utils.commonUtils import is_numeric\n'), ((2914, 2930), 'numpy.all', 'np.all', (['bNumeric'], {}), '(bNumeric)\n', (2920, 2930), 
True, 'import numpy as np\n'), ((5074, 5100), 'numpy.unique', 'np.unique', (['XCat.iloc[:, n]'], {}), '(XCat.iloc[:, n])\n', (5083, 5100), True, 'import numpy as np\n'), ((5167, 5216), 'numpy.array', 'np.array', (["[f'Cat_{name}' for name in cats_unique]"], {}), "([f'Cat_{name}' for name in cats_unique])\n", (5175, 5216), True, 'import numpy as np\n'), ((5244, 5284), 'numpy.concatenate', 'np.concatenate', (['(featureNames, newNames)'], {}), '((featureNames, newNames))\n', (5258, 5284), True, 'import numpy as np\n'), ((6069, 6095), 'numpy.arange', 'np.arange', (['XTrain.shape[1]'], {}), '(XTrain.shape[1])\n', (6078, 6095), True, 'import numpy as np\n'), ((6463, 6493), 'numpy.subtract', 'np.subtract', (['XTrain', 'mu_XTrain'], {}), '(XTrain, mu_XTrain)\n', (6474, 6493), True, 'import numpy as np\n'), ((6602, 6618), 'numpy.isnan', 'np.isnan', (['XTrain'], {}), '(XTrain)\n', (6610, 6618), True, 'import numpy as np\n'), ((2663, 2675), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2671, 2675), True, 'import numpy as np\n'), ((3013, 3033), 'numpy.array', 'np.array', (['([True] * D)'], {}), '([True] * D)\n', (3021, 3033), True, 'import numpy as np\n'), ((3278, 3316), 'numpy.zeros', 'np.zeros', (['XTrainRC.shape[1]'], {'dtype': 'int'}), '(XTrainRC.shape[1], dtype=int)\n', (3286, 3316), True, 'import numpy as np\n'), ((5560, 5579), 'numpy.ones', 'np.ones', (['(1, nCats)'], {}), '((1, nCats))\n', (5567, 5579), True, 'import numpy as np\n'), ((3411, 3454), 'numpy.unique', 'np.unique', (['XTrainRC.loc[~bNumeric[:, n], n]'], {}), '(XTrainRC.loc[~bNumeric[:, n], n])\n', (3420, 3454), True, 'import numpy as np\n'), ((5772, 5806), 'numpy.zeros', 'np.zeros', (['(XTrain.shape[0], nCats)'], {}), '((XTrain.shape[0], nCats))\n', (5780, 5806), True, 'import numpy as np\n'), ((5683, 5702), 'numpy.ones', 'np.ones', (['(1, nCats)'], {}), '((1, nCats))\n', (5690, 5702), True, 'import numpy as np\n'), ((3203, 3228), 'numpy.sum', 'np.sum', (['(~bNumeric)'], {'axis': '(0)'}), '(~bNumeric, 
axis=0)\n', (3209, 3228), True, 'import numpy as np\n')] |
import numpy as np
import os
from tqdm import tqdm
from src.readers.reader_support_functions import *
from src.readers.reader import *
__all__=['get_tke_per_unit_area']
def get_tke_per_unit_area(configFile):
    '''
    Compute the turbulent kinetic energy (TKE) per unit area on a series of
    sampling planes written under postProcessing/surfaces.

    Input
    -----
    configFile: path of configuration file

    Output
    ------
    coordPlane: plane-normal coordinate of each plane (normalized by h)
    tkePerArea: TKE per unit plane area, non-dimensionalized by ubar**2
    '''
    configDict = config_to_dict(configFile)
    # read data from configDict:
    # file path details:
    filePath = os.getcwd()
    filePath = filePath + '/postProcessing/surfaces'
    tDir = get_time_dir(filePath, configDict)
    filePath = filePath + '/' + tDir
    # non-dim parameters: reference length h and reference velocity ubar
    h = float( configDict["h"] )
    ubar = float( configDict["ubar"] )
    # patch and AOI (area-of-interest) details:
    patchName = configDict["patchName"]
    nPlanes = int( configDict["nPlanes"] )
    dir1 = configDict["direction1"]
    dir2 = configDict["direction2"]
    # in-plane bounding box of the AOI, in normalized coordinates
    minX, maxX = dict(), dict()
    minX['x1'] = float( configDict["x1min"] )
    minX['x2'] = float( configDict["x2min"] )
    maxX['x1'] = float( configDict["x1max"] )
    maxX['x2'] = float( configDict["x2max"] )
    # allocate outputs: plane coordinate and TKE per unit area
    coordPlane = np.zeros(nPlanes)
    tkePerArea = np.zeros(nPlanes)
    print('\n calculating tke per unit area ...')
    for i in tqdm( range(nPlanes), ncols=100 ):
        # raw surface sample of the Reynolds-stress tensor UPrime2Mean for plane i+1
        dataPath = filePath + '/UPrime2Mean_' + patchName + str(i+1) + '.raw'
        coordCols, normalCol = get_columns(dir1, dir2)
        data = get_data(dataPath, skiprows=2)
        # coordinate data, normalized by h:
        coords = data[:, coordCols]
        coords = coords/h
        # restrict to the points inside the AOI bounding box
        indices, nPts = get_indices_npts(coords, minX, maxX)
        x1, x2 = coords[indices, 0], coords[indices, 1]
        coordPlane[i] = data[0, normalCol]/h
        # calculate area of the plane (AOI bounding box):
        area = abs( x1.max() - x1.min() ) * abs( x2.max() - x2.min() )
        # tke data: sum of the diagonal Reynolds stresses (columns 3, 6, 8
        # of the .raw file); note no 1/2 factor is applied here
        tke = data[indices, 3] + data[indices, 6] + data[indices, 8]
        tkePerArea[i] = np.sum( tke ) / (area * ubar**2)
    return coordPlane, tkePerArea
| [
"os.getcwd",
"numpy.zeros",
"numpy.sum"
] | [((534, 545), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (543, 545), False, 'import os\n'), ((1290, 1307), 'numpy.zeros', 'np.zeros', (['nPlanes'], {}), '(nPlanes)\n', (1298, 1307), True, 'import numpy as np\n'), ((1325, 1342), 'numpy.zeros', 'np.zeros', (['nPlanes'], {}), '(nPlanes)\n', (1333, 1342), True, 'import numpy as np\n'), ((2122, 2133), 'numpy.sum', 'np.sum', (['tke'], {}), '(tke)\n', (2128, 2133), True, 'import numpy as np\n')] |
# %%
# Plot D4 adsorption isotherms on PCN-777 at two temperatures and the
# isosteric enthalpy of adsorption derived from them.
import numpy as np
import pygaps as pg
import matplotlib.pyplot as plt

plt.rcParams['mathtext.fontset'] = 'dejavusans'
plt.style.use('seaborn-muted')

# read files
d4_30 = pg.isotherm_from_csv("../data/d4sorp/iso_PCN777_D4_303_c1.csv")
d4_40 = pg.isotherm_from_csv("../data/d4sorp/iso_PCN777_D4_313.csv")
isos = [d4_30, d4_40]

# Calculate isosteric enthalpy (loading must be molar for the calculation)
for iso in isos:
    iso.convert_loading(basis_to="molar", unit_to="mmol")
res = pg.isosteric_enthalpy(
    isos,
    loading_points=np.linspace(0.3, 6.0, 300),
)
# convert molar loading (mmol) to mass loading (g/g) for plotting
res['loading_g'] = res['loading'] * iso.adsorbate.molar_mass() / 1000

# Plot isotherms and results
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
# (a) isotherms on an absolute pressure axis
pg.plot_iso(
    isos,
    ax=axs[0],
    lgd_keys=['temperature'],
    lgd_pos=None,
    loading_basis='mass',
    loading_unit='g',
    color=['C0', "C7"],
    branch="all-nol",
    pressure_unit='Pa',
    y1_range=[-0.1, 2.1],
)
axs[0].set_ylabel(r"Loading ($g\ g^{-1}$)")
axs[0].set_xlabel(r"Pressure ($Pa$)")
# (b) same isotherms on a relative-pressure axis
pg.plot_iso(
    isos,
    ax=axs[1],
    lgd_keys=['temperature'],
    lgd_pos="inner",
    loading_basis='mass',
    loading_unit='g',
    pressure_mode='relative%',
    color=['C0', "C7"],
    branch="all-nol",
    pressure_unit='Pa',
    y1_range=[-0.1, 2.1],
    lgd_style=dict(loc='center left'),
)
axs[1].set_ylabel(r"Loading ($g\ g^{-1}$)")
axs[1].set_xlabel(r"Pressure (%$p/p^0$)")
# (c) isosteric enthalpy with its standard error as error bars
axs[2].errorbar(
    res['loading_g'],
    res['isosteric_enthalpy'],
    yerr=res['std_errs'],
    marker="o",
    color="C2",
    markersize=2,
)
axs[2].set_xlabel(r"Loading ($g\ g^{-1}$)", fontsize=15)
axs[2].set_ylabel(r"Isosteric Enthalpy ($-kJ\ mol^{-1}$)", fontsize=15)
axs[2].set_ylim(-5, 105)
axs[2].set_xlim(0, 2.1)
axs[2].tick_params(axis='both', labelsize=13)
# subplot labels
axs[0].text(0.05, 0.9, "(a)", fontsize=15, transform=axs[0].transAxes)
axs[1].text(0.05, 0.9, "(b)", fontsize=15, transform=axs[1].transAxes)
axs[2].text(0.05, 0.9, "(c)", fontsize=15, transform=axs[2].transAxes)
# save in vector and raster formats
fig.savefig("../figs/isosteric-enth.svg", bbox_inches='tight')
fig.savefig("../figs/isosteric-enth.pdf", bbox_inches='tight')
fig.savefig("../figs/isosteric-enth.png", dpi=300, bbox_inches='tight')
# %% | [
"pygaps.plot_iso",
"pygaps.isotherm_from_csv",
"matplotlib.pyplot.style.use",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((125, 155), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-muted"""'], {}), "('seaborn-muted')\n", (138, 155), True, 'import matplotlib.pyplot as plt\n'), ((179, 242), 'pygaps.isotherm_from_csv', 'pg.isotherm_from_csv', (['"""../data/d4sorp/iso_PCN777_D4_303_c1.csv"""'], {}), "('../data/d4sorp/iso_PCN777_D4_303_c1.csv')\n", (199, 242), True, 'import pygaps as pg\n'), ((251, 311), 'pygaps.isotherm_from_csv', 'pg.isotherm_from_csv', (['"""../data/d4sorp/iso_PCN777_D4_313.csv"""'], {}), "('../data/d4sorp/iso_PCN777_D4_313.csv')\n", (271, 311), True, 'import pygaps as pg\n'), ((643, 678), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (655, 678), True, 'import matplotlib.pyplot as plt\n'), ((680, 877), 'pygaps.plot_iso', 'pg.plot_iso', (['isos'], {'ax': 'axs[0]', 'lgd_keys': "['temperature']", 'lgd_pos': 'None', 'loading_basis': '"""mass"""', 'loading_unit': '"""g"""', 'color': "['C0', 'C7']", 'branch': '"""all-nol"""', 'pressure_unit': '"""Pa"""', 'y1_range': '[-0.1, 2.1]'}), "(isos, ax=axs[0], lgd_keys=['temperature'], lgd_pos=None,\n loading_basis='mass', loading_unit='g', color=['C0', 'C7'], branch=\n 'all-nol', pressure_unit='Pa', y1_range=[-0.1, 2.1])\n", (691, 877), True, 'import pygaps as pg\n'), ((501, 527), 'numpy.linspace', 'np.linspace', (['(0.3)', '(6.0)', '(300)'], {}), '(0.3, 6.0, 300)\n', (512, 527), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import os
fs = 31250.0            # sampling frequency [Hz]
cutoff_lpass = 3        # lower cutoff frequency [Hz]
cutoff_hpass = 500      # upper cutoff frequency [Hz]
ch1 = 3                 # ADC column used as the I (in-phase) channel
ch2 = 4                 # ADC column used as the Q (quadrature) channel
remove_samples = 10000  # number of samples removed from the beginning of each recording
def raspi_import(path, channels=5):
    """
    Import data produced using adc_sampler.c.

    The file layout is one float64 sample period followed by interleaved
    uint16 samples, one value per channel.

    Parameters
    ----------
    path : str
        Path to the binary file written by adc_sampler.c.
    channels : int
        Number of interleaved ADC channels in the file.

    Returns
    -------
    sample_period : float
        Sample period (first float64 in the file).
    data : ndarray
        Sampled data, dimensions NUM_SAMPLES x NUM_CHANNELS.
    """
    # BUG FIX: open in binary mode -- np.fromfile on a text-mode handle is
    # unreliable for binary data (newline translation on Windows).
    with open(path, 'rb') as fid:
        sample_period = np.fromfile(fid, count=1, dtype=float)[0]
        data = np.fromfile(fid, dtype=np.uint16)
    data = data.reshape((-1, channels))
    return sample_period, data
def butter_coeff(fs, cutoff, order):
    """Return the (b, a) coefficients of a digital low-pass Butterworth filter."""
    # scipy expects the cutoff normalized to the Nyquist frequency
    wn = cutoff / (0.5 * fs)
    return signal.butter(order, wn, btype='low', analog=False)
def butter_lowpass_filter(data, fs=fs, cutoff=cutoff_hpass, order=6):
    """Low-pass filter *data* with a Butterworth filter of the given order."""
    coeffs = butter_coeff(fs, cutoff, order)
    return signal.lfilter(coeffs[0], coeffs[1], data)
def butter_bandpass(sig, fs=fs, order=6, lowcut=cutoff_lpass, highcut=cutoff_hpass):
    """Band-pass filter *sig* between lowcut and highcut [Hz] (Butterworth, SOS form)."""
    nyquist = 0.5 * fs
    # second-order sections are numerically safer than (b, a) at higher orders
    sos = signal.butter(order, [lowcut / nyquist, highcut / nyquist],
                        analog=False, btype='band', output='sos')
    return signal.sosfilt(sos, sig)
def complex_ffi(data, fs, ch1=ch1, ch2=ch2, band = 0, plot_doppler = 0, plot_IF = 0):
    """
    Build the complex IF signal I + jQ from two ADC channels, filter it,
    and return the magnitude of its zero-padded FFT.

    data: 2-D sample array as returned by raspi_import
    fs: sampling frequency [Hz]
    ch1, ch2: column indices of the I and Q channels
    band: if truthy, band-pass filter the channels instead of low-pass
    plot_doppler, plot_IF: if truthy, show diagnostic plots
    Returns (fft_magnitude, freqs); both positive and negative
    frequencies are present since the input is complex.
    """
    # discard the start of the recording (transient)
    IFI = data[remove_samples:, ch1]
    IFQ = data[remove_samples:, ch2]
    # remove DC offset / linear drift
    IFI = signal.detrend(IFI, axis=0)
    IFQ = signal.detrend(IFQ, axis=0)
    if(band):
        IFI = butter_bandpass(IFI)
        IFQ = butter_bandpass(IFQ)
    else:
        IFI = butter_lowpass_filter(IFI)
        IFQ = butter_lowpass_filter(IFQ)
    # complex IF signal: sign of its spectrum peak encodes direction
    IF = IFI + 1j * IFQ
    # zero-pad to 500000 points for finer frequency resolution
    fft = np.fft.fft(IF, n=500000)
    fft = abs(fft)
    N = len(fft)
    freqs = np.fft.fftfreq(N, 1 / fs)
    if(plot_IF):
        plt.plot(IFI)
        plt.plot(IFQ)
    if(plot_doppler):
        plt.plot(freqs, 10*np.log10(fft))
    if(plot_doppler or plot_IF):
        plt.show()
    return fft, freqs
def dopplerSpeed(fft, freqs, f0=24.13e9, c=299792458):
    """Radial velocity [m/s] from the frequency of the strongest FFT bin.

    f0 is the radar carrier frequency [Hz], c the speed of light [m/s].
    """
    peak_bin = np.argmax(fft)
    doppler_shift = freqs[peak_bin]
    # v_r = f_d * c / (2 * f0)
    return doppler_shift * c / (2 * f0)
def plotRaw(filename, filt = 0, detrend = 0):
    """
    Plot the raw I and Q channels of a recording.

    filt: if truthy, low-pass filter the channels first
    detrend: if truthy, remove DC offset / linear drift first
    """
    sampleR, data = raspi_import(filename)
    IFI = data[remove_samples:, ch1]
    IFQ = data[remove_samples:, ch2]
    if(detrend):
        IFI = signal.detrend(IFI, axis=0)
        IFQ = signal.detrend(IFQ, axis=0)
    if(filt):
        IFI = butter_lowpass_filter(IFI)
        IFQ = butter_lowpass_filter(IFQ)
    fig, ax = plt.subplots()
    ax.plot(IFI)
    ax.plot(IFQ)
    ax.set(xlabel='Sample', ylabel='Amplitude') #, fontsize = 'large'
    ax.grid()
    ax.legend(['I', 'Q'], fontsize = 'large', loc='upper right')
    plt.show()
def calculate_speed_from_file(filename, band = 0):
    """Load one recording and return its Doppler velocity estimate [m/s]."""
    # NOTE(review): `band` is accepted but not forwarded to complex_ffi
    _, samples = raspi_import(filename)
    spectrum, frequencies = complex_ffi(samples, fs, plot_doppler=0, plot_IF=0)
    return dopplerSpeed(spectrum, frequencies)
def signaltonoise(sig, axis=0, ddof=0):
    """Return (std, mean, mean/std) of *sig* along *axis*."""
    arr = np.asanyarray(sig)
    mu = arr.mean(axis)
    sigma = arr.std(axis=axis, ddof=ddof)
    return sigma, mu, mu / sigma
def plotSpectrum(sig):
    """Show the magnitude spectrum of *sig* at the module sampling rate fs."""
    plt.magnitude_spectrum(sig, fs)
    plt.show()
def plotDopplerSpectrum(filename, band = 0):
    """Plot the Doppler FFT magnitude (log y-axis) of one recording."""
    # NOTE(review): `band` is accepted but not forwarded to complex_ffi
    _, data = raspi_import(filename)
    fft, freqs = complex_ffi(data, fs, plot_doppler=0, plot_IF=0)
    fig, ax = plt.subplots()
    plt.semilogy(freqs, fft)
    ax.set(xlabel='Frekvens[Hz]', ylabel='FFT amplitude')
    plt.show()
def plot_compare_velocity(directory):
    """
    Scatter-plot radar-estimated velocity against the reference velocity
    (decoded from the file name) for every recording in *directory*.
    """
    data_measured=[]
    data_radar=[]
    for filename in os.listdir(directory):
        measured_speed = 0
        _, data = raspi_import(os.path.join(directory, filename))
        # noise statistics are computed but unused in this plot
        sd1, mean1, snr1 = signaltonoise(data[remove_samples:, 3])
        sd2, mean2, snr2 = signaltonoise(data[remove_samples:, 4])
        fft, freqs = complex_ffi(data, fs, plot_IF=0, plot_doppler=0)
        speed = dopplerSpeed(fft, freqs)
        # reference velocity is encoded in the file name
        # ("fra" = away from radar, "mot" = toward the radar)
        if(filename.find("fra_0.34") != -1): measured_speed = 0.34
        elif(filename.find("fra_1.33") != -1): measured_speed = 1.33
        elif(filename.find("mot_0.34") != -1): measured_speed = -0.34
        elif(filename.find("mot_1") != -1): measured_speed = -1.0
        else: measured_speed = 0
        data_measured.append(measured_speed)
        data_radar.append(speed)
    fig, ax = plt.subplots()
    ax.scatter(data_measured, data_radar)
    # dashed identity line for reference
    line = [min(data_radar)-0.2,max(data_radar)+0.2]
    ax.plot(line, line, linestyle="dashed")
    ax.set(ylabel='Velocity from radar (m/s)', xlabel='Veleocity from stopwatch (m/s)')
    ax.grid()
    plt.show()
def print_speed_from_dir(directory, band = 0):
    """
    Estimate the Doppler velocity for every recording in *directory* and
    append one CSV row per file to 'data.csv': the estimate, the reference
    velocity decoded from the file name, and noise statistics (std and
    SNR in dB) of the I and Q channels.
    """
    # 'with' guarantees the CSV file is closed even if a recording fails to parse
    with open("data.csv", "a") as f:
        f.write("velocity, measured velocity, SdI, SNRI, SDQ, SNRQ")
        f.write("\n")
        for filename in os.listdir(directory):
            measured_speed = 0
            _, data = raspi_import(os.path.join(directory, filename))
            sd1, mean1, snr1 = signaltonoise(data[remove_samples:, 3])
            sd2, mean2, snr2 = signaltonoise(data[remove_samples:, 4])
            fft, freqs = complex_ffi(data, fs, plot_IF=0, plot_doppler=0)
            speed = dopplerSpeed(fft, freqs)
            # reference velocity is encoded in the file name
            if(filename.find("fra_0.34") != -1): measured_speed = 0.34
            elif(filename.find("fra_1.33") != -1): measured_speed = 1.33
            elif(filename.find("mot_0.34") != -1): measured_speed = -0.34
            elif(filename.find("mot_1") != -1): measured_speed = -1.0
            else: measured_speed = 0
            # SNR is written in dB
            f.write("%.3f, %.3f, %.3f, %.3f, %.3f, %.3f" %(speed, measured_speed, sd1, 20*np.log10(snr1), sd2, 20*np.log10(snr2)))
            f.write("\n")
#1) Plott rådata av (I- og Q-signalene), dopplerspektrum og beregn SNR (fra plottene).
#Plot rå data
#plotRaw("Data_bil/bil_fra_fast_1")
#Plot filtrert og detrend
#plotRaw("Data_bil/bil_fra_fast_1", 1, 1)
#plotDopplerSpectrum("Data_bil/bil_fra_fast_1")
#Plot doppler spektrum
#plotDopplerSpectrum("test/test1")
#print(calculate_speed_from_file("test/test1"))
#2) Analyse av målenøyaktighet og estimat av standardavvik (ha med plott).
#Print data fra alle målinger
#print_speed_from_dir("Radar_0904", band = 0)
#Plot measured speed vs radar speed
plot_compare_velocity("Radar_0904") | [
"os.listdir",
"matplotlib.pyplot.show",
"matplotlib.pyplot.magnitude_spectrum",
"scipy.signal.sosfilt",
"matplotlib.pyplot.plot",
"scipy.signal.lfilter",
"numpy.fft.fft",
"numpy.fromfile",
"numpy.asanyarray",
"numpy.argmax",
"os.path.join",
"numpy.log10",
"numpy.fft.fftfreq",
"scipy.signal... | [((897, 959), 'scipy.signal.butter', 'signal.butter', (['order', 'normal_cutoff'], {'btype': '"""low"""', 'analog': '(False)'}), "(order, normal_cutoff, btype='low', analog=False)\n", (910, 959), False, 'from scipy import signal\n'), ((1109, 1135), 'scipy.signal.lfilter', 'signal.lfilter', (['b', 'a', 'data'], {}), '(b, a, data)\n', (1123, 1135), False, 'from scipy import signal\n'), ((1361, 1436), 'scipy.signal.butter', 'signal.butter', (['order', '[low, high]'], {'analog': '(False)', 'btype': '"""band"""', 'output': '"""sos"""'}), "(order, [low, high], analog=False, btype='band', output='sos')\n", (1374, 1436), False, 'from scipy import signal\n'), ((1448, 1472), 'scipy.signal.sosfilt', 'signal.sosfilt', (['sos', 'sig'], {}), '(sos, sig)\n', (1462, 1472), False, 'from scipy import signal\n'), ((1667, 1694), 'scipy.signal.detrend', 'signal.detrend', (['IFI'], {'axis': '(0)'}), '(IFI, axis=0)\n', (1681, 1694), False, 'from scipy import signal\n'), ((1706, 1733), 'scipy.signal.detrend', 'signal.detrend', (['IFQ'], {'axis': '(0)'}), '(IFQ, axis=0)\n', (1720, 1733), False, 'from scipy import signal\n'), ((1957, 1981), 'numpy.fft.fft', 'np.fft.fft', (['IF'], {'n': '(500000)'}), '(IF, n=500000)\n', (1967, 1981), True, 'import numpy as np\n'), ((2037, 2062), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['N', '(1 / fs)'], {}), '(N, 1 / fs)\n', (2051, 2062), True, 'import numpy as np\n'), ((2805, 2819), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2817, 2819), True, 'import matplotlib.pyplot as plt\n'), ((3015, 3025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3023, 3025), True, 'import matplotlib.pyplot as plt\n'), ((3295, 3313), 'numpy.asanyarray', 'np.asanyarray', (['sig'], {}), '(sig)\n', (3308, 3313), True, 'import numpy as np\n'), ((3458, 3489), 'matplotlib.pyplot.magnitude_spectrum', 'plt.magnitude_spectrum', (['sig', 'fs'], {}), '(sig, fs)\n', (3480, 3489), True, 'import matplotlib.pyplot as plt\n'), ((3495, 3505), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3503, 3505), True, 'import matplotlib.pyplot as plt\n'), ((3676, 3690), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3688, 3690), True, 'import matplotlib.pyplot as plt\n'), ((3696, 3720), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['freqs', 'fft'], {}), '(freqs, fft)\n', (3708, 3720), True, 'import matplotlib.pyplot as plt\n'), ((3785, 3795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3793, 3795), True, 'import matplotlib.pyplot as plt\n'), ((3899, 3920), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (3909, 3920), False, 'import os\n'), ((4673, 4687), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4685, 4687), True, 'import matplotlib.pyplot as plt\n'), ((4941, 4951), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4949, 4951), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5160), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (5149, 5160), False, 'import os\n'), ((680, 713), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.uint16'}), '(fid, dtype=np.uint16)\n', (691, 713), True, 'import numpy as np\n'), ((2092, 2105), 'matplotlib.pyplot.plot', 'plt.plot', (['IFI'], {}), '(IFI)\n', (2100, 2105), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2128), 'matplotlib.pyplot.plot', 'plt.plot', (['IFQ'], {}), '(IFQ)\n', (2123, 2128), True, 'import matplotlib.pyplot as plt\n'), ((2241, 2251), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2249, 2251), True, 'import matplotlib.pyplot as plt\n'), ((2351, 2365), 'numpy.argmax', 'np.argmax', (['fft'], {}), '(fft)\n', (2360, 2365), True, 'import numpy as np\n'), ((2614, 2641), 'scipy.signal.detrend', 'signal.detrend', (['IFI'], {'axis': '(0)'}), '(IFI, axis=0)\n', (2628, 2641), False, 'from scipy import signal\n'), ((2657, 2684), 'scipy.signal.detrend', 'signal.detrend', (['IFQ'], {'axis': '(0)'}), '(IFQ, axis=0)\n', (2671, 2684), False, 'from 
scipy import signal\n'), ((622, 660), 'numpy.fromfile', 'np.fromfile', (['fid'], {'count': '(1)', 'dtype': 'float'}), '(fid, count=1, dtype=float)\n', (633, 660), True, 'import numpy as np\n'), ((3982, 4015), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (3994, 4015), False, 'import os\n'), ((5248, 5281), 'os.path.join', 'os.path.join', (['directory', 'filename'], {}), '(directory, filename)\n', (5260, 5281), False, 'import os\n'), ((2183, 2196), 'numpy.log10', 'np.log10', (['fft'], {}), '(fft)\n', (2191, 2196), True, 'import numpy as np\n'), ((6350, 6364), 'numpy.log10', 'np.log10', (['snr1'], {}), '(snr1)\n', (6358, 6364), True, 'import numpy as np\n'), ((6375, 6389), 'numpy.log10', 'np.log10', (['snr2'], {}), '(snr2)\n', (6383, 6389), True, 'import numpy as np\n')] |
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
import torch
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE
import dill as pkl
import os
# parse command-line / config options for this evaluation run
conf = ModelConfig()
# choose the relation model implementation requested in the config
if conf.model == 'motifnet':
    from lib.rel_model import RelModel
elif conf.model == 'stanford':
    from lib.rel_model_stanford import RelModelStanford as RelModel
else:
    raise ValueError()
# Visual Genome splits; non-overlapping GT boxes are filtered in sgdet mode
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
                          use_proposals=conf.use_proposals,
                          filter_non_overlap=conf.mode == 'sgdet')
# use val code for test
if conf.test:
    val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
                                               batch_size=conf.batch_size,
                                               num_workers=conf.num_workers,
                                               num_gpus=conf.num_gpus)
# build the scene-graph relation model from the config options
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
                    num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
                    use_resnet=conf.use_resnet, order=conf.order,
                    nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
                    use_proposals=conf.use_proposals,
                    pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
                    pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
                    pooling_dim=conf.pooling_dim,
                    rec_dropout=conf.rec_dropout,
                    use_bias=conf.use_bias,
                    use_tanh=conf.use_tanh,
                    limit_vision=conf.limit_vision
                    )
detector.cuda()
# load the checkpoint weights into the model
ckpt = torch.load(conf.ckpt)
optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
#     det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
#     detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
#     detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
#     detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
#     detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
# accumulates one prediction entry per evaluated image (optionally cached to disk)
all_pred_entries = []
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
    """
    Run the detector on one batch and feed ground truth and predictions
    to the evaluator.

    batch_num: index of the first image of this batch in the val set
    b: one batch from the VGDataLoader
    evaluator: dict of evaluators keyed by mode
    thrs: unused in this function
    """
    det_res = detector[b]
    # with a single GPU the result is one tuple, not a list of per-GPU tuples
    if conf.num_gpus == 1:
        det_res = [det_res]
    for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
        gt_entry = {
            'gt_classes': val.gt_classes[batch_num + i].copy(),
            'gt_relations': val.relationships[batch_num + i].copy(),
            'gt_boxes': val.gt_boxes[batch_num + i].copy(),
        }
        # background (class 0) must never take part in a predicted relation
        assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
        # assert np.all(rels_i[:,2] > 0)
        pred_entry = {
            # boxes were predicted at BOX_SCALE; rescale to image scale
            'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
            'pred_classes': objs_i,
            'pred_rel_inds': rels_i,
            'obj_scores': obj_scores_i,
            'rel_scores': pred_scores_i,
        }
        all_pred_entries.append(pred_entry)
        # compare ground truth with prediction
        evaluator[conf.mode].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
# multi_pred controls whether a subject-object pair may contribute several
# predicted predicates -- NOTE(review): confirm against BasicSceneGraphEvaluator
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
# evaluation done previously and saved as a .pkl file, so restore it and compare gt and predictions
if conf.cache is not None and os.path.exists(conf.cache):
    print("Found {}! Loading from it".format(conf.cache))
    with open(conf.cache,'rb') as f:
        all_pred_entries = pkl.load(f)
    for i, pred_entry in enumerate(tqdm(all_pred_entries)):
        gt_entry = {
            'gt_classes': val.gt_classes[i].copy(),
            'gt_relations': val.relationships[i].copy(),
            'gt_boxes': val.gt_boxes[i].copy(),
        }
        # compare gt and predictions
        evaluator[conf.mode].evaluate_scene_graph_entry(
            gt_entry,
            pred_entry,
        )
    evaluator[conf.mode].print_stats()
# evaluation never done before, so need val_batch to get gt and pred, and compare them
else:
    detector.eval()
    for val_b, batch in enumerate(tqdm(val_loader)):
        val_batch(conf.num_gpus*val_b, batch, evaluator)
    evaluator[conf.mode].print_stats()
if conf.cache is not None:
with open(conf.cache,'wb') as f:
pkl.dump(all_pred_entries, f) | [
"lib.evaluation.sg_eval.BasicSceneGraphEvaluator.all_modes",
"tqdm.tqdm",
"lib.rel_model_stanford.RelModelStanford",
"dataloaders.visual_genome.VG.splits",
"torch.load",
"os.path.exists",
"dataloaders.visual_genome.VGDataLoader.splits",
"dill.load",
"lib.pytorch_misc.optimistic_restore",
"dill.dum... | [((326, 339), 'config.ModelConfig', 'ModelConfig', ([], {}), '()\n', (337, 339), False, 'from config import ModelConfig\n'), ((556, 698), 'dataloaders.visual_genome.VG.splits', 'VG.splits', ([], {'num_val_im': 'conf.val_size', 'filter_duplicate_rels': '(True)', 'use_proposals': 'conf.use_proposals', 'filter_non_overlap': "(conf.mode == 'sgdet')"}), "(num_val_im=conf.val_size, filter_duplicate_rels=True,\n use_proposals=conf.use_proposals, filter_non_overlap=conf.mode == 'sgdet')\n", (565, 698), False, 'from dataloaders.visual_genome import VGDataLoader, VG\n'), ((828, 957), 'dataloaders.visual_genome.VGDataLoader.splits', 'VGDataLoader.splits', (['train', 'val'], {'mode': '"""rel"""', 'batch_size': 'conf.batch_size', 'num_workers': 'conf.num_workers', 'num_gpus': 'conf.num_gpus'}), "(train, val, mode='rel', batch_size=conf.batch_size,\n num_workers=conf.num_workers, num_gpus=conf.num_gpus)\n", (847, 957), False, 'from dataloaders.visual_genome import VGDataLoader, VG\n'), ((1107, 1696), 'lib.rel_model_stanford.RelModelStanford', 'RelModel', ([], {'classes': 'train.ind_to_classes', 'rel_classes': 'train.ind_to_predicates', 'num_gpus': 'conf.num_gpus', 'mode': 'conf.mode', 'require_overlap_det': '(True)', 'use_resnet': 'conf.use_resnet', 'order': 'conf.order', 'nl_edge': 'conf.nl_edge', 'nl_obj': 'conf.nl_obj', 'hidden_dim': 'conf.hidden_dim', 'use_proposals': 'conf.use_proposals', 'pass_in_obj_feats_to_decoder': 'conf.pass_in_obj_feats_to_decoder', 'pass_in_obj_feats_to_edge': 'conf.pass_in_obj_feats_to_edge', 'pooling_dim': 'conf.pooling_dim', 'rec_dropout': 'conf.rec_dropout', 'use_bias': 'conf.use_bias', 'use_tanh': 'conf.use_tanh', 'limit_vision': 'conf.limit_vision'}), '(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,\n num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,\n use_resnet=conf.use_resnet, order=conf.order, nl_edge=conf.nl_edge,\n nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim, use_proposals=conf.\n 
use_proposals, pass_in_obj_feats_to_decoder=conf.\n pass_in_obj_feats_to_decoder, pass_in_obj_feats_to_edge=conf.\n pass_in_obj_feats_to_edge, pooling_dim=conf.pooling_dim, rec_dropout=\n conf.rec_dropout, use_bias=conf.use_bias, use_tanh=conf.use_tanh,\n limit_vision=conf.limit_vision)\n', (1115, 1696), True, 'from lib.rel_model_stanford import RelModelStanford as RelModel\n'), ((1927, 1948), 'torch.load', 'torch.load', (['conf.ckpt'], {}), '(conf.ckpt)\n', (1937, 1948), False, 'import torch\n'), ((1950, 1998), 'lib.pytorch_misc.optimistic_restore', 'optimistic_restore', (['detector', "ckpt['state_dict']"], {}), "(detector, ckpt['state_dict'])\n", (1968, 1998), False, 'from lib.pytorch_misc import optimistic_restore\n'), ((3506, 3572), 'lib.evaluation.sg_eval.BasicSceneGraphEvaluator.all_modes', 'BasicSceneGraphEvaluator.all_modes', ([], {'multiple_preds': 'conf.multi_pred'}), '(multiple_preds=conf.multi_pred)\n', (3540, 3572), False, 'from lib.evaluation.sg_eval import BasicSceneGraphEvaluator\n'), ((3704, 3730), 'os.path.exists', 'os.path.exists', (['conf.cache'], {}), '(conf.cache)\n', (3718, 3730), False, 'import os\n'), ((3854, 3865), 'dill.load', 'pkl.load', (['f'], {}), '(f)\n', (3862, 3865), True, 'import dill as pkl\n'), ((3901, 3923), 'tqdm.tqdm', 'tqdm', (['all_pred_entries'], {}), '(all_pred_entries)\n', (3905, 3923), False, 'from tqdm import tqdm\n'), ((4535, 4551), 'tqdm.tqdm', 'tqdm', (['val_loader'], {}), '(val_loader)\n', (4539, 4551), False, 'from tqdm import tqdm\n'), ((2904, 2936), 'numpy.all', 'np.all', (['(objs_i[rels_i[:, 0]] > 0)'], {}), '(objs_i[rels_i[:, 0]] > 0)\n', (2910, 2936), True, 'import numpy as np\n'), ((2940, 2972), 'numpy.all', 'np.all', (['(objs_i[rels_i[:, 1]] > 0)'], {}), '(objs_i[rels_i[:, 1]] > 0)\n', (2946, 2972), True, 'import numpy as np\n'), ((4736, 4765), 'dill.dump', 'pkl.dump', (['all_pred_entries', 'f'], {}), '(all_pred_entries, f)\n', (4744, 4765), True, 'import dill as pkl\n')] |
import sys
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
try:
from plyfile import PlyData, PlyElement
except:
print('Install python-plyfile from https://github.com/dranjan/python-plyfile (using pip: pip install plyfile')
sys.exit(1)
import time
def main(argv=None):
    """
    Load a Tango point-cloud PLY file (argv[1]) and show it as a 3D
    scatter plot.

    Points are converted from the depth-camera frame (+Z forward, +Y
    down, per the tango_client_api.h comment quoted below) to an
    OpenGL-style frame (+Z toward viewer, +Y up) by flipping Y and Z.

    Exits the process with status 1 on usage or read errors.
    """
    if argv is None:
        argv = sys.argv
    if len(argv) != 2:
        print("Format ply-display ply-file.ply")
        sys.exit(1)
    try:
        plydata = PlyData.read(argv[1])
    except Exception as ex:
        # BUG FIX: report the file we tried to read (argv[1]), not the
        # program name (argv[0])
        print("Error reading ply file %s (%s)." % (argv[1], str(ex)), file=sys.stderr)
        sys.exit(1)
    # According to the comment in https://github.com/googlesamples/tango-examples-c/blob/master/tango_client_api/include/tango_client_api.h:
    # "...+Z points in the direction of the camera's optical axis, perpendicular to the plane of the camera.
    # +X points toward the user's right, and +Y points toward the bottom of the screen. The origin is the focal center
    # of the depth camera."
    # i.e. it resembles that of OpenCV.
    # Convert to OpenGL looking down -Z axis with +X on right and +Y above:
    PCtoGL = np.array([ [1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0] ])
    Xs = []
    Ys = []
    Zs = []
    for vertex in plydata['vertex']:
        X = PCtoGL.dot(np.array([vertex[0], vertex[1], vertex[2]]))
        Xs.append(X[0])
        Ys.append(X[1])
        Zs.append(X[2])
    figure = plt.figure()
    plot3d = figure.add_subplot(111, projection='3d')
    # plot with Z on the horizontal "depth" axis so the view is upright
    plot3d.scatter(Xs, Zs, Ys, c='r', marker='o')
    plot3d.set_xlabel('X')
    plot3d.set_ylabel('Z')
    plot3d.set_zlabel('Y')
    plt.show()
if __name__ == '__main__':
sys.exit(main())
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure",
"numpy.array",
"plyfile.PlyData.read",
"sys.exit"
] | [((1312, 1375), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, -1.0]])\n', (1320, 1375), True, 'import numpy as np\n'), ((1591, 1603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1601, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1787, 1797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1795, 1797), True, 'import matplotlib.pyplot as plt\n'), ((275, 286), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (283, 286), False, 'import sys\n'), ((438, 449), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (446, 449), False, 'import sys\n'), ((475, 496), 'plyfile.PlyData.read', 'PlyData.read', (['argv[1]'], {}), '(argv[1])\n', (487, 496), False, 'from plyfile import PlyData, PlyElement\n'), ((615, 626), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (623, 626), False, 'import sys\n'), ((1468, 1511), 'numpy.array', 'np.array', (['[vertex[0], vertex[1], vertex[2]]'], {}), '([vertex[0], vertex[1], vertex[2]])\n', (1476, 1511), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import os
import math
from scipy.spatial import distance as dist
from collections import OrderedDict
from scipy.optimize import linear_sum_assignment
from filterpy.kalman import KalmanFilter, UnscentedKalmanFilter, MerweScaledSigmaPoints
class Kalman_filter():
    # Constant-velocity Kalman filter for one 2-D point track.
    # State vector: [x, y, vx, vy]^T; measurements are [x, y].
    def __init__(self, dt, u_x, u_y, std_acc, x_std_meas, y_std_meas, init_x, init_y):
        """
        dt: time step between filter updates
        u_x, u_y: accepted but not used by this constant-velocity model
                  -- NOTE(review): confirm whether a control input was intended
        std_acc: process-noise (white acceleration) standard deviation
        x_std_meas, y_std_meas: measurement-noise standard deviations
        init_x, init_y: initial position of the track
        """
        self.init_x = init_x
        self.init_y = init_y
        self.dt = dt
        self.f = KalmanFilter(dim_x=4, dim_z=2)
        # initial state: measured position, zero velocity
        self.f.x = np.array([[self.init_x],[self.init_y],[0],[0]])
        # state-transition matrix of the constant-velocity model
        self.f.F = np.array([[1, 0, self.dt, 0],
                             [0, 1, 0, self.dt],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])
        # measurement matrix: position is observed, velocity is not
        self.f.H = np.array([[1, 0, 0, 0],
                             [0, 1, 0, 0]])
        self.f.P = np.eye(self.f.F.shape[1])
        # process noise from a white-acceleration model, scaled by std_acc^2
        self.f.Q *= np.array([[(self.dt**4)/4, 0, (self.dt**3)/2, 0],
                              [0, (self.dt**4)/4, 0, (self.dt**3)/2],
                              [(self.dt**3)/2, 0, self.dt**2, 0],
                              [0, (self.dt**3)/2, 0, self.dt**2]]) * std_acc**2
        # measurement-noise covariance
        self.f.R = np.array([[x_std_meas**2,0],
                             [0, y_std_meas**2]])
        self.f.predict()
class Trajectory_kf:
    """
    Multi-object point tracker.

    New detections are matched to existing tracks with the Hungarian
    algorithm (scipy.linear_sum_assignment) on the Euclidean distance
    between Kalman-predicted positions and detections; each track owns a
    constant-velocity Kalman_filter.
    """
    def __init__(self, maxDisappeared = 10):
        self.nextObjectID = 0
        self.point_dict = OrderedDict()        # objectID -> list of observed points
        self.disappeared_dict = OrderedDict()  # objectID -> consecutive frames without a match
        self.kf_dict = OrderedDict()           # objectID -> Kalman_filter
        self.kf_pred_dict = OrderedDict()      # objectID -> latest predicted [x, y]
        self.maxDisappeared = maxDisappeared   # frames before a lost track is dropped
        #self.kf_esti_dict = OrderedDict()

    def register(self, centroid):
        """Start a new track at *centroid*."""
        self.point_dict[self.nextObjectID] = [centroid]
        self.disappeared_dict[self.nextObjectID] = 0
        self.kf_dict[self.nextObjectID] = Kalman_filter(dt = 0.3,
                                                        u_x = 0,
                                                        u_y = 0.1,
                                                        std_acc = 0.01,
                                                        x_std_meas = 0.01,
                                                        y_std_meas = 0.01,
                                                        init_x = centroid[0],
                                                        init_y = centroid[1])
        self.kf_pred_dict[self.nextObjectID] = centroid
        self.nextObjectID += 1

    def deregister(self, objectID):
        """Drop all state belonging to *objectID*."""
        del self.point_dict[objectID]
        del self.disappeared_dict[objectID]
        del self.kf_dict[objectID]
        del self.kf_pred_dict[objectID]

    def update(self, next_centroid_list):
        """
        Advance the tracker by one frame with the detections in
        *next_centroid_list*; return the updated point_dict.
        """
        # no detections this frame: age every track and drop stale ones
        if len(next_centroid_list) == 0:
            for ID in list(self.disappeared_dict.keys()):
                self.disappeared_dict[ID] += 1
                pred_point = self.kf_dict[ID].f.x
                x, y = int(pred_point[0]), int(pred_point[1])
                self.kf_pred_dict[ID] = [x, y]
                if self.disappeared_dict[ID] >= self.maxDisappeared:
                    self.deregister(ID)
            return self.point_dict
        if len(self.point_dict) == 0:
            # no existing tracks: every detection starts a new one
            for i in range(len(next_centroid_list)):
                self.register(next_centroid_list[i])
        else:
            objectIDs = list(self.point_dict.keys())
            # predict every track one step ahead
            self.kf_predict_list = list()
            for ID in objectIDs:
                self.kf_dict[ID].f.predict()
                pred_point = self.kf_dict[ID].f.x
                x, y = int(pred_point[0]), int(pred_point[1])
                self.kf_pred_dict[ID] = [x, y]
                self.kf_predict_list.append([x, y])
            # optimal assignment between predicted positions and detections
            distan = dist.cdist(np.array(self.kf_predict_list), next_centroid_list)
            ID_list, indexes = linear_sum_assignment(distan)
            used_ID = set()
            used_next_pts = set()
            for row, col in zip(ID_list, indexes):
                objectID = objectIDs[row]
                self.point_dict[objectID].append(next_centroid_list[col])
                self.disappeared_dict[objectID] = 0
                # BUG FIX: update the matched track's own filter. The original
                # code used the stale loop variable `ID` here, so only the
                # last-predicted filter ever received measurement updates.
                self.kf_dict[objectID].f.update(next_centroid_list[col])
                pred_point = self.kf_dict[objectID].f.x
                x, y = int(pred_point[0]), int(pred_point[1])
                self.kf_pred_dict[objectID] = [x, y]
                used_ID.add(objectID)
                used_next_pts.add(col)
            # age unmatched tracks, register unmatched detections
            unused_ID = set(objectIDs).difference(used_ID)
            unused_next_pts = set(range(len(next_centroid_list))).difference(used_next_pts)
            if unused_ID:
                for ID in unused_ID:
                    self.disappeared_dict[ID] += 1
                    if self.disappeared_dict[ID] > self.maxDisappeared:
                        self.deregister(ID)
            if unused_next_pts:
                for index in unused_next_pts:
                    self.register(next_centroid_list[index])
        return self.point_dict
| [
"numpy.array",
"filterpy.kalman.KalmanFilter",
"collections.OrderedDict",
"numpy.eye",
"scipy.optimize.linear_sum_assignment"
] | [((492, 522), 'filterpy.kalman.KalmanFilter', 'KalmanFilter', ([], {'dim_x': '(4)', 'dim_z': '(2)'}), '(dim_x=4, dim_z=2)\n', (504, 522), False, 'from filterpy.kalman import KalmanFilter, UnscentedKalmanFilter, MerweScaledSigmaPoints\n'), ((542, 592), 'numpy.array', 'np.array', (['[[self.init_x], [self.init_y], [0], [0]]'], {}), '([[self.init_x], [self.init_y], [0], [0]])\n', (550, 592), True, 'import numpy as np\n'), ((609, 687), 'numpy.array', 'np.array', (['[[1, 0, self.dt, 0], [0, 1, 0, self.dt], [0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[1, 0, self.dt, 0], [0, 1, 0, self.dt], [0, 0, 1, 0], [0, 0, 0, 1]])\n', (617, 687), True, 'import numpy as np\n'), ((792, 830), 'numpy.array', 'np.array', (['[[1, 0, 0, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 1, 0, 0]])\n', (800, 830), True, 'import numpy as np\n'), ((879, 904), 'numpy.eye', 'np.eye', (['self.f.F.shape[1]'], {}), '(self.f.F.shape[1])\n', (885, 904), True, 'import numpy as np\n'), ((1206, 1260), 'numpy.array', 'np.array', (['[[x_std_meas ** 2, 0], [0, y_std_meas ** 2]]'], {}), '([[x_std_meas ** 2, 0], [0, y_std_meas ** 2]])\n', (1214, 1260), True, 'import numpy as np\n'), ((1435, 1448), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1446, 1448), False, 'from collections import OrderedDict\n'), ((1481, 1494), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1492, 1494), False, 'from collections import OrderedDict\n'), ((1518, 1531), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1529, 1531), False, 'from collections import OrderedDict\n'), ((1560, 1573), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1571, 1573), False, 'from collections import OrderedDict\n'), ((926, 1113), 'numpy.array', 'np.array', (['[[self.dt ** 4 / 4, 0, self.dt ** 3 / 2, 0], [0, self.dt ** 4 / 4, 0, self.\n dt ** 3 / 2], [self.dt ** 3 / 2, 0, self.dt ** 2, 0], [0, self.dt ** 3 /\n 2, 0, self.dt ** 2]]'], {}), '([[self.dt ** 4 / 4, 0, self.dt ** 3 / 2, 0], [0, self.dt ** 4 / 
4,\n 0, self.dt ** 3 / 2], [self.dt ** 3 / 2, 0, self.dt ** 2, 0], [0, self.\n dt ** 3 / 2, 0, self.dt ** 2]])\n', (934, 1113), True, 'import numpy as np\n'), ((4074, 4103), 'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['distan'], {}), '(distan)\n', (4095, 4103), False, 'from scipy.optimize import linear_sum_assignment\n'), ((3977, 4007), 'numpy.array', 'np.array', (['self.kf_predict_list'], {}), '(self.kf_predict_list)\n', (3985, 4007), True, 'import numpy as np\n')] |
"""Functions to calculate spherical/cilyndrical power spectrum."""
import numpy as np
def ps1D(
    lc,
    cell_size,
    n_psbins=12,
    logk=True,
    convert_to_delta=True,
    chunk_skip=None,
    calculate_variance=False,
):
    """Compute the spherically-averaged (1D) PS of one lightcone.

    Parameters
    ----------
    lc : array
        Lightcone.
    cell_size : float
        Simulation voxel size (in Mpc).
    n_psbins : int
        Number of PS bins.
    logk : bool
        If bins should be in log or linear space.
    convert_to_delta : bool
        Either to convert from power to non-dimensional delta.
    chunk_skip : int
        In redshift dimension of the lightcone,
        PS is calculated on chunks `chunk_skip` apart.
        Eg. `chunk_skip = 2` amounts in taking every second redshift bin into account.
        If `None`, it amounts to the lightcone sky-plane size.
    calculate_variance : bool
        Either to calculate sample variance of each bin or not.

    Returns
    -------
    PS : dict
        Power spectrum and its sample variance (if flag is turned on) for all redshift bins.
    k_values : array
        Centers of k bins.
    """
    ps, k_centers = _power_1D(
        lc,
        cell_size=cell_size,
        n_psbins=n_psbins,
        chunk_skip=chunk_skip,
        logk=logk,
        calculate_variance=calculate_variance,
    )
    if convert_to_delta is True:
        # Dimensionless power: Delta^2(k) = k^3 P(k) / (2 pi^2).
        factor = k_centers ** 3 / (2 * np.pi ** 2)
        ps["power"] = ps["power"] * factor[np.newaxis, ...]
        if calculate_variance:
            # Variance scales with the square of the conversion factor.
            ps["var_power"] = ps["var_power"] * factor[np.newaxis, ...] ** 2
    return ps, k_centers
def ps2D(
    lc,
    cell_size,
    n_psbins_par=12,
    n_psbins_perp=12,
    logk=True,
    convert_to_delta=True,
    chunk_skip=None,
    calculate_variance=False,
):
    """Calculating 2D (cylindrical) PS for a series of redshifts for one lightcone.

    Parameters
    ----------
    lc : array
        Lightcone.
    cell_size : float
        Simulation voxel size (in Mpc).
    n_psbins_par : int
        Number of PS bins in LoS direction.
    n_psbins_perp : int
        Number of PS bins in sky-plane direction.
    logk : bool
        If bins should be in log or linear space.
    convert_to_delta : bool
        Either to convert from power to non-dimensional delta.
    chunk_skip : int
        In redshift dimension of the lightcone,
        PS is calculated on chunks `chunk_skip` apart.
        Eg. `chunk_skip = 2` amounts in taking every second redshift bin
        into account. If `None`, it amounts to the lightcone sky-plane size.
    calculate_variance : bool
        Either to calculate sample variance of each bin or not.

    Returns
    -------
    PS : dict
        Power spectrum and its sample variance (if flag is turned on) for all redshift bins.
    k_values_perp : array
        Centers of k_perp bins.
    k_values_par : array
        Centers of k_par bins.
    """
    PS, k_values_perp, k_values_par = _power_2D(
        lc,
        cell_size=cell_size,
        n_psbins_par=n_psbins_par,
        n_psbins_perp=n_psbins_perp,
        chunk_skip=chunk_skip,
        logk=logk,
        calculate_variance=calculate_variance,
    )
    if convert_to_delta is True:
        # np.meshgrid with default 'xy' indexing returns arrays of shape
        # (n_psbins_perp, n_psbins_par), matching the PS grid:
        # k_values_cube[0] holds k_par, k_values_cube[1] holds k_perp.
        k_values_cube = np.meshgrid(
            k_values_par, k_values_perp
        )  # all k_values on the 2D grid
        # dimensionless pre-factor: k_perp**2 * k_par / (4 pi^2)
        conversion_factor = (k_values_cube[1] ** 2 * k_values_cube[0]) / (
            4 * np.pi ** 2
        )
        PS["power"] = PS["power"] * conversion_factor[np.newaxis, ...]
        if calculate_variance:
            # Variance scales with the square of the conversion factor.
            PS["var_power"] = PS["var_power"] * conversion_factor[np.newaxis, ...] ** 2
    return PS, k_values_perp, k_values_par
def _power_1D(
lightcone,
cell_size,
n_psbins,
chunk_skip,
logk,
calculate_variance,
):
HII_DIM = lightcone.shape[0]
n_slices = lightcone.shape[-1]
chunk_skip = HII_DIM if chunk_skip is None else chunk_skip
chunk_indices = list(range(0, n_slices + 1 - HII_DIM, chunk_skip))
epsilon = 1e-12
# DFT frequency modes
k = np.fft.fftfreq(HII_DIM, d=cell_size)
k = 2 * np.pi * k
# ignoring 0 and negative modes
k_min, k_max = k[1], np.abs(k).max()
# maximal mode will be k_max * sqrt(3)
if logk:
k_bins = np.logspace(
np.log10(k_min - epsilon),
np.log10(np.sqrt(3) * k_max + epsilon),
n_psbins + 1,
)
else:
k_bins = np.linspace(
k_min - epsilon, np.sqrt(3) * k_max + epsilon, n_psbins + 1
)
# grid of all k_values
k_cube = np.meshgrid(k, k, k)
# calculating k_perp, k_par in cylindrical coordinates
k_sphere = np.sqrt(k_cube[0] ** 2 + k_cube[1] ** 2 + k_cube[2] ** 2)
# return a bin index across flattened k_sphere array
k_sphere_digits = np.digitize(k_sphere.flatten(), k_bins)
# count occurence of modes in each bin & cut out all values outside the edges
k_binsum = np.bincount(k_sphere_digits, minlength=n_psbins + 2)[1:-1]
# geometrical means for values
k_values = np.sqrt(k_bins[:-1] * k_bins[1:])
lightcones = [] # all chunks that need to be computed
# appending all chunks together
for i in chunk_indices:
start = i
end = i + HII_DIM
lightcones.append(lightcone[..., start:end])
lightcones = np.array(lightcones, dtype=np.float32)
V = (HII_DIM * cell_size) ** 3
dV = cell_size ** 3
def _power(box):
FT = np.fft.fftn(box) * dV
PS_box = np.real(FT * np.conj(FT)) / V
# calculating average power as a bin count with PS as weights
res = {}
res["power"] = (
np.bincount(
k_sphere_digits, weights=PS_box.flatten(), minlength=n_psbins + 2
)[1:-1]
/ k_binsum
)
# calculating average square of the power, used for estimating sample variance
if calculate_variance:
p_sq = (
np.bincount(
k_sphere_digits,
weights=PS_box.flatten() ** 2,
minlength=n_psbins + 2,
)[1:-1]
/ k_binsum
)
res["var_power"] = p_sq - res["power"] ** 2
return res
res = [_power(lc) for lc in lightcones]
P = {key: [] for key in res[0].keys()}
for r in res:
for key, value in r.items():
P[key].append(value)
P = {key: np.array(value, dtype=np.float32) for key, value in P.items()}
return P, k_values
def _power_2D(
lightcone,
cell_size,
n_psbins_par,
n_psbins_perp,
chunk_skip,
logk,
calculate_variance,
):
HII_DIM = lightcone.shape[0]
n_slices = lightcone.shape[-1]
chunk_skip = HII_DIM if chunk_skip is None else chunk_skip
chunk_indices = list(range(0, n_slices + 1 - HII_DIM, chunk_skip))
epsilon = 1e-12
# DFT frequency modes
k = np.fft.fftfreq(HII_DIM, d=cell_size)
k = 2 * np.pi * k
# ignoring 0 and negative modes
k_min, k_max = k[1], np.abs(k).max()
if logk:
# maximal perp mode will be k_max * sqrt(2)
k_bins_perp = np.logspace(
np.log10(k_min - epsilon),
np.log10(np.sqrt(2.0) * k_max + epsilon),
n_psbins_perp + 1,
)
# maximal par mode will be k_max
k_bins_par = np.logspace(
np.log10(k_min - epsilon), np.log10(k_max + epsilon), n_psbins_par + 1
)
else:
k_bins_perp = np.linspace(
k_min - epsilon,
np.sqrt(2.0) * k_max + epsilon,
n_psbins_perp + 1,
)
k_bins_par = np.linspace(k_min - epsilon, k_max + epsilon, n_psbins_par + 1)
# grid of all k_values, where k_cube[0], k_cube[1] are perp values, and k_cube[2] par values
k_cube = np.meshgrid(k, k, k)
# calculating k_perp, k_par in cylindrical coordinates
k_cylinder = [np.sqrt(k_cube[0] ** 2 + k_cube[1] ** 2), np.abs(k_cube[2])]
# return a bin index across flattened k_cylinder, for perp and par
k_perp_digits = np.digitize(k_cylinder[0].flatten(), k_bins_perp)
k_par_digits = np.digitize(k_cylinder[1].flatten(), k_bins_par)
# construct a unique digit counter for a 2D PS array
# for first k_perp uses range [1, n_psbins_par]
# for second k_perp uses range [n_psbins_par + 1, 2 * n_psbins_par] etc.
k_cylinder_digits = (k_perp_digits - 1) * n_psbins_par + k_par_digits
# now cut out outsiders: zeros, n_psbins_par + 1, n_psbins_perp + 1
k_cylinder_digits = np.where(
np.logical_or(k_perp_digits == 0, k_par_digits == 0), 0, k_cylinder_digits
)
k_cylinder_digits = np.where(
np.logical_or(
k_perp_digits == n_psbins_perp + 1, k_par_digits == n_psbins_par + 1
),
n_psbins_perp * n_psbins_par + 1,
k_cylinder_digits,
)
k_binsum = np.bincount(
k_cylinder_digits, minlength=n_psbins_par * n_psbins_perp + 2
)[1:-1]
# geometrical means for values
k_values_perp = np.sqrt(k_bins_perp[:-1] * k_bins_perp[1:])
k_values_par = np.sqrt(k_bins_par[:-1] * k_bins_par[1:])
lightcones = [] # all chunks that need to be computed
# appending all chunks together
for i in chunk_indices:
start = i
end = i + HII_DIM
lightcones.append(lightcone[..., start:end])
lightcones = np.array(lightcones, dtype=np.float32)
V = (HII_DIM * cell_size) ** 3
dV = cell_size ** 3
def _power(box):
FT = np.fft.fftn(box) * dV
PS_box = np.real(FT * np.conj(FT)) / V
res = {}
# calculating average power as a bin count with PS as weights
res["power"] = (
np.bincount(
k_cylinder_digits,
weights=PS_box.flatten(),
minlength=n_psbins_par * n_psbins_perp + 2,
)[1:-1]
/ k_binsum
).reshape(n_psbins_perp, n_psbins_par)
if calculate_variance:
# calculating average square of the power, used for estimating sample variance
p_sq = (
np.bincount(
k_cylinder_digits,
weights=PS_box.flatten() ** 2,
minlength=n_psbins_par * n_psbins_perp + 2,
)[1:-1]
/ k_binsum
).reshape(n_psbins_perp, n_psbins_par)
res["var_power"] = p_sq - res["power"] ** 2
return res
res = [_power(lc) for lc in lightcones]
P = {key: [] for key in res[0].keys()}
for r in res:
for key, value in r.items():
P[key].append(value)
P = {key: np.array(value, dtype=np.float32) for key, value in P.items()}
return P, k_values_perp, k_values_par
| [
"numpy.conj",
"numpy.meshgrid",
"numpy.abs",
"numpy.fft.fftn",
"numpy.log10",
"numpy.fft.fftfreq",
"numpy.array",
"numpy.logical_or",
"numpy.linspace",
"numpy.bincount",
"numpy.sqrt"
] | [((4438, 4474), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['HII_DIM'], {'d': 'cell_size'}), '(HII_DIM, d=cell_size)\n', (4452, 4474), True, 'import numpy as np\n'), ((4950, 4970), 'numpy.meshgrid', 'np.meshgrid', (['k', 'k', 'k'], {}), '(k, k, k)\n', (4961, 4970), True, 'import numpy as np\n'), ((5045, 5102), 'numpy.sqrt', 'np.sqrt', (['(k_cube[0] ** 2 + k_cube[1] ** 2 + k_cube[2] ** 2)'], {}), '(k_cube[0] ** 2 + k_cube[1] ** 2 + k_cube[2] ** 2)\n', (5052, 5102), True, 'import numpy as np\n'), ((5428, 5461), 'numpy.sqrt', 'np.sqrt', (['(k_bins[:-1] * k_bins[1:])'], {}), '(k_bins[:-1] * k_bins[1:])\n', (5435, 5461), True, 'import numpy as np\n'), ((5702, 5740), 'numpy.array', 'np.array', (['lightcones'], {'dtype': 'np.float32'}), '(lightcones, dtype=np.float32)\n', (5710, 5740), True, 'import numpy as np\n'), ((7290, 7326), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['HII_DIM'], {'d': 'cell_size'}), '(HII_DIM, d=cell_size)\n', (7304, 7326), True, 'import numpy as np\n'), ((8183, 8203), 'numpy.meshgrid', 'np.meshgrid', (['k', 'k', 'k'], {}), '(k, k, k)\n', (8194, 8203), True, 'import numpy as np\n'), ((9395, 9438), 'numpy.sqrt', 'np.sqrt', (['(k_bins_perp[:-1] * k_bins_perp[1:])'], {}), '(k_bins_perp[:-1] * k_bins_perp[1:])\n', (9402, 9438), True, 'import numpy as np\n'), ((9458, 9499), 'numpy.sqrt', 'np.sqrt', (['(k_bins_par[:-1] * k_bins_par[1:])'], {}), '(k_bins_par[:-1] * k_bins_par[1:])\n', (9465, 9499), True, 'import numpy as np\n'), ((9740, 9778), 'numpy.array', 'np.array', (['lightcones'], {'dtype': 'np.float32'}), '(lightcones, dtype=np.float32)\n', (9748, 9778), True, 'import numpy as np\n'), ((3595, 3635), 'numpy.meshgrid', 'np.meshgrid', (['k_values_par', 'k_values_perp'], {}), '(k_values_par, k_values_perp)\n', (3606, 3635), True, 'import numpy as np\n'), ((5319, 5371), 'numpy.bincount', 'np.bincount', (['k_sphere_digits'], {'minlength': '(n_psbins + 2)'}), '(k_sphere_digits, minlength=n_psbins + 2)\n', (5330, 5371), True, 'import numpy as np\n'), ((6809, 
6842), 'numpy.array', 'np.array', (['value'], {'dtype': 'np.float32'}), '(value, dtype=np.float32)\n', (6817, 6842), True, 'import numpy as np\n'), ((8008, 8071), 'numpy.linspace', 'np.linspace', (['(k_min - epsilon)', '(k_max + epsilon)', '(n_psbins_par + 1)'], {}), '(k_min - epsilon, k_max + epsilon, n_psbins_par + 1)\n', (8019, 8071), True, 'import numpy as np\n'), ((8281, 8321), 'numpy.sqrt', 'np.sqrt', (['(k_cube[0] ** 2 + k_cube[1] ** 2)'], {}), '(k_cube[0] ** 2 + k_cube[1] ** 2)\n', (8288, 8321), True, 'import numpy as np\n'), ((8323, 8340), 'numpy.abs', 'np.abs', (['k_cube[2]'], {}), '(k_cube[2])\n', (8329, 8340), True, 'import numpy as np\n'), ((8925, 8977), 'numpy.logical_or', 'np.logical_or', (['(k_perp_digits == 0)', '(k_par_digits == 0)'], {}), '(k_perp_digits == 0, k_par_digits == 0)\n', (8938, 8977), True, 'import numpy as np\n'), ((9048, 9136), 'numpy.logical_or', 'np.logical_or', (['(k_perp_digits == n_psbins_perp + 1)', '(k_par_digits == n_psbins_par + 1)'], {}), '(k_perp_digits == n_psbins_perp + 1, k_par_digits == \n n_psbins_par + 1)\n', (9061, 9136), True, 'import numpy as np\n'), ((9245, 9319), 'numpy.bincount', 'np.bincount', (['k_cylinder_digits'], {'minlength': '(n_psbins_par * n_psbins_perp + 2)'}), '(k_cylinder_digits, minlength=n_psbins_par * n_psbins_perp + 2)\n', (9256, 9319), True, 'import numpy as np\n'), ((11003, 11036), 'numpy.array', 'np.array', (['value'], {'dtype': 'np.float32'}), '(value, dtype=np.float32)\n', (11011, 11036), True, 'import numpy as np\n'), ((4673, 4698), 'numpy.log10', 'np.log10', (['(k_min - epsilon)'], {}), '(k_min - epsilon)\n', (4681, 4698), True, 'import numpy as np\n'), ((5836, 5852), 'numpy.fft.fftn', 'np.fft.fftn', (['box'], {}), '(box)\n', (5847, 5852), True, 'import numpy as np\n'), ((7538, 7563), 'numpy.log10', 'np.log10', (['(k_min - epsilon)'], {}), '(k_min - epsilon)\n', (7546, 7563), True, 'import numpy as np\n'), ((7747, 7772), 'numpy.log10', 'np.log10', (['(k_min - epsilon)'], {}), '(k_min - 
epsilon)\n', (7755, 7772), True, 'import numpy as np\n'), ((7774, 7799), 'numpy.log10', 'np.log10', (['(k_max + epsilon)'], {}), '(k_max + epsilon)\n', (7782, 7799), True, 'import numpy as np\n'), ((9874, 9890), 'numpy.fft.fftn', 'np.fft.fftn', (['box'], {}), '(box)\n', (9885, 9890), True, 'import numpy as np\n'), ((4559, 4568), 'numpy.abs', 'np.abs', (['k'], {}), '(k)\n', (4565, 4568), True, 'import numpy as np\n'), ((7410, 7419), 'numpy.abs', 'np.abs', (['k'], {}), '(k)\n', (7416, 7419), True, 'import numpy as np\n'), ((4857, 4867), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4864, 4867), True, 'import numpy as np\n'), ((5888, 5899), 'numpy.conj', 'np.conj', (['FT'], {}), '(FT)\n', (5895, 5899), True, 'import numpy as np\n'), ((7914, 7926), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (7921, 7926), True, 'import numpy as np\n'), ((9926, 9937), 'numpy.conj', 'np.conj', (['FT'], {}), '(FT)\n', (9933, 9937), True, 'import numpy as np\n'), ((4721, 4731), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (4728, 4731), True, 'import numpy as np\n'), ((7586, 7598), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (7593, 7598), True, 'import numpy as np\n')] |
import pytest
import pandas as pd
import numpy as np
from cobra.evaluation import plot_incidence
from cobra.evaluation import ClassificationEvaluator, RegressionEvaluator
def mock_data():
    """Build a small predictor-insights (PIG) table fixture.

    Four bins of a single 'education' variable, with population sizes,
    a constant average incidence and per-bin incidences.
    """
    return pd.DataFrame(
        {
            "variable": ["education"] * 4,
            "label": ["1st-4th", "5th-6th", "7th-8th", "9th"],
            "pop_size": [0.002, 0.004, 0.009, 0.019],
            "avg_incidence": [0.23] * 4,
            "incidence": [0.047, 0.0434, 0.054, 0.069],
        }
    )
def mock_preds(n, seed=505):
    """Return reproducible ``(y_true, y_pred)`` uniform arrays of length ``n``."""
    np.random.seed(seed)
    # two consecutive draws from the seeded global RNG:
    # first y_true, then y_pred (order matters for reproducibility)
    return np.random.uniform(size=n), np.random.uniform(size=n)
class TestEvaluation:
    """Tests for cobra.evaluation: plot_incidence input validation and
    the Classification/Regression evaluators' fit() results."""

    def test_plot_incidence_with_unsupported_model_type(self):
        # plot_incidence must reject model types it does not support.
        with pytest.raises(ValueError):
            plot_incidence(pig_tables=None,
                           variable="",
                           model_type="anomaly_detection")

    def test_plot_incidence_with_different_column_orders(self):
        # column_order that does not match the labels in the data
        # must raise.
        data = mock_data()
        with pytest.raises(ValueError):
            plot_incidence(pig_tables=data,
                           variable='education',
                           model_type="classification",
                           # different bins than in the data variable:
                           column_order=['1st-4th', '5th-6th', '7th-8th'])

    # Stubs for later (requires exposing df_plot and testing matplotlib's
    # plot object fix and ax internals):
    """
    def test_plot_incidence_without_column_order(self):
        data = mock_data()
        plot_incidence(pig_tables=data,
                       variable='education',
                       model_type="classification",
                       column_order=None)
    def test_plot_incidence_with_column_order(self):
        data = mock_data()
        plot_incidence(pig_tables=data,
                       variable='education',
                       model_type="classification",
                       column_order=['1st-4th', '5th-6th', '7th-8th', '9th'])
    def test_plot_incidence_visual_result_for_classification(self):
        data = mock_data()
        plot_incidence(pig_tables=data,
                       variable='education',
                       model_type="classification",
                       column_order=['1st-4th', '5th-6th', '7th-8th', '9th'])
    def test_plot_incidence_visual_result_for_regression(self):
        data = mock_data() # change into regression target though.
        plot_incidence(pig_tables=data,
                       variable='education',
                       model_type="regression",
                       column_order=['1st-4th', '5th-6th', '7th-8th', '9th'])
    def test_plot_predictions_regression(self):
        y_true, y_pred = mock_preds(50, seed=123)
        evaluator = RegressionEvaluator()
        evaluator.fit(y_true, y_pred)
        evaluator.plot_predictions()
    def test_plot_qq(self):
        y_true, y_pred = mock_preds(50, seed=631993)
        evaluator = RegressionEvaluator()
        evaluator.fit(y_true, y_pred)
        evaluator.plot_qq()
    """

    def test_lift_curve_n_bins(self):
        # The computed lift curve must contain exactly n_bins points,
        # for a range of n_bins values.
        n_bins_test = [5, 10, 15, 35]
        y_true, y_pred = mock_preds(50)
        n_bins_out = []
        for n_bins in n_bins_test:
            e = ClassificationEvaluator(n_bins=n_bins)
            out = ClassificationEvaluator._compute_lift_per_bin(y_true, y_pred, e.n_bins)
            lifts = out[1]
            n_bins_out.append(len(lifts))
        assert n_bins_test == n_bins_out

    def test_fit_classification(self):
        # fit() must populate every scalar metric and all curve artifacts.
        y_true, y_pred = mock_preds(50)
        y_true = (y_true > 0.5).astype(int)  # convert to 0-1 labels
        evaluator = ClassificationEvaluator(n_bins=5)
        evaluator.fit(y_true, y_pred)
        assert (evaluator.y_true == y_true).all()
        assert (evaluator.y_pred == y_pred).all()
        for metric in ["accuracy", "AUC", "precision", "recall",
                       "F1", "matthews_corrcoef", "lift at {}".format(evaluator.lift_at)]:
            assert evaluator.scalar_metrics[metric] is not None
        assert evaluator.roc_curve is not None
        assert evaluator.confusion_matrix is not None
        assert evaluator.lift_curve is not None
        assert evaluator.cumulative_gains is not None

    def test_fit_regression(self):
        # fit() must populate every scalar metric and the Q-Q data.
        y_true, y_pred = mock_preds(50, seed=789)
        y_true, y_pred = y_true*10, y_pred*10  # rescale so it looks more regression-like
        evaluator = RegressionEvaluator()
        evaluator.fit(y_true, y_pred)
        assert (evaluator.y_true == y_true).all()
        assert (evaluator.y_pred == y_pred).all()
        for metric in ["R2", "MAE", "MSE", "RMSE"]:
            assert evaluator.scalar_metrics[metric] is not None
        assert evaluator.qq is not None
| [
"pandas.DataFrame",
"numpy.random.uniform",
"numpy.random.seed",
"cobra.evaluation.RegressionEvaluator",
"cobra.evaluation.ClassificationEvaluator._compute_lift_per_bin",
"cobra.evaluation.plot_incidence",
"pytest.raises",
"cobra.evaluation.ClassificationEvaluator"
] | [((493, 508), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (505, 508), True, 'import pandas as pd\n'), ((545, 565), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (559, 565), True, 'import numpy as np\n'), ((580, 605), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (597, 605), True, 'import numpy as np\n'), ((619, 644), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'n'}), '(size=n)\n', (636, 644), True, 'import numpy as np\n'), ((3755, 3788), 'cobra.evaluation.ClassificationEvaluator', 'ClassificationEvaluator', ([], {'n_bins': '(5)'}), '(n_bins=5)\n', (3778, 3788), False, 'from cobra.evaluation import ClassificationEvaluator, RegressionEvaluator\n'), ((4547, 4568), 'cobra.evaluation.RegressionEvaluator', 'RegressionEvaluator', ([], {}), '()\n', (4566, 4568), False, 'from cobra.evaluation import ClassificationEvaluator, RegressionEvaluator\n'), ((772, 797), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (785, 797), False, 'import pytest\n'), ((811, 887), 'cobra.evaluation.plot_incidence', 'plot_incidence', ([], {'pig_tables': 'None', 'variable': '""""""', 'model_type': '"""anomaly_detection"""'}), "(pig_tables=None, variable='', model_type='anomaly_detection')\n", (825, 887), False, 'from cobra.evaluation import plot_incidence\n'), ((1047, 1072), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1060, 1072), False, 'import pytest\n'), ((1086, 1221), 'cobra.evaluation.plot_incidence', 'plot_incidence', ([], {'pig_tables': 'data', 'variable': '"""education"""', 'model_type': '"""classification"""', 'column_order': "['1st-4th', '5th-6th', '7th-8th']"}), "(pig_tables=data, variable='education', model_type=\n 'classification', column_order=['1st-4th', '5th-6th', '7th-8th'])\n", (1100, 1221), False, 'from cobra.evaluation import plot_incidence\n'), ((3345, 3383), 'cobra.evaluation.ClassificationEvaluator', 'ClassificationEvaluator', ([], 
{'n_bins': 'n_bins'}), '(n_bins=n_bins)\n', (3368, 3383), False, 'from cobra.evaluation import ClassificationEvaluator, RegressionEvaluator\n'), ((3402, 3473), 'cobra.evaluation.ClassificationEvaluator._compute_lift_per_bin', 'ClassificationEvaluator._compute_lift_per_bin', (['y_true', 'y_pred', 'e.n_bins'], {}), '(y_true, y_pred, e.n_bins)\n', (3447, 3473), False, 'from cobra.evaluation import ClassificationEvaluator, RegressionEvaluator\n')] |
from __future__ import division
from keras.layers.recurrent import Recurrent, time_distributed_dense
from keras.layers import Dense
from keras import activations, initializations, regularizers, constraints
from keras.engine.topology import Layer, InputSpec
from keras import backend as K
import numpy as np
#Leaky Recurrent Layer
class leak_recurrent(Recurrent):
    '''Fully-connected RNN with output fed back into the input.

    Each unit carries a 'leak' that damps its activation according to the
    time constant ``tau``, discretized in steps of ``dt`` (Euler scheme):

        h_t = (1 - alpha) * h_{t-1}
              + alpha * (x_t . W + f(h_{t-1}) . U [* Dale])
              + N(0, noise),    alpha = dt / tau

    where ``f`` is the activation. Gaussian noise with std ``noise`` is
    added at every step (at train AND test time).

    If ``dale_ratio`` is given, Dale's law is enforced: W and U are used
    through ``abs()`` and the recurrent weights are multiplied by a fixed
    diagonal sign matrix in which the first ``dale_ratio`` fraction of
    units is excitatory (+1) and the remainder inhibitory (-1).
    '''
    def __init__(self, output_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='tanh', W_regularizer=None,
                 U_regularizer=None, b_regularizer=None,
                 dropout_W=0.0, dropout_U=0.0,
                 tau=100, dt=20, noise=.1,
                 dale_ratio=None, **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        self.tau = tau
        self.dt = dt
        self.noise = noise
        self.dale_ratio = dale_ratio
        if dale_ratio:
            # Fixed (non-trainable) diagonal sign matrix implementing
            # Dale's law: +1 for the excitatory block, -1 for the rest.
            dale_vec = np.ones(output_dim)
            dale_vec[int(dale_ratio * output_dim):] = -1
            dale = np.diag(dale_vec)
            self.Dale = K.variable(dale)

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(leak_recurrent, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the layer weights W (input), U (recurrent) and b."""
        self.input_spec = [InputSpec(shape=input_shape)]
        if self.stateful:
            self.reset_states()
        else:
            # Non-stateful: a single random initial state vector,
            # broadcast over the batch by the backend.
            self.states = [K.random_normal(shape=(self.output_dim,), mean=0.5, std=0.5)]

        input_dim = input_shape[2]
        self.input_dim = input_dim

        self.W = self.init((input_dim, self.output_dim), name='{}_W'.format(self.name))
        self.U = self.inner_init((self.output_dim, self.output_dim), name='{}_U'.format(self.name))
        self.b = K.zeros((self.output_dim,), name='{}_b'.format(self.name))

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.U_regularizer:
            self.U_regularizer.set_param(self.U)
            self.regularizers.append(self.U_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        # b is deliberately left out: only W and U are trained.
        self.trainable_weights = [self.W, self.U]
        if self.dale_ratio:
            self.non_trainable_weights = [self.Dale]

        # initial_weights is set by Recurrent.__init__ from the `weights`
        # kwarg -- TODO confirm against the Keras 1.x base class.
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

    def reset_states(self):
        """Zero the recurrent state (stateful layers only)."""
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' + 'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0], np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim))]

    def preprocess_input(self, x):
        # In 'cpu' mode the input projection W.x (+ b) is computed once,
        # outside the recurrent loop; step() then uses x as-is.
        if self.consume_less == 'cpu':
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[2]
            timesteps = input_shape[1]
            return time_distributed_dense(x, self.W, self.b, self.dropout_W,
                                          input_dim, self.output_dim,
                                          timesteps)
        else:
            return x

    def step(self, x, states):
        """One Euler step of the leaky-integrator dynamics."""
        prev_output = states[0]
        tau = self.tau
        dt = self.dt
        noise = self.noise
        alpha = dt/tau

        if self.consume_less == 'cpu':
            # Input projection was precomputed in preprocess_input().
            h = x
        else:
            if(self.dale_ratio):
                h = K.dot(x, K.abs(self.W))  # + self.b
            else:
                h = K.dot(x, self.W)

        # Additive Gaussian noise per step; shape (output_dim,) is
        # broadcast over the batch dimension.
        if(self.dale_ratio):
            output = prev_output*(1-alpha) + \
                     alpha*(h + K.dot(self.activation(prev_output) , K.abs(self.U) * self.Dale)) \
                     + K.random_normal(shape=K.shape(self.b), mean=0.0, std=noise)
        else:
            output = prev_output * (1 - alpha) + \
                     alpha * (h + K.dot(self.activation(prev_output), self.U )) \
                     + K.random_normal(shape=K.shape(self.b), mean=0.0, std=noise)

        return (output, [output])

    def get_constants(self, x):
        # Dropout masks (or 1.0 placeholders) passed to the recurrent loop.
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
            constants.append(B_U)
        else:
            constants.append(K.cast_to_floatx(1.0))
        if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, input_dim))
            B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
            constants.append(B_W)
        else:
            constants.append(K.cast_to_floatx(1.0))
        return constants

    def get_config(self):
        # Include EVERY constructor argument so the layer round-trips
        # through get_config()/from_config(). tau, dt, noise and
        # dale_ratio were previously missing, silently dropping the
        # leak/noise/Dale settings on deserialization.
        config = {'output_dim': self.output_dim,
                  'init': self.init.__name__,
                  'inner_init': self.inner_init.__name__,
                  'activation': self.activation.__name__,
                  'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
                  'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
                  'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
                  'dropout_W': self.dropout_W,
                  'dropout_U': self.dropout_U,
                  'tau': self.tau,
                  'dt': self.dt,
                  'noise': self.noise,
                  'dale_ratio': self.dale_ratio}
        base_config = super(leak_recurrent, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class dense_output_with_mask(Dense):
    # same as a dense layer, but with output masking by dales law so we only see
    # output from the excitatory neurons
    #
    # The mask `Dale` is a fixed (input_dim, 1) column vector: 1 for the
    # first dale_ratio fraction of input units (excitatory), 0 for the
    # rest (inhibitory). In call(), |W| * Dale zeroes out the rows of W
    # belonging to inhibitory units.
    #
    # NOTE(review): when dale_ratio is truthy (default .8), input_dim must
    # be given -- np.ones((None, 1)) would fail. TODO confirm all callers
    # pass input_dim.
    def __init__(self, output_dim, init='glorot_uniform',
                 activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=False, input_dim=None, dale_ratio = .8, **kwargs):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        # OUR CHANGE: build the fixed 0/1 excitatory mask.
        self.dale_ratio = dale_ratio
        if dale_ratio:
            dale_vec = np.ones((input_dim, 1))
            dale_vec[int(dale_ratio*input_dim):, 0] = 0
            self.Dale = K.variable(dale_vec)

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        # Deliberately skips Dense.__init__ (calls Layer.__init__ directly)
        # because this __init__ re-implements Dense's setup itself.
        super(Dense, self).__init__(**kwargs)

    def build(self, input_shape):
        # Create weights; mirrors Dense.build but registers the Dale
        # mask as a non-trainable weight.
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        self.input_spec = [InputSpec(dtype=K.floatx(),
                                     shape=(None, input_dim))]

        self.W = self.init((input_dim, self.output_dim),
                           name='{}_W'.format(self.name))
        if self.bias:
            self.b = K.zeros((self.output_dim,),
                             name='{}_b'.format(self.name))
            self.trainable_weights = [self.W, self.b]
        else:
            self.trainable_weights = [self.W]

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        #OUR CHANGE: the mask is fixed, never trained.
        if self.dale_ratio:
            self.non_trainable_weights = [self.Dale]

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        self.built = True

    def call(self, x, mask=None):
        # |W| * Dale keeps only the (positive) weights of excitatory
        # input units; without dale_ratio this is a plain dense layer.
        if self.dale_ratio:
            output = K.dot(x, K.abs(self.W) * self.Dale)
        else:
            output = K.dot(x, self.W)

        return self.activation(output)
class newGaussianNoise(Layer):
    '''Apply to the input an additive zero-centered Gaussian noise with
    standard deviation `sigma`.

    NOTE(review): unlike Keras' built-in GaussianNoise, `call` adds the
    noise unconditionally -- there is no `K.in_train_phase` switch -- so
    the noise is applied at BOTH training and test time, even though
    `uses_learning_phase` is set to True below.

    # Arguments
        sigma: float, standard deviation of the noise distribution.

    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a model.

    # Output shape
        Same shape as input.
    '''
    def __init__(self, sigma, **kwargs):
        self.supports_masking = True
        self.sigma = sigma
        # Flagged as phase-dependent, but call() does not branch on the
        # learning phase (see class NOTE).
        self.uses_learning_phase = True
        super(newGaussianNoise, self).__init__(**kwargs)

    def call(self, x, mask=None):
        # Additive zero-mean Gaussian noise matching the input shape.
        noise_x = x + K.random_normal(shape=K.shape(x),
                                        mean=0.,
                                        std=self.sigma)
        return noise_x

    def get_config(self):
        config = {'sigma': self.sigma}
        base_config = super(newGaussianNoise, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
| [
"keras.backend.dot",
"keras.layers.recurrent.time_distributed_dense",
"keras.regularizers.get",
"numpy.ones",
"keras.backend.abs",
"keras.backend.shape",
"numpy.diag",
"keras.activations.get",
"keras.backend.reshape",
"keras.constraints.get",
"keras.backend.cast_to_floatx",
"keras.backend.drop... | [((981, 1006), 'keras.initializations.get', 'initializations.get', (['init'], {}), '(init)\n', (1000, 1006), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1033, 1064), 'keras.initializations.get', 'initializations.get', (['inner_init'], {}), '(inner_init)\n', (1052, 1064), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1091, 1118), 'keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (1106, 1118), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1148, 1179), 'keras.regularizers.get', 'regularizers.get', (['W_regularizer'], {}), '(W_regularizer)\n', (1164, 1179), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1209, 1240), 'keras.regularizers.get', 'regularizers.get', (['U_regularizer'], {}), '(U_regularizer)\n', (1225, 1240), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1270, 1301), 'keras.regularizers.get', 'regularizers.get', (['b_regularizer'], {}), '(b_regularizer)\n', (1286, 1301), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7191, 7216), 'keras.initializations.get', 'initializations.get', (['init'], {}), '(init)\n', (7210, 7216), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7243, 7270), 'keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (7258, 7270), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7373, 7404), 'keras.regularizers.get', 'regularizers.get', (['W_regularizer'], {}), '(W_regularizer)\n', (7389, 7404), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7434, 7465), 'keras.regularizers.get', 'regularizers.get', (['b_regularizer'], {}), '(b_regularizer)\n', (7450, 7465), False, 'from keras 
import activations, initializations, regularizers, constraints\n'), ((7502, 7540), 'keras.regularizers.get', 'regularizers.get', (['activity_regularizer'], {}), '(activity_regularizer)\n', (7518, 7540), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7570, 7599), 'keras.constraints.get', 'constraints.get', (['W_constraint'], {}), '(W_constraint)\n', (7585, 7599), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((7628, 7657), 'keras.constraints.get', 'constraints.get', (['b_constraint'], {}), '(b_constraint)\n', (7643, 7657), False, 'from keras import activations, initializations, regularizers, constraints\n'), ((1554, 1573), 'numpy.ones', 'np.ones', (['output_dim'], {}), '(output_dim)\n', (1561, 1573), True, 'import numpy as np\n'), ((1648, 1665), 'numpy.diag', 'np.diag', (['dale_vec'], {}), '(dale_vec)\n', (1655, 1665), True, 'import numpy as np\n'), ((1690, 1706), 'keras.backend.variable', 'K.variable', (['dale'], {}), '(dale)\n', (1700, 1706), True, 'from keras import backend as K\n'), ((1913, 1941), 'keras.engine.topology.InputSpec', 'InputSpec', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1922, 1941), False, 'from keras.engine.topology import Layer, InputSpec\n'), ((3858, 3959), 'keras.layers.recurrent.time_distributed_dense', 'time_distributed_dense', (['x', 'self.W', 'self.b', 'self.dropout_W', 'input_dim', 'self.output_dim', 'timesteps'], {}), '(x, self.W, self.b, self.dropout_W, input_dim, self.\n output_dim, timesteps)\n', (3880, 3959), False, 'from keras.layers.recurrent import Recurrent, time_distributed_dense\n'), ((5318, 5352), 'keras.backend.tile', 'K.tile', (['ones', '(1, self.output_dim)'], {}), '(ones, (1, self.output_dim))\n', (5324, 5352), True, 'from keras import backend as K\n'), ((5766, 5794), 'keras.backend.tile', 'K.tile', (['ones', '(1, input_dim)'], {}), '(ones, (1, input_dim))\n', (5772, 5794), True, 'from keras import backend as K\n'), ((7750, 
7767), 'keras.engine.topology.InputSpec', 'InputSpec', ([], {'ndim': '(2)'}), '(ndim=2)\n', (7759, 7767), False, 'from keras.engine.topology import Layer, InputSpec\n'), ((7874, 7897), 'numpy.ones', 'np.ones', (['(input_dim, 1)'], {}), '((input_dim, 1))\n', (7881, 7897), True, 'import numpy as np\n'), ((7978, 7998), 'keras.backend.variable', 'K.variable', (['dale_vec'], {}), '(dale_vec)\n', (7988, 7998), True, 'from keras import backend as K\n'), ((9831, 9847), 'keras.backend.dot', 'K.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (9836, 9847), True, 'from keras import backend as K\n'), ((2042, 2102), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(self.output_dim,)', 'mean': '(0.5)', 'std': '(0.5)'}), '(shape=(self.output_dim,), mean=0.5, std=0.5)\n', (2057, 2102), True, 'from keras import backend as K\n'), ((3505, 3548), 'numpy.zeros', 'np.zeros', (['(input_shape[0], self.output_dim)'], {}), '((input_shape[0], self.output_dim))\n', (3513, 3548), True, 'import numpy as np\n'), ((3591, 3633), 'keras.backend.zeros', 'K.zeros', (['(input_shape[0], self.output_dim)'], {}), '((input_shape[0], self.output_dim))\n', (3598, 3633), True, 'from keras import backend as K\n'), ((4440, 4456), 'keras.backend.dot', 'K.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (4445, 4456), True, 'from keras import backend as K\n'), ((5267, 5297), 'keras.backend.reshape', 'K.reshape', (['x[:, 0, 0]', '(-1, 1)'], {}), '(x[:, 0, 0], (-1, 1))\n', (5276, 5297), True, 'from keras import backend as K\n'), ((5388, 5419), 'keras.backend.dropout', 'K.dropout', (['ones', 'self.dropout_U'], {}), '(ones, self.dropout_U)\n', (5397, 5419), True, 'from keras import backend as K\n'), ((5504, 5525), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['(1.0)'], {}), '(1.0)\n', (5520, 5525), True, 'from keras import backend as K\n'), ((5715, 5745), 'keras.backend.reshape', 'K.reshape', (['x[:, 0, 0]', '(-1, 1)'], {}), '(x[:, 0, 0], (-1, 1))\n', (5724, 5745), True, 'from keras import backend 
as K\n'), ((5830, 5861), 'keras.backend.dropout', 'K.dropout', (['ones', 'self.dropout_W'], {}), '(ones, self.dropout_W)\n', (5839, 5861), True, 'from keras import backend as K\n'), ((5946, 5967), 'keras.backend.cast_to_floatx', 'K.cast_to_floatx', (['(1.0)'], {}), '(1.0)\n', (5962, 5967), True, 'from keras import backend as K\n'), ((4376, 4389), 'keras.backend.abs', 'K.abs', (['self.W'], {}), '(self.W)\n', (4381, 4389), True, 'from keras import backend as K\n'), ((8279, 8289), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (8287, 8289), True, 'from keras import backend as K\n'), ((9769, 9782), 'keras.backend.abs', 'K.abs', (['self.W'], {}), '(self.W)\n', (9774, 9782), True, 'from keras import backend as K\n'), ((10953, 10963), 'keras.backend.shape', 'K.shape', (['x'], {}), '(x)\n', (10960, 10963), True, 'from keras import backend as K\n'), ((4841, 4856), 'keras.backend.shape', 'K.shape', (['self.b'], {}), '(self.b)\n', (4848, 4856), True, 'from keras import backend as K\n'), ((5072, 5087), 'keras.backend.shape', 'K.shape', (['self.b'], {}), '(self.b)\n', (5079, 5087), True, 'from keras import backend as K\n'), ((4770, 4783), 'keras.backend.abs', 'K.abs', (['self.U'], {}), '(self.U)\n', (4775, 4783), True, 'from keras import backend as K\n')] |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.layers.core."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DenseTest(test.TestCase):
def testDenseProperties(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(dense.units, 2)
self.assertEqual(dense.activation, nn_ops.relu)
self.assertEqual(dense.kernel_regularizer, None)
self.assertEqual(dense.bias_regularizer, None)
self.assertEqual(dense.activity_regularizer, None)
self.assertEqual(dense.use_bias, True)
# Test auto-naming
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_1')
dense = core_layers.Dense(2, activation=nn_ops.relu)
dense.apply(random_ops.random_uniform((5, 2)))
self.assertEqual(dense.name, 'dense_2')
def testCall(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias.name, 'my_dense/bias:0')
def testNoBias(self):
dense = core_layers.Dense(2, use_bias=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel])
self.assertListEqual(dense.trainable_variables, [dense.kernel])
self.assertListEqual(dense.non_trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 1)
self.assertEqual(dense.kernel.name, 'my_dense/kernel:0')
self.assertEqual(dense.bias, None)
def testNonTrainable(self):
dense = core_layers.Dense(2, trainable=False, name='my_dense')
inputs = random_ops.random_uniform((5, 2), seed=1)
_ = dense(inputs)
self.assertListEqual(dense.variables, [dense.kernel, dense.bias])
self.assertListEqual(dense.non_trainable_variables,
[dense.kernel, dense.bias])
self.assertListEqual(dense.trainable_variables, [])
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 0)
def testOutputShape(self):
dense = core_layers.Dense(7, activation=nn_ops.relu, name='my_dense')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 7])
inputs = random_ops.random_uniform((5, 2, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.get_shape().as_list(), [5, 2, 7])
inputs = random_ops.random_uniform((1, 2, 4, 3), seed=1)
outputs = dense.apply(inputs)
self.assertEqual(outputs.get_shape().as_list(), [1, 2, 4, 7])
def testCallOnPlaceHolder(self):
inputs = array_ops.placeholder(dtype=dtypes.float32)
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(
dtype=dtypes.float32, shape=[None, None, None])
dense = core_layers.Dense(4, name='my_dense')
with self.assertRaises(ValueError):
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
inputs = array_ops.placeholder(dtype=dtypes.float32, shape=[None, None, 3])
dense = core_layers.Dense(4, name='my_dense')
dense(inputs)
def testActivation(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.op.name, 'dense1/Relu')
dense = core_layers.Dense(2, name='dense2')
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = dense(inputs)
self.assertEqual(outputs.op.name, 'dense2/BiasAdd')
def testActivityRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', activity_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(
2, name='my_dense', kernel_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testKernelRegularizerWithReuse(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
_ = core_layers.dense(
inputs, 2, name='my_dense', kernel_regularizer=regularizer, reuse=True)
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)), 1)
def testBiasRegularizer(self):
regularizer = lambda x: math_ops.reduce_sum(x) * 1e-3
dense = core_layers.Dense(2, name='my_dense', bias_regularizer=regularizer)
inputs = random_ops.random_uniform((5, 3), seed=1)
_ = dense(inputs)
loss_keys = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
self.assertEqual(len(loss_keys), 1)
self.assertListEqual(dense.losses, loss_keys)
def testFunctionalDense(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
outputs = core_layers.dense(
inputs, 2, activation=nn_ops.relu, name='my_dense')
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)), 2)
self.assertEqual(outputs.op.name, 'my_dense/Relu')
self.assertEqual(outputs.get_shape().as_list(), [5, 2])
def testFunctionalDenseTwice(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2)
vars2 = variables.trainable_variables()
self.assertEqual(len(vars1), 2)
self.assertEqual(len(vars2), 4)
def testFunctionalDenseTwiceReuse(self):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
core_layers.dense(inputs, 2, name='my_dense', reuse=True)
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
def testFunctionalDenseTwiceReuseFromScope(self):
with variable_scope.variable_scope('scope'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
vars1 = variables.trainable_variables()
with variable_scope.variable_scope('scope', reuse=True):
core_layers.dense(inputs, 2, name='my_dense')
vars2 = variables.trainable_variables()
self.assertEqual(vars1, vars2)
def testFunctionalDenseInitializerFromScope(self):
with self.test_session() as sess:
with variable_scope.variable_scope(
'scope', initializer=init_ops.ones_initializer()):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
sess.run(variables.global_variables_initializer())
weights = sess.run(variables.trainable_variables())
self.assertEqual(len(weights), 2)
# Check that the matrix weights got initialized to ones (from scope).
self.assertAllClose(weights[0], np.ones((3, 2)))
# Check that the bias still got initialized to zeros.
self.assertAllClose(weights[1], np.zeros((2)))
def testFunctionalDenseWithCustomGetter(self):
called = [0]
def custom_getter(getter, *args, **kwargs):
called[0] += 1
return getter(*args, **kwargs)
with variable_scope.variable_scope('test', custom_getter=custom_getter):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
self.assertEqual(called[0], 2)
def testFunctionalDenseInScope(self):
with variable_scope.variable_scope('test'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name='my_dense')
var = variables.trainable_variables()[0]
self.assertEqual(var.name, 'test/my_dense/kernel:0')
with variable_scope.variable_scope('test1') as scope:
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2, name=scope)
var = variables.trainable_variables()[2]
self.assertEqual(var.name, 'test1/kernel:0')
with variable_scope.variable_scope('test2'):
inputs = random_ops.random_uniform((5, 3), seed=1)
core_layers.dense(inputs, 2)
var = variables.trainable_variables()[4]
self.assertEqual(var.name, 'test2/dense/kernel:0')
def testComputeOutputShape(self):
dense = core_layers.Dense(2, activation=nn_ops.relu, name='dense1')
ts = tensor_shape.TensorShape
# pylint: disable=protected-access
with self.assertRaises(ValueError):
dense._compute_output_shape(ts(None))
with self.assertRaises(ValueError):
dense._compute_output_shape(ts([]))
with self.assertRaises(ValueError):
dense._compute_output_shape(ts([1]))
self.assertEqual(
[None, 2],
dense._compute_output_shape((None, 3)).as_list())
self.assertEqual(
[None, 2],
dense._compute_output_shape(ts([None, 3])).as_list())
self.assertEqual(
[None, 4, 2],
dense._compute_output_shape(ts([None, 4, 3])).as_list())
# pylint: enable=protected-access
class DropoutTest(test.TestCase):
def testDropoutProperties(self):
dp = core_layers.Dropout(0.5, name='dropout')
self.assertEqual(dp.rate, 0.5)
self.assertEqual(dp.noise_shape, None)
dp.apply(np.ones(()))
self.assertEqual(dp.name, 'dropout')
def testBooleanLearningPhase(self):
with self.test_session() as sess:
dp = core_layers.Dropout(0.5)
inputs = array_ops.ones((5, 3))
dropped = dp.apply(inputs, training=True)
sess.run(variables.global_variables_initializer())
np_output = sess.run(dropped)
self.assertAlmostEqual(0., np_output.min())
dropped = dp.apply(inputs, training=False)
np_output = sess.run(dropped)
self.assertAllClose(np.ones((5, 3)), np_output)
def testDynamicLearningPhase(self):
with self.test_session() as sess:
dp = core_layers.Dropout(0.5, seed=1)
inputs = array_ops.ones((5, 5))
training = array_ops.placeholder(dtype='bool')
dropped = dp.apply(inputs, training=training)
sess.run(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
def testCustomNoiseShape(self):
with self.test_session() as sess:
inputs = array_ops.ones((5, 3, 2))
noise_shape = [5, 1, 2]
dp = core_layers.Dropout(0.5, noise_shape=noise_shape, seed=1)
dropped = dp.apply(inputs, training=True)
sess.run(variables.global_variables_initializer())
np_output = sess.run(dropped)
self.assertAlmostEqual(0., np_output.min())
self.assertAllClose(np_output[:, 0, :], np_output[:, 1, :])
def testFunctionalDropout(self):
with self.test_session() as sess:
inputs = array_ops.ones((5, 5))
training = array_ops.placeholder(dtype='bool')
dropped = core_layers.dropout(inputs, 0.5, training=training, seed=1)
self.assertEqual(dropped.op.name, 'dropout/cond/Merge')
sess.run(variables.global_variables_initializer())
np_output = sess.run(dropped, feed_dict={training: True})
self.assertAlmostEqual(0., np_output.min())
np_output = sess.run(dropped, feed_dict={training: False})
self.assertAllClose(np.ones((5, 5)), np_output)
if __name__ == '__main__':
test.main()
| [
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.layers.core.Dropout",
"numpy.ones",
"numpy.zeros",
"t... | [((14065, 14076), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (14074, 14076), False, 'from tensorflow.python.platform import test\n'), ((1486, 1547), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu', 'name': '"""my_dense"""'}), "(2, activation=nn_ops.relu, name='my_dense')\n", (1503, 1547), True, 'from tensorflow.python.layers import core as core_layers\n'), ((1875, 1919), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu'}), '(2, activation=nn_ops.relu)\n', (1892, 1919), True, 'from tensorflow.python.layers import core as core_layers\n'), ((2027, 2071), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu'}), '(2, activation=nn_ops.relu)\n', (2044, 2071), True, 'from tensorflow.python.layers import core as core_layers\n'), ((2202, 2263), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu', 'name': '"""my_dense"""'}), "(2, activation=nn_ops.relu, name='my_dense')\n", (2219, 2263), True, 'from tensorflow.python.layers import core as core_layers\n'), ((2277, 2318), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2)'], {'seed': '(1)'}), '((5, 2), seed=1)\n', (2302, 2318), False, 'from tensorflow.python.ops import random_ops\n'), ((2799, 2852), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'use_bias': '(False)', 'name': '"""my_dense"""'}), "(2, use_bias=False, name='my_dense')\n", (2816, 2852), True, 'from tensorflow.python.layers import core as core_layers\n'), ((2866, 2907), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2)'], {'seed': '(1)'}), '((5, 2), seed=1)\n', (2891, 2907), False, 'from tensorflow.python.ops import random_ops\n'), ((3352, 3406), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'trainable': '(False)', 
'name': '"""my_dense"""'}), "(2, trainable=False, name='my_dense')\n", (3369, 3406), True, 'from tensorflow.python.layers import core as core_layers\n'), ((3420, 3461), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2)'], {'seed': '(1)'}), '((5, 2), seed=1)\n', (3445, 3461), False, 'from tensorflow.python.ops import random_ops\n'), ((3854, 3915), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(7)'], {'activation': 'nn_ops.relu', 'name': '"""my_dense"""'}), "(7, activation=nn_ops.relu, name='my_dense')\n", (3871, 3915), True, 'from tensorflow.python.layers import core as core_layers\n'), ((3929, 3970), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (3954, 3970), False, 'from tensorflow.python.ops import random_ops\n'), ((4079, 4123), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2, 3)'], {'seed': '(1)'}), '((5, 2, 3), seed=1)\n', (4104, 4123), False, 'from tensorflow.python.ops import random_ops\n'), ((4229, 4276), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(1, 2, 4, 3)'], {'seed': '(1)'}), '((1, 2, 4, 3), seed=1)\n', (4254, 4276), False, 'from tensorflow.python.ops import random_ops\n'), ((4426, 4469), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float32'}), '(dtype=dtypes.float32)\n', (4447, 4469), False, 'from tensorflow.python.ops import array_ops\n'), ((4482, 4519), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(4)'], {'name': '"""my_dense"""'}), "(4, name='my_dense')\n", (4499, 4519), True, 'from tensorflow.python.layers import core as core_layers\n'), ((4594, 4657), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float32', 'shape': '[None, None]'}), '(dtype=dtypes.float32, shape=[None, None])\n', (4615, 4657), False, 'from 
tensorflow.python.ops import array_ops\n'), ((4670, 4707), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(4)'], {'name': '"""my_dense"""'}), "(4, name='my_dense')\n", (4687, 4707), True, 'from tensorflow.python.layers import core as core_layers\n'), ((4782, 4851), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float32', 'shape': '[None, None, None]'}), '(dtype=dtypes.float32, shape=[None, None, None])\n', (4803, 4851), False, 'from tensorflow.python.ops import array_ops\n'), ((4873, 4910), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(4)'], {'name': '"""my_dense"""'}), "(4, name='my_dense')\n", (4890, 4910), True, 'from tensorflow.python.layers import core as core_layers\n'), ((4985, 5045), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float32', 'shape': '[None, 3]'}), '(dtype=dtypes.float32, shape=[None, 3])\n', (5006, 5045), False, 'from tensorflow.python.ops import array_ops\n'), ((5058, 5095), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(4)'], {'name': '"""my_dense"""'}), "(4, name='my_dense')\n", (5075, 5095), True, 'from tensorflow.python.layers import core as core_layers\n'), ((5128, 5194), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': 'dtypes.float32', 'shape': '[None, None, 3]'}), '(dtype=dtypes.float32, shape=[None, None, 3])\n', (5149, 5194), False, 'from tensorflow.python.ops import array_ops\n'), ((5207, 5244), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(4)'], {'name': '"""my_dense"""'}), "(4, name='my_dense')\n", (5224, 5244), True, 'from tensorflow.python.layers import core as core_layers\n'), ((5304, 5363), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu', 'name': '"""dense1"""'}), "(2, activation=nn_ops.relu, name='dense1')\n", (5321, 5363), True, 'from tensorflow.python.layers import core 
as core_layers\n'), ((5377, 5418), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (5402, 5418), False, 'from tensorflow.python.ops import random_ops\n'), ((5513, 5548), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'name': '"""dense2"""'}), "(2, name='dense2')\n", (5530, 5548), True, 'from tensorflow.python.layers import core as core_layers\n'), ((5562, 5603), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (5587, 5603), False, 'from tensorflow.python.ops import random_ops\n'), ((5796, 5867), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'name': '"""my_dense"""', 'activity_regularizer': 'regularizer'}), "(2, name='my_dense', activity_regularizer=regularizer)\n", (5813, 5867), True, 'from tensorflow.python.layers import core as core_layers\n'), ((5890, 5931), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (5915, 5931), False, 'from tensorflow.python.ops import random_ops\n'), ((5970, 6025), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.REGULARIZATION_LOSSES'], {}), '(ops.GraphKeys.REGULARIZATION_LOSSES)\n', (5988, 6025), False, 'from tensorflow.python.framework import ops\n'), ((6222, 6291), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'name': '"""my_dense"""', 'kernel_regularizer': 'regularizer'}), "(2, name='my_dense', kernel_regularizer=regularizer)\n", (6239, 6291), True, 'from tensorflow.python.layers import core as core_layers\n'), ((6314, 6355), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (6339, 6355), False, 'from tensorflow.python.ops import random_ops\n'), ((6394, 6449), 
'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.REGULARIZATION_LOSSES'], {}), '(ops.GraphKeys.REGULARIZATION_LOSSES)\n', (6412, 6449), False, 'from tensorflow.python.framework import ops\n'), ((6656, 6697), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (6681, 6697), False, 'from tensorflow.python.ops import random_ops\n'), ((6706, 6783), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""', 'kernel_regularizer': 'regularizer'}), "(inputs, 2, name='my_dense', kernel_regularizer=regularizer)\n", (6723, 6783), True, 'from tensorflow.python.layers import core as core_layers\n'), ((6896, 6990), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""', 'kernel_regularizer': 'regularizer', 'reuse': '(True)'}), "(inputs, 2, name='my_dense', kernel_regularizer=\n regularizer, reuse=True)\n", (6913, 6990), True, 'from tensorflow.python.layers import core as core_layers\n'), ((7194, 7261), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'name': '"""my_dense"""', 'bias_regularizer': 'regularizer'}), "(2, name='my_dense', bias_regularizer=regularizer)\n", (7211, 7261), True, 'from tensorflow.python.layers import core as core_layers\n'), ((7275, 7316), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (7300, 7316), False, 'from tensorflow.python.ops import random_ops\n'), ((7355, 7410), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.REGULARIZATION_LOSSES'], {}), '(ops.GraphKeys.REGULARIZATION_LOSSES)\n', (7373, 7410), False, 'from tensorflow.python.framework import ops\n'), ((7548, 7589), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), 
seed=1)\n', (7573, 7589), False, 'from tensorflow.python.ops import random_ops\n'), ((7604, 7673), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'activation': 'nn_ops.relu', 'name': '"""my_dense"""'}), "(inputs, 2, activation=nn_ops.relu, name='my_dense')\n", (7621, 7673), True, 'from tensorflow.python.layers import core as core_layers\n'), ((7943, 7984), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (7968, 7984), False, 'from tensorflow.python.ops import random_ops\n'), ((7989, 8017), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {}), '(inputs, 2)\n', (8006, 8017), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8030, 8061), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8059, 8061), False, 'from tensorflow.python.ops import variables\n'), ((8066, 8094), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {}), '(inputs, 2)\n', (8083, 8094), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8107, 8138), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8136, 8138), False, 'from tensorflow.python.ops import variables\n'), ((8268, 8309), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (8293, 8309), False, 'from tensorflow.python.ops import random_ops\n'), ((8314, 8359), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""'}), "(inputs, 2, name='my_dense')\n", (8331, 8359), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8372, 8403), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8401, 8403), False, 'from 
tensorflow.python.ops import variables\n'), ((8408, 8465), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""', 'reuse': '(True)'}), "(inputs, 2, name='my_dense', reuse=True)\n", (8425, 8465), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8478, 8509), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8507, 8509), False, 'from tensorflow.python.ops import variables\n'), ((10937, 10996), 'tensorflow.python.layers.core.Dense', 'core_layers.Dense', (['(2)'], {'activation': 'nn_ops.relu', 'name': '"""dense1"""'}), "(2, activation=nn_ops.relu, name='dense1')\n", (10954, 10996), True, 'from tensorflow.python.layers import core as core_layers\n'), ((11749, 11789), 'tensorflow.python.layers.core.Dropout', 'core_layers.Dropout', (['(0.5)'], {'name': '"""dropout"""'}), "(0.5, name='dropout')\n", (11768, 11789), True, 'from tensorflow.python.layers import core as core_layers\n'), ((1936, 1969), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2)'], {}), '((5, 2))\n', (1961, 1969), False, 'from tensorflow.python.ops import random_ops\n'), ((2088, 2121), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 2)'], {}), '((5, 2))\n', (2113, 2121), False, 'from tensorflow.python.ops import random_ops\n'), ((8607, 8645), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""scope"""'], {}), "('scope')\n", (8636, 8645), False, 'from tensorflow.python.ops import variable_scope\n'), ((8662, 8703), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (8687, 8703), False, 'from tensorflow.python.ops import random_ops\n'), ((8710, 8755), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""'}), "(inputs, 2, 
name='my_dense')\n", (8727, 8755), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8770, 8801), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8799, 8801), False, 'from tensorflow.python.ops import variables\n'), ((8811, 8861), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""scope"""'], {'reuse': '(True)'}), "('scope', reuse=True)\n", (8840, 8861), False, 'from tensorflow.python.ops import variable_scope\n'), ((8869, 8914), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""'}), "(inputs, 2, name='my_dense')\n", (8886, 8914), True, 'from tensorflow.python.layers import core as core_layers\n'), ((8929, 8960), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (8958, 8960), False, 'from tensorflow.python.ops import variables\n'), ((9884, 9950), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""test"""'], {'custom_getter': 'custom_getter'}), "('test', custom_getter=custom_getter)\n", (9913, 9950), False, 'from tensorflow.python.ops import variable_scope\n'), ((9967, 10008), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (9992, 10008), False, 'from tensorflow.python.ops import random_ops\n'), ((10015, 10043), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {}), '(inputs, 2)\n', (10032, 10043), True, 'from tensorflow.python.layers import core as core_layers\n'), ((10129, 10166), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""test"""'], {}), "('test')\n", (10158, 10166), False, 'from tensorflow.python.ops import variable_scope\n'), ((10183, 10224), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 
3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (10208, 10224), False, 'from tensorflow.python.ops import random_ops\n'), ((10231, 10276), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': '"""my_dense"""'}), "(inputs, 2, name='my_dense')\n", (10248, 10276), True, 'from tensorflow.python.layers import core as core_layers\n'), ((10392, 10430), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""test1"""'], {}), "('test1')\n", (10421, 10430), False, 'from tensorflow.python.ops import variable_scope\n'), ((10456, 10497), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (10481, 10497), False, 'from tensorflow.python.ops import random_ops\n'), ((10504, 10544), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {'name': 'scope'}), '(inputs, 2, name=scope)\n', (10521, 10544), True, 'from tensorflow.python.layers import core as core_layers\n'), ((10652, 10690), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""test2"""'], {}), "('test2')\n", (10681, 10690), False, 'from tensorflow.python.ops import variable_scope\n'), ((10707, 10748), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (10732, 10748), False, 'from tensorflow.python.ops import random_ops\n'), ((10755, 10783), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {}), '(inputs, 2)\n', (10772, 10783), True, 'from tensorflow.python.layers import core as core_layers\n'), ((11881, 11892), 'numpy.ones', 'np.ones', (['()'], {}), '(())\n', (11888, 11892), True, 'import numpy as np\n'), ((12023, 12047), 'tensorflow.python.layers.core.Dropout', 'core_layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (12042, 12047), True, 'from tensorflow.python.layers import core as core_layers\n'), 
((12063, 12085), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(5, 3)'], {}), '((5, 3))\n', (12077, 12085), False, 'from tensorflow.python.ops import array_ops\n'), ((12504, 12536), 'tensorflow.python.layers.core.Dropout', 'core_layers.Dropout', (['(0.5)'], {'seed': '(1)'}), '(0.5, seed=1)\n', (12523, 12536), True, 'from tensorflow.python.layers import core as core_layers\n'), ((12552, 12574), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(5, 5)'], {}), '((5, 5))\n', (12566, 12574), False, 'from tensorflow.python.ops import array_ops\n'), ((12592, 12627), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': '"""bool"""'}), "(dtype='bool')\n", (12613, 12627), False, 'from tensorflow.python.ops import array_ops\n'), ((13058, 13083), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(5, 3, 2)'], {}), '((5, 3, 2))\n', (13072, 13083), False, 'from tensorflow.python.ops import array_ops\n'), ((13125, 13182), 'tensorflow.python.layers.core.Dropout', 'core_layers.Dropout', (['(0.5)'], {'noise_shape': 'noise_shape', 'seed': '(1)'}), '(0.5, noise_shape=noise_shape, seed=1)\n', (13144, 13182), True, 'from tensorflow.python.layers import core as core_layers\n'), ((13529, 13551), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['(5, 5)'], {}), '((5, 5))\n', (13543, 13551), False, 'from tensorflow.python.ops import array_ops\n'), ((13569, 13604), 'tensorflow.python.ops.array_ops.placeholder', 'array_ops.placeholder', ([], {'dtype': '"""bool"""'}), "(dtype='bool')\n", (13590, 13604), False, 'from tensorflow.python.ops import array_ops\n'), ((13621, 13680), 'tensorflow.python.layers.core.dropout', 'core_layers.dropout', (['inputs', '(0.5)'], {'training': 'training', 'seed': '(1)'}), '(inputs, 0.5, training=training, seed=1)\n', (13640, 13680), True, 'from tensorflow.python.layers import core as core_layers\n'), ((2585, 2638), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', 
(['ops.GraphKeys.TRAINABLE_VARIABLES'], {}), '(ops.GraphKeys.TRAINABLE_VARIABLES)\n', (2603, 2638), False, 'from tensorflow.python.framework import ops\n'), ((3150, 3203), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.TRAINABLE_VARIABLES'], {}), '(ops.GraphKeys.TRAINABLE_VARIABLES)\n', (3168, 3203), False, 'from tensorflow.python.framework import ops\n'), ((3753, 3806), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.TRAINABLE_VARIABLES'], {}), '(ops.GraphKeys.TRAINABLE_VARIABLES)\n', (3771, 3806), False, 'from tensorflow.python.framework import ops\n'), ((5754, 5776), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), '(x)\n', (5773, 5776), False, 'from tensorflow.python.ops import math_ops\n'), ((6180, 6202), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), '(x)\n', (6199, 6202), False, 'from tensorflow.python.ops import math_ops\n'), ((6613, 6635), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), '(x)\n', (6632, 6635), False, 'from tensorflow.python.ops import math_ops\n'), ((6827, 6882), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.REGULARIZATION_LOSSES'], {}), '(ops.GraphKeys.REGULARIZATION_LOSSES)\n', (6845, 6882), False, 'from tensorflow.python.framework import ops\n'), ((7029, 7084), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.REGULARIZATION_LOSSES'], {}), '(ops.GraphKeys.REGULARIZATION_LOSSES)\n', (7047, 7084), False, 'from tensorflow.python.framework import ops\n'), ((7152, 7174), 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), '(x)\n', (7171, 7174), False, 'from tensorflow.python.ops import math_ops\n'), ((7717, 7770), 'tensorflow.python.framework.ops.get_collection', 'ops.get_collection', (['ops.GraphKeys.TRAINABLE_VARIABLES'], {}), 
'(ops.GraphKeys.TRAINABLE_VARIABLES)\n', (7735, 7770), False, 'from tensorflow.python.framework import ops\n'), ((9208, 9249), 'tensorflow.python.ops.random_ops.random_uniform', 'random_ops.random_uniform', (['(5, 3)'], {'seed': '(1)'}), '((5, 3), seed=1)\n', (9233, 9249), False, 'from tensorflow.python.ops import random_ops\n'), ((9258, 9286), 'tensorflow.python.layers.core.dense', 'core_layers.dense', (['inputs', '(2)'], {}), '(inputs, 2)\n', (9275, 9286), True, 'from tensorflow.python.layers import core as core_layers\n'), ((10289, 10320), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (10318, 10320), False, 'from tensorflow.python.ops import variables\n'), ((10557, 10588), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (10586, 10588), False, 'from tensorflow.python.ops import variables\n'), ((10796, 10827), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (10825, 10827), False, 'from tensorflow.python.ops import variables\n'), ((12149, 12189), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (12187, 12189), False, 'from tensorflow.python.ops import variables\n'), ((12388, 12403), 'numpy.ones', 'np.ones', (['(5, 3)'], {}), '((5, 3))\n', (12395, 12403), True, 'import numpy as np\n'), ((12695, 12735), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (12733, 12735), False, 'from tensorflow.python.ops import variables\n'), ((12942, 12957), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (12949, 12957), True, 'import numpy as np\n'), ((13246, 13286), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (13284, 13286), False, 'from tensorflow.python.ops import 
variables\n'), ((13759, 13799), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (13797, 13799), False, 'from tensorflow.python.ops import variables\n'), ((14006, 14021), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (14013, 14021), True, 'import numpy as np\n'), ((9304, 9344), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (9342, 9344), False, 'from tensorflow.python.ops import variables\n'), ((9373, 9404), 'tensorflow.python.ops.variables.trainable_variables', 'variables.trainable_variables', ([], {}), '()\n', (9402, 9404), False, 'from tensorflow.python.ops import variables\n'), ((9566, 9581), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (9573, 9581), True, 'import numpy as np\n'), ((9685, 9696), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (9693, 9696), True, 'import numpy as np\n'), ((9161, 9188), 'tensorflow.python.ops.init_ops.ones_initializer', 'init_ops.ones_initializer', ([], {}), '()\n', (9186, 9188), False, 'from tensorflow.python.ops import init_ops\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class AngularLoss(nn.Module):
    """Angular loss from https://arxiv.org/abs/1708.01682.

    Penalizes triplets in which the angle at the negative point of the
    (anchor, positive, negative) triangle exceeds ``alpha``.

    Args:
        alpha: The angle (as described in the paper), specified in degrees.
    """

    def __init__(self, alpha):
        super(AngularLoss, self).__init__()
        # Store the angle in radians so it can be fed to torch.tan.
        self.alpha = torch.tensor(np.radians(alpha))

    def forward(self, anchor, positive, negative, size_average=True):
        # Squared distance between the anchor and positive embeddings.
        ap_sq_dist = (anchor - positive).pow(2).sum(1)
        # Midpoint of the anchor/positive pair and its squared distance
        # to the negative embedding.
        center = (anchor + positive) / 2
        nc_sq_dist = (negative - center).pow(2).sum(1)
        tan_sq_alpha = torch.tan(self.alpha) ** 2
        # Hinge on the angular constraint; zero loss once satisfied.
        per_sample = F.relu(ap_sq_dist - 4 * tan_sq_alpha * nc_sq_dist)
        return per_sample.mean() if size_average else per_sample.sum()
"torch.tan",
"numpy.radians",
"torch.nn.functional.relu"
] | [((764, 843), 'torch.nn.functional.relu', 'F.relu', (['(distance_anchor_positive - 4 * sq_tan_alpha * distance_negative_cluster)'], {}), '(distance_anchor_positive - 4 * sq_tan_alpha * distance_negative_cluster)\n', (770, 843), True, 'import torch.nn.functional as F\n'), ((384, 401), 'numpy.radians', 'np.radians', (['alpha'], {}), '(alpha)\n', (394, 401), True, 'import numpy as np\n'), ((720, 741), 'torch.tan', 'torch.tan', (['self.alpha'], {}), '(self.alpha)\n', (729, 741), False, 'import torch\n')] |
"""
Functions for quantum parameters, including electron degenerate
gases and warm dense matter.
"""
__all__ = [
"chemical_potential",
"deBroglie_wavelength",
"Ef_",
"Fermi_energy",
"lambdaDB_",
"lambdaDB_th_",
"Thomas_Fermi_length",
"thermal_deBroglie_wavelength",
"Wigner_Seitz_radius",
]
import astropy.units as u
import numpy as np
from astropy.constants.si import c, e, eps0, h, hbar, k_B, m_e
from plasmapy import particles
from plasmapy.formulary import mathematics
from plasmapy.formulary.relativity import Lorentz_factor
from plasmapy.utils import RelativityError
from plasmapy.utils.decorators import validate_quantities
# TODO: Use @check_relativistic and @particle_input
@validate_quantities(
    V={"can_be_negative": True}, validations_on_return={"can_be_negative": False}
)
def deBroglie_wavelength(V: u.m / u.s, particle) -> u.m:
    r"""
    Return the de Broglie wavelength.

    The de Broglie wavelength (:math:`λ_{dB}`) of a particle is defined by

    .. math::

        λ_{dB} = \frac{h}{p} = \frac{h}{γ m V}

    where :math:`h` is the Planck constant, :math:`p` is the
    relativistic momentum of the particle, :math:`γ` is the
    Lorentz factor, :math:`m` is the mass of the particle, and
    :math:`V` is the velocity of the particle.

    **Aliases:** `lambdaDB_`

    Parameters
    ----------
    V : `~astropy.units.Quantity`
        Particle velocity in units convertible to meters per second.

    particle : `str`, `~plasmapy.particles.Particle`, or `~astropy.units.Quantity`
        An instance of `~plasmapy.particles.particle_class.Particle`, or
        an equivalent representation (e.g., ``'e'``, ``'p'``, ``'D+'``, or
        ``'He-4 1+'``), for the particle of interest, or the particle
        mass in units convertible to kg.  If a
        `~plasmapy.particles.particle_class.Particle` instance is given, then the
        particle mass is retrieved from the object.

    Returns
    -------
    lambda_dB : `~astropy.units.Quantity`
        The de Broglie wavelength in units of meters.

    Raises
    ------
    `TypeError`
        If the velocity is not a `~astropy.units.Quantity` and cannot be
        converted into a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If the velocity is not in appropriate units.

    `~plasmapy.utils.exceptions.RelativityError`
        If the magnitude of ``V`` is larger than the speed of light.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Examples
    --------
    >>> from astropy import units as u
    >>> velocity = 1.4e7 * u.m / u.s
    >>> deBroglie_wavelength(velocity, 'e')
    <Quantity 5.18997095e-11 m>
    >>> deBroglie_wavelength(V = 0 * u.m / u.s, particle = 'D+')
    <Quantity inf m>
    """
    V = np.abs(V)
    if np.any(V >= c):
        raise RelativityError(
            "Velocity input in deBroglie_wavelength cannot "
            "be greater than or equal to the speed of "
            "light."
        )
    if not isinstance(particle, u.Quantity):
        try:
            # TODO: Replace with more general routine!
            m = particles.particle_mass(particle)
        except Exception as ex:
            # Chain the underlying error so the failed lookup stays visible.
            raise ValueError("Unable to find particle mass.") from ex
    else:
        try:
            m = particle.to(u.kg)
        except Exception as ex:
            raise u.UnitConversionError(
                "The second argument for deBroglie_wavelength must be either a "
                "representation of a particle or a"
                " Quantity with units of mass."
            ) from ex
    if V.size > 1:
        # Start from infinities so zero velocities map to an infinite
        # wavelength, then fill in the finite entries element-wise.
        lambda_dBr = np.ones(V.shape) * np.inf * u.m
        indices = V.value != 0
        lambda_dBr[indices] = h / (m * V[indices] * Lorentz_factor(V[indices]))
    else:
        if V == 0 * u.m / u.s:
            lambda_dBr = np.inf * u.m
        else:
            lambda_dBr = h / (Lorentz_factor(V) * m * V)
    return lambda_dBr


lambdaDB_ = deBroglie_wavelength
""" Alias to :func:`deBroglie_wavelength`. """
@validate_quantities(
    T_e={"can_be_negative": False, "equivalencies": u.temperature_energy()},
    validations_on_return={"can_be_negative": False},
)
def thermal_deBroglie_wavelength(T_e: u.K) -> u.m:
    r"""
    Calculate the thermal de Broglie wavelength for electrons.

    **Aliases:** `lambdaDB_th_`

    Parameters
    ----------
    T_e : `~astropy.units.Quantity`
        Electron temperature.

    Returns
    -------
    lambda_dbTh : `~astropy.units.Quantity`
        The thermal de Broglie wavelength for electrons in meters.

    Raises
    ------
    `TypeError`
        If the argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If the argument is in incorrect units.

    `ValueError`
        If the argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    The thermal de Broglie wavelength is approximately the average de
    Broglie wavelength for electrons in an ideal gas:

    .. math::

        λ_{dbTh} = \frac{h}{\sqrt{2 π m_e k_B T_e}}

    Example
    -------
    >>> from astropy import units as u
    >>> thermal_deBroglie_wavelength(1 * u.eV)
    <Quantity 6.9193675e-10 m>
    """
    # The characteristic thermal momentum sets the wavelength scale.
    thermal_momentum = np.sqrt(2 * np.pi * m_e * k_B * T_e)
    return h / thermal_momentum


lambdaDB_th_ = thermal_deBroglie_wavelength
""" Alias to :func:`thermal_deBroglie_wavelength`. """
@validate_quantities(
    n_e={"can_be_negative": False}, validations_on_return={"can_be_negative": False}
)
def Fermi_energy(n_e: u.m ** -3) -> u.J:
    r"""
    Calculate the kinetic energy in a degenerate electron gas.

    **Aliases:** `Ef_`

    Parameters
    ----------
    n_e : `~astropy.units.Quantity`
        Electron number density.

    Returns
    -------
    energy_F : `~astropy.units.Quantity`
        The Fermi energy in joules.

    Raises
    ------
    `TypeError`
        If the argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If the argument is in incorrect units.

    `ValueError`
        If the argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    The Fermi energy of a degenerate electron gas is

    .. math::

        E_F = \frac{π^2 ℏ^2}{2 m_e}
        \left( \frac{3 n_e}{π} \right)^{2/3}

    It is often used in place of the thermal energy when analyzing
    cold, dense plasmas (e.g. warm dense matter, condensed matter).

    See Also
    --------
    Thomas_Fermi_length

    Example
    -------
    >>> from astropy import units as u
    >>> Fermi_energy(1e23 * u.cm**-3)
    <Quantity 1.2586761e-18 J>
    """
    prefactor = (np.pi * hbar) ** 2 / (2 * m_e)
    return prefactor * (3 * n_e / np.pi) ** (2 / 3)


Ef_ = Fermi_energy
""" Alias to :func:`Fermi_energy`. """
@validate_quantities(
    n_e={"can_be_negative": False}, validations_on_return={"can_be_negative": False}
)
def Thomas_Fermi_length(n_e: u.m ** -3) -> u.m:
    r"""
    Calculate the exponential scale length for charge screening
    for cold and dense plasmas.

    Parameters
    ----------
    n_e : `~astropy.units.Quantity`
        Electron number density.

    Returns
    -------
    lambda_TF : `~astropy.units.Quantity`
        The Thomas-Fermi screening length in meters.

    Raises
    ------
    `TypeError`
        If the argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If the argument is in incorrect units.

    `ValueError`
        If the argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    For an electron degenerate gas the Thomas-Fermi screening length is

    .. math::

        λ_{TF} = \sqrt{\frac{2 ε_0 E_F}{3 n_e e^2}}

    It plays the role of the Debye length in cold, dense plasmas
    (e.g. warm dense matter, condensed matter): the electrical potential
    drops by :math:`1/e` over one screening length, and plasmas are
    generally quasineutral on scales much larger than it.

    See Also
    --------
    Fermi_energy
    plasmapy.formulary.Debye_length

    Example
    -------
    >>> from astropy import units as u
    >>> Thomas_Fermi_length(1e23 * u.cm**-3)
    <Quantity 5.37991409e-11 m>
    """
    E_F = Fermi_energy(n_e)
    screening_length = np.sqrt(2 * eps0 * E_F / (3 * n_e * e ** 2))
    return screening_length
@validate_quantities(
    n={"can_be_negative": False}, validations_on_return={"can_be_negative": False}
)
def Wigner_Seitz_radius(n: u.m ** -3) -> u.m:
    r"""
    Calculate the Wigner-Seitz radius, which approximates the inter-particle
    spacing.

    This is the radius of a sphere whose volume equals the mean volume
    per particle at density ``n``; it is often used when computing the
    coupling parameter. With an ion density it is the ion-sphere radius:
    higher density leaves less volume per ion, hence a smaller radius.

    Parameters
    ----------
    n : `~astropy.units.Quantity`
        Particle number density.

    Returns
    -------
    radius : `~astropy.units.Quantity`
        The Wigner-Seitz radius in meters.

    Raises
    ------
    `TypeError`
        If the argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If the argument is in incorrect units.

    `ValueError`
        If the argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    .. math::

        r = \left(\frac{3}{4 π n}\right)^{1/3}

    See Also
    --------
    Fermi_energy

    Example
    -------
    >>> from astropy import units as u
    >>> Wigner_Seitz_radius(1e29 * u.m**-3)
    <Quantity 1.33650462e-10 m>
    """
    sphere_radius = (3 / (4 * np.pi * n)) ** (1 / 3)
    return sphere_radius
# TODO: remove NotImplementedError and 'doctest: +SKIP' when the following issues are addressed...
# https://github.com/PlasmaPy/PlasmaPy/issues/726
# https://github.com/astropy/astropy/issues/9721
@validate_quantities(
    n_e={"can_be_negative": False},
    T={"can_be_negative": False, "equivalencies": u.temperature_energy()},
)
def chemical_potential(n_e: u.m ** -3, T: u.K) -> u.dimensionless_unscaled:
    r"""
    Calculate the ideal chemical potential.

    Parameters
    ----------
    n_e : `~astropy.units.Quantity`
        Electron number density.

    T : ~astropy.units.Quantity
        The temperature.

    Returns
    -------
    beta_mu : `~astropy.units.Quantity`
        The dimensionless ideal chemical potential. That is the ratio of
        the ideal chemical potential to the thermal energy.

    Raises
    ------
    `TypeError`
        If argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If argument is in incorrect units.

    `ValueError`
        If argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    The ideal chemical potential is given by [1]_:

    .. math::
        χ_a = I_{1/2}(β μ_a^{ideal})

    where :math:`χ` is the degeneracy parameter, :math:`I_{1/2}` is the
    Fermi integral with order 1/2, :math:`β` is the inverse thermal
    energy :math:`β = 1/(k_B T)`, and :math:`μ_a^{ideal}`
    is the ideal chemical potential.

    The definition for the ideal chemical potential is implicit, so it must
    be obtained numerically by solving for the Fermi integral for values
    of chemical potential approaching the degeneracy parameter. Since values
    returned from the Fermi_integral are complex, a nonlinear
    Levenberg-Marquardt least squares method is used to iteratively approach
    a value of :math:`μ` which minimizes
    :math:`I_{1/2}(β μ_a^{ideal}) - χ_a`

    This function returns :math:`β μ^{ideal}` the dimensionless
    ideal chemical potential.

    Warning: at present this function is limited to relatively small
    arguments due to limitations in the `~mpmath` package's implementation
    of `~mpmath.polylog`, which PlasmaPy uses in calculating the Fermi
    integral.

    References
    ----------
    .. [1] Bonitz, Michael. Quantum kinetic theory. Stuttgart: Teubner, 1998.

    Example
    -------
    >>> from astropy import units as u
    >>> chemical_potential(n_e=1e21*u.cm**-3,T=11000*u.K) # doctest: +SKIP
    <Quantity 2.00039985e-12>
    """
    raise NotImplementedError(
        "This function has been temporarily disabled due to a bug.\n"
        "Please refer to https://github.com/PlasmaPy/PlasmaPy/issues/726 \n"
        "and https://github.com/astropy/astropy/issues/9721 "
        "for progress in fixing it."
    )

    # NOTE: Everything below is unreachable while the NotImplementedError
    # above is in place; it is kept as the reference implementation to
    # restore once the upstream issues are resolved.

    # deBroglie wavelength
    lambdaDB = thermal_deBroglie_wavelength(T)
    # degeneracy parameter
    degen = (n_e * lambdaDB ** 3).to(u.dimensionless_unscaled)

    def residual(params, data, eps_data):
        """Residual function for fitting parameters to Fermi_integral."""
        alpha = params["alpha"].value
        # note that alpha = mu / (k_B * T)
        model = mathematics.Fermi_integral(alpha, 0.5)
        complexResidue = (data - model) / eps_data
        # NOTE(review): np.float is removed in NumPy >= 1.24; update this
        # line (e.g. to float) when re-enabling the function.
        return complexResidue.view(np.float)

    # setting parameters for fitting along with bounds
    alphaGuess = 1 * u.dimensionless_unscaled
    try:
        from lmfit import minimize, Parameters
    except (ImportError, ModuleNotFoundError) as e:
        from plasmapy.optional_deps import lmfit_import_error

        raise lmfit_import_error from e
    params = Parameters()
    params.add("alpha", value=alphaGuess, min=0.0)
    # calling minimize function from lmfit to fit by minimizing the residual
    data = np.array([degen])  # result of Fermi_integral - degen should be zero
    eps_data = np.array([1e-15])  # numerical error
    minFit = minimize(residual, params, args=(data, eps_data))
    beta_mu = minFit.params["alpha"].value * u.dimensionless_unscaled
    return beta_mu
# TODO: decorate with validate_quantities
# TODO: remove NotImplementedError and 'doctest: +SKIP' when the following issues are addressed...
# https://github.com/PlasmaPy/PlasmaPy/issues/726
# https://github.com/astropy/astropy/issues/9721
def _chemical_potential_interp(n_e, T):
    r"""
    Fitting formula for interpolating chemical potential between classical
    and quantum regimes.

    See [1]_, [2]_ for more information.

    Parameters
    ----------
    n_e : `~astropy.units.Quantity`
        Electron number density.

    T : `~astropy.units.Quantity`
        Temperature in units of temperature or energy.

    Returns
    -------
    beta_mu : `~astropy.units.Quantity`
        The dimensionless chemical potential, which is a ratio of
        chemical potential energy to thermal kinetic energy.

    Raises
    ------
    `TypeError`
        If argument is not a `~astropy.units.Quantity`.

    `~astropy.units.UnitConversionError`
        If argument is in incorrect units.

    `ValueError`
        If argument contains invalid values.

    Warns
    -----
    : `~astropy.units.UnitsWarning`
        If units are not provided, SI units are assumed.

    Notes
    -----
    The ideal chemical potential is given by [1]_:

    .. math::
        \frac{μ}{k_B T_e} = - \frac{3}{2} \ln Θ + \ln
        \frac{4}{3 \sqrt{π}} +
        \frac{A Θ^{-b - 1} + B Θ^{-(b + 1) / 2}}{1 + A Θ^{-b}}

    where

    .. math::
        Θ = \frac{k_B T_e}{E_F}

    is the degeneracy parameter, comparing the thermal energy to the
    Fermi energy, and the coefficients for the fitting formula are
    :math:`A = 0.25945`\ , :math:`B = 0.072`\ , and :math:`b = 0.858`\ .

    References
    ----------
    .. [1] Ichimaru, Statistical Plasma Physics Addison-Wesley,
        Reading, MA, 1991.

    .. [2] Gregori, G., et al. "Theoretical model of x-ray scattering as a
        dense matter probe." Physical Review E 67.2 (2003): 026412.

    Example
    -------
    >>> from astropy import units as u
    >>> _chemical_potential_interp(n_e=1e23*u.cm**-3, T=11000*u.K) # doctest: +SKIP
    <Quantity 8.17649>
    """
    raise NotImplementedError(
        "This function has been temporarily disabled due to a bug.\n"
        "Please refer to https://github.com/PlasmaPy/PlasmaPy/issues/726 \n"
        "and https://github.com/astropy/astropy/issues/9721 "
        "for progress in fixing it."
    )

    # NOTE: Everything below is unreachable while the NotImplementedError
    # above is in place; kept as the reference implementation.

    # Fit coefficients from the references above.
    A = 0.25945
    B = 0.072
    b = 0.858
    # Degeneracy parameter: thermal energy relative to the Fermi energy.
    theta = k_B * T / Fermi_energy(n_e)
    term1 = -3 / 2 * np.log(theta)
    term2 = np.log(4 / (3 * np.sqrt(np.pi)))
    term3num = A * theta ** (-b - 1) + B * theta ** (-(b + 1) / 2)
    term3den = 1 + A * theta ** (-b)
    term3 = term3num / term3den
    beta_mu = term1 + term2 + term3
    return beta_mu.to(u.dimensionless_unscaled)
| [
"astropy.units.temperature_energy",
"plasmapy.utils.decorators.validate_quantities",
"numpy.abs",
"numpy.log",
"plasmapy.utils.RelativityError",
"plasmapy.particles.particle_mass",
"astropy.units.UnitConversionError",
"numpy.ones",
"plasmapy.formulary.mathematics.Fermi_integral",
"plasmapy.formula... | [((726, 829), 'plasmapy.utils.decorators.validate_quantities', 'validate_quantities', ([], {'V': "{'can_be_negative': True}", 'validations_on_return': "{'can_be_negative': False}"}), "(V={'can_be_negative': True}, validations_on_return={\n 'can_be_negative': False})\n", (745, 829), False, 'from plasmapy.utils.decorators import validate_quantities\n'), ((5539, 5645), 'plasmapy.utils.decorators.validate_quantities', 'validate_quantities', ([], {'n_e': "{'can_be_negative': False}", 'validations_on_return': "{'can_be_negative': False}"}), "(n_e={'can_be_negative': False}, validations_on_return={\n 'can_be_negative': False})\n", (5558, 5645), False, 'from plasmapy.utils.decorators import validate_quantities\n'), ((7064, 7170), 'plasmapy.utils.decorators.validate_quantities', 'validate_quantities', ([], {'n_e': "{'can_be_negative': False}", 'validations_on_return': "{'can_be_negative': False}"}), "(n_e={'can_be_negative': False}, validations_on_return={\n 'can_be_negative': False})\n", (7083, 7170), False, 'from plasmapy.utils.decorators import validate_quantities\n'), ((8871, 8975), 'plasmapy.utils.decorators.validate_quantities', 'validate_quantities', ([], {'n': "{'can_be_negative': False}", 'validations_on_return': "{'can_be_negative': False}"}), "(n={'can_be_negative': False}, validations_on_return={\n 'can_be_negative': False})\n", (8890, 8975), False, 'from plasmapy.utils.decorators import validate_quantities\n'), ((2847, 2856), 'numpy.abs', 'np.abs', (['V'], {}), '(V)\n', (2853, 2856), True, 'import numpy as np\n'), ((2865, 2879), 'numpy.any', 'np.any', (['(V >= c)'], {}), '(V >= c)\n', (2871, 2879), True, 'import numpy as np\n'), ((8797, 8846), 'numpy.sqrt', 'np.sqrt', (['(2 * eps0 * energy_F / (3 * n_e * e ** 2))'], {}), '(2 * eps0 * energy_F / (3 * n_e * e ** 2))\n', (8804, 8846), True, 'import numpy as np\n'), ((14325, 14337), 'lmfit.Parameters', 'Parameters', ([], {}), '()\n', (14335, 14337), False, 'from lmfit import minimize, 
Parameters\n'), ((14477, 14494), 'numpy.array', 'np.array', (['[degen]'], {}), '([degen])\n', (14485, 14494), True, 'import numpy as np\n'), ((14561, 14578), 'numpy.array', 'np.array', (['[1e-15]'], {}), '([1e-15])\n', (14569, 14578), True, 'import numpy as np\n'), ((14611, 14660), 'lmfit.minimize', 'minimize', (['residual', 'params'], {'args': '(data, eps_data)'}), '(residual, params, args=(data, eps_data))\n', (14619, 14660), False, 'from lmfit import minimize, Parameters\n'), ((2895, 3017), 'plasmapy.utils.RelativityError', 'RelativityError', (['"""Velocity input in deBroglie_wavelength cannot be greater than or equal to the speed of light."""'], {}), "(\n 'Velocity input in deBroglie_wavelength cannot be greater than or equal to the speed of light.'\n )\n", (2910, 3017), False, 'from plasmapy.utils import RelativityError\n'), ((5375, 5411), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * m_e * k_B * T_e)'], {}), '(2 * np.pi * m_e * k_B * T_e)\n', (5382, 5411), True, 'import numpy as np\n'), ((13863, 13901), 'plasmapy.formulary.mathematics.Fermi_integral', 'mathematics.Fermi_integral', (['alpha', '(0.5)'], {}), '(alpha, 0.5)\n', (13889, 13901), False, 'from plasmapy.formulary import mathematics\n'), ((17267, 17280), 'numpy.log', 'np.log', (['theta'], {}), '(theta)\n', (17273, 17280), True, 'import numpy as np\n'), ((3190, 3223), 'plasmapy.particles.particle_mass', 'particles.particle_mass', (['particle'], {}), '(particle)\n', (3213, 3223), False, 'from plasmapy import particles\n'), ((4149, 4171), 'astropy.units.temperature_energy', 'u.temperature_energy', ([], {}), '()\n', (4169, 4171), True, 'import astropy.units as u\n'), ((10911, 10933), 'astropy.units.temperature_energy', 'u.temperature_energy', ([], {}), '()\n', (10931, 10933), True, 'import astropy.units as u\n'), ((3413, 3572), 'astropy.units.UnitConversionError', 'u.UnitConversionError', (['"""The second argument for deBroglie_wavelength must be either a representation of a particle or a Quantity with units of 
mass."""'], {}), "(\n 'The second argument for deBroglie_wavelength must be either a representation of a particle or a Quantity with units of mass.'\n )\n", (3434, 3572), True, 'import astropy.units as u\n'), ((3673, 3689), 'numpy.ones', 'np.ones', (['V.shape'], {}), '(V.shape)\n', (3680, 3689), True, 'import numpy as np\n'), ((3788, 3814), 'plasmapy.formulary.relativity.Lorentz_factor', 'Lorentz_factor', (['V[indices]'], {}), '(V[indices])\n', (3802, 3814), False, 'from plasmapy.formulary.relativity import Lorentz_factor\n'), ((17309, 17323), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (17316, 17323), True, 'import numpy as np\n'), ((3941, 3958), 'plasmapy.formulary.relativity.Lorentz_factor', 'Lorentz_factor', (['V'], {}), '(V)\n', (3955, 3958), False, 'from plasmapy.formulary.relativity import Lorentz_factor\n')] |
import astropy
from astropy import units as u
from astropy.cosmology import Planck15
import numpy as np
from scipy.special import sici
import warnings
from .core import Profile
from .helpers.decorators import array, inMpc
from .helpers.lensing import BaseLensing
from .helpers.spherical import mass_from_radius, radius_from_mass
class BaseNFW(Profile):
    """Base class for NFW-like density profiles"""

    def __init__(self, mass, c, z, overdensity: float=500,
                 background: str='c', cosmo: astropy.cosmology.FLRW=Planck15,
                 frame: str='comoving', **numeric_kwargs):
        # Accept mass as an astropy Quantity (converted to Msun) or as
        # plain number(s); always store as a numpy array
        if isinstance(mass, u.Quantity):
            mass = mass.to(u.Msun).value
        if not np.iterable(mass):
            mass = np.array([mass])
        self.mass = mass
        self.c = c
        # additional NFW convenience attributes
        # (computed lazily by the properties below)
        self._delta_c = None
        self._rs = None
        self._radius = None
        self._sigma_s = None
        super().__init__(
            z, overdensity=overdensity, cosmo=cosmo, background=background,
            frame=frame, **numeric_kwargs)

    ### attributes ###

    @property
    def delta_c(self):
        """Characteristic overdensity, from ``c`` and ``overdensity`` (lazy)."""
        if self._delta_c is None:
            self._delta_c = self._f_delta_c(self.c, self.overdensity)
        return self._delta_c

    @property
    def rs(self):
        """Scale radius, ``radius / c`` (lazy)."""
        if self._rs is None:
            self._rs = self.radius / self.c
        return self._rs

    @property
    def radius(self):
        """Halo radius corresponding to ``mass`` at ``overdensity`` (lazy)."""
        if self._radius is None:
            self._radius = radius_from_mass(
                self.mass, self.overdensity, self.rho_bg)
        return self._radius

    @property
    def sigma_s(self):
        """Characteristic surface density, ``rs * delta_c * rho_bg`` (lazy)."""
        if self._sigma_s is None:
            self._sigma_s = self.rs * self.delta_c * self.rho_bg
        return self._sigma_s

    ### hidden methods ###

    def _f_delta_c(self, c, overdensity):
        # NFW relation: delta_c = (Delta c^3 / 3) / (ln(1+c) - c/(1+c))
        return (overdensity*c**3/3) / (np.log(1+c) - c/(1+c))

    ### methods ###

    def mdelta(self, overdensity: float, background: str='c', err: float=1e-3,
               n_guess_rng: int=1000, max_iter: int=50):
        """Calculate mass at any overdensity from the original mass
        definition

        Parameters
        ----------
        overdensity : float
            overdensity at which the mass should be calculated

        Optional parameters
        -------------------
        background : one of ('c','m')
            background density as a reference for ``overdensity``.
            WARNING: currently only the same background as used in
            defining this object is implemented
        err: float
            maximum error on ``delta_c`` that can be tolerated to claim
            convergence
        n_guess_rng : int, optional
            how many samples of ``c`` to obtain in each iteration. See
            Notes below.
        max_iter : int, optional
            maximum number of iterations

        Returns
        -------
        mdelta, cdelta : ndarray, shape ``self.c.shape``
            mass and concentrations calculated at the requested
            overdensity
        """
        self._assert_background(background)
        # trivial case: nothing to convert
        if overdensity == self.overdensity \
                and background == self.background:
            return self.mass
        # do we need to convert the background density?
        if background == self.background:
            bgfactor = 1
        else:
            # this is m to c
            bgfactor = self.mean_density / self.critical_density
            # reciprocal for c to m
            if background == 'm':
                bgfactor = 1 / bgfactor
        # to handle arbitrary dimensions
        c_shape = self.c.shape
        self_c = self.c.reshape(-1)
        self_delta_c = self.delta_c.reshape(-1)
        # I found that this guess is good to within 20% typically
        c_guess = (self.overdensity/overdensity)**0.5 * self_c
        # so we create a +/-50% search range to be sure
        c_rng = np.linspace(0.5*c_guess, 1.5*c_guess, n_guess_rng)
        delta_c_rng = self._f_delta_c(c_rng, overdensity)
        delta_c_diff = np.abs(delta_c_rng/self_delta_c - 1)
        argmins = np.argmin(delta_c_diff, axis=0)
        # NOTE(review): the np.diagonal trick extracts, for each halo, the
        # sampled c value whose delta_c best matches -- verify this matches
        # the intended per-halo pairing for >1-d inputs.
        # without the copy I am not allowed to write into this array
        cdelta = np.diagonal(c_rng[argmins], axis1=-2, axis2=-1).copy()
        # delta_c_err are the minima of delta_c_diff
        delta_c_err = np.diagonal(
            delta_c_diff[argmins], axis1=-2, axis2=-1).copy()
        i = 0
        # iteratively refine around the current best guesses until every
        # halo's delta_c matches to within `err` (or max_iter is hit)
        while np.any(delta_c_err > err):
            k = (delta_c_err > err)
            # if our best guess is at the edge then we don't want to
            # shrink the search range, but if we don't shrink it
            # progressively otherwise then we'll never improve our answer
            # width=0.1 refers to a 10% search range
            if np.any(argmins == 0) or np.any(argmins == n_guess_rng-1):
                width = 0.1
            else:
                width = 0.1 / (i+1)
            c_rng = np.linspace(
                (1-width)*cdelta[k], (1+width)*cdelta[k], n_guess_rng)
            delta_c_diff = np.abs(
                self._f_delta_c(c_rng, overdensity)/self_delta_c[k]-1)
            argmins = np.argmin(delta_c_diff, axis=0)
            delta_c_err[k] = np.diagonal(
                delta_c_diff[argmins], axis1=-2, axis2=-1)
            # record the entries that have newly converged
            if (delta_c_err[k] <= err).sum():
                j = (delta_c_err <= err)
                cdelta[k & j] = np.diagonal(
                    c_rng[argmins], axis1=-2, axis2=-1)[j[k]]
            i += 1
            if i == max_iter:
                warn = f'Did not achieve convergence after {max_iter}' \
                       f' iterations; error on delta_c =' \
                       f' {delta_c_err[k].mean():.2e} +/-' \
                       f' {delta_c_err[k].std():.2e}' \
                       f' (max err = {delta_c_err[k].max():.2e})'
                warnings.warn(warn)
                break
        # back to the original shape, also correcting for different
        # background, if applicable
        cdelta = bgfactor**(1/3) * cdelta.reshape(c_shape)
        # calculate mass from the relation between mass, c, and overdensity
        mfactor = (overdensity/self.overdensity) * (cdelta/self.c)**3
        return mfactor*self.mass, cdelta

    def density(self, *args, **kwargs):
        """Alias for ``self.profile``"""
        return self.profile(*args, **kwargs)
class GNFW(BaseNFW):
    # NOTE: the docstring must be a raw string -- it contains LaTeX commands
    # such as \rho and \frac, whose leading characters (\r, \f) would
    # otherwise be interpreted as carriage-return / form-feed escapes.
    r"""Generalized NFW profile

    Density profile:

    .. math::

        \rho(r) = \frac{\delta_\mathrm{c}\rho_\mathrm{bg}}
            {(r/r_\mathrm{s})^\gamma
                \left[1+(r/r_\mathrm{s})^\alpha\right]^(\beta-\gamma)/\alpha

    Parameters
    ----------
    mass, c, z : float or np.ndarray
        mass, concentration, and redshift defining the NFW profile.
        Their shapes are arbitrary but they must be such that they can
        be multiplied together as they come

    Optional parameters
    -------------------
    alpha : float or np.ndarray
        sharpness of the transition between inner and outer slope
        around the scale radius. A larger value produces a sharper
        transition.
    beta : float or np.ndarray
        slope of the density profile at large radii
    gamma : float or np.ndarray
        slope of the density profile at small radii

    For additional optional parameters see ``NFW``

    Notes
    -----
    - A common but more restricted GNFW profile can be recovered by setting
      alpha=1, beta=3 and varying gamma alone
    - The default parameters (alpha=1, beta=3, gamma=1) correspond to the
      regular NFW profile
    """

    def __init__(self, mass, c, z, alpha=1, beta=3, gamma=1, overdensity=500,
                 background='c', frame='comoving', cosmo=Planck15, **kwargs):
        # record the broadcast shape of all defining parameters
        self._set_shape(mass*c*z*alpha*beta*gamma)
        super().__init__(
            mass, c, z, overdensity=overdensity, background=background,
            frame=frame, cosmo=cosmo, **kwargs)
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma

    ### main methods ###

    @inMpc
    @array
    def profile(self, r):
        """Three-dimensional generalized-NFW density profile at radius r (Mpc)."""
        # outer-slope exponent of the (1 + (r/rs)^alpha) term
        exp = (self.beta-self.gamma) / self.alpha
        return self.delta_c * self.rho_bg \
            / ((r/self.rs)**self.gamma * (1+(r/self.rs)**self.alpha)**exp)
class NFW(BaseNFW):
    # NOTE: raw docstring required -- \rho and \frac contain the real
    # escape sequences \r and \f and would be corrupted otherwise.
    r"""Navarro-Frenk-White profile (Navarro et al. 1995)

    Density profile:

    .. math::

        \rho(r) = \frac{\delta_\mathrm{c}\rho_\mathrm{bg}}
            {(r/r_\mathrm{s})(1+r/r_\mathrm{s})^2}

    Parameters
    ----------
    mass, c, z : float or np.ndarray
        mass, concentration, and redshift defining the NFW profile.
        Their shapes are arbitrary but they must be such that they can
        be multiplied together as they come

    Optional parameters
    -------------------
    overdensity : float
        overdensity with respect to the background density
    background : str
        'c' (critical) or 'm' (mean) background density
    cosmo : Astropy.cosmology.FLRW
        cosmology object
    """

    def __init__(self, mass, c, z, overdensity=500, background='c',
                 frame='comoving', cosmo=Planck15, **kwargs):
        self._set_shape(mass*c*z)
        super(NFW, self).__init__(
            mass, c, z, overdensity=overdensity, background=background,
            frame=frame, cosmo=cosmo, **kwargs)

    def __repr__(self):
        msg = f'NFW profile object containing {np.prod(self._shape)}' \
              f' profiles. shape: {self._shape}'
        od_msg = f'overdensity: {self.overdensity}{self.background}'
        # summarize each defining attribute as a range when it varies,
        # or a single value when it is constant
        if np.iterable(self.mass) and self.mass.min() < self.mass.max():
            mass_msg = 'log10 mass/Msun range =' \
                       f' {np.log10(self.mass.min()):.2f}' \
                       f'-{np.log10(self.mass.max()):.2f}'
        else:
            mass_msg = 'log10 mass/Msun =' \
                       f' {np.log10(np.unique(self.mass)[0]):.2f}'
        if np.iterable(self.c) and self.c.min() < self.c.max():
            c_msg = 'concentration range =' \
                    f' {self.c.min():.2f}-{self.c.max():.2f}'
        else:
            c_msg = f'concentration = {np.unique(self.c)[0]:.2f}'
        if np.iterable(self.z) and self.z.min() < self.z.max():
            z_msg = f'redshift range = {self.z.min():.2f}-{self.z.max():.2f}'
        else:
            z_msg = f'redshift = {np.unique(self.z)[0]:.2f}'
        return '\n  '.join([msg, od_msg, mass_msg, c_msg, z_msg])

    ### main methods ###

    @inMpc
    @array
    def profile(self, r):
        """Three-dimensional density profile"""
        return self.delta_c * self.rho_bg / (r/self.rs * (1+r/self.rs)**2)

    @inMpc
    @array
    def projected(self, R, **kwargs):
        """Analytical projected NFW at distance(s) R"""
        x = R / self.rs
        # piecewise in x = R/rs: x == 1 gives 1/3, with separate closed
        # forms for x < 1 and x > 1
        s = np.ones_like(x) / 3
        s[x == 0] = 0
        j = (x > 0) & (x < 1)
        s[j] = (1 - 2*np.arctanh(((1-x[j]) / (1+x[j]))**0.5)
                / (1-x[j]**2)**0.5) \
            / (x[j]**2 - 1)
        j = x > 1
        s[j] = (1 - 2*np.arctan(((x[j]-1) / (1+x[j]))**0.5) \
                / (x[j]**2-1)**0.5) \
            / (x[j]**2 - 1)
        return 2 * self.sigma_s * s

    @inMpc
    @array
    def projected_cumulative(self, R, **kwargs):
        """Analytical cumulative projected NFW profile"""
        x = R / self.rs
        # same piecewise structure as `projected`; the x == 1 value is
        # 1 + ln(1/2)
        s = np.ones_like(x) + np.log(0.5)
        s[x == 0] = 0
        j = (x > 0) & (x < 1)
        s[j] = (np.log(0.5*x[j]) \
                + 2 * np.arctanh(((1 - x[j])/(1 + x[j]))**0.5) \
                / (1 - x[j]**2)**0.5) \
            / x[j]**2
        j = x > 1
        s[j] = (2 * np.arctan(((x[j] - 1)/(1 + x[j]))**0.5) / (x[j]**2-1)**0.5 \
                + np.log(0.5*x[j])) / x[j]**2
        return 4 * self.sigma_s * s

    @array
    def fourier(self, k):
        """Fourier transform"""
        ki = k * self.rs
        # sine/cosine integrals evaluated at the scale and truncation args
        bs, bc = sici(ki)
        asi, ac = sici((1+self.c)*ki)
        return 4 * np.pi * self.rho_bg * self.delta_c * self.rs**3 / self.mass \
            * (np.sin(ki)*(asi-bs) - (np.sin(self.c*ki) / ((1+self.c)*ki)) \
               + np.cos(ki)*(ac-bc))
class TNFW(BaseNFW):
    # NOTE: raw docstring required -- \tau, \rho and \frac would otherwise
    # be mangled by the \t, \r and \f escape sequences.
    r"""Truncated NFW profile

    The density profile is given by

    .. math::

        \rho(r) = \frac{\delta_\mathrm{c}\rho_\mathrm{bg}}{x(1+x)^2}
            \left(\frac{\tau^2}{\tau^2+x^2}\right)^{\mathrm{eta}}

    with

    .. math::

        x = r/r_\mathrm{s}

    and

    .. math::

        tau = r_\mathrm{t}/r_\mathrm{s}

    the truncation radius in units of the scale radius.

    Analytical expressions for projected profiles have been derived by
    Baltz, Marshall & Oguri for the cases of ``eta={1,2}``. Here the
    projected profiles are calculated numerically.

    Parameters
    ----------
    mass, c, z : float or np.ndarray
        mass, concentration, and redshift defining the NFW profile.
        Their shapes are arbitrary but they must be such that they can
        be multiplied together as they come

    Optional parameters
    -------------------
    tau : float or np.ndarray
        truncation radius, in units of the scale radius
    eta : float or np.ndarray
        exponent of the decay beyond the truncation radius. Set to zero
        to recover regular NFW

    For additional optional parameters see ``NFW``
    """

    def __init__(self, mass, c, z, tau=1, eta=1, **kwargs):
        self._set_shape(mass*c*z*tau*eta)
        super().__init__(mass, c, z, **kwargs)
        self.tau = tau
        self.eta = eta

    ### main methods ###

    @inMpc
    @array
    def profile(self, r):
        """Three-dimensional truncated-NFW density profile at radius r (Mpc)."""
        x = r / self.rs
        # NFW times a truncation factor that suppresses the profile
        # beyond r_t = tau * rs
        return self.delta_c * self.rho_bg / (x * (1+x)**2) \
            * (self.tau**2 / (self.tau**2 + x**2))**self.eta
class Hernquist(GNFW):
    # NOTE: raw docstring required -- \alpha and \beta contain the real
    # escape sequences \a (bell) and \b (backspace).
    r"""Hernquist (1990) profile.

    This is a special case of the GNFW profile with :math:`\alpha=1`,
    :math:`\beta=4`, and :math:`\gamma=1`.
    """

    def __init__(self, mass, c, z, **kwargs):
        super().__init__(mass, c, z, alpha=1, beta=4, gamma=1, **kwargs)
| [
"numpy.arctanh",
"numpy.abs",
"numpy.ones_like",
"numpy.log",
"numpy.unique",
"scipy.special.sici",
"numpy.argmin",
"numpy.any",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.iterable",
"numpy.cos",
"warnings.warn",
"numpy.arctan",
"numpy.prod",
"numpy.diagonal"
] | [((3958, 4012), 'numpy.linspace', 'np.linspace', (['(0.5 * c_guess)', '(1.5 * c_guess)', 'n_guess_rng'], {}), '(0.5 * c_guess, 1.5 * c_guess, n_guess_rng)\n', (3969, 4012), True, 'import numpy as np\n'), ((4090, 4128), 'numpy.abs', 'np.abs', (['(delta_c_rng / self_delta_c - 1)'], {}), '(delta_c_rng / self_delta_c - 1)\n', (4096, 4128), True, 'import numpy as np\n'), ((4145, 4176), 'numpy.argmin', 'np.argmin', (['delta_c_diff'], {'axis': '(0)'}), '(delta_c_diff, axis=0)\n', (4154, 4176), True, 'import numpy as np\n'), ((4496, 4521), 'numpy.any', 'np.any', (['(delta_c_err > err)'], {}), '(delta_c_err > err)\n', (4502, 4521), True, 'import numpy as np\n'), ((12008, 12016), 'scipy.special.sici', 'sici', (['ki'], {}), '(ki)\n', (12012, 12016), False, 'from scipy.special import sici\n'), ((12035, 12058), 'scipy.special.sici', 'sici', (['((1 + self.c) * ki)'], {}), '((1 + self.c) * ki)\n', (12039, 12058), False, 'from scipy.special import sici\n'), ((701, 718), 'numpy.iterable', 'np.iterable', (['mass'], {}), '(mass)\n', (712, 718), True, 'import numpy as np\n'), ((739, 755), 'numpy.array', 'np.array', (['[mass]'], {}), '([mass])\n', (747, 755), True, 'import numpy as np\n'), ((4995, 5069), 'numpy.linspace', 'np.linspace', (['((1 - width) * cdelta[k])', '((1 + width) * cdelta[k])', 'n_guess_rng'], {}), '((1 - width) * cdelta[k], (1 + width) * cdelta[k], n_guess_rng)\n', (5006, 5069), True, 'import numpy as np\n'), ((5207, 5238), 'numpy.argmin', 'np.argmin', (['delta_c_diff'], {'axis': '(0)'}), '(delta_c_diff, axis=0)\n', (5216, 5238), True, 'import numpy as np\n'), ((5268, 5322), 'numpy.diagonal', 'np.diagonal', (['delta_c_diff[argmins]'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(delta_c_diff[argmins], axis1=-2, axis2=-1)\n', (5279, 5322), True, 'import numpy as np\n'), ((9659, 9681), 'numpy.iterable', 'np.iterable', (['self.mass'], {}), '(self.mass)\n', (9670, 9681), True, 'import numpy as np\n'), ((10029, 10048), 'numpy.iterable', 'np.iterable', (['self.c'], {}), 
'(self.c)\n', (10040, 10048), True, 'import numpy as np\n'), ((10281, 10300), 'numpy.iterable', 'np.iterable', (['self.z'], {}), '(self.z)\n', (10292, 10300), True, 'import numpy as np\n'), ((10904, 10919), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (10916, 10919), True, 'import numpy as np\n'), ((11466, 11481), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (11478, 11481), True, 'import numpy as np\n'), ((11484, 11495), 'numpy.log', 'np.log', (['(0.5)'], {}), '(0.5)\n', (11490, 11495), True, 'import numpy as np\n'), ((1905, 1918), 'numpy.log', 'np.log', (['(1 + c)'], {}), '(1 + c)\n', (1911, 1918), True, 'import numpy as np\n'), ((4263, 4310), 'numpy.diagonal', 'np.diagonal', (['c_rng[argmins]'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(c_rng[argmins], axis1=-2, axis2=-1)\n', (4274, 4310), True, 'import numpy as np\n'), ((4393, 4447), 'numpy.diagonal', 'np.diagonal', (['delta_c_diff[argmins]'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(delta_c_diff[argmins], axis1=-2, axis2=-1)\n', (4404, 4447), True, 'import numpy as np\n'), ((4835, 4855), 'numpy.any', 'np.any', (['(argmins == 0)'], {}), '(argmins == 0)\n', (4841, 4855), True, 'import numpy as np\n'), ((4859, 4893), 'numpy.any', 'np.any', (['(argmins == n_guess_rng - 1)'], {}), '(argmins == n_guess_rng - 1)\n', (4865, 4893), True, 'import numpy as np\n'), ((5915, 5934), 'warnings.warn', 'warnings.warn', (['warn'], {}), '(warn)\n', (5928, 5934), False, 'import warnings\n'), ((9505, 9525), 'numpy.prod', 'np.prod', (['self._shape'], {}), '(self._shape)\n', (9512, 9525), True, 'import numpy as np\n'), ((11564, 11582), 'numpy.log', 'np.log', (['(0.5 * x[j])'], {}), '(0.5 * x[j])\n', (11570, 11582), True, 'import numpy as np\n'), ((11832, 11850), 'numpy.log', 'np.log', (['(0.5 * x[j])'], {}), '(0.5 * x[j])\n', (11838, 11850), True, 'import numpy as np\n'), ((5459, 5506), 'numpy.diagonal', 'np.diagonal', (['c_rng[argmins]'], {'axis1': '(-2)', 'axis2': '(-1)'}), '(c_rng[argmins], axis1=-2, 
axis2=-1)\n', (5470, 5506), True, 'import numpy as np\n'), ((12230, 12240), 'numpy.cos', 'np.cos', (['ki'], {}), '(ki)\n', (12236, 12240), True, 'import numpy as np\n'), ((10243, 10260), 'numpy.unique', 'np.unique', (['self.c'], {}), '(self.c)\n', (10252, 10260), True, 'import numpy as np\n'), ((10460, 10477), 'numpy.unique', 'np.unique', (['self.z'], {}), '(self.z)\n', (10469, 10477), True, 'import numpy as np\n'), ((10998, 11042), 'numpy.arctanh', 'np.arctanh', (['(((1 - x[j]) / (1 + x[j])) ** 0.5)'], {}), '(((1 - x[j]) / (1 + x[j])) ** 0.5)\n', (11008, 11042), True, 'import numpy as np\n'), ((11151, 11194), 'numpy.arctan', 'np.arctan', (['(((x[j] - 1) / (1 + x[j])) ** 0.5)'], {}), '(((x[j] - 1) / (1 + x[j])) ** 0.5)\n', (11160, 11194), True, 'import numpy as np\n'), ((11605, 11649), 'numpy.arctanh', 'np.arctanh', (['(((1 - x[j]) / (1 + x[j])) ** 0.5)'], {}), '(((1 - x[j]) / (1 + x[j])) ** 0.5)\n', (11615, 11649), True, 'import numpy as np\n'), ((11753, 11796), 'numpy.arctan', 'np.arctan', (['(((x[j] - 1) / (1 + x[j])) ** 0.5)'], {}), '(((x[j] - 1) / (1 + x[j])) ** 0.5)\n', (11762, 11796), True, 'import numpy as np\n'), ((12151, 12161), 'numpy.sin', 'np.sin', (['ki'], {}), '(ki)\n', (12157, 12161), True, 'import numpy as np\n'), ((12174, 12193), 'numpy.sin', 'np.sin', (['(self.c * ki)'], {}), '(self.c * ki)\n', (12180, 12193), True, 'import numpy as np\n'), ((9987, 10007), 'numpy.unique', 'np.unique', (['self.mass'], {}), '(self.mass)\n', (9996, 10007), True, 'import numpy as np\n')] |
"""
Defines the DataSet class and supporting classes and functions
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import bisect as _bisect
import copy as _copy
import itertools as _itertools
import numbers as _numbers
import pickle as _pickle
import uuid as _uuid
import warnings as _warnings
from collections import OrderedDict as _OrderedDict
from collections import defaultdict as _defaultdict
import numpy as _np
from pygsti.circuits import circuit as _cir
from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat
from pygsti.tools import NamedDict as _NamedDict
from pygsti.tools import listtools as _lt
from pygsti.tools.legacytools import deprecate as _deprecated_fn
# import scipy.special as _sps
# import scipy.fftpack as _fft
# from scipy.integrate import quad as _quad
# from scipy.interpolate import interp1d as _interp1d
#from . import dataset as _ds
# Storage dtypes for DataSet arrays:
Oindex_type = _np.uint32  # outcome-label indices
Time_type = _np.float64  # timestamps
Repcount_type = _np.float32  # repetition counts
# Max number of outcome labels for which _DataSetRow.__getitem__ computes
# and caches *all* counts rather than extracting a single one.
_DATAROW_AUTOCACHECOUNT_THRESHOLD = 256
# thought: _np.uint16 but doesn't play well with rescaling
class _DataSetKVIterator(object):
    """
    Iterator class for op_string,_DataSetRow pairs of a DataSet

    Parameters
    ----------
    dataset : DataSet
        The parent data set.
    """

    def __init__(self, dataset):
        self.dataset = dataset
        self.gsIter = dataset.cirIndex.__iter__()

        oli = dataset.oliData
        times = dataset.timeData
        reps = dataset.repData
        aux = dataset.auxInfo

        def cache_for(opstr):
            # only static datasets carry per-circuit count caches
            return dataset.cnt_cache[opstr] if dataset.bStatic else None

        # Lazily produce the constructor args for each circuit's _DataSetRow.
        # `gsi` is an integer index for a non-static dataset and a slice for
        # a static one.
        if reps is None:
            self.tupIter = ((oli[gsi], times[gsi], None, cache_for(opstr), aux[opstr])
                            for opstr, gsi in dataset.cirIndex.items())
        else:
            self.tupIter = ((oli[gsi], times[gsi], reps[gsi], cache_for(opstr), aux[opstr])
                            for opstr, gsi in dataset.cirIndex.items())

    def __iter__(self):
        return self

    def __next__(self):
        return next(self.gsIter), _DataSetRow(self.dataset, *(next(self.tupIter)))

    next = __next__
class _DataSetValueIterator(object):
    """
    Iterator class for _DataSetRow values of a DataSet

    Parameters
    ----------
    dataset : DataSet
        The parent data set.
    """

    def __init__(self, dataset):
        self.dataset = dataset

        oli = dataset.oliData
        times = dataset.timeData
        reps = dataset.repData
        aux = dataset.auxInfo

        def cache_for(opstr):
            # only static datasets carry per-circuit count caches
            return dataset.cnt_cache[opstr] if dataset.bStatic else None

        # Lazily produce the constructor args for each circuit's _DataSetRow.
        # `gsi` is an integer index for a non-static dataset and a slice for
        # a static one.
        if reps is None:
            self.tupIter = ((oli[gsi], times[gsi], None, cache_for(opstr), aux[opstr])
                            for opstr, gsi in dataset.cirIndex.items())
        else:
            self.tupIter = ((oli[gsi], times[gsi], reps[gsi], cache_for(opstr), aux[opstr])
                            for opstr, gsi in dataset.cirIndex.items())

    def __iter__(self):
        return self

    def __next__(self):
        return _DataSetRow(self.dataset, *(next(self.tupIter)))

    next = __next__
class _DataSetRow(object):
"""
Encapsulates DataSet time series data for a single circuit.
Outwardly, it looks similar to a list with
`(outcome_label, time_index, repetition_count)` tuples as the values.
Parameters
----------
dataset : DataSet
The parent data set.
row_oli_data : numpy.ndarray
The outcome label indices for each bin of this row.
row_time_data : numpy.ndarray
The timestamps for each bin of this row.
row_rep_data : numpy.ndarray
The repetition counts for each bin of this row (if None, assume 1 per bin).
cached_cnts : dict
A cached pre-computed count dictionary (for speed).
aux : dict
Dictionary of auxiliary information.
Attributes
----------
outcomes : list
Returns this row's sequence of outcome labels, one per "bin" of repetition
counts (returned by :method:`get_counts`).
counts : dict
a dictionary of per-outcome counts.
allcounts : dict
a dictionary of per-outcome counts with *all* possible outcomes as keys
and zero values when an outcome didn't occur. Note this can be expensive
to compute for many-qubit data.
fractions : dict
a dictionary of per-outcome fractions.
total : int
Returns the total number of counts contained in this row.
"""
def __init__(self, dataset, row_oli_data, row_time_data, row_rep_data,
cached_cnts, aux):
self.dataset = dataset
self.oli = row_oli_data
self.time = row_time_data
self.reps = row_rep_data
self._cntcache = cached_cnts
self.aux = aux
@property
def outcomes(self):
"""
This row's sequence of outcome labels, one per "bin" of repetition counts.
"""
return [self.dataset.ol[i] for i in self.oli]
@outcomes.setter
def outcomes(self, value):
"""
This row's sequence of outcome labels, one per "bin" of repetition counts.
"""
raise ValueError("outcomes property is read-only")
    @property
    def unique_outcomes(self):
        """
        This row's unique set of outcome labels, as a list
        """
        # Delegates de-duplication to pygsti.tools.listtools; presumably
        # preserves first-appearance order -- verify against _lt.remove_duplicates.
        return _lt.remove_duplicates(self.outcomes)  # More efficient/cached in future?
@property
def expanded_ol(self):
"""
This row's sequence of outcome labels, with repetition counts expanded.
Thus, there's one element in the returned list for *each* count.
Returns
-------
list
"""
if self.reps is not None:
ol = []
for oli, _, nreps in zip(self.oli, self.time, self.reps):
nreps = _round_int_repcnt(nreps)
ol.extend([self.dataset.ol[oli]] * nreps)
return ol
else: return self.outcomes
@property
def expanded_oli(self):
"""
This row's sequence of outcome label indices, with repetition counts expanded.
Thus, there's one element in the returned list for *each* count.
Returns
-------
numpy.ndarray
"""
if self.reps is not None:
inds = []
for oli, _, nreps in zip(self.oli, self.time, self.reps):
nreps = _round_int_repcnt(nreps)
inds.extend([oli] * nreps)
return _np.array(inds, dtype=self.dataset.oliType)
else: return self.oli.copy()
@property
def expanded_times(self):
"""
This row's sequence of time stamps, with repetition counts expanded.
Thus, there's one element in the returned list for *each* count.
Returns
-------
numpy.ndarray
"""
if self.reps is not None:
times = []
for _, time, nreps in zip(self.oli, self.time, self.reps):
nreps = _round_int_repcnt(nreps)
times.extend([time] * nreps)
return _np.array(times, dtype=self.dataset.timeType)
else: return self.time.copy()
@property
def times(self):
"""
A list containing the unique data collection times at which there is at least one measurement result.
Returns
-------
list
"""
times = []
last_time = None
for t in self.time:
if t != last_time:
times.append(t)
last_time = t
return times
    @property
    def timeseries_for_outcomes(self):
        """
        Row data in a time-series format.

        This can be a much less succinct format than returned by `counts_as_timeseries`.
        E.g., it is highly inefficient for many-qubit data.

        Returns
        -------
        times : list
            The time steps, containing the unique data collection times.

        reps : dict
            A dictionary of lists containing the number of times each
            measurement outcome was observed at the unique data collection
            times in `times`.
        """
        times = []
        last_time = None
        # one per-timestep count list for *every* outcome label in the dataset
        seriesDict = {self.dataset.olIndex[ol]: [] for ol in self.dataset.outcome_labels}

        #REMOVED: (though this gives slightly different behavior)
        #for outcome_label in self.outcomes:
        #    if outcome_label not in seriesDict.keys():
        #        seriesDict[outcome_label] = []

        if self.reps is None:
            reps = _np.ones(len(self.time), _np.int64)  # implicit 1 rep per bin
        else: reps = self.reps

        # An alternate implementation that appears to be (surprisingly?) slower...
        ##Get time bin locations
        #time_bins_borders = []
        #last_time = None
        #for i, t in enumerate(self.time):
        #    if t != last_time:
        #        time_bins_borders.append(i)
        #        last_time = t
        #time_bins_borders.append(len(self.time))
        #nTimes = len(time_bins_borders) - 1
        #
        #seriesDict = {self.dataset.olIndex[ol]: _np.zeros(nTimes, _np.int64) for ol in self.dataset.outcome_labels}
        #
        #for i in range(nTimes):
        #    slc = slice(time_bins_borders[i],time_bins_borders[i+1])
        #    times.append( self.time[slc.start] )
        #    for oli, rep in zip(self.oli[slc], reps[slc]):
        #        seriesDict[oli][i] += rep

        # On a new timestamp: start a new bin in every outcome's list
        # (rep for the observed outcome, 0 for all others); on a repeated
        # timestamp: accumulate into the observed outcome's last bin.
        for t, oli, rep in zip(self.time, self.oli, reps):
            if t != last_time:
                times.append(t)
                last_time = t
                for sd_oli in seriesDict.keys():
                    if sd_oli == oli: seriesDict[sd_oli].append(rep)
                    else: seriesDict[sd_oli].append(0)
            else:
                seriesDict[oli][-1] += rep

        return times, {ol: seriesDict[oli] for ol, oli in self.dataset.olIndex.items()}
def counts_as_timeseries(self):
"""
Returns data in a time-series format.
Returns
-------
times : list
The time steps, containing the unique data collection times.
reps : list
A list of dictionaries containing the counts dict corresponding
to the list of unique data collection times in `times`.
"""
times = []
series = []
last_time = None
if self.reps is None:
reps = list(_np.ones(len(self.time), _np.int64))
else: reps = self.reps
for t, outcome_label, rep in zip(self.time, self.outcomes, reps):
if t != last_time:
times.append(t)
last_time = t
series.append({outcome_label: rep})
else:
if outcome_label in series[-1]:
series[-1][outcome_label] += rep
else:
series[-1][outcome_label] = rep
return times, series
@property
def reps_timeseries(self):
"""
The number of measurement results at each data collection time.
Returns
-------
times : list
The time steps.
reps : list
The total number of counts at each time step.
"""
times = []
reps = []
last_time = None
if self.reps is None:
return list(self.time), list(_np.ones(len(self.time), _np.int64))
else:
for t, rep in zip(self.time, self.reps):
if t != last_time:
times.append(t)
last_time = t
reps.append(rep)
else:
reps[-1] += rep
return times, reps
@property
def number_of_times(self):
"""
Returns the number of data collection times.
Returns
-------
int
"""
return len(self.times)
@property
def has_constant_totalcounts(self):
"""
True if the numbers of counts is the same at all data collection times. Otherwise False.
Returns
-------
bool
"""
times, reps = self.reps_timeseries
firstrep = reps[0]
fixedtotalcounts = all([firstrep == i for i in reps])
return fixedtotalcounts
@property
def totalcounts_per_timestep(self):
"""
The number of total counts per time-step, when this is constant.
If the total counts vary over the times that there is at least
one measurement result, then this function will raise an error.
Returns
-------
int
"""
times, reps = self.reps_timeseries
firstrep = reps[0]
assert(all([firstrep == i for i in reps])), "The total counts is not the same at all time steps!"
return firstrep
@property
def meantimestep(self):
"""
The mean time-step.
Will raise an error for data that is a trivial time-series (i.e., data all at one time).
Returns
-------
float
"""
times = _np.array(self.times)
assert(len(times) >= 2), "Mean time-step is ill-defined when there is not multiple data times!"
return _np.mean(_np.diff(times))
def __iter__(self):
if self.reps is not None:
return ((self.dataset.ol[i], t, n) for (i, t, n) in zip(self.oli, self.time, self.reps))
else:
return ((self.dataset.ol[i], t, 1) for (i, t) in zip(self.oli, self.time))
    def __contains__(self, outcome_label):
        """ Checks whether data counts for `outcomelabel` are available."""
        # Membership is tested against the counts dict, which (per
        # _get_counts) only includes outcomes actually observed in this row
        # unless a pre-computed cache says otherwise.
        return outcome_label in self.counts
    def __getitem__(self, index_or_outcome_label):
        # Dispatch on key type:
        #  - integral index -> raw (outcome_label, time, reps) tuple for that bin
        #  - real number    -> counts dictionary at that timestamp
        #  - anything else  -> treated as an outcome label; returns its total count
        if isinstance(index_or_outcome_label, _numbers.Integral):  # raw index
            i = index_or_outcome_label
            if self.reps is not None:
                return (self.dataset.ol[self.oli[i]], self.time[i], self.reps[i])
            else:
                # repetitions are not tracked, so each bin counts once
                return (self.dataset.ol[self.oli[i]], self.time[i], 1)
        elif isinstance(index_or_outcome_label, _numbers.Real):  # timestamp
            return self.counts_at_time(index_or_outcome_label)
        else:
            if len(self.dataset.olIndex) > _DATAROW_AUTOCACHECOUNT_THRESHOLD:
                #There are a lot of outcomes in this dataset - it's not worth computing
                # and caching *all* of the counts just to extract the one being asked for now.
                outcome_label = _ld.OutcomeLabelDict.to_outcome(index_or_outcome_label)
                if outcome_label not in self.dataset.olIndex:
                    raise KeyError("%s is not an index, timestamp, or outcome label!"
                                   % str(index_or_outcome_label))
                return self._get_single_count(outcome_label)
            else:
                #Compute and cache *all* of the counts, since there aren't so many of them.
                try:
                    return self.counts[index_or_outcome_label]
                except KeyError:
                    # if outcome label isn't in counts but *is* in the dataset's
                    # outcome labels then return 0 (~= return self.allcounts[...])
                    key = _ld.OutcomeLabelDict.to_outcome(index_or_outcome_label)
                    if key in self.dataset.outcome_labels: return 0
                    raise KeyError("%s is not an index, timestamp, or outcome label!"
                                   % str(index_or_outcome_label))
    def __setitem__(self, index_or_outcome_label, val):
        # Two modes:
        #  - integral index: overwrite one bin from an
        #    (outcomeLabel, time[, repetitions]) tuple
        #  - outcome label: overwrite that outcome's repetition count; only
        #    valid when all bins share one timestamp and reps are tracked
        if isinstance(index_or_outcome_label, _numbers.Integral):
            index = index_or_outcome_label; tup = val
            assert(len(tup) in (2, 3)), "Must set to a (<outcomeLabel>,<time>[,<repetitions>]) value"
            ol = _ld.OutcomeLabelDict.to_outcome(tup[0])  # strings -> tuple outcome labels
            self.oli[index] = self.dataset.olIndex[ol]
            self.time[index] = tup[1]

            if self.reps is not None:
                self.reps[index] = tup[2] if len(tup) == 3 else 1
            else:
                assert(len(tup) == 2 or tup[2] == 1), "Repetitions must == 1 (not tracking reps)"
        else:
            outcomeLbl = _ld.OutcomeLabelDict.to_outcome(index_or_outcome_label)  # strings -> tuple outcome labels
            count = val

            assert(all([t == self.time[0] for t in self.time])), \
                "Cannot set outcome counts directly on a DataSet with non-trivially timestamped data"
            assert(self.reps is not None), \
                "Cannot set outcome counts directly on a DataSet without repetition data"

            # find the bin holding this outcome and overwrite its rep count
            outcomeIndxToLookFor = self.dataset.olIndex.get(outcomeLbl, None)
            for i, outcomeIndx in enumerate(self.oli):
                if outcomeIndx == outcomeIndxToLookFor:
                    self.reps[i] = count; break
            else:  # need to add a new label & entry to reps[]
                raise NotImplementedError("Cannot create new outcome labels by assignment")
def get(self, index_or_outcome_label, default_value):
"""
The the number of counts for an index or outcome label.
If the index or outcome is nor present, `default_value` is returned.
Parameters
----------
index_or_outcome_label : int or str or tuple
The index or outcome label to lookup.
default_value : object
The value to return if this data row doesn't contain data
at the given index.
Returns
-------
int or float
"""
try:
return self[index_or_outcome_label]
except KeyError:
return default_value
def _get_single_count(self, outcome_label, timestamp=None):
if timestamp is not None:
tslc = _np.where(_np.isclose(self.time, timestamp))[0]
else: tslc = slice(None)
if self.reps is None:
i = self.dataset.olIndex[outcome_label]
return float(_np.count_nonzero(_np.equal(self.oli[tslc], i)))
else:
i = self.dataset.olIndex[outcome_label]
inds = _np.nonzero(_np.equal(self.oli[tslc], i))[0]
if len(inds) > 0:
return float(sum(self.reps[tslc][inds]))
else:
return 0.0
    def _get_counts(self, timestamp=None, all_outcomes=False):
        """
        Return this row's per-outcome repetition counts as an OutcomeLabelDict.

        Tallies the repetitions of each outcome label appearing in this row's
        `.oli` member, optionally restricted to elements whose time stamp
        (approximately) equals `timestamp`.  When `all_outcomes` is True,
        every outcome label of the parent dataset appears as a key (with zero
        values for outcomes that did not occur).
        """
        #Note: when all_outcomes == False we don't add outcome labels that
        # aren't present for any of this row's elements (i.e. the #summed
        # is zero)
        cntDict = _ld.OutcomeLabelDict()
        if timestamp is not None:
            tslc = _np.where(_np.isclose(self.time, timestamp))[0]
        else: tslc = slice(None)
        nOutcomes = len(self.dataset.olIndex)
        nIndices = len(self.oli[tslc])
        # Two tally strategies: loop over outcome labels when there are fewer
        # labels than data elements (or all outcomes are wanted); otherwise
        # loop over the data elements themselves.
        if nOutcomes <= nIndices or all_outcomes:
            if self.reps is None:
                # no repetition data: each element counts once
                for ol, i in self.dataset.olIndex.items():
                    cnt = float(_np.count_nonzero(_np.equal(self.oli[tslc], i)))
                    if all_outcomes or cnt > 0:
                        cntDict.setitem_unsafe(ol, cnt)
            else:
                for ol, i in self.dataset.olIndex.items():
                    inds = _np.nonzero(_np.equal(self.oli[tslc], i))[0]
                    if all_outcomes or len(inds) > 0:
                        cntDict.setitem_unsafe(ol, float(sum(self.reps[tslc][inds])))
        else:
            if self.reps is None:
                for ol_index in self.oli[tslc]:
                    ol = self.dataset.ol[ol_index]
                    cntDict.setitem_unsafe(ol, 1.0 + cntDict.getitem_unsafe(ol, 0.0))
            else:
                for ol_index, reps in zip(self.oli[tslc], self.reps[tslc]):
                    ol = self.dataset.ol[ol_index]
                    cntDict.setitem_unsafe(ol, reps + cntDict.getitem_unsafe(ol, 0.0))
        return cntDict
@property
def counts(self):
"""
Dictionary of per-outcome counts.
"""
if self._cntcache: return self._cntcache # if not None *and* len > 0
ret = self._get_counts()
if self._cntcache is not None: # == and empty dict {}
self._cntcache.update(ret)
return ret
    @property
    def allcounts(self):
        """
        Dictionary of per-outcome counts with *all* possible outcomes as keys.

        Zero values are included when an outcome didn't occur.  Note this can
        be expensive to assemble for many-qubit data.
        """
        return self._get_counts(all_outcomes=True)
    @property
    def fractions(self, all_outcomes=False):
        """
        Dictionary of per-outcome fractions (counts normalized by the total).

        NOTE(review): because this is a property, callers can never actually
        supply `all_outcomes` -- it is always its default of False.  Confirm
        whether the parameter should be removed or this should be a method.
        """
        cnts = self._get_counts(None, all_outcomes)
        total = sum(cnts.values())
        return _ld.OutcomeLabelDict([(k, cnt / total) for k, cnt in cnts.items()])
@property
def total(self):
"""
The total number of counts contained in this row.
"""
if self.reps is None:
return float(len(self.oli))
else:
return sum(self.reps)
    @_deprecated_fn('DataSetRow.fractions')
    def fraction(self, outcomelabel):
        """
        The fraction of total counts for `outcomelabel`.

        Deprecated -- use the `fractions` property instead.

        Parameters
        ----------
        outcomelabel : str or tuple
            The outcome label, e.g. `'010'` or `('0','11')`.

        Returns
        -------
        float
        """
        d = self.counts
        if outcomelabel not in d:
            return 0.0  # Note: similar to an "all_outcomes=True" default
        total = sum(d.values())
        return d[outcomelabel] / total
    def counts_at_time(self, timestamp):
        """
        Returns a dictionary of counts at a particular time.

        Parameters
        ----------
        timestamp : float
            the time to get counts at.

        Returns
        -------
        OutcomeLabelDict
            per-outcome counts restricted to elements at `timestamp`.
        """
        return self._get_counts(timestamp)
    def timeseries(self, outcomelabel, timestamps=None):
        """
        Retrieve timestamps and counts for a single outcome label or for aggregated counts if `outcomelabel == "all"`.

        Parameters
        ----------
        outcomelabel : str or tuple
            The outcome label to extract a series for.  If the special value
            `"all"` is used, total (aggregated over all outcomes) counts are
            returned.

        timestamps : list or array, optional
            If not None, an array of time stamps to extract counts for,
            which will also be returned as `times`.  Times at which
            there is no data will be returned as zero-counts.

        Returns
        -------
        times, counts : numpy.ndarray
        """
        if outcomelabel == 'all':
            olis = list(self.dataset.olIndex.values())
        else:
            outcomelabel = _ld.OutcomeLabelDict.to_outcome(outcomelabel)
            olis = [self.dataset.olIndex[outcomelabel]]
        times = []
        counts = []
        last_t = -1e100  # sentinel "time" guaranteed below any real stamp
        tsIndx = 0
        for i, (t, oli) in enumerate(zip(self.time, self.oli)):
            if timestamps is not None:
                # Emit zero-counts for requested timestamps that precede `t`
                # (requested times with no recorded data).
                while tsIndx < len(timestamps) and t > timestamps[tsIndx] \
                        and not _np.isclose(t, timestamps[tsIndx], rtol=0., atol=1e-12):
                    times.append(timestamps[tsIndx])
                    counts.append(0)
                    tsIndx += 1
            if oli in olis and (timestamps is None or _np.isclose(t, timestamps[tsIndx], rtol=0., atol=1e-12)):
                if not _np.isclose(t, last_t, rtol=0., atol=1e-12):
                    # First matching element at a new time: open a new bin.
                    times.append(t); tsIndx += 1
                    counts.append(0)
                    last_t = t
                counts[-1] += 1 if (self.reps is None) else self.reps[i]
        if timestamps is not None:
            # Pad remaining requested timestamps with zero-counts.
            while tsIndx < len(timestamps):
                times.append(timestamps[tsIndx])
                counts.append(0)
                tsIndx += 1
        return _np.array(times, self.dataset.timeType), \
            _np.array(counts, self.dataset.repType)
def scale_inplace(self, factor):
"""
Scales all the counts of this row by the given factor
Parameters
----------
factor : float
scaling factor.
Returns
-------
None
"""
if self.dataset.bStatic: raise ValueError("Cannot scale rows of a *static* DataSet.")
if self.reps is None:
raise ValueError(("Cannot scale a DataSet without repetition "
"counts. Call DataSet._add_explicit_repetition_counts()"
" and try this again."))
for i, cnt in enumerate(self.reps):
self.reps[i] = cnt * factor
def to_dict(self):
"""
Returns the (outcomeLabel,count) pairs as a dictionary.
Returns
-------
dict
"""
return dict(self.counts)
def to_str(self, mode="auto"):
"""
Render this _DataSetRow as a string.
Parameters
----------
mode : {"auto","time-dependent","time-independent"}
Whether to display the data as time-series of outcome counts
(`"time-dependent"`) or to report per-outcome counts aggregated over
time (`"time-independent"`). If `"auto"` is specified, then the
time-independent mode is used only if all time stamps in the
_DataSetRow are equal (trivial time dependence).
Returns
-------
str
"""
if mode == "auto":
if all([t == self.time[0] for t in self.time]):
mode = "time-independent"
else: mode = "time-dependent"
assert(mode in ('time-dependent', 'time-independent')), "Invalid `mode` argument: %s" % mode
if mode == "time-dependent":
s = "Outcome Label Indices = " + str(self.oli) + "\n"
s += "Time stamps = " + str(self.time) + "\n"
if self.reps is not None:
s += "Repetitions = " + str(self.reps) + "\n"
else:
s += "( no repetitions )\n"
return s
else: # time-independent
return str(self.to_dict())
    def __str__(self):
        """Human-readable rendering; delegates to `to_str` (mode="auto")."""
        return self.to_str()
    def __len__(self):
        """Number of (outcome, time[, repetition]) elements in this row's series."""
        return len(self.oli)
def _round_int_repcnt(nreps):
    """ Helper function to localize warning message """
    # Integral-valued floats (and ints) convert exactly -- no warning needed.
    if float(nreps).is_integer():
        return int(nreps)
    else:
        # NOTE(review): the message says "next lowest whole number" but
        # int(round(...)) rounds to the *nearest* integer -- confirm which
        # behavior is intended before relying on the warning text.
        _warnings.warn("Rounding fractional repetition count to next lowest whole number!")
        return int(round(nreps))
class DataSet(object):
"""
An association between Circuits and outcome counts, serving as the input data for many QCVV protocols.
The DataSet class associates circuits with counts or time series of
counts for each outcome label, and can be thought of as a table with gate
strings labeling the rows and outcome labels and/or time labeling the
columns. It is designed to behave similarly to a dictionary of
dictionaries, so that counts are accessed by:
`count = dataset[circuit][outcomeLabel]`
in the time-independent case, and in the time-dependent case, for *integer*
time index `i >= 0`,
`outcomeLabel = dataset[circuit][i].outcome`
`count = dataset[circuit][i].count`
`time = dataset[circuit][i].time`
Parameters
----------
oli_data : list or numpy.ndarray
When `static == True`, a 1D numpy array containing outcome label
indices (integers), concatenated for all sequences. Otherwise, a
list of 1D numpy arrays, one array per gate sequence. In either
case, this quantity is indexed by the values of `circuit_indices`
or the index of `circuits`.
time_data : list or numpy.ndarray
Same format at `oli_data` except stores floating-point timestamp
values.
rep_data : list or numpy.ndarray
Same format at `oli_data` except stores integer repetition counts
for each "data bin" (i.e. (outcome,time) pair). If all repetitions
        equal 1 ("single-shot" timestamped data), then `rep_data` can be
`None` (no repetitions).
circuits : list of (tuples or Circuits)
Each element is a tuple of operation labels or a Circuit object. Indices for these strings
are assumed to ascend from 0. These indices must correspond to the time series of spam-label
indices (above). Only specify this argument OR circuit_indices, not both.
circuit_indices : ordered dictionary
An OrderedDict with keys equal to circuits (tuples of operation labels) and values equal to
integer indices associating a row/element of counts with the circuit. Only
specify this argument OR circuits, not both.
outcome_labels : list of strings or int
Specifies the set of spam labels for the DataSet. Indices for the spam labels
are assumed to ascend from 0, starting with the first element of this list. These
        indices will associate each element of `timeseries` with a spam label. Only
specify this argument OR outcome_label_indices, not both. If an int, specifies that
the outcome labels should be those for a standard set of this many qubits.
outcome_label_indices : ordered dictionary
An OrderedDict with keys equal to spam labels (strings) and value equal to
integer indices associating a spam label with given index. Only
specify this argument OR outcome_labels, not both.
static : bool
When True, create a read-only, i.e. "static" DataSet which cannot be modified. In
this case you must specify the timeseries data, circuits, and spam labels.
When False, create a DataSet that can have time series data added to it. In this case,
you only need to specify the spam labels.
file_to_load_from : string or file object
Specify this argument and no others to create a static DataSet by loading
from a file (just like using the load(...) function).
collision_action : {"aggregate","overwrite","keepseparate"}
Specifies how duplicate circuits should be handled. "aggregate"
adds duplicate-circuit counts to the same circuit's data at the
next integer timestamp. "overwrite" only keeps the latest given
data for a circuit. "keepseparate" tags duplicate-circuits by
setting the `.occurrence` ID of added circuits that are already
contained in this data set to the next available positive integer.
comment : string, optional
A user-specified comment string that gets carried around with the
data. A common use for this field is to attach to the data details
regarding its collection.
aux_info : dict, optional
A user-specified dictionary of per-circuit auxiliary information.
Keys should be the circuits in this DataSet and value should
be Python dictionaries.
"""
    def __init__(self, oli_data=None, time_data=None, rep_data=None,
                 circuits=None, circuit_indices=None,
                 outcome_labels=None, outcome_label_indices=None,
                 static=False, file_to_load_from=None, collision_action="aggregate",
                 comment=None, aux_info=None):
        """
        Initialize a DataSet.

        Parameters
        ----------
        oli_data : list or numpy.ndarray
            When `static == True`, a 1D numpy array containing outcome label
            indices (integers), concatenated for all sequences.  Otherwise, a
            list of 1D numpy arrays, one array per gate sequence.  In either
            case, this quantity is indexed by the values of `circuit_indices`
            or the index of `circuits`.

        time_data : list or numpy.ndarray
            Same format at `oli_data` except stores floating-point timestamp
            values.

        rep_data : list or numpy.ndarray
            Same format at `oli_data` except stores integer repetition counts
            for each "data bin" (i.e. (outcome,time) pair).  If all repetitions
            equal 1 ("single-shot" timestamped data), then `rep_data` can be
            `None` (no repetitions).

        circuits : list of (tuples or Circuits)
            Each element is a tuple of operation labels or a Circuit object.  Indices for these strings
            are assumed to ascend from 0.  These indices must correspond to the time series of spam-label
            indices (above).  Only specify this argument OR circuit_indices, not both.

        circuit_indices : ordered dictionary
            An OrderedDict with keys equal to circuits (tuples of operation labels) and values equal to
            integer indices associating a row/element of counts with the circuit.  Only
            specify this argument OR circuits, not both.

        outcome_labels : list of strings or int
            Specifies the set of spam labels for the DataSet.  Indices for the spam labels
            are assumed to ascend from 0, starting with the first element of this list.  These
            indices will associate each element of `timeseries` with a spam label.  Only
            specify this argument OR outcome_label_indices, not both.  If an int, specifies that
            the outcome labels should be those for a standard set of this many qubits.

        outcome_label_indices : ordered dictionary
            An OrderedDict with keys equal to spam labels (strings) and value equal to
            integer indices associating a spam label with given index.  Only
            specify this argument OR outcome_labels, not both.

        static : bool
            When True, create a read-only, i.e. "static" DataSet which cannot be modified.  In
            this case you must specify the timeseries data, circuits, and spam labels.
            When False, create a DataSet that can have time series data added to it.  In this case,
            you only need to specify the spam labels.

        file_to_load_from : string or file object
            Specify this argument and no others to create a static DataSet by loading
            from a file (just like using the load(...) function).

        collision_action : {"aggregate","overwrite","keepseparate"}
            Specifies how duplicate circuits should be handled.  "aggregate"
            adds duplicate-circuit counts to the same circuit's data at the
            next integer timestamp.  "overwrite" only keeps the latest given
            data for a circuit.  "keepseparate" tags duplicate-circuits by
            setting the `.occurrence` ID of added circuits that are already
            contained in this data set to the next available positive integer.

        comment : string, optional
            A user-specified comment string that gets carried around with the
            data.  A common use for this field is to attach to the data details
            regarding its collection.

        aux_info : dict, optional
            A user-specified dictionary of per-circuit auxiliary information.
            Keys should be the circuits in this DataSet and value should
            be Python dictionaries.

        Returns
        -------
        DataSet
            a new data set object.
        """
        # uuid for efficient hashing (set when done adding data or loading from file)
        self.uuid = None
        #Optionally load from a file
        if file_to_load_from is not None:
            # loading from a file is exclusive of all other constructor arguments
            assert(oli_data is None and time_data is None and rep_data is None
                   and circuits is None and circuit_indices is None
                   and outcome_labels is None and outcome_label_indices is None)
            self.read_binary(file_to_load_from)
            return
        # self.cirIndex : Ordered dictionary where keys = Circuit objects,
        #   values = slices into oli, time, & rep arrays (static case) or
        #   integer list indices (non-static case)
        if circuit_indices is not None:
            self.cirIndex = _OrderedDict([(opstr if isinstance(opstr, _cir.Circuit) else _cir.Circuit(opstr), i)
                                          for opstr, i in circuit_indices.items()])
            #convert keys to Circuits if necessary
        elif not static:
            if circuits is not None:
                dictData = [(opstr if isinstance(opstr, _cir.Circuit) else _cir.Circuit(opstr), i)
                            for (i, opstr) in enumerate(circuits)]  # convert to Circuits if necessary
                self.cirIndex = _OrderedDict(dictData)
            else:
                self.cirIndex = _OrderedDict()
        else: raise ValueError("Must specify circuit_indices when creating a static DataSet")
        # self.olIndex : Ordered dictionary where
        #   keys = outcome labels (strings or tuples),
        #   values = integer indices mapping oli_data (integers) onto
        #   the outcome labels.
        if outcome_label_indices is not None:
            self.olIndex = outcome_label_indices
            self.olIndex_max = max(self.olIndex.values()) if len(self.olIndex) > 0 else -1
        elif outcome_labels is not None:
            if isinstance(outcome_labels, _np.int64):
                # an integer means: standard '0'/'1' bit-string outcomes for this many qubits
                nqubits = outcome_labels
                tup_outcomeLabels = [("".join(x),) for x in _itertools.product(*([('0', '1')] * nqubits))]
            else:
                tup_outcomeLabels = [_ld.OutcomeLabelDict.to_outcome(ol)
                                     for ol in outcome_labels]  # strings -> tuple outcome labels
            self.olIndex = _OrderedDict([(ol, i) for (i, ol) in enumerate(tup_outcomeLabels)])
            self.olIndex_max = len(tup_outcomeLabels) - 1
        else:
            self.olIndex = _OrderedDict()  # OK, as outcome labels are added as they appear
            self.olIndex_max = -1
        # self.ol : Ordered dictionary where keys = integer indices, values = outcome
        #   labels (strings or tuples) -- just the reverse of self.olIndex
        self.ol = _OrderedDict([(i, ol) for (ol, i) in self.olIndex.items()])
        # sanity checks that indices are >= 0
        if not static:  # otherwise values() below are slices
            if self.cirIndex: assert(min(self.cirIndex.values()) >= 0)
            if self.olIndex: assert(min(self.olIndex.values()) >= 0)
        # self.oliData : when static == True a 1D numpy array containing concatenated outcome label indices.
        #                when static == False a list of 1D numpy arrays, one array per gate sequence.
        # self.timeData : when static == True a 1D numpy array containing concatenated time stamps.
        #                 when static == False a list of 1D numpy arrays, one array per gate sequence.
        # self.repData : when static == True a 1D numpy array containing concatenated repetition counts.
        #                when static == False a list of 1D numpy arrays, one array per gate sequence.
        #                (can be None, in which case no repetitions are assumed)
        if oli_data is not None:
            # check that sizes/lengths all match
            assert(len(time_data) == len(oli_data)), "time_data must be same size as oli_data"
            if rep_data is not None:
                assert(len(rep_data) == len(oli_data)), "rep_data must be same size as oli_data"
            self.oliData = oli_data
            self.timeData = time_data
            self.repData = rep_data
            if len(self.cirIndex) > 0:
                maxOlIndex = self.olIndex_max
                if static:
                    assert(max([_np.amax(self.oliData[i]) if (len(self.oliData[i]) > 0) else 0
                                for i in self.cirIndex.values()]) <= maxOlIndex)
                    # self.oliData.shape[0] > maxIndex doesn't make sense since cirIndex holds slices
                else:
                    #Note: for non-static data, assume *all* data in self.oliData is "in" this data set, i.e.,
                    # it can't be that this is a truncated dataset with pointers to more data than it actually owns.
                    maxIndex = max(self.cirIndex.values())
                    assert(len(self.oliData) > maxIndex)
                    if len(self.oliData) > 0:
                        assert(all([max(oliSeries) <= maxOlIndex for oliSeries in self.oliData]))
            #else cirIndex has length 0 so there are no circuits in this dataset (even though oli_data can contain data)
        elif not static:
            assert(time_data is None), "time_data must be None when oli_data is"
            assert(rep_data is None), "rep_data must be None when oli_data is"
            assert(len(self.cirIndex) == 0), "circuit specified without data!"
            self.oliData = []
            self.timeData = []
            self.repData = None
        else:
            raise ValueError("Series data must be specified when creating a static DataSet")
        # self.bStatic
        self.bStatic = static
        # collision action
        assert(collision_action in ('aggregate', 'overwrite', 'keepseparate'))
        self.collisionAction = collision_action
        # comment
        self.comment = comment
        # self.ffdata : fourier filtering data
        self.ffdata = {}
        #data types - should stay in sync with MultiDataSet
        self.oliType = Oindex_type
        self.timeType = Time_type
        self.repType = Repcount_type
        #auxiliary info
        if aux_info is None:
            self.auxInfo = _defaultdict(dict)
        else:
            self.auxInfo = _defaultdict(dict, aux_info)
        # count cache (only used when static; not saved/loaded from disk)
        if static:
            self.cnt_cache = {opstr: _ld.OutcomeLabelDict() for opstr in self.cirIndex}
        else:
            self.cnt_cache = None
    def __iter__(self):
        """Iterate over the circuits (keys) of this DataSet."""
        return self.cirIndex.__iter__()  # iterator over circuits
    def __len__(self):
        """The number of circuits in this DataSet."""
        return len(self.cirIndex)
    def __contains__(self, circuit):
        """
        Test whether data set contains a given circuit.

        Parameters
        ----------
        circuit : tuple or Circuit
            A tuple of operation labels or a Circuit instance
            which specifies the circuit to check for.

        Returns
        -------
        bool
            whether circuit was found.
        """
        if not isinstance(circuit, _cir.Circuit):
            circuit = _cir.Circuit(circuit)  # normalize tuples to Circuit keys
        return circuit in self.cirIndex
    def __hash__(self):
        """Hash via the dataset's uuid; only hashable once uuid is set."""
        if self.uuid is not None:
            return hash(self.uuid)
        else:
            raise TypeError('Use digest hash')
    def __getitem__(self, circuit):
        """Return the _DataSetRow for `circuit` (see `_get_row`)."""
        return self._get_row(circuit)
    def __setitem__(self, circuit, outcome_dict_or_series):
        """Assign count data for `circuit`, overwriting any existing data."""
        ca = self.collisionAction
        self.collisionAction = 'overwrite'  # overwrite data when assigning (this seems most natural)
        try:
            ret = self._set_row(circuit, outcome_dict_or_series)
        finally:
            # restore the configured collision action even if _set_row raises
            self.collisionAction = ca
        return ret
    def __delitem__(self, circuit):
        """Remove `circuit` (a Circuit or tuple of labels) and its data."""
        if not isinstance(circuit, _cir.Circuit):
            circuit = _cir.Circuit(circuit)
        self._remove([self.cirIndex[circuit]])
    def _get_row(self, circuit):
        """
        Get a row of data from this DataSet.

        Parameters
        ----------
        circuit : Circuit or tuple
            The gate sequence to extract data for.

        Returns
        -------
        _DataSetRow
        """
        #Convert to circuit
        # needed because name-only Labels don't hash the same as strings
        # so key lookups need to be done at least with tuples of Labels.
        circuit = _cir.Circuit.cast(circuit)
        #Note: cirIndex value is either an int (non-static) or a slice (static)
        repData = self.repData[self.cirIndex[circuit]] \
            if (self.repData is not None) else None
        # only static datasets have a per-circuit count cache to hand the row
        return _DataSetRow(self, self.oliData[self.cirIndex[circuit]],
                           self.timeData[self.cirIndex[circuit]], repData,
                           self.cnt_cache[circuit] if self.bStatic else None,
                           self.auxInfo[circuit])
def _set_row(self, circuit, outcome_dict_or_series):
"""
Set the counts for a row of this DataSet.
Parameters
----------
circuit : Circuit or tuple
The gate sequence to extract data for.
outcome_dict_or_series : dict or tuple
The outcome count data, either a dictionary of outcome counts (with keys
as outcome labels) or a tuple of lists. In the latter case this can be
a 2-tuple: (outcome-label-list, timestamp-list) or a 3-tuple:
(outcome-label-list, timestamp-list, repetition-count-list).
Returns
-------
None
"""
circuit = _cir.Circuit.cast(circuit)
if isinstance(outcome_dict_or_series, dict): # a dict of counts
self.add_count_dict(circuit, outcome_dict_or_series)
else: # a tuple of lists
assert(len(outcome_dict_or_series) >= 2), \
"Must minimally set with (outcome-label-list, time-stamp-list)"
self.add_raw_series_data(circuit, *outcome_dict_or_series)
def keys(self):
"""
Returns the circuits used as keys of this DataSet.
Returns
-------
list
A list of Circuit objects which index the data
counts within this data set.
"""
yield from self.cirIndex.keys()
    def items(self):
        """
        Iterator over `(circuit, timeSeries)` pairs.

        Here `circuit` is a tuple of operation labels and `timeSeries` is a
        :class:`_DataSetRow` instance, which behaves similarly to a list of
        spam labels whose index corresponds to the time step.

        Returns
        -------
        _DataSetKVIterator
        """
        return _DataSetKVIterator(self)
    def values(self):
        """
        Iterator over _DataSetRow instances corresponding to the time series data for each circuit.

        Returns
        -------
        _DataSetValueIterator
        """
        return _DataSetValueIterator(self)
@property
def outcome_labels(self):
"""
Get a list of *all* the outcome labels contained in this DataSet.
Returns
-------
list of strings or tuples
A list where each element is an outcome label (which can
be a string or a tuple of strings).
"""
return list(self.olIndex.keys())
@property
def timestamps(self):
"""
Get a list of *all* the (unique) timestamps contained in this DataSet.
Returns
-------
list of floats
A list where each element is a timestamp.
"""
ret = set()
for row in self.values():
ret.update(row.time)
return sorted(list(ret))
def gate_labels(self, prefix='G'):
"""
Get a list of all the distinct operation labels used in the circuits of this dataset.
Parameters
----------
prefix : str
Filter the circuit labels so that only elements beginning with
this prefix are returned. `None` performs no filtering.
Returns
-------
list of strings
A list where each element is a operation label.
"""
opLabels = []
for opLabelString in self:
for opLabel in opLabelString:
if not prefix or opLabel.name.startswith(prefix):
if opLabel not in opLabels: opLabels.append(opLabel)
return opLabels
    def degrees_of_freedom(self, circuits=None, method="present_outcomes-1",
                           aggregate_times=True):
        """
        Returns the number of independent degrees of freedom in the data for the circuits in `circuits`.

        Parameters
        ----------
        circuits : list of Circuits
            The list of circuits to count degrees of freedom for.  If `None`
            then all of the `DataSet`'s strings are used.

        method : {'all_outcomes-1', 'present_outcomes-1', 'tuned'}
            How the degrees of freedom should be computed. 'all_outcomes-1' takes
            the number of circuits and multiplies this by the *total* number of outcomes
            (the length of what is returned by `outcome_labels()`) minus one.
            'present_outcomes-1' counts on a per-circuit basis the number of
            present (usually = non-zero) outcomes recorded minus one. 'tuned' should
            be the most accurate, as it accounts for low-N "Poisson bump" behavior,
            but it is not the default because it is still under development. For
            timestamped data, see `aggregate_times` below.

        aggregate_times : bool, optional
            Whether counts that occur at different times should be tallied separately.
            If True, then even when counts occur at different times degrees of freedom
            are tallied on a per-circuit basis. If False, then counts occurring at
            distinct times are treated as independent of those at any other time, and
            are tallied separately. So, for example, if `aggregate_times` is False and
            a data row has 0- and 1-counts of 45 & 55 at time=0 and 42 and 58 at time=1
            this row would contribute *2* degrees of freedom, not 1. It can sometimes be
            useful to set this to False when the `DataSet` holds coarse-grained data, but
            usually you want this to be left as True (especially for time-series data).

        Returns
        -------
        int
        """
        if circuits is None:
            circuits = list(self.keys())
        nDOF = 0
        Nout = len(self.olIndex)
        def compute_tuned_expected_llr(cur_outcomes):
            # Expected log-likelihood-ratio contribution per outcome, with
            # empirically tuned weights for low-count ("Poisson bump") bins.
            contribs = []  # LLR_expectation = 0.0
            for cnt in cur_outcomes.values():
                if cnt >= 6: contribs.append(1)  # LLR_expectation += 1
                elif cnt == 5: contribs.append(1.1)
                elif cnt == 4: contribs.append(1.2)
                elif cnt == 3: contribs.append(0.8)
                elif cnt == 2: contribs.append(0.65)  # LLR_expectation += 0.6 #1.05
                elif cnt == 1: contribs.append(2.5)  # LLR_expectation += 2.4 #1.1
                elif cnt == 0: contribs.append(0)  # LLR_expectation += 0.0 #0.18
            LLR_expectation = sum(contribs)
            nZeros = Nout - len(cur_outcomes)  # number of (implied) zero-counts
            if nZeros == 0:
                LLR_expectation -= min(contribs)
                # subtract contribution from one (we choose lowest-contributing) outcome b/c sum constrained to == 1
            return LLR_expectation
        for opstr in circuits:
            dsRow = self[opstr]
            cur_t = dsRow.time[0]
            #cur_outcomes = set()  # holds *distinct* outcomes at current time
            cur_outcomes = _defaultdict(lambda: 0)  # holds *distinct* outcomes at current time
            for ol, t, rep in dsRow:
                if aggregate_times or t == cur_t:
                    #cur_outcomes.add(ol)
                    cur_outcomes[ol] += rep
                else:
                    # time stamp changed: tally DOF for the finished time bin
                    #assume final outcome at each time is constrained
                    if method == 'all_outcomes-1': nOutcomes = Nout
                    elif method == 'present_outcomes-1': nOutcomes = len(cur_outcomes)
                    else:  # "tuned"
                        nOutcomes = compute_tuned_expected_llr(cur_outcomes)
                        nOutcomes += 1  # +1 to counteract -1 below, as this is already <LLR>
                    nDOF += nOutcomes - 1
                    #cur_outcomes = set([ol])
                    cur_outcomes = _defaultdict(lambda: 0); cur_outcomes[ol] += rep
                    cur_t = t
            if method == 'all_outcomes-1': nOutcomes = Nout
            elif method == 'present_outcomes-1': nOutcomes = len(cur_outcomes)
            elif method == 'tuned':
                nOutcomes = compute_tuned_expected_llr(cur_outcomes)
                nOutcomes += 1  # +1 to counteract -1 below, as this is already <LLR>
            else: raise ValueError("Invalid `method` argument: %s" % method)
            nDOF += nOutcomes - 1  # last time stamp
        return nDOF
    def _collisionaction_update_circuit(self, circuit):
        """
        Apply this DataSet's collision action to `circuit` before adding data.

        In "keepseparate" mode a duplicate circuit gets its `.occurrence` id
        set to the next unused positive integer so it is stored separately;
        in "aggregate"/"overwrite" modes any occurrence id is stripped so a
        duplicate maps onto the existing entry.  Returns the (possibly copied
        and re-tagged) circuit.
        """
        if not isinstance(circuit, _cir.Circuit):
            circuit = _cir.Circuit(circuit)  # make sure we have a Circuit
        # if "keepseparate" mode, set occurrence id existing circuits to next available (positive) integer.
        if self.collisionAction == "keepseparate":
            if circuit in self.cirIndex:
                tagged_circuit = circuit.copy()
                i = 1; tagged_circuit.occurrence = i
                while tagged_circuit in self.cirIndex:
                    i += 1; tagged_circuit.occurrence = i
                #add data for a new (duplicate) circuit
                circuit = tagged_circuit
        # in other modes ("overwrite" and "aggregate"), strip off occurrence so duplicates are acted on appropriately
        elif circuit.occurrence is not None:
            stripped_circuit = circuit.copy()
            stripped_circuit.occurrence = None
            circuit = stripped_circuit
        return circuit
def _add_explicit_repetition_counts(self):
"""
Build internal repetition counts if they don't exist already.
This method is usually unnecessary, as repetition counts are
almost always build as soon as they are needed.
Returns
-------
None
"""
if self.repData is not None: return
if self.bStatic:
raise ValueError("Cannot build repetition counts in a static DataSet object")
self.repData = []
for oliAr in self.oliData:
self.repData.append(_np.ones(len(oliAr), self.repType))
    def add_count_dict(self, circuit, count_dict, record_zero_counts=True, aux=None, update_ol=True):
        """
        Add a single circuit's counts to this DataSet

        Parameters
        ----------
        circuit : tuple or Circuit
            A tuple of operation labels specifying the circuit or a Circuit object

        count_dict : dict
            A dictionary with keys = outcome labels and values = counts

        record_zero_counts : bool, optional
            Whether zero-counts are actually recorded (stored) in this DataSet.
            If False, then zero counts are ignored, except for potentially
            registering new outcome labels.

        aux : dict, optional
            A dictionary of auxiliary meta information to be included with
            this set of data counts (associated with `circuit`).

        update_ol : bool, optional
            This argument is for internal use only and should be left as True.

        Returns
        -------
        None
        """
        if self.bStatic: raise ValueError("Cannot add data to a static DataSet object")
        #Convert input to an OutcomeLabelDict
        if isinstance(count_dict, _ld.OutcomeLabelDict):
            outcomeCounts = count_dict
        elif isinstance(count_dict, _OrderedDict):  # then don't sort keys
            outcomeCounts = _ld.OutcomeLabelDict(list(count_dict.items()))
        else:
            # sort key for deterministic ordering of *new* outcome labels)
            outcomeCounts = _ld.OutcomeLabelDict([
                (lbl, count_dict[lbl]) for lbl in sorted(list(count_dict.keys()))])
        outcomeLabelList = list(outcomeCounts.keys())
        countList = list(outcomeCounts.values())
        # apply the collision action, then choose time stamps: aggregating
        # appends at the next integer time, otherwise data is overwritten at t=0
        circuit = self._collisionaction_update_circuit(circuit)
        if self.collisionAction == "aggregate" and circuit in self:
            iNext = int(max(self[circuit].time)) + 1 \
                if (len(self[circuit].time) > 0) else 0
            timeStampList = [iNext] * len(countList)
            overwriteExisting = False
        else:
            timeStampList = [0] * len(countList)
            overwriteExisting = True
        self.add_raw_series_data(circuit, outcomeLabelList, timeStampList,
                                 countList, overwriteExisting, record_zero_counts,
                                 aux, update_ol, unsafe=True)
        #unsafe=True OK b/c outcome_label_list contains the keys of an OutcomeLabelDict
def add_count_list(self, circuit, outcome_labels, counts, record_zero_counts=True,
aux=None, update_ol=True, unsafe=False):
"""
Add a single circuit's counts to this DataSet
Parameters
----------
circuit : tuple or Circuit
A tuple of operation labels specifying the circuit or a Circuit object
outcome_labels : list or tuple
The outcome labels corresponding to `counts`.
counts : list or tuple
The counts themselves.
record_zero_counts : bool, optional
Whether zero-counts are actually recorded (stored) in this DataSet.
If False, then zero counts are ignored, except for potentially
registering new outcome labels.
aux : dict, optional
A dictionary of auxiliary meta information to be included with
this set of data counts (associated with `circuit`).
update_ol : bool, optional
This argument is for internal use only and should be left as True.
unsafe : bool, optional
`True` means that `outcome_labels` is guaranteed to hold tuple-type
outcome labels and never plain strings. Only set this to `True` if
you know what you're doing.
Returns
-------
None
"""
if self.bStatic: raise ValueError("Cannot add data to a static DataSet object")
circuit = self._collisionaction_update_circuit(circuit)
if self.collisionAction == "aggregate" and circuit in self:
iNext = int(max(self[circuit].time)) + 1 \
if (len(self[circuit].time) > 0) else 0
timeStampList = [iNext] * len(counts)
overwriteExisting = False
else:
timeStampList = [0] * len(counts)
overwriteExisting = True
self.add_raw_series_data(circuit, outcome_labels, timeStampList,
counts, overwriteExisting, record_zero_counts,
aux, update_ol, unsafe=unsafe)
def add_count_arrays(self, circuit, outcome_index_array, count_array,
record_zero_counts=True, aux=None):
"""
Add the outcomes for a single circuit, formatted as raw data arrays.
Parameters
----------
circuit : Circuit
The circuit to add data for.
outcome_index_array : numpy.ndarray
An array of outcome indices, which must be values of `self.olIndex`
(which maps outcome labels to indices).
count_array : numpy.ndarray
An array of integer (or sometimes floating point) counts, one corresponding
to each outcome index (element of `outcome_index_array`).
record_zero_counts : bool, optional
Whether zero counts (zeros in `count_array` should be stored explicitly or
not stored and inferred. Setting to False reduces the space taken by data
sets containing lots of zero counts, but makes some objective function evaluations
less precise.
aux : dict or None, optional
If not `None` a dictionary of user-defined auxiliary information that
should be associated with this circuit.
Returns
-------
None
"""
if self.collisionAction == "aggregate" and circuit in self:
iNext = int(max(self[circuit].time)) + 1 \
if (len(self[circuit].time) > 0) else 0
time_array = iNext * _np.ones(count_array.shape[0], self.timeType)
overwriteExisting = False
else:
time_array = _np.zeros(count_array.shape[0], self.timeType)
overwriteExisting = True
self._add_raw_arrays(circuit, outcome_index_array, time_array, count_array,
overwriteExisting, record_zero_counts, aux)
def add_cirq_trial_result(self, circuit, trial_result, key):
"""
Add a single circuit's counts --- stored in a Cirq TrialResult --- to this DataSet
Parameters
----------
circuit : tuple or Circuit
A tuple of operation labels specifying the circuit or a Circuit object.
Note that this must be a PyGSTi circuit --- not a Cirq circuit.
trial_result : cirq.TrialResult
The TrialResult to add
key : str
The string key of the measurement. Set by cirq.measure.
Returns
-------
None
"""
try:
import cirq
except ImportError:
raise ImportError("Cirq is required for this operation, and it does not appear to be installed.")
# TrialResult.histogram returns a collections.Counter object, which is a subclass of dict.
histogram_counter = trial_result.histogram(key=key)
# The keys in histogram_counter are integers, but pyGSTi likes dictionary keys to be strings.
count_dict = {str(key): value for key, value in histogram_counter.items()}
self.add_count_dict(circuit, count_dict)
def add_raw_series_data(self, circuit, outcome_label_list, time_stamp_list,
rep_count_list=None, overwrite_existing=True,
record_zero_counts=True, aux=None, update_ol=True,
unsafe=False):
"""
Add a single circuit's counts to this DataSet
Parameters
----------
circuit : tuple or Circuit
A tuple of operation labels specifying the circuit or a Circuit object
outcome_label_list : list
A list of outcome labels (strings or tuples). An element's index
links it to a particular time step (i.e. the i-th element of the
list specifies the outcome of the i-th measurement in the series).
time_stamp_list : list
A list of floating point timestamps, each associated with the single
corresponding outcome in `outcome_label_list`. Must be the same length
as `outcome_label_list`.
rep_count_list : list, optional
A list of integer counts specifying how many outcomes of type given
by `outcome_label_list` occurred at the time given by `time_stamp_list`.
If None, then all counts are assumed to be 1. When not None, must
be the same length as `outcome_label_list`.
overwrite_existing : bool, optional
Whether to overwrite the data for `circuit` (if it exists). If
False, then the given lists are appended (added) to existing data.
record_zero_counts : bool, optional
Whether zero-counts (elements of `rep_count_list` that are zero) are
actually recorded (stored) in this DataSet. If False, then zero
counts are ignored, except for potentially registering new outcome
labels.
aux : dict, optional
A dictionary of auxiliary meta information to be included with
this set of data counts (associated with `circuit`).
update_ol : bool, optional
This argument is for internal use only and should be left as True.
unsafe : bool, optional
When True, don't bother checking that outcome_label_list contains
tuple-type outcome labels and automatically upgrading strings to
1-tuples. Only set this to True if you know what you're doing
and need the marginally faster performance.
Returns
-------
None
"""
if self.bStatic: raise ValueError("Cannot add data to a static DataSet object")
circuit = self._collisionaction_update_circuit(circuit)
if unsafe:
tup_outcomeLabelList = outcome_label_list
else:
#strings -> tuple outcome labels
tup_outcomeLabelList = [_ld.OutcomeLabelDict.to_outcome(ol)
for ol in outcome_label_list]
#Add any new outcome labels
self.add_outcome_labels(tup_outcomeLabelList, update_ol)
oliArray = _np.array([self.olIndex[ol] for ol in tup_outcomeLabelList], self.oliType)
timeArray = _np.array(time_stamp_list, self.timeType)
assert(oliArray.shape == timeArray.shape), \
"Outcome-label and time stamp lists must have the same length!"
if rep_count_list is not None:
repArray = _np.array(rep_count_list, self.repType)
else:
repArray = None
self._add_raw_arrays(circuit, oliArray, timeArray, repArray,
overwrite_existing, record_zero_counts, aux)
def _add_raw_arrays(self, circuit, oli_array, time_array, rep_array,
overwrite_existing, record_zero_counts, aux):
if rep_array is None:
if self.repData is not None:
rep_array = _np.ones(len(oli_array), self.repType)
else:
if self.repData is None:
#rep count data was given, but we're not currently holding repdata,
# so we need to build this up for all existings sequences:
self._add_explicit_repetition_counts()
if not record_zero_counts:
# Go through repArray and remove any zeros, along with
# corresponding elements of oliArray and timeArray
mask = rep_array != 0 # boolean array (note: == float comparison *is* desired)
if not _np.all(mask):
rep_array = rep_array[mask]
oli_array = oli_array[mask]
time_array = time_array[mask]
if circuit in self.cirIndex:
circuitIndx = self.cirIndex[circuit]
if overwrite_existing:
self.oliData[circuitIndx] = oli_array # OVERWRITE existing time series
self.timeData[circuitIndx] = time_array # OVERWRITE existing time series
if rep_array is not None: self.repData[circuitIndx] = rep_array
else:
self.oliData[circuitIndx] = _np.concatenate((self.oliData[circuitIndx], oli_array))
self.timeData[circuitIndx] = _np.concatenate((self.timeData[circuitIndx], time_array))
if rep_array is not None:
self.repData[circuitIndx] = _np.concatenate((self.repData[circuitIndx], rep_array))
else:
#add data for a new circuit
assert(len(self.oliData) == len(self.timeData)), "OLI and TIME data are out of sync!!"
circuitIndx = len(self.oliData) # index of to-be-added circuit
self.oliData.append(oli_array)
self.timeData.append(time_array)
if rep_array is not None: self.repData.append(rep_array)
self.cirIndex[circuit] = circuitIndx
if aux is not None: self.add_auxiliary_info(circuit, aux)
def update_ol(self):
"""
Updates the internal outcome-label list in this dataset.
Call this after calling add_count_dict(...) or add_raw_series_data(...)
with `update_olIndex=False`.
Returns
-------
None
"""
self.ol = _OrderedDict([(i, sl) for (sl, i) in self.olIndex.items()])
def add_series_data(self, circuit, count_dict_list, time_stamp_list,
overwrite_existing=True, record_zero_counts=True, aux=None):
"""
Add a single circuit's counts to this DataSet
Parameters
----------
circuit : tuple or Circuit
A tuple of operation labels specifying the circuit or a Circuit object
count_dict_list : list
A list of dictionaries holding the outcome-label:count pairs for each
time step (times given by `time_stamp_list`.
time_stamp_list : list
A list of floating point timestamps, each associated with an entire
dictionary of outcomes specified by `count_dict_list`.
overwrite_existing : bool, optional
If `True`, overwrite any existing data for the `circuit`. If
`False`, add the count data with the next non-negative integer
timestamp.
record_zero_counts : bool, optional
Whether zero-counts (elements of the dictionaries in `count_dict_list` that
are zero) are actually recorded (stored) in this DataSet. If False, then
zero counts are ignored, except for potentially registering new outcome
labels.
aux : dict, optional
A dictionary of auxiliary meta information to be included with
this set of data counts (associated with `circuit`).
Returns
-------
None
"""
expanded_outcomeList = []
expanded_timeList = []
expanded_repList = []
for (cntDict, t) in zip(count_dict_list, time_stamp_list):
if not isinstance(cntDict, _OrderedDict):
ols = sorted(list(cntDict.keys()))
else: ols = list(cntDict.keys())
for ol in ols: # loop over outcome labels
expanded_outcomeList.append(ol)
expanded_timeList.append(t)
expanded_repList.append(cntDict[ol]) # could do this only for counts > 1
return self.add_raw_series_data(circuit, expanded_outcomeList,
expanded_timeList, expanded_repList,
overwrite_existing, record_zero_counts, aux)
    def aggregate_outcomes(self, label_merge_dict, record_zero_counts=True):
        """
        Creates a DataSet which merges certain outcomes in this DataSet.

        Used, for example, to aggregate a 2-qubit 4-outcome DataSet into a
        1-qubit 2-outcome DataSet.

        Parameters
        ----------
        label_merge_dict : dictionary
            The dictionary whose keys define the new DataSet outcomes, and whose items
            are lists of input DataSet outcomes that are to be summed together. For example,
            if a two-qubit DataSet has outcome labels "00", "01", "10", and "11", and
            we want to ''aggregate out'' the second qubit, we could use label_merge_dict =
            {'0':['00','01'],'1':['10','11']}. When doing this, however, it may be better
            to use :function:`filter_qubits` which also updates the circuits.
        record_zero_counts : bool, optional
            Whether zero-counts are actually recorded (stored) in the returned
            (merged) DataSet. If False, then zero counts are ignored, except for
            potentially registering new outcome labels.

        Returns
        -------
        merged_dataset : DataSet object
            The DataSet with outcomes merged according to the rules given in label_merge_dict.
        """
        #static_self = self.copy()
        #static_self.done_adding_data()  # makes static, so we can assume this below
        # strings -> tuple outcome labels in keys and values of label_merge_dict
        to_outcome = _ld.OutcomeLabelDict.to_outcome  # shorthand
        label_merge_dict = {to_outcome(key): list(map(to_outcome, val))
                            for key, val in label_merge_dict.items()}
        # Every outcome of this dataset must appear somewhere in the merge dict's values.
        merge_dict_old_outcomes = [outcome for sublist in label_merge_dict.values() for outcome in sublist]
        if not set(self.outcome_labels).issubset(merge_dict_old_outcomes):
            raise ValueError(
                "`label_merge_dict` must account for all the outcomes in original dataset."
                " It's missing directives for:\n%s" %
                '\n'.join(set(map(str, self.outcome_labels)) - set(map(str, merge_dict_old_outcomes)))
            )
        new_outcomes = sorted(list(label_merge_dict.keys()))
        new_outcome_indices = _OrderedDict([(ol, i) for i, ol in enumerate(new_outcomes)])
        nNewOutcomes = len(new_outcomes)
        #Count the number of time steps so we allocate enough space
        nSteps = 0
        for key, dsrow in self.items():
            cur_t = None
            for t in dsrow.time:
                if t != cur_t:  # counts distinct *consecutive* timestamps per circuit
                    nSteps += 1
                    cur_t = t
        #idea is that we create oliData, timeData, repData, and circuitIndices for the
        # merged dataset rather than looping over insertion, as this is faster
        oliData = _np.empty(nSteps * nNewOutcomes, self.oliType)
        repData = _np.empty(nSteps * nNewOutcomes, self.repType)
        timeData = _np.empty(nSteps * nNewOutcomes, self.timeType)
        oli_map = {}  # maps old outcome label indices to new ones
        for new_outcome, old_outcome_list in label_merge_dict.items():
            new_index = new_outcome_indices[new_outcome]
            for old_outcome in old_outcome_list:
                oli_map[self.olIndex[old_outcome]] = new_index
        #Future - when record_zero_counts=False these may not need to be so large
        # Scratch buffers reused by the add_cnts closures below.
        new_olis = _np.array(range(nNewOutcomes), _np.int64)
        new_cnts = _np.zeros(nNewOutcomes, self.repType)
        if record_zero_counts:
            # Writes a *full* block of nNewOutcomes entries (zeros included)
            # for one timeslice.  `cnts` maps new outcome-index -> count.
            def add_cnts(t, cnts, offset):
                new_cnts[:] = 0
                for nonzero_oli, cnt in cnts.items():
                    new_cnts[nonzero_oli] = cnt
                timeData[offset:offset + nNewOutcomes] = t
                oliData[offset:offset + nNewOutcomes] = new_olis
                repData[offset:offset + nNewOutcomes] = new_cnts  # a length-nNewOutcomes array
                return nNewOutcomes
        else:
            # Writes only the nonzero entries of one timeslice.
            # `cnts` maps new outcome-index -> count (nonzero entries only).
            def add_cnts(t, cnts, offset):
                nNewCnts = len(cnts)
                #new_olis = _np.empty(nNewCnts, _np.int64)
                #new_cnts = _np.empty(nNewCnts, self.repType)
                for ii, (nonzero_oli, cnt) in enumerate(cnts.items()):
                    new_olis[ii] = nonzero_oli
                    new_cnts[ii] = cnt
                timeData[offset:offset + nNewCnts] = t
                oliData[offset:offset + nNewCnts] = new_olis[0:nNewCnts]
                repData[offset:offset + nNewCnts] = new_cnts[0:nNewCnts]
                return nNewCnts  # return the number of added counts
        k = 0  # beginning of current circuit data in 1D arrays: oliData, timeData, repData
        circuitIndices = _OrderedDict()
        for key, dsrow in self.items():
            last_t = dsrow.time[0]
            #Below code is faster version of: mapped_oli = [oli_map[x] for x in dsrow.oli]
            mapped_oli = dsrow.oli.copy()
            for from_oli, to_oli in oli_map.items():
                mapped_oli[dsrow.oli == from_oli] = to_oli
            # NOTE(review): reps uses self.timeType (not repType) in the no-repData
            # case -- presumably intentional for dtype compatibility; confirm.
            reps = _np.ones(len(dsrow.time), self.timeType) if (self.repData is None) else dsrow.reps
            cnts = _defaultdict(lambda: 0)
            i = 0  # offset to current timeslice
            # NOTE: the loop variable `reps` shadows the reps array above; this is
            # safe only because zip(...) captured the array before the loop began.
            for oli, t, reps in zip(mapped_oli, dsrow.time, reps):
                if t != last_t:  # timestamp changed: flush the accumulated slice
                    i += add_cnts(last_t, cnts, k + i)
                    last_t = t; cnts.clear()
                cnts[oli] += reps
            if len(cnts) > 0:  # flush the final slice
                i += add_cnts(last_t, cnts, k + i)
            circuitIndices[key] = slice(k, k + i)
            k += i
        # Only the first k entries were filled; slice before constructing.
        merged_dataset = DataSet(oliData[0:k], timeData[0:k], repData[0:k], circuit_indices=circuitIndices,
                                 outcome_label_indices=new_outcome_indices, static=True)
        return merged_dataset
    def aggregate_std_nqubit_outcomes(self, qubit_indices_to_keep, record_zero_counts=True):
        """
        Creates a DataSet which merges certain outcomes in this DataSet.

        Used, for example, to aggregate a 2-qubit 4-outcome DataSet into a
        1-qubit 2-outcome DataSet. This assumes that outcome labels are in the
        standard format whereby each qubit corresponds to a single '0' or '1'
        character.

        Parameters
        ----------
        qubit_indices_to_keep : list
            A list of integers specifying which qubits should be kept, that is,
            *not* aggregated.
        record_zero_counts : bool, optional
            Whether zero-counts are actually recorded (stored) in the returned
            (merged) DataSet. If False, then zero counts are ignored, except for
            potentially registering new outcome labels.

        Returns
        -------
        merged_dataset : DataSet object
            The DataSet with outcomes merged.
        """
        # Build the merge dict by projecting each outcome string onto the kept qubits.
        label_merge_dict = _defaultdict(list)
        for ol, i in self.olIndex.items():
            assert(len(ol) == 1), "Cannot merge non-simple outcomes!"  # should be a 1-tuple
            reduced = (''.join([ol[0][i] for i in qubit_indices_to_keep]),)  # a tuple
            label_merge_dict[reduced].append(ol)
        label_merge_dict = dict(label_merge_dict)  # return a *normal* dict
        new_outcomes = sorted(list(label_merge_dict.keys()))
        new_outcome_indices = _OrderedDict([(ol, i) for i, ol in enumerate(new_outcomes)])
        nNewOutcomes = len(new_outcomes)
        #Count the number of time steps so we allocate enough space
        nSteps = 0
        for dsrow in self.values():
            cur_t = None
            for t in dsrow.time:
                if t != cur_t:  # counts distinct *consecutive* timestamps per circuit
                    nSteps += 1
                    cur_t = t
        #idea is that we create oliData, timeData, repData, and circuitIndices for the
        # merged dataset rather than looping over insertion, as this is faster
        oliData = _np.empty(nSteps * nNewOutcomes, self.oliType)
        repData = _np.empty(nSteps * nNewOutcomes, self.repType)
        timeData = _np.empty(nSteps * nNewOutcomes, self.timeType)
        oli_map = {}  # maps old outcome label indices to new ones
        for new_outcome, old_outcome_list in label_merge_dict.items():
            new_index = new_outcome_indices[new_outcome]
            for old_outcome in old_outcome_list:
                oli_map[self.olIndex[old_outcome]] = new_index
        #Future - when record_zero_counts=False these may not need to be so large
        # Scratch buffers reused by the add_cnts closures below.
        new_olis = _np.array(range(nNewOutcomes), _np.int64)
        new_cnts = _np.zeros(nNewOutcomes, self.repType)
        if record_zero_counts:
            # Writes a *full* block of nNewOutcomes entries (zeros included)
            # for one timeslice.  `cnts` maps new outcome-index -> count.
            def add_cnts(t, cnts, offset):
                new_cnts[:] = 0
                for nonzero_oli, cnt in cnts.items():
                    new_cnts[nonzero_oli] = cnt
                timeData[offset:offset + nNewOutcomes] = t
                oliData[offset:offset + nNewOutcomes] = new_olis
                repData[offset:offset + nNewOutcomes] = new_cnts  # a length-nNewOutcomes array
                return nNewOutcomes
        else:
            # Writes only the nonzero entries of one timeslice.
            # `cnts` maps new outcome-index -> count (nonzero entries only).
            def add_cnts(t, cnts, offset):
                nNewCnts = len(cnts)
                #new_olis = _np.empty(nNewCnts, _np.int64)
                #new_cnts = _np.empty(nNewCnts, self.repType)
                for ii, (nonzero_oli, cnt) in enumerate(cnts.items()):
                    new_olis[ii] = nonzero_oli
                    new_cnts[ii] = cnt
                timeData[offset:offset + nNewCnts] = t
                oliData[offset:offset + nNewCnts] = new_olis[0:nNewCnts]
                repData[offset:offset + nNewCnts] = new_cnts[0:nNewCnts]
                return nNewCnts  # return the number of added counts
        k = 0  # beginning of current circuit data in 1D arrays: oliData, timeData, repData
        circuitIndices = _OrderedDict()
        for key, dsrow in self.items():
            last_t = dsrow.time[0] if len(dsrow.time) > 0 else None
            # When there are few data entries, a direct map is faster than the
            # masked-assignment loop below.
            if len(dsrow.oli) < len(oli_map):
                mapped_oli = _np.array([oli_map[x] for x in dsrow.oli])
            else:
                mapped_oli = dsrow.oli.copy()
                for from_oli, to_oli in oli_map.items():
                    mapped_oli[dsrow.oli == from_oli] = to_oli
            # NOTE(review): reps uses self.timeType (not repType) in the no-repData
            # case -- presumably intentional for dtype compatibility; confirm.
            reps = _np.ones(len(dsrow.time), self.timeType) if (self.repData is None) else dsrow.reps
            cnts = _defaultdict(lambda: 0)
            i = 0  # offset to current timeslice
            # NOTE: the loop variable `reps` shadows the reps array above; this is
            # safe only because zip(...) captured the array before the loop began.
            for oli, t, reps in zip(mapped_oli, dsrow.time, reps):
                if t != last_t:  # timestamp changed: flush the accumulated slice
                    i += add_cnts(last_t, cnts, k + i)
                    last_t = t; cnts.clear()
                cnts[oli] += reps
            if len(cnts) > 0:  # flush the final slice
                i += add_cnts(last_t, cnts, k + i)
            circuitIndices[key] = slice(k, k + i)
            k += i
        # Only the first k entries were filled; slice before constructing.
        merged_dataset = DataSet(oliData[0:k], timeData[0:k], repData[0:k], circuit_indices=circuitIndices,
                                 outcome_label_indices=new_outcome_indices, static=True)
        return merged_dataset
def add_auxiliary_info(self, circuit, aux):
"""
Add auxiliary meta information to `circuit`.
Parameters
----------
circuit : tuple or Circuit
A tuple of operation labels specifying the circuit or a Circuit object
aux : dict, optional
A dictionary of auxiliary meta information to be included with
this set of data counts (associated with `circuit`).
Returns
-------
None
"""
self.auxInfo[circuit].clear() # needed? (could just update?)
self.auxInfo[circuit].update(aux)
def add_counts_from_dataset(self, other_data_set):
"""
Append another DataSet's data to this DataSet
Parameters
----------
other_data_set : DataSet
The dataset to take counts from.
Returns
-------
None
"""
return self.add_series_from_dataset(other_data_set)
def add_series_from_dataset(self, other_data_set):
"""
Append another DataSet's series data to this DataSet
Parameters
----------
other_data_set : DataSet
The dataset to take time series data from.
Returns
-------
None
"""
if self.bStatic: raise ValueError("Cannot add data to a static DataSet object")
for circuit, dsRow in other_data_set.items():
self.add_raw_series_data(circuit, dsRow.outcomes, dsRow.time, dsRow.reps, False)
@property
def meantimestep(self):
"""
The mean time-step, averaged over the time-step for each circuit and over circuits.
Returns
-------
float
"""
timesteps = []
for key in self.keys():
timesteps.append(self[key].meantimestep)
return _np.mean(timesteps)
@property
def has_constant_totalcounts_pertime(self):
"""
True if the data for every circuit has the same number of total counts at every data collection time.
This will return True if there is a different number of total counts
per circuit (i.e., after aggregating over time), as long as every
circuit has the same total counts per time step (this will happen
when the number of time-steps varies between circuit).
Returns
-------
bool
"""
for key in self.keys():
numtotalcountspertime = None
dsrow = self[key]
if not dsrow.has_constant_totalcounts:
return False
if numtotalcountspertime is None:
numtotalcountspertime = dsrow.totalcounts_per_timestep
else:
if numtotalcountspertime != dsrow.totalcounts_per_timestep:
return False
return True
@property
def totalcounts_pertime(self):
"""
Total counts per time, if this is constant over times and circuits.
When that doesn't hold, an error is raised.
Returns
-------
float or int
"""
self.has_constant_totalcounts_pertime
key = list(self.keys())[0]
totalcountspertime = self[key].totalcounts_per_timestep
return totalcountspertime
@property
def has_constant_totalcounts(self):
"""
`True` if the data for every circuit has the same number of total counts.
Returns
-------
bool
"""
reps = []
for key in self.keys():
reps.append(sum(list(self[key].counts.values())))
firstrep = reps[0]
fixedtotalcounts = all([firstrep == i for i in reps])
return fixedtotalcounts
@property
def has_trivial_timedependence(self):
"""
`True` if all the data in this DataSet occurs at time 0.
Returns
-------
bool
"""
return all([_np.all(self.timeData[gsi] == 0) for gsi in self.cirIndex.values()])
    def __str__(self):
        """Return this DataSet's default ("auto" mode) string rendering."""
        return self.to_str()
def to_str(self, mode="auto"):
"""
Render this DataSet as a string.
Parameters
----------
mode : {"auto","time-dependent","time-independent"}
Whether to display the data as time-series of outcome counts
(`"time-dependent"`) or to report per-outcome counts aggregated over
time (`"time-independent"`). If `"auto"` is specified, then the
time-independent mode is used only if all time stamps in the
DataSet are equal to zero (trivial time dependence).
Returns
-------
str
"""
if mode == "auto":
mode = "time-independent" if self.has_trivial_timedependence else "time-dependent"
assert(mode in ('time-dependent', 'time-independent')), "Invalid `mode` argument: %s" % mode
if mode == "time-dependent":
s = "Dataset outcomes: " + str(self.olIndex) + "\n"
for circuit in self: # tuple-type operation label strings are keys
s += "%s :\n%s\n" % (circuit.str, self[circuit].to_str(mode))
return s + "\n"
else: # time-independent
s = ""
for circuit in self: # tuple-type operation label strings are keys
s += "%s : %s\n" % (circuit.str, self[circuit].to_str(mode))
return s + "\n"
    def truncate(self, list_of_circuits_to_keep, missing_action='raise'):
        """
        Create a truncated dataset comprised of a subset of the circuits in this dataset.

        Parameters
        ----------
        list_of_circuits_to_keep : list of (tuples or Circuits)
            A list of the circuits for the new returned dataset. If a
            circuit is given in this list that isn't in the original
            data set, `missing_action` determines the behavior.
        missing_action : {"raise","warn","ignore"}
            What to do when a string in `list_of_circuits_to_keep` is not in
            the data set (raise a KeyError, issue a warning, or do nothing).

        Returns
        -------
        DataSet
            The truncated data set.
        """
        missingStrs = []  # to issue warning - only used if missing_action=="warn"
        if self.bStatic:
            # Static path: build index maps that *reference* (not copy) this
            # dataset's underlying arrays.
            circuitIndices = []
            circuits = []
            used_oli = set()
            for opstr in list_of_circuits_to_keep:
                circuit = opstr if isinstance(opstr, _cir.Circuit) else _cir.Circuit(opstr)
                if circuit not in self.cirIndex:
                    if missing_action == "raise":
                        raise KeyError(("Circuit %s was not found in "
                                        "dataset being truncated and "
                                        "`missing_action` == 'raise'") % str(circuit))
                    elif missing_action == "warn":
                        missingStrs.append(circuit)
                        continue
                    elif missing_action == "ignore":
                        continue
                    else:
                        raise ValueError("Invalid `missing_action`: %s" % str(missing_action))
                #only keep track of circuits if they could be different from list_of_circuits_to_keep
                if missing_action != "raise": circuits.append(circuit)
                i = self.cirIndex[circuit]
                circuitIndices.append(i)
                used_oli.update(self.oliData[i])
            # With missing_action == "raise" nothing was skipped, so the input
            # list itself is the kept-circuit list.
            if missing_action == "raise": circuits = list_of_circuits_to_keep
            trunc_cirIndex = _OrderedDict(zip(circuits, circuitIndices))
            # Keep only the outcome labels that actually appear in the kept data.
            trunc_olIndex = _OrderedDict([(self.ol[i], i) for i in sorted(used_oli)])
            trunc_dataset = DataSet(self.oliData, self.timeData, self.repData,
                                    circuit_indices=trunc_cirIndex,
                                    outcome_label_indices=trunc_olIndex, static=True)  # reference (don't copy) counts
        else:
            # Non-static path: build a fresh dataset and copy each kept series.
            trunc_dataset = DataSet(outcome_labels=[])  # let outcome labels be added automatically
            for opstr in _lt.remove_duplicates(list_of_circuits_to_keep):
                circuit = opstr if isinstance(opstr, _cir.Circuit) else _cir.Circuit(opstr)
                if circuit in self.cirIndex:
                    circuitIndx = self.cirIndex[circuit]
                    repData = self.repData[circuitIndx].copy() if (self.repData is not None) else None
                    trunc_dataset.add_raw_series_data(circuit,
                                                      [self.ol[i] for i in self.oliData[circuitIndx]],
                                                      self.timeData[circuitIndx].copy(),
                                                      repData, unsafe=True)  # copy so truncated dataset can be modified
                elif missing_action == "raise":
                    raise KeyError(("Circuit %s was not found in "
                                    "dataset being truncated and "
                                    "`missing_action` == 'raise'") % str(circuit))
                elif missing_action == "warn":
                    missingStrs.append(circuit)
                elif missing_action != "ignore":
                    raise ValueError("Invalid `missing_action`: %s" % str(missing_action))
        if len(missingStrs) > 0:
            missingStrs.append("...")  # so elipses are shown when there's more strings
            _warnings.warn(("DataSet.truncate(...) was given %s strings to keep"
                            " that weren't in the original dataset:\n%s") %
                           (len(missingStrs) - 1, "\n".join(map(str, missingStrs[0:10]))))
        return trunc_dataset
    def time_slice(self, start_time, end_time, aggregate_to_time=None):
        """
        Creates a DataSet by aggregating the counts within the [`start_time`,`end_time`) interval.

        Parameters
        ----------
        start_time : float
            The starting time.
        end_time : float
            The ending time.
        aggregate_to_time : float, optional
            If not None, a single timestamp to give all the data in
            the specified range, resulting in time-independent
            `DataSet`. If None, then the original timestamps are
            preserved.

        Returns
        -------
        DataSet
        """
        tot = 0  # total number of counts that fall in [start_time, end_time)
        ds = DataSet(outcome_label_indices=self.olIndex)
        for opStr, dsRow in self.items():
            if dsRow.reps is None:
                # No repetition data stored: each entry counts once.
                reps = _np.ones(dsRow.oli.shape, self.repType)
            else: reps = dsRow.reps
            count_dict = {ol: 0 for ol in self.olIndex.keys()}
            times = []; ols = []; repCnts = []
            for oli, t, rep in zip(dsRow.oli, dsRow.time, reps):
                ol = self.ol[oli]  # index -> outcome label
                if start_time <= t < end_time:  # half-open interval, per docstring
                    if aggregate_to_time is not None:
                        count_dict[ol] += rep
                    else:
                        times.append(t)
                        ols.append(ol)
                        repCnts.append(rep)
                    tot += rep
            if aggregate_to_time is not None:
                # Collapse the accumulated counts to a single timestamp,
                # dropping zero-count outcomes.
                ols = [k for k in count_dict.keys() if count_dict[k] > 0]
                repCnts = [count_dict[k] for k in ols]
                times = [aggregate_to_time] * len(repCnts)
            ds.add_raw_series_data(opStr, ols, times, repCnts)
        if tot == 0:
            _warnings.warn("No counts in the requested time range: empty DataSet created")
        ds.done_adding_data()
        return ds
    def split_by_time(self, aggregate_to_time=None):
        """
        Creates a dictionary of DataSets, each of which is a equal-time slice of this DataSet.

        The keys of the returned dictionary are the distinct timestamps in this dataset.

        Parameters
        ----------
        aggregate_to_time : float, optional
            If not None, a single timestamp to give all the data in
            each returned data set, resulting in time-independent
            `DataSet`s. If None, then the original timestamps are
            preserved.

        Returns
        -------
        OrderedDict
            A dictionary of :class:`DataSet` objects whose keys are the
            timestamp values of the original (this) data set in sorted order.
        """
        # One destination DataSet per distinct timestamp, created on demand.
        dsDict = _defaultdict(lambda: DataSet(outcome_label_indices=self.olIndex))
        for opStr, dsRow in self.items():
            if dsRow.reps is None:
                # No repetition data stored: each entry counts once.
                reps = _np.ones(dsRow.oli.shape, self.repType)
            else: reps = dsRow.reps
            last_t = dsRow.time[0] if len(dsRow.time) > 0 else None
            assert(_np.all(_np.diff(dsRow.time) >= 0)), "This function assumes timestamps are sorted!"
            if aggregate_to_time is None:
                # Preserve original timestamps: buffer entries until the
                # timestamp changes, then flush to the matching sub-dataset.
                times = []; ols = []; repCnts = []
                for oli, t, rep in zip(dsRow.oli, dsRow.time, reps):
                    ol = self.ol[oli]  # index -> outcome label
                    if t == last_t:
                        times.append(t)
                        ols.append(ol)
                        repCnts.append(rep)
                    else:
                        dsDict[last_t].add_raw_series_data(opStr, ols, times, repCnts)
                        times = [t]; ols = [ol]; repCnts = [rep]
                        last_t = t
                if len(times) > 0:  # flush the final buffered timeslice
                    dsDict[last_t].add_raw_series_data(opStr, ols, times, repCnts)
            else:
                # Aggregate each timeslice's counts to a single timestamp.
                count_dict = {ol: 0 for ol in self.olIndex.keys()}
                for oli, t, rep in zip(dsRow.oli, dsRow.time, reps):
                    ol = self.ol[oli]  # index -> outcome label
                    if t == last_t:
                        count_dict[ol] += rep
                    else:
                        ols = [k for k in count_dict.keys() if count_dict[k] > 0]
                        repCnts = [count_dict[k] for k in ols]
                        times = [aggregate_to_time] * len(repCnts)
                        dsDict[last_t].add_raw_series_data(opStr, ols, times, repCnts)
                        times = [t]; ols = [ol]; repCnts = [rep]
                        last_t = t
                # NOTE(review): in this branch `times` is only assigned when a
                # timestamp change occurs above, so for the very first circuit
                # with a single timestamp it may be unbound here (NameError),
                # and otherwise carries a stale value from the previous
                # circuit.  Looks fragile -- confirm intended behavior.
                if len(times) > 0:
                    ols = [k for k in count_dict.keys() if count_dict[k] > 0]
                    repCnts = [count_dict[k] for k in ols]
                    times = [aggregate_to_time] * len(repCnts)
                    dsDict[last_t].add_raw_series_data(opStr, ols, times, repCnts)
        for ds in dsDict.values():
            ds.done_adding_data()
        # Return the sub-datasets keyed by timestamp, in sorted time order.
        return _OrderedDict([(t, dsDict[t]) for t in sorted(dsDict.keys())])
def drop_zero_counts(self):
"""
Creates a copy of this data set that doesn't include any zero counts.
Returns
-------
DataSet
"""
self_sparse = DataSet(outcome_label_indices=self.olIndex)
for circuit, datarow in self.items():
self_sparse.add_raw_series_data(circuit, datarow.outcomes, datarow.time, datarow.reps,
record_zero_counts=False)
self_sparse.done_adding_data()
return self_sparse
def process_times(self, process_times_array_fn):
"""
Manipulate this DataSet's timestamps according to `processor_fn`.
For example, using, the folloing `process_times_array_fn` would change
the timestamps for each circuit to sequential integers.
```
def process_times_array_fn(times):
return list(range(len(times)))
```
Parameters
----------
process_times_array_fn : function
A function which takes a single array-of-timestamps argument
and returns another similarly-sized array. This function is
called, once per circuit, with the circuit's array of timestamps.
Returns
-------
DataSet
A new data set with altered timestamps.
"""
processed_ds = DataSet(outcome_label_indices=self.olIndex)
for circuit, datarow in self.items():
processed_time = _np.array(process_times_array_fn(datarow.time))
assert(processed_time.shape == datarow.time.shape), "process_times_array_fn returned the wrong shape!"
processed_ds.add_raw_series_data(circuit, datarow.outcomes, processed_time, datarow.reps,
record_zero_counts=True)
processed_ds.done_adding_data()
return processed_ds
def process_circuits(self, processor_fn, aggregate=False):
"""
Create a new data set by manipulating this DataSet's circuits (keys) according to `processor_fn`.
The new DataSet's circuits result from by running each of this DataSet's
circuits through `processor_fn`. This can be useful when "tracing out" qubits
in a dataset containing multi-qubit data.
Parameters
----------
processor_fn : function
A function which takes a single Circuit argument and returns
another (or the same) Circuit. This function may also return
`None`, in which case the data for that string is deleted.
aggregate : bool, optional
When `True`, aggregate the data for ciruits that `processor_fn`
assigns to the same "new" circuit. When `False`, use the data
from the *last* original circuit that maps to a given "new" circuit.
Returns
-------
DataSet
"""
ds_copy = self.copy_nonstatic()
ds_copy.process_circuits_inplace(processor_fn, aggregate)
if self.bStatic: ds_copy.done_adding_data()
return ds_copy
    def process_circuits_inplace(self, processor_fn, aggregate=False):
        """
        Manipulate this DataSet's circuits (keys) in-place according to `processor_fn`.

        All of this DataSet's circuits are updated by running each one
        through `processor_fn`. This can be useful when "tracing out" qubits
        in a dataset containing multi-qubit data.

        Parameters
        ----------
        processor_fn : function
            A function which takes a single Circuit argument and returns
            another (or the same) Circuit. This function may also return
            `None`, in which case the data for that string is deleted.
        aggregate : bool, optional
            When `True`, aggregate the data for ciruits that `processor_fn`
            assigns to the same "new" circuit. When `False`, use the data
            from the *last* original circuit that maps to a given "new" circuit.

        Returns
        -------
        None
        """
        if self.bStatic: raise ValueError("Cannot process_circuits_inplace on a static DataSet object")
        to_delete = []
        new_cirIndex = _OrderedDict()
        for opstr, indx in self.cirIndex.items():
            new_gstr = processor_fn(opstr)
            if new_gstr is None:
                # processor_fn dropped this circuit: mark its data for removal.
                to_delete.append(indx)
            elif new_gstr not in new_cirIndex or not aggregate:
                # New key, or non-aggregate mode (later circuits overwrite
                # earlier ones mapping to the same new key).
                assert(isinstance(new_gstr, _cir.Circuit)), "`processor_fn` must return a Circuit!"
                new_cirIndex[new_gstr] = indx
            else:  # aggregate data from indx --> new_cirIndex[new_gstr]
                # A subset of what is in add_raw_series_data(...), but we
                # don't need to do many of the checks there since the
                # incoming data is known to have no new outcome labels, etc.
                assert(isinstance(new_gstr, _cir.Circuit)), "`processor_fn` must return a Circuit!"
                iSrc, iDest = indx, new_cirIndex[new_gstr]
                self.oliData[iDest] = _np.concatenate((self.oliData[iDest], self.oliData[iSrc]))
                self.timeData[iDest] = _np.concatenate((self.timeData[iDest], self.timeData[iSrc]))
                if self.repData is not None:
                    self.repData[iDest] = _np.concatenate((self.repData[iDest], self.repData[iSrc]))
                #FUTURE: just add counts for same timestamp & same outcome
                # label data? (and in add_raw_series_data(...) too).
                # mark indx for deletion (don't do it yet, as this will
                # mess up the values in new_cirIndex)
                to_delete.append(indx)
        self.cirIndex = new_cirIndex
        # Deleting last keeps the indices stored in new_cirIndex valid until
        # _remove performs the (index-shifting) removal itself.
        self._remove(to_delete)
        #Note: self.cnt_cache just remains None (a non-static DataSet)

        #Process self.auxInfo
        auxInfo = _defaultdict(dict)
        for opstr in self.auxInfo.keys():
            new_gstr = processor_fn(opstr)
            if new_gstr is None:
                continue
            elif new_gstr not in auxInfo or not aggregate:
                auxInfo[new_gstr] = self.auxInfo[opstr]
            else:  # "aggregate" auxinfo by merging dictionaries
                #FUTURE: better merging - do something for key collisions?
                auxInfo[new_gstr].update(self.auxInfo[opstr])
        self.auxInfo = auxInfo
def remove(self, circuits, missing_action="raise"):
"""
Remove (delete) the data for `circuits` from this :class:`DataSet`.
Parameters
----------
circuits : iterable
An iterable over Circuit-like objects specifying the keys
(circuits) to remove.
missing_action : {"raise","warn","ignore"}
What to do when a string in `circuits` is not in this data
set (raise a KeyError, issue a warning, or do nothing).
Returns
-------
None
"""
missingStrs = [] # to issue warning - only used if missing_action=="warn"
gstr_indices = []; auxkeys_to_remove = []
for opstr in circuits:
if not isinstance(opstr, _cir.Circuit):
opstr = _cir.Circuit(opstr)
if opstr in self:
gstr_indices.append(self.cirIndex[opstr])
if opstr in self.auxInfo:
auxkeys_to_remove.append(opstr)
elif missing_action == "raise":
raise KeyError(("Circuit %s does not exist and therefore "
"cannot be removed when `missing_action` == "
"'raise'") % str(opstr))
elif missing_action == "warn":
missingStrs.append(opstr)
elif missing_action != "ignore":
raise ValueError("Invalid `missing_action`: %s" % str(missing_action))
# the actual removal operations
self._remove(gstr_indices)
for ky in auxkeys_to_remove:
del self.auxInfo[ky]
if len(missingStrs) > 0: # Print a warning with missing strings
missingStrs.append("...") # so elipses are shown when there's more strings
_warnings.warn(("DataSet.remove(...) cannot remove %s strings because"
" they don't exist in the original dataset:\n%s") %
(len(missingStrs) - 1, "\n".join(map(str, missingStrs[0:10]))))
def _remove(self, gstr_indices):
""" Removes the data in indices given by gstr_indices """
if self.bStatic: raise ValueError("Cannot _remove on a static DataSet object")
#Removing elements from oli_data, time_data, and rep_data is easy since
# these are just lists. Hard part is adjusting cirIndex values: we
# need to subtract k from index n, where k is the number of indices
# in `gstr_indices` less than n.
inds = sorted(list(gstr_indices))
#remove indices from lists (high->low)
for i in reversed(inds):
del self.oliData[i]
del self.timeData[i]
if self.repData:
del self.repData[i]
#remove elements of cirIndex assoc. w/deleted indices
keys_to_delete = []; inds_set = set(inds)
for k, v in self.cirIndex.items():
if v in inds_set:
keys_to_delete.append(k)
for k in keys_to_delete:
del self.cirIndex[k]
#adjust remaining indices in cirIndex
inds_ar = _np.array(inds, _np.int64)
for k in self.cirIndex.keys():
cnt = _bisect.bisect(inds_ar, self.cirIndex[k]) # cnt == number of removed
self.cirIndex[k] -= cnt # indices < self.cirIndex[k]
def copy(self):
"""
Make a copy of this DataSet.
Returns
-------
DataSet
"""
if self.bStatic:
return self # doesn't need to be copied since data can't change
else:
copyOfMe = DataSet(outcome_labels=self.outcome_labels,
collision_action=self.collisionAction)
copyOfMe.cirIndex = _copy.deepcopy(self.cirIndex)
copyOfMe.oliData = [el.copy() for el in self.oliData]
copyOfMe.timeData = [el.copy() for el in self.timeData]
if self.repData is not None:
copyOfMe.repData = [el.copy() for el in self.repData]
else: copyOfMe.repData = None
copyOfMe.oliType = self.oliType
copyOfMe.timeType = self.timeType
copyOfMe.repType = self.repType
copyOfMe.cnt_cache = None
copyOfMe.auxInfo = self.auxInfo.copy()
return copyOfMe
def copy_nonstatic(self):
"""
Make a non-static copy of this DataSet.
Returns
-------
DataSet
"""
if self.bStatic:
copyOfMe = DataSet(outcome_labels=self.outcome_labels,
collision_action=self.collisionAction)
copyOfMe.cirIndex = _OrderedDict([(opstr, i) for i, opstr in enumerate(self.cirIndex.keys())])
copyOfMe.oliData = []
copyOfMe.timeData = []
copyOfMe.repData = None if (self.repData is None) else []
for slc in self.cirIndex.values():
copyOfMe.oliData.append(self.oliData[slc].copy())
copyOfMe.timeData.append(self.timeData[slc].copy())
if self.repData is not None:
copyOfMe.repData.append(self.repData[slc].copy())
copyOfMe.oliType = self.oliType
copyOfMe.timeType = self.timeType
copyOfMe.repType = self.repType
copyOfMe.cnt_cache = None
copyOfMe.auxInfo = self.auxInfo.copy()
return copyOfMe
else:
return self.copy()
def done_adding_data(self):
"""
Promotes a non-static DataSet to a static (read-only) DataSet.
This method should be called after all data has been added.
Returns
-------
None
"""
if self.bStatic: return
#Convert normal dataset to static mode.
# olIndex stays the same
# cirIndex changes to hold slices into 1D arrays
# oli_data, time_data, & rep_data change from being lists of arrays to
# single 1D arrays.
if len(self.oliData) > 0:
new_cirIndex = _OrderedDict()
curIndx = 0
to_concat_oli = []
to_concat_time = []
to_concat_rep = []
for circuit, indx in self.cirIndex.items():
seriesLen = len(self.oliData[indx])
to_concat_oli.append(self.oliData[indx]) # just build up lists of
to_concat_time.append(self.timeData[indx]) # reference, not copies
assert(seriesLen == len(self.timeData[indx])), "TIME & OLI out of sync!"
if self.repData is not None:
to_concat_rep.append(self.repData[indx])
assert(seriesLen == len(self.repData[indx])), "REP & OLI out of sync!"
new_cirIndex[circuit] = slice(curIndx, curIndx + seriesLen)
curIndx += seriesLen
self.cirIndex = new_cirIndex
self.oliData = _np.concatenate(to_concat_oli)
self.timeData = _np.concatenate(to_concat_time)
if self.repData is not None:
self.repData = _np.concatenate(to_concat_rep)
else:
#leave cirIndex alone (should be empty anyway?)
self.oliData = _np.empty((0,), self.oliType)
self.timeData = _np.empty((0,), self.timeType)
if self.repData is not None:
self.repData = _np.empty((0,), self.repType)
self.cnt_cache = {opstr: _ld.OutcomeLabelDict() for opstr in self.cirIndex}
self.bStatic = True
self.uuid = _uuid.uuid4()
def __getstate__(self):
toPickle = {'cirIndexKeys': list(map(_cir.CompressedCircuit, self.cirIndex.keys())),
'cirIndexVals': list(self.cirIndex.values()),
'olIndex': self.olIndex,
'olIndex_max': self.olIndex_max,
'ol': self.ol,
'bStatic': self.bStatic,
'oliData': self.oliData,
'timeData': self.timeData,
'repData': self.repData,
'oliType': _np.dtype(self.oliType).str,
'timeType': _np.dtype(self.timeType).str,
'repType': _np.dtype(self.repType).str,
'collisionAction': self.collisionAction,
'uuid': self.uuid,
'auxInfo': self.auxInfo,
'comment': self.comment}
return toPickle
    def __setstate__(self, state_dict):
        """Restore this DataSet from a pickled state dict, handling legacy formats."""
        bStatic = state_dict['bStatic']
        if "gsIndexKeys" in state_dict:
            # Old pickles stored the circuit index under "gsIndex*" names.
            _warnings.warn("Unpickling a deprecated-format DataSet.  Please re-save/pickle asap.")
            cirIndexKeys = [cgstr.expand() for cgstr in state_dict['gsIndexKeys']]
            cirIndex = _OrderedDict(list(zip(cirIndexKeys, state_dict['gsIndexVals'])))
        else:
            # Current format: circuits are stored compressed and expanded here.
            cirIndexKeys = [cgstr.expand() for cgstr in state_dict['cirIndexKeys']]
            cirIndex = _OrderedDict(list(zip(cirIndexKeys, state_dict['cirIndexVals'])))
        if "slIndex" in state_dict:
            #print("DB: UNPICKLING AN OLD DATASET"); print("Keys = ",state_dict.keys())
            _warnings.warn("Unpickling a *very* deprecated-format DataSet. Please re-save/pickle asap.")

            #Turn spam labels into outcome labels
            self.cirIndex = _OrderedDict()
            self.olIndex = _OrderedDict([((str(sl),), i) for sl, i in state_dict['slIndex'].items()])
            self.ol = _OrderedDict([(i, ol) for (ol, i) in self.olIndex.items()])
            self.oliData = []
            self.timeData = []
            self.repData = []
            self.comment = ''
            # Module-level default dtypes (defined elsewhere in this file).
            self.oliType = Oindex_type
            self.timeType = Time_type
            self.repType = Repcount_type
            self.bStatic = False  # for adding data
            # Re-add the legacy count data through the normal API so all
            # internal structures are rebuilt consistently.
            for opstr, indx in cirIndex.items():
                count_row = state_dict['counts'][indx]
                count_dict = _OrderedDict([(ol, count_row[i]) for ol, i in self.olIndex.items()])
                self.add_count_dict(opstr, count_dict)
            if not self.bStatic: self.done_adding_data()
        else:  # Normal case
            self.bStatic = bStatic
            self.cirIndex = cirIndex
            self.olIndex = state_dict['olIndex']
            # olIndex_max may be absent in older pickles; reconstruct it.
            self.olIndex_max = state_dict.get('olIndex_max',
                                              max(self.olIndex.values()) if len(self.olIndex) > 0 else -1)
            self.ol = state_dict['ol']
            self.oliData = state_dict['oliData']
            self.timeData = state_dict['timeData']
            self.repData = state_dict['repData']
            # dtypes were pickled as string codes (see __getstate__).
            self.oliType = _np.dtype(state_dict['oliType'])
            self.timeType = _np.dtype(state_dict['timeType'])
            self.repType = _np.dtype(state_dict['repType'])
            self.comment = state_dict.get('comment', '')
            if bStatic:  # always empty - don't save this, just init
                self.cnt_cache = {opstr: _ld.OutcomeLabelDict() for opstr in self.cirIndex}
            else: self.cnt_cache = None

        self.auxInfo = state_dict.get('auxInfo', _defaultdict(dict))
        if not isinstance(self.auxInfo, _defaultdict) and isinstance(self.auxInfo, dict):
            # some types of serialization (e.g. JSON) just save a *normal* dict
            # so promote to a defaultdict if needed..
            self.auxInfo = _defaultdict(dict, self.auxInfo)

        self.collisionAction = state_dict.get('collisionAction', 'aggregate')
        self.uuid = state_dict.get('uuid', None)
    @_deprecated_fn('write_binary')
    def save(self, file_or_filename):
        """Deprecated alias for :method:`write_binary`; use that method instead."""
        return self.write_binary(file_or_filename)
def write_binary(self, file_or_filename):
"""
Write this data set to a binary-format file.
Parameters
----------
file_or_filename : string or file object
If a string, interpreted as a filename. If this filename ends
in ".gz", the file will be gzip compressed.
Returns
-------
None
"""
toPickle = {'cirIndexKeys': list(map(_cir.CompressedCircuit, self.cirIndex.keys())) if self.cirIndex else [],
'cirIndexVals': list(self.cirIndex.values()) if self.cirIndex else [],
'olIndex': self.olIndex,
'olIndex_max': self.olIndex_max,
'ol': self.ol,
'bStatic': self.bStatic,
'oliType': self.oliType,
'timeType': self.timeType,
'repType': self.repType,
'useReps': bool(self.repData is not None),
'collisionAction': self.collisionAction,
'uuid': self.uuid,
'auxInfo': self.auxInfo,
'comment': self.comment} # Don't pickle counts numpy data b/c it's inefficient
if not self.bStatic: toPickle['nRows'] = len(self.oliData)
bOpen = isinstance(file_or_filename, str)
if bOpen:
if file_or_filename.endswith(".gz"):
import gzip as _gzip
f = _gzip.open(file_or_filename, "wb")
else:
f = open(file_or_filename, "wb")
else:
f = file_or_filename
_pickle.dump(toPickle, f)
if self.bStatic:
_np.save(f, self.oliData)
_np.save(f, self.timeData)
if self.repData is not None:
_np.save(f, self.repData)
else:
for row in self.oliData: _np.save(f, row)
for row in self.timeData: _np.save(f, row)
if self.repData is not None:
for row in self.repData: _np.save(f, row)
if bOpen: f.close()
    @_deprecated_fn('read_binary')
    def load(self, file_or_filename):
        """Deprecated alias for :method:`read_binary`; use that method instead."""
        return self.read_binary(file_or_filename)
    def read_binary(self, file_or_filename):
        """
        Read a DataSet from a binary file, clearing any data it contained previously.

        The file should have been created with :method:`DataSet.write_binary`

        Parameters
        ----------
        file_or_filename : str or buffer
            The file or filename to load from.

        Returns
        -------
        None
        """
        bOpen = isinstance(file_or_filename, str)
        if bOpen:
            if file_or_filename.endswith(".gz"):
                import gzip as _gzip
                f = _gzip.open(file_or_filename, "rb")
            else:
                f = open(file_or_filename, "rb")
        else:
            f = file_or_filename

        # patched_uuid() is a compatibility shim for unpickling uuid objects.
        with _compat.patched_uuid():
            state_dict = _pickle.load(f)
        if "gsIndexKeys" in state_dict:
            # Legacy files used "gsIndex*" key names; rename to current ones.
            _warnings.warn("Loading a deprecated-format DataSet.  Please re-save asap.")
            state_dict['cirIndexKeys'] = state_dict['gsIndexKeys']
            state_dict['cirIndexVals'] = state_dict['gsIndexVals']
            del state_dict['gsIndexKeys']
            del state_dict['gsIndexVals']

        def expand(x):  # to be backward compatible
            """ Expand a compressed circuit """
            if isinstance(x, _cir.CompressedCircuit): return x.expand()
            elif hasattr(x, '__class__') and x.__class__.__name__ == "dummy_CompressedGateString":
                return _cir.Circuit(_cir.CompressedCircuit.expand_op_label_tuple(x._tup), stringrep=x.str)
            #for backward compatibility, needed for Python2 only, which doesn't call __new__ when
            # unpickling protocol=0 (the default) info.
            else:
                _warnings.warn("Deprecated dataset format.  Please re-save "
                               "this dataset soon to avoid future incompatibility.")
                return _cir.Circuit(_cir.CompressedCircuit.expand_op_label_tuple(x))
        cirIndexKeys = [expand(cgstr) for cgstr in state_dict['cirIndexKeys']]

        self.cirIndex = _OrderedDict(list(zip(cirIndexKeys, state_dict['cirIndexVals'])))
        self.olIndex = state_dict['olIndex']
        # olIndex_max may be absent in older files; reconstruct it.
        self.olIndex_max = state_dict.get('olIndex_max',
                                          max(self.olIndex.values()) if len(self.olIndex) > 0 else -1)
        self.ol = state_dict['ol']
        self.bStatic = state_dict['bStatic']
        self.oliType = state_dict['oliType']
        self.timeType = state_dict['timeType']
        self.repType = state_dict['repType']
        self.collisionAction = state_dict['collisionAction']
        self.uuid = state_dict['uuid']
        self.auxInfo = state_dict.get('auxInfo', _defaultdict(dict))  # backward compat
        self.comment = state_dict.get('comment', '')  # backward compat

        useReps = state_dict['useReps']

        if self.bStatic:
            self.oliData = _np.lib.format.read_array(f)  # _np.load(f) doesn't play nice with gzip
            self.timeData = _np.lib.format.read_array(f)  # _np.load(f) doesn't play nice with gzip
            if useReps:
                self.repData = _np.lib.format.read_array(f)  # _np.load(f) doesn't play nice with gzip
            # NOTE(review): when useReps is False, self.repData is left
            # unchanged here -- confirm the constructor always resets it.
            self.cnt_cache = {opstr: _ld.OutcomeLabelDict() for opstr in self.cirIndex}  # init cnt_cache afresh
        else:
            self.oliData = []
            for _ in range(state_dict['nRows']):
                self.oliData.append(_np.lib.format.read_array(f))  # _np.load(f) doesn't play nice with gzip

            self.timeData = []
            for _ in range(state_dict['nRows']):
                self.timeData.append(_np.lib.format.read_array(f))  # _np.load(f) doesn't play nice with gzip

            if useReps:
                self.repData = []
                for _ in range(state_dict['nRows']):
                    self.repData.append(_np.lib.format.read_array(f))  # _np.load(f) doesn't play nice with gzip
            else:
                self.repData = None
            self.cnt_cache = None
        if bOpen: f.close()
def rename_outcome_labels(self, old_to_new_dict):
"""
Replaces existing output labels with new ones as per `old_to_new_dict`.
Parameters
----------
old_to_new_dict : dict
A mapping from old/existing outcome labels to new ones. Strings
in keys or values are automatically converted to 1-tuples. Missing
outcome labels are left unaltered.
Returns
-------
None
"""
mapdict = {}
for old, new in old_to_new_dict.items():
if isinstance(old, str): old = (old,)
if isinstance(new, str): new = (new,)
mapdict[old] = new
new_olIndex = _OrderedDict()
for ol, i in self.olIndex.items():
if ol in mapdict:
new_olIndex[mapdict[ol]] = i
else:
new_olIndex[ol] = i
#Note: rebuild reverse-dict self.ol:
self.olIndex = new_olIndex
self.ol = _OrderedDict([(i, ol) for (ol, i) in self.olIndex.items()])
def add_std_nqubit_outcome_labels(self, nqubits):
"""
Adds all the "standard" outcome labels (e.g. '0010') on `nqubits` qubits.
This is useful to ensure that, even if not all outcomes appear in the
data, that all are recognized as being potentially valid outcomes (and
so attempts to get counts for these outcomes will be 0 rather than
raising an error).
Parameters
----------
nqubits : int
The number of qubits. For example, if equal to 3 the outcome labels
'000', '001', ... '111' are added.
Returns
-------
None
"""
self.add_outcome_labels((("".join(x),) for x in _itertools.product(*([('0', '1')] * nqubits))))
def add_outcome_labels(self, outcome_labels, update_ol=True):
"""
Adds new valid outcome labels.
Ensures that all the elements of `outcome_labels` are stored as
valid outcomes for circuits in this DataSet, adding new outcomes
as necessary.
Parameters
----------
outcome_labels : list or generator
A list or generator of string- or tuple-valued outcome labels.
update_ol : bool, optional
Whether to update internal mappings to reflect the new outcome labels.
Leave this as True unless you really know what you're doing.
Returns
-------
None
"""
added = False
iNext = self.olIndex_max
for ol in outcome_labels:
if ol not in self.olIndex:
iNext += 1
self.olIndex[ol] = iNext; added = True
if added and update_ol: # rebuild self.ol because olIndex has changed
self.update_ol()
self.olIndex_max = iNext
def auxinfo_dataframe(self, pivot_valuename=None, pivot_value=None, drop_columns=False):
"""
Create a Pandas dataframe with aux-data from this dataset.
Parameters
----------
pivot_valuename : str, optional
If not None, the resulting dataframe is pivoted using `pivot_valuename`
as the column whose values name the pivoted table's column names.
If None and `pivot_value` is not None,`"ValueName"` is used.
pivot_value : str, optional
If not None, the resulting dataframe is pivoted such that values of
the `pivot_value` column are rearranged into new columns whose names
are given by the values of the `pivot_valuename` column. If None and
`pivot_valuename` is not None,`"Value"` is used.
drop_columns : bool or list, optional
A list of column names to drop (prior to performing any pivot). If
`True` appears in this list or is given directly, then all
constant-valued columns are dropped as well. No columns are dropped
when `drop_columns == False`.
Returns
-------
pandas.DataFrame
"""
from pygsti.tools.dataframetools import _process_dataframe
cdict = _NamedDict('Circuit', None)
for cir, raw_auxdict in self.auxInfo.items():
cdict[cir.str] = _NamedDict('ValueName', 'category', items=raw_auxdict)
df = cdict.to_dataframe()
return _process_dataframe(df, pivot_valuename, pivot_value, drop_columns)
| [
"pickle.dump",
"numpy.empty",
"numpy.ones",
"collections.defaultdict",
"pygsti.circuits.circuit.CompressedCircuit.expand_op_label_tuple",
"pygsti.circuits.circuit.Circuit",
"numpy.mean",
"pygsti.tools.legacytools.deprecate",
"pickle.load",
"numpy.isclose",
"pygsti.baseobjs._compatibility.patched... | [((22499, 22537), 'pygsti.tools.legacytools.deprecate', '_deprecated_fn', (['"""DataSetRow.fractions"""'], {}), "('DataSetRow.fractions')\n", (22513, 22537), True, 'from pygsti.tools.legacytools import deprecate as _deprecated_fn\n'), ((115927, 115957), 'pygsti.tools.legacytools.deprecate', '_deprecated_fn', (['"""write_binary"""'], {}), "('write_binary')\n", (115941, 115957), True, 'from pygsti.tools.legacytools import deprecate as _deprecated_fn\n'), ((118137, 118166), 'pygsti.tools.legacytools.deprecate', '_deprecated_fn', (['"""read_binary"""'], {}), "('read_binary')\n", (118151, 118166), True, 'from pygsti.tools.legacytools import deprecate as _deprecated_fn\n'), ((6384, 6420), 'pygsti.tools.listtools.remove_duplicates', '_lt.remove_duplicates', (['self.outcomes'], {}), '(self.outcomes)\n', (6405, 6420), True, 'from pygsti.tools import listtools as _lt\n'), ((14154, 14175), 'numpy.array', '_np.array', (['self.times'], {}), '(self.times)\n', (14163, 14175), True, 'import numpy as _np\n'), ((19928, 19950), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict', '_ld.OutcomeLabelDict', ([], {}), '()\n', (19948, 19950), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((27963, 28051), 'warnings.warn', '_warnings.warn', (['"""Rounding fractional repetition count to next lowest whole number!"""'], {}), "(\n 'Rounding fractional repetition count to next lowest whole number!')\n", (27977, 28051), True, 'import warnings as _warnings\n'), ((45360, 45386), 'pygsti.circuits.circuit.Circuit.cast', '_cir.Circuit.cast', (['circuit'], {}), '(circuit)\n', (45377, 45386), True, 'from pygsti.circuits import circuit as _cir\n'), ((46536, 46562), 'pygsti.circuits.circuit.Circuit.cast', '_cir.Circuit.cast', (['circuit'], {}), '(circuit)\n', (46553, 46562), True, 'from pygsti.circuits import circuit as _cir\n'), ((66386, 66460), 'numpy.array', '_np.array', (['[self.olIndex[ol] for ol in 
tup_outcomeLabelList]', 'self.oliType'], {}), '([self.olIndex[ol] for ol in tup_outcomeLabelList], self.oliType)\n', (66395, 66460), True, 'import numpy as _np\n'), ((66481, 66522), 'numpy.array', '_np.array', (['time_stamp_list', 'self.timeType'], {}), '(time_stamp_list, self.timeType)\n', (66490, 66522), True, 'import numpy as _np\n'), ((74671, 74717), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.oliType'], {}), '(nSteps * nNewOutcomes, self.oliType)\n', (74680, 74717), True, 'import numpy as _np\n'), ((74736, 74782), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.repType'], {}), '(nSteps * nNewOutcomes, self.repType)\n', (74745, 74782), True, 'import numpy as _np\n'), ((74802, 74849), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.timeType'], {}), '(nSteps * nNewOutcomes, self.timeType)\n', (74811, 74849), True, 'import numpy as _np\n'), ((75321, 75358), 'numpy.zeros', '_np.zeros', (['nNewOutcomes', 'self.repType'], {}), '(nNewOutcomes, self.repType)\n', (75330, 75358), True, 'import numpy as _np\n'), ((76633, 76647), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (76645, 76647), True, 'from collections import OrderedDict as _OrderedDict\n'), ((78809, 78827), 'collections.defaultdict', '_defaultdict', (['list'], {}), '(list)\n', (78821, 78827), True, 'from collections import defaultdict as _defaultdict\n'), ((79830, 79876), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.oliType'], {}), '(nSteps * nNewOutcomes, self.oliType)\n', (79839, 79876), True, 'import numpy as _np\n'), ((79895, 79941), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.repType'], {}), '(nSteps * nNewOutcomes, self.repType)\n', (79904, 79941), True, 'import numpy as _np\n'), ((79961, 80008), 'numpy.empty', '_np.empty', (['(nSteps * nNewOutcomes)', 'self.timeType'], {}), '(nSteps * nNewOutcomes, self.timeType)\n', (79970, 80008), True, 'import numpy as _np\n'), ((80480, 80517), 'numpy.zeros', 
'_np.zeros', (['nNewOutcomes', 'self.repType'], {}), '(nNewOutcomes, self.repType)\n', (80489, 80517), True, 'import numpy as _np\n'), ((81792, 81806), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (81804, 81806), True, 'from collections import OrderedDict as _OrderedDict\n'), ((84874, 84893), 'numpy.mean', '_np.mean', (['timesteps'], {}), '(timesteps)\n', (84882, 84893), True, 'import numpy as _np\n'), ((102101, 102115), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (102113, 102115), True, 'from collections import OrderedDict as _OrderedDict\n'), ((103789, 103807), 'collections.defaultdict', '_defaultdict', (['dict'], {}), '(dict)\n', (103801, 103807), True, 'from collections import defaultdict as _defaultdict\n'), ((107416, 107442), 'numpy.array', '_np.array', (['inds', '_np.int64'], {}), '(inds, _np.int64)\n', (107425, 107442), True, 'import numpy as _np\n'), ((111884, 111897), 'uuid.uuid4', '_uuid.uuid4', ([], {}), '()\n', (111895, 111897), True, 'import uuid as _uuid\n'), ((117670, 117695), 'pickle.dump', '_pickle.dump', (['toPickle', 'f'], {}), '(toPickle, f)\n', (117682, 117695), True, 'import pickle as _pickle\n'), ((123094, 123108), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (123106, 123108), True, 'from collections import OrderedDict as _OrderedDict\n'), ((126545, 126572), 'pygsti.tools.NamedDict', '_NamedDict', (['"""Circuit"""', 'None'], {}), "('Circuit', None)\n", (126555, 126572), True, 'from pygsti.tools import NamedDict as _NamedDict\n'), ((126761, 126827), 'pygsti.tools.dataframetools._process_dataframe', '_process_dataframe', (['df', 'pivot_valuename', 'pivot_value', 'drop_columns'], {}), '(df, pivot_valuename, pivot_value, drop_columns)\n', (126779, 126827), False, 'from pygsti.tools.dataframetools import _process_dataframe\n'), ((7531, 7574), 'numpy.array', '_np.array', (['inds'], {'dtype': 'self.dataset.oliType'}), '(inds, dtype=self.dataset.oliType)\n', (7540, 7574), True, 'import numpy as 
_np\n'), ((8128, 8173), 'numpy.array', '_np.array', (['times'], {'dtype': 'self.dataset.timeType'}), '(times, dtype=self.dataset.timeType)\n', (8137, 8173), True, 'import numpy as _np\n'), ((14305, 14320), 'numpy.diff', '_np.diff', (['times'], {}), '(times)\n', (14313, 14320), True, 'import numpy as _np\n'), ((16894, 16933), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['tup[0]'], {}), '(tup[0])\n', (16925, 16933), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((17322, 17377), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['index_or_outcome_label'], {}), '(index_or_outcome_label)\n', (17353, 17377), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((24262, 24307), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['outcomelabel'], {}), '(outcomelabel)\n', (24293, 24307), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((25415, 25454), 'numpy.array', '_np.array', (['times', 'self.dataset.timeType'], {}), '(times, self.dataset.timeType)\n', (25424, 25454), True, 'import numpy as _np\n'), ((25470, 25509), 'numpy.array', '_np.array', (['counts', 'self.dataset.repType'], {}), '(counts, self.dataset.repType)\n', (25479, 25509), True, 'import numpy as _np\n'), ((43130, 43148), 'collections.defaultdict', '_defaultdict', (['dict'], {}), '(dict)\n', (43142, 43148), True, 'from collections import defaultdict as _defaultdict\n'), ((43190, 43218), 'collections.defaultdict', '_defaultdict', (['dict', 'aux_info'], {}), '(dict, aux_info)\n', (43202, 43218), True, 'from collections import defaultdict as _defaultdict\n'), ((44067, 44088), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['circuit'], {}), '(circuit)\n', (44079, 44088), True, 'from pygsti.circuits import circuit as _cir\n'), 
((44817, 44838), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['circuit'], {}), '(circuit)\n', (44829, 44838), True, 'from pygsti.circuits import circuit as _cir\n'), ((52735, 52759), 'collections.defaultdict', '_defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (52747, 52759), True, 'from collections import defaultdict as _defaultdict\n'), ((54243, 54264), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['circuit'], {}), '(circuit)\n', (54255, 54264), True, 'from pygsti.circuits import circuit as _cir\n'), ((61898, 61944), 'numpy.zeros', '_np.zeros', (['count_array.shape[0]', 'self.timeType'], {}), '(count_array.shape[0], self.timeType)\n', (61907, 61944), True, 'import numpy as _np\n'), ((66715, 66754), 'numpy.array', '_np.array', (['rep_count_list', 'self.repType'], {}), '(rep_count_list, self.repType)\n', (66724, 66754), True, 'import numpy as _np\n'), ((77092, 77116), 'collections.defaultdict', '_defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (77104, 77116), True, 'from collections import defaultdict as _defaultdict\n'), ((82341, 82365), 'collections.defaultdict', '_defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (82353, 82365), True, 'from collections import defaultdict as _defaultdict\n'), ((91200, 91247), 'pygsti.tools.listtools.remove_duplicates', '_lt.remove_duplicates', (['list_of_circuits_to_keep'], {}), '(list_of_circuits_to_keep)\n', (91221, 91247), True, 'from pygsti.tools import listtools as _lt\n'), ((94631, 94709), 'warnings.warn', '_warnings.warn', (['"""No counts in the requested time range: empty DataSet created"""'], {}), "('No counts in the requested time range: empty DataSet created')\n", (94645, 94709), True, 'import warnings as _warnings\n'), ((107500, 107541), 'bisect.bisect', '_bisect.bisect', (['inds_ar', 'self.cirIndex[k]'], {}), '(inds_ar, self.cirIndex[k])\n', (107514, 107541), True, 'import bisect as _bisect\n'), ((108075, 108104), 'copy.deepcopy', '_copy.deepcopy', (['self.cirIndex'], {}), 
'(self.cirIndex)\n', (108089, 108104), True, 'import copy as _copy\n'), ((110385, 110399), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (110397, 110399), True, 'from collections import OrderedDict as _OrderedDict\n'), ((111264, 111294), 'numpy.concatenate', '_np.concatenate', (['to_concat_oli'], {}), '(to_concat_oli)\n', (111279, 111294), True, 'import numpy as _np\n'), ((111323, 111354), 'numpy.concatenate', '_np.concatenate', (['to_concat_time'], {}), '(to_concat_time)\n', (111338, 111354), True, 'import numpy as _np\n'), ((111560, 111589), 'numpy.empty', '_np.empty', (['(0,)', 'self.oliType'], {}), '((0,), self.oliType)\n', (111569, 111589), True, 'import numpy as _np\n'), ((111618, 111648), 'numpy.empty', '_np.empty', (['(0,)', 'self.timeType'], {}), '((0,), self.timeType)\n', (111627, 111648), True, 'import numpy as _np\n'), ((111785, 111807), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict', '_ld.OutcomeLabelDict', ([], {}), '()\n', (111805, 111807), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((112931, 113022), 'warnings.warn', '_warnings.warn', (['"""Unpickling a deprecated-format DataSet. Please re-save/pickle asap."""'], {}), "(\n 'Unpickling a deprecated-format DataSet. Please re-save/pickle asap.')\n", (112945, 113022), True, 'import warnings as _warnings\n'), ((113513, 113616), 'warnings.warn', '_warnings.warn', (['"""Unpickling a *very* deprecated-format DataSet. Please re-save/pickle asap."""'], {}), "(\n 'Unpickling a *very* deprecated-format DataSet. 
Please re-save/pickle asap.'\n )\n", (113527, 113616), True, 'import warnings as _warnings\n'), ((113686, 113700), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (113698, 113700), True, 'from collections import OrderedDict as _OrderedDict\n'), ((115026, 115058), 'numpy.dtype', '_np.dtype', (["state_dict['oliType']"], {}), "(state_dict['oliType'])\n", (115035, 115058), True, 'import numpy as _np\n'), ((115087, 115120), 'numpy.dtype', '_np.dtype', (["state_dict['timeType']"], {}), "(state_dict['timeType'])\n", (115096, 115120), True, 'import numpy as _np\n'), ((115148, 115180), 'numpy.dtype', '_np.dtype', (["state_dict['repType']"], {}), "(state_dict['repType'])\n", (115157, 115180), True, 'import numpy as _np\n'), ((115489, 115507), 'collections.defaultdict', '_defaultdict', (['dict'], {}), '(dict)\n', (115501, 115507), True, 'from collections import defaultdict as _defaultdict\n'), ((115626, 115658), 'collections.defaultdict', '_defaultdict', (['dict', 'self.auxInfo'], {}), '(dict, self.auxInfo)\n', (115638, 115658), True, 'from collections import defaultdict as _defaultdict\n'), ((117733, 117758), 'numpy.save', '_np.save', (['f', 'self.oliData'], {}), '(f, self.oliData)\n', (117741, 117758), True, 'import numpy as _np\n'), ((117771, 117797), 'numpy.save', '_np.save', (['f', 'self.timeData'], {}), '(f, self.timeData)\n', (117779, 117797), True, 'import numpy as _np\n'), ((119000, 119022), 'pygsti.baseobjs._compatibility.patched_uuid', '_compat.patched_uuid', ([], {}), '()\n', (119020, 119022), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((119049, 119064), 'pickle.load', '_pickle.load', (['f'], {}), '(f)\n', (119061, 119064), True, 'import pickle as _pickle\n'), ((119118, 119194), 'warnings.warn', '_warnings.warn', (['"""Loading a deprecated-format DataSet. Please re-save asap."""'], {}), "('Loading a deprecated-format DataSet. 
Please re-save asap.')\n", (119132, 119194), True, 'import warnings as _warnings\n'), ((121039, 121057), 'collections.defaultdict', '_defaultdict', (['dict'], {}), '(dict)\n', (121051, 121057), True, 'from collections import defaultdict as _defaultdict\n'), ((121244, 121272), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (121269, 121272), True, 'import numpy as _np\n'), ((121344, 121372), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (121369, 121372), True, 'import numpy as _np\n'), ((126656, 126710), 'pygsti.tools.NamedDict', '_NamedDict', (['"""ValueName"""', '"""category"""'], {'items': 'raw_auxdict'}), "('ValueName', 'category', items=raw_auxdict)\n", (126666, 126710), True, 'from pygsti.tools import NamedDict as _NamedDict\n'), ((39348, 39362), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (39360, 39362), True, 'from collections import OrderedDict as _OrderedDict\n'), ((43350, 43372), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict', '_ld.OutcomeLabelDict', ([], {}), '()\n', (43370, 43372), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((61775, 61820), 'numpy.ones', '_np.ones', (['count_array.shape[0]', 'self.timeType'], {}), '(count_array.shape[0], self.timeType)\n', (61783, 61820), True, 'import numpy as _np\n'), ((66162, 66197), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['ol'], {}), '(ol)\n', (66193, 66197), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((67766, 67779), 'numpy.all', '_np.all', (['mask'], {}), '(mask)\n', (67773, 67779), True, 'import numpy as _np\n'), ((68357, 68412), 'numpy.concatenate', '_np.concatenate', (['(self.oliData[circuitIndx], oli_array)'], {}), '((self.oliData[circuitIndx], oli_array))\n', (68372, 68412), True, 'import numpy as _np\n'), ((68458, 68515), 'numpy.concatenate', '_np.concatenate', 
(['(self.timeData[circuitIndx], time_array)'], {}), '((self.timeData[circuitIndx], time_array))\n', (68473, 68515), True, 'import numpy as _np\n'), ((81992, 82034), 'numpy.array', '_np.array', (['[oli_map[x] for x in dsrow.oli]'], {}), '([oli_map[x] for x in dsrow.oli])\n', (82001, 82034), True, 'import numpy as _np\n'), ((86964, 86996), 'numpy.all', '_np.all', (['(self.timeData[gsi] == 0)'], {}), '(self.timeData[gsi] == 0)\n', (86971, 86996), True, 'import numpy as _np\n'), ((93658, 93697), 'numpy.ones', '_np.ones', (['dsRow.oli.shape', 'self.repType'], {}), '(dsRow.oli.shape, self.repType)\n', (93666, 93697), True, 'import numpy as _np\n'), ((95715, 95754), 'numpy.ones', '_np.ones', (['dsRow.oli.shape', 'self.repType'], {}), '(dsRow.oli.shape, self.repType)\n', (95723, 95754), True, 'import numpy as _np\n'), ((105104, 105123), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['opstr'], {}), '(opstr)\n', (105116, 105123), True, 'from pygsti.circuits import circuit as _cir\n'), ((111427, 111457), 'numpy.concatenate', '_np.concatenate', (['to_concat_rep'], {}), '(to_concat_rep)\n', (111442, 111457), True, 'import numpy as _np\n'), ((111721, 111750), 'numpy.empty', '_np.empty', (['(0,)', 'self.repType'], {}), '((0,), self.repType)\n', (111730, 111750), True, 'import numpy as _np\n'), ((112432, 112455), 'numpy.dtype', '_np.dtype', (['self.oliType'], {}), '(self.oliType)\n', (112441, 112455), True, 'import numpy as _np\n'), ((112493, 112517), 'numpy.dtype', '_np.dtype', (['self.timeType'], {}), '(self.timeType)\n', (112502, 112517), True, 'import numpy as _np\n'), ((112554, 112577), 'numpy.dtype', '_np.dtype', (['self.repType'], {}), '(self.repType)\n', (112563, 112577), True, 'import numpy as _np\n'), ((117512, 117546), 'gzip.open', '_gzip.open', (['file_or_filename', '"""wb"""'], {}), "(file_or_filename, 'wb')\n", (117522, 117546), True, 'import gzip as _gzip\n'), ((117855, 117880), 'numpy.save', '_np.save', (['f', 'self.repData'], {}), '(f, self.repData)\n', 
(117863, 117880), True, 'import numpy as _np\n'), ((117932, 117948), 'numpy.save', '_np.save', (['f', 'row'], {}), '(f, row)\n', (117940, 117948), True, 'import numpy as _np\n'), ((117987, 118003), 'numpy.save', '_np.save', (['f', 'row'], {}), '(f, row)\n', (117995, 118003), True, 'import numpy as _np\n'), ((118837, 118871), 'gzip.open', '_gzip.open', (['file_or_filename', '"""rb"""'], {}), "(file_or_filename, 'rb')\n", (118847, 118871), True, 'import gzip as _gzip\n'), ((121471, 121499), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (121496, 121499), True, 'import numpy as _np\n'), ((121580, 121602), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict', '_ld.OutcomeLabelDict', ([], {}), '()\n', (121600, 121602), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((15573, 15628), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['index_or_outcome_label'], {}), '(index_or_outcome_label)\n', (15604, 15628), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((18937, 18970), 'numpy.isclose', '_np.isclose', (['self.time', 'timestamp'], {}), '(self.time, timestamp)\n', (18948, 18970), True, 'import numpy as _np\n'), ((19134, 19162), 'numpy.equal', '_np.equal', (['self.oli[tslc]', 'i'], {}), '(self.oli[tslc], i)\n', (19143, 19162), True, 'import numpy as _np\n'), ((19262, 19290), 'numpy.equal', '_np.equal', (['self.oli[tslc]', 'i'], {}), '(self.oli[tslc], i)\n', (19271, 19290), True, 'import numpy as _np\n'), ((20014, 20047), 'numpy.isclose', '_np.isclose', (['self.time', 'timestamp'], {}), '(self.time, timestamp)\n', (20025, 20047), True, 'import numpy as _np\n'), ((24893, 24949), 'numpy.isclose', '_np.isclose', (['t', 'timestamps[tsIndx]'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(t, timestamps[tsIndx], rtol=0.0, atol=1e-12)\n', (24904, 24949), True, 'import numpy as _np\n'), ((24974, 25018), 
'numpy.isclose', '_np.isclose', (['t', 'last_t'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(t, last_t, rtol=0.0, atol=1e-12)\n', (24985, 25018), True, 'import numpy as _np\n'), ((38090, 38112), 'collections.OrderedDict', '_OrderedDict', (['dictData'], {}), '(dictData)\n', (38102, 38112), True, 'from collections import OrderedDict as _OrderedDict\n'), ((38163, 38177), 'collections.OrderedDict', '_OrderedDict', ([], {}), '()\n', (38175, 38177), True, 'from collections import OrderedDict as _OrderedDict\n'), ((53555, 53579), 'collections.defaultdict', '_defaultdict', (['(lambda : 0)'], {}), '(lambda : 0)\n', (53567, 53579), True, 'from collections import defaultdict as _defaultdict\n'), ((68606, 68661), 'numpy.concatenate', '_np.concatenate', (['(self.repData[circuitIndx], rep_array)'], {}), '((self.repData[circuitIndx], rep_array))\n', (68621, 68661), True, 'import numpy as _np\n'), ((89557, 89576), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['opstr'], {}), '(opstr)\n', (89569, 89576), True, 'from pygsti.circuits import circuit as _cir\n'), ((91321, 91340), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['opstr'], {}), '(opstr)\n', (91333, 91340), True, 'from pygsti.circuits import circuit as _cir\n'), ((95887, 95907), 'numpy.diff', '_np.diff', (['dsRow.time'], {}), '(dsRow.time)\n', (95895, 95907), True, 'import numpy as _np\n'), ((102982, 103040), 'numpy.concatenate', '_np.concatenate', (['(self.oliData[iDest], self.oliData[iSrc])'], {}), '((self.oliData[iDest], self.oliData[iSrc]))\n', (102997, 103040), True, 'import numpy as _np\n'), ((103080, 103140), 'numpy.concatenate', '_np.concatenate', (['(self.timeData[iDest], self.timeData[iSrc])'], {}), '((self.timeData[iDest], self.timeData[iSrc]))\n', (103095, 103140), True, 'import numpy as _np\n'), ((115348, 115370), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict', '_ld.OutcomeLabelDict', ([], {}), '()\n', (115368, 115370), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as 
_compat\n'), ((118086, 118102), 'numpy.save', '_np.save', (['f', 'row'], {}), '(f, row)\n', (118094, 118102), True, 'import numpy as _np\n'), ((119988, 120109), 'warnings.warn', '_warnings.warn', (['"""Deprecated dataset format. Please re-save this dataset soon to avoid future incompatibility."""'], {}), "(\n 'Deprecated dataset format. Please re-save this dataset soon to avoid future incompatibility.'\n )\n", (120002, 120109), True, 'import warnings as _warnings\n'), ((121785, 121813), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (121810, 121813), True, 'import numpy as _np\n'), ((121976, 122004), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (122001, 122004), True, 'import numpy as _np\n'), ((124152, 124197), 'itertools.product', '_itertools.product', (["*([('0', '1')] * nqubits)"], {}), "(*([('0', '1')] * nqubits))\n", (124170, 124197), True, 'import itertools as _itertools\n'), ((24659, 24715), 'numpy.isclose', '_np.isclose', (['t', 'timestamps[tsIndx]'], {'rtol': '(0.0)', 'atol': '(1e-12)'}), '(t, timestamps[tsIndx], rtol=0.0, atol=1e-12)\n', (24670, 24715), True, 'import numpy as _np\n'), ((39020, 39055), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['ol'], {}), '(ol)\n', (39051, 39055), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((103228, 103286), 'numpy.concatenate', '_np.concatenate', (['(self.repData[iDest], self.repData[iSrc])'], {}), '((self.repData[iDest], self.repData[iSrc]))\n', (103243, 103286), True, 'import numpy as _np\n'), ((119721, 119773), 'pygsti.circuits.circuit.CompressedCircuit.expand_op_label_tuple', '_cir.CompressedCircuit.expand_op_label_tuple', (['x._tup'], {}), '(x._tup)\n', (119765, 119773), True, 'from pygsti.circuits import circuit as _cir\n'), ((120170, 120217), 'pygsti.circuits.circuit.CompressedCircuit.expand_op_label_tuple', 
'_cir.CompressedCircuit.expand_op_label_tuple', (['x'], {}), '(x)\n', (120214, 120217), True, 'from pygsti.circuits import circuit as _cir\n'), ((122201, 122229), 'numpy.lib.format.read_array', '_np.lib.format.read_array', (['f'], {}), '(f)\n', (122226, 122229), True, 'import numpy as _np\n'), ((16322, 16377), 'pygsti.baseobjs.outcomelabeldict.OutcomeLabelDict.to_outcome', '_ld.OutcomeLabelDict.to_outcome', (['index_or_outcome_label'], {}), '(index_or_outcome_label)\n', (16353, 16377), True, 'from pygsti.baseobjs import outcomelabeldict as _ld, _compatibility as _compat\n'), ((20364, 20392), 'numpy.equal', '_np.equal', (['self.oli[tslc]', 'i'], {}), '(self.oli[tslc], i)\n', (20373, 20392), True, 'import numpy as _np\n'), ((20615, 20643), 'numpy.equal', '_np.equal', (['self.oli[tslc]', 'i'], {}), '(self.oli[tslc], i)\n', (20624, 20643), True, 'import numpy as _np\n'), ((37635, 37654), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['opstr'], {}), '(opstr)\n', (37647, 37654), True, 'from pygsti.circuits import circuit as _cir\n'), ((38918, 38963), 'itertools.product', '_itertools.product', (["*([('0', '1')] * nqubits)"], {}), "(*([('0', '1')] * nqubits))\n", (38936, 38963), True, 'import itertools as _itertools\n'), ((37931, 37950), 'pygsti.circuits.circuit.Circuit', '_cir.Circuit', (['opstr'], {}), '(opstr)\n', (37943, 37950), True, 'from pygsti.circuits import circuit as _cir\n'), ((41207, 41232), 'numpy.amax', '_np.amax', (['self.oliData[i]'], {}), '(self.oliData[i])\n', (41215, 41232), True, 'import numpy as _np\n')] |
import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
## physical constants (CGS units unless noted)
c = 2.99e10 ## speed of light in cm/s
secyr = 3.154e7 ## seconds per year
Myr = 1e6 ## years per Myr
Msun = 1.989e33 ## grams per solar mass
Lsun = 3.839e33 ## erg/sec per solar luminosity

def calc_luminosity(current_BH_mass, initial_BH_mass, mdot_BH):
    """Bolometric accretion luminosity (erg/s) of an accreting BH.

    current_BH_mass : current BH mass (Msun)
    initial_BH_mass : BH mass at formation (Msun)
    mdot_BH         : BH mass-growth rate (Msun/yr)
    """
    fbol = 0.8  # bolometric correction applied to the accretion luminosity
    # Radiative efficiency grows as the BH spins up from accreted mass.
    mass_ratio = current_BH_mass / (3 * initial_BH_mass)
    eta = 1 - np.sqrt(1 - mass_ratio ** 2)
    # Rest-mass accretion rate implied by the observed mass growth.
    acc_rate = mdot_BH / (1 - eta)
    ## accretion luminosity in erg/sec (Msun/yr converted to g/s)
    return fbol * eta * acc_rate * c ** 2 * Msun / secyr
path = "../cosmic_output/cosmic_3.0_fp_largerbcm/"
## calculate luminosities for all evolved merging tracks
binpath = path + "evolved_merger_tracks/*bcm.csv"
all_binaries = glob.glob(binpath)
# Per-system accumulators: luminosity tracks, their time stamps, and the
# total span during which the source is brighter than 1e35 erg/s.
luminosities = []
times = []
time_differences = []
for binary in all_binaries:
    bcm = pd.read_csv(binary)
    obs_start_time = 0
    obs_end_time = 0
    system_lum = []
    system_time = []
    # First row where star 1 is a BH (kstar == 14); skip the system if
    # star 1 never becomes a BH.  NOTE(review): bare except also hides
    # unrelated errors (e.g. a missing column) -- consider catching
    # IndexError/KeyError explicitly.
    try: BH1_index = np.where(bcm['kstar_1'] == 14)[0][0]
    except:
        print ("check for loop:", binary)
        continue
    try: BH2_index = np.where(bcm['kstar_2'] == 14)[0][0]
    except:
        print ("check for loop:", binary)
        continue
    ## BH1 is formed first
    if BH2_index > BH1_index:
        ## check for non MS donors
        if bcm['kstar_2'][BH1_index] != 1:
            print ("different HMXB objects:", binary)
            print (bcm['kstar_2'][BH1_index])
        BH_initial_mass = bcm['mass_1'][BH1_index]
        BH_mdot = bcm['deltam_1']
        # First timestep with positive accretion onto the BH marks the
        # start of the HMXB phase.
        HMXB_start = np.where(BH_mdot > 0)[0][0]
        if (BH1_index != HMXB_start):
            print (BH1_index, HMXB_start)
        i = HMXB_start
        # Walk forward while accretion continues; record steps brighter
        # than 1e35 erg/s and track first/last observable times.
        while (i < len(bcm['bin_num']) and BH_mdot[i] > 0):
            step_lum = calc_luminosity(bcm['mass_1'][i], BH_initial_mass, BH_mdot[i])
            if step_lum > 1e35:
                if obs_start_time == 0:
                    obs_start_time = bcm['tphys'][i]
                else:
                    obs_end_time = bcm['tphys'][i]
                system_lum.append(step_lum)
                system_time.append(bcm['tphys'][i])
            i += 1
    ## BH2 is formed first
    else:
        ## check for non MS donors
        # NOTE(review): this branch handles the case where star 2 becomes a
        # BH first, yet it still indexes rows with BH1_index (star 1's BH
        # formation row) -- BH2_index may have been intended here and in
        # the BH_initial_mass lookup below; confirm against COSMIC docs.
        if bcm['kstar_1'][BH1_index] != 1:
            print ("different HMXB objects:", binary)
            print (bcm['kstar_1'][BH1_index])
        BH_initial_mass = bcm['mass_2'][BH1_index]
        BH_mdot = bcm['deltam_2']
        try: HMXB_start = np.where(BH_mdot > 0)[0][0]
        except:
            print ("check for loop:", binary)
            continue
        if (BH1_index != HMXB_start):
            print (BH1_index, HMXB_start)
        i = HMXB_start
        while (i < len(bcm['bin_num']) and BH_mdot[i] > 0):
            step_lum = calc_luminosity(bcm['mass_2'][i], BH_initial_mass, BH_mdot[i])
            if step_lum > 1e35:
                if obs_start_time == 0:
                    obs_start_time = bcm['tphys'][i]
                else:
                    obs_end_time = bcm['tphys'][i]
                system_lum.append(step_lum)
                system_time.append(bcm['tphys'][i])
            i += 1
    # Duration of the observable (>1e35 erg/s) phase for this system.
    time_differences.append(np.abs(obs_end_time - obs_start_time))
    luminosities.append(system_lum)
    times.append(system_time)
np.save(path + "final_merger_luminosities", luminosities)
np.save(path + "final_merger_times", times)
np.save(path + "final_observable_time_spans", time_differences)
| [
"numpy.save",
"numpy.abs",
"pandas.read_csv",
"numpy.where",
"glob.glob",
"numpy.sqrt"
] | [((877, 895), 'glob.glob', 'glob.glob', (['binpath'], {}), '(binpath)\n', (886, 895), False, 'import glob\n'), ((3512, 3569), 'numpy.save', 'np.save', (["(path + 'final_merger_luminosities')", 'luminosities'], {}), "(path + 'final_merger_luminosities', luminosities)\n", (3519, 3569), True, 'import numpy as np\n'), ((3570, 3613), 'numpy.save', 'np.save', (["(path + 'final_merger_times')", 'times'], {}), "(path + 'final_merger_times', times)\n", (3577, 3613), True, 'import numpy as np\n'), ((3614, 3677), 'numpy.save', 'np.save', (["(path + 'final_observable_time_spans')", 'time_differences'], {}), "(path + 'final_observable_time_spans', time_differences)\n", (3621, 3677), True, 'import numpy as np\n'), ((988, 1007), 'pandas.read_csv', 'pd.read_csv', (['binary'], {}), '(binary)\n', (999, 1007), True, 'import pandas as pd\n'), ((428, 487), 'numpy.sqrt', 'np.sqrt', (['(1 - (current_BH_mass / (3 * initial_BH_mass)) ** 2)'], {}), '(1 - (current_BH_mass / (3 * initial_BH_mass)) ** 2)\n', (435, 487), True, 'import numpy as np\n'), ((3406, 3443), 'numpy.abs', 'np.abs', (['(obs_end_time - obs_start_time)'], {}), '(obs_end_time - obs_start_time)\n', (3412, 3443), True, 'import numpy as np\n'), ((1124, 1154), 'numpy.where', 'np.where', (["(bcm['kstar_1'] == 14)"], {}), "(bcm['kstar_1'] == 14)\n", (1132, 1154), True, 'import numpy as np\n'), ((1252, 1282), 'numpy.where', 'np.where', (["(bcm['kstar_2'] == 14)"], {}), "(bcm['kstar_2'] == 14)\n", (1260, 1282), True, 'import numpy as np\n'), ((1736, 1757), 'numpy.where', 'np.where', (['(BH_mdot > 0)'], {}), '(BH_mdot > 0)\n', (1744, 1757), True, 'import numpy as np\n'), ((2711, 2732), 'numpy.where', 'np.where', (['(BH_mdot > 0)'], {}), '(BH_mdot > 0)\n', (2719, 2732), True, 'import numpy as np\n')] |
import math
import logging
import cv2
import numpy
from scipy.ndimage.filters import maximum_filter
import os.path
import sys
import numpy as np
from os import listdir
from os.path import join
# Hard guard: the code below relies on Python 2 semantics (xrange,
# integer division), so refuse to run under any other major version.
if sys.version_info[0] != 2:
    raise Exception("This script was written for Python version 2. You're running Python %s." % sys.version)
logger = logging.getLogger(__name__)
# Ideal image size. Program will scale input image appropriately
# NOTE(review): the names suggest (rows, cols), but the pair is passed to
# cv2.resize as dsize, which OpenCV interprets as (width, height) --
# confirm the intended orientation.
dim_rows = 640
dim_cols = 480
def features(image, channel, levels=9, start_size=(dim_rows, dim_cols), ):
    """
    Extracts features by down-scaling the image levels times,
    transforms the image by applying the function channel to
    each scaled version and computing the difference between
    the scaled, transformed versions.
        image : the image
        channel : a function which transforms the image into
                another image of the same size
        levels : number of scaling levels
        start_size : tuple. The size of the biggest image in
                the scaling pyramid. The image is first
                scaled to that size and then scaled by half
                levels times. Therefore, both entries in
                start_size must be divisible by 2^levels.
    Returns a list of ((coarse_level, fine_level), difference_image) pairs.
    """
    image = channel(image)
    # NOTE(review): image.shape is (rows, cols) while cv2.resize's dsize is
    # (width, height); comparing shape to start_size and then passing
    # start_size as dsize only agrees for square images -- confirm intent.
    if image.shape != start_size:
        image = cv2.resize(image, dsize=start_size)
    # Build a Gaussian pyramid; each level is half the previous size.
    # `range` replaces the Python-2-only `xrange` (identical semantics
    # here, and portable to Python 3).
    scales = [image]
    for l in range(levels - 1):
        logger.debug("scaling at level %d", l)
        scales.append(cv2.pyrDown(scales[-1]))
    # Center-surround differences between pyramid level i and levels
    # i+3 / i+4, as in Itti et al. (1998).
    features = []
    for i in range(1, levels - 5):
        big = scales[i]
        for j in (3,4):
            logger.debug("computing features for levels %d and %d", i, i + j)
            small = scales[i + j]
            srcsize = small.shape[1],small.shape[0]
            dstsize = big.shape[1],big.shape[0]
            logger.debug("Shape source: %s, Shape target :%s", srcsize, dstsize)
            # Upsample the coarse level so the per-pixel difference is defined.
            scaled = cv2.resize(src=small, dsize=dstsize)
            features.append(((i+1,j+1),cv2.absdiff(big, scaled)))
    return features
def intensity(image):
    """Grayscale version of a color image.

    Suitable as the `channel' argument to the function `features'.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return gray
def makeGaborFilter(dims, lambd, theta, psi, sigma, gamma):
    """
    Creates a Gabor filter (an array) with parameters lambd, theta,
    psi, sigma, and gamma of size dims. Returns a function which
    can be passed to `features' as `channel' argument.

    In some versions of OpenCV, sizes greater than (11,11) will lead
    to segfaults (see http://code.opencv.org/issues/2644).
    """
    def xpf(i,j):
        # rotate (i, j) into the filter's orientation frame
        return i*math.cos(theta) + j*math.sin(theta)
    def ypf(i,j):
        return -i*math.sin(theta) + j*math.cos(theta)
    def gabor(i,j):
        xp = xpf(i,j)
        yp = ypf(i,j)
        # NOTE(review): the exponent multiplies by sigma**2 instead of
        # dividing by (2*sigma**2) as in the textbook Gabor kernel; the
        # shipped parameters may be tuned to this form, so it is left as-is.
        return math.exp(-(xp**2 + gamma**2*yp**2)/2*sigma**2) * math.cos(2*math.pi*xp/lambd + psi)
    # `//` keeps the Python 2 integer-halving behavior under Python 3 too.
    halfwidth = dims[0] // 2
    halfheight = dims[1] // 2
    # BUG FIX: the outer comprehension previously iterated range(dims[1]),
    # producing a dims[1] x dims[1] kernel; for non-square dims the kernel
    # had the wrong shape. It now iterates dims[0] rows of dims[1] columns.
    kernel = numpy.array([[gabor(halfwidth - i,halfheight - j) for j in range(dims[1])] for i in range(dims[0])])
    def theFilter(image):
        return cv2.filter2D(src = image, ddepth = -1, kernel = kernel, )
    return theFilter
def intensityConspicuity(image):
    """
    Creates the conspicuity map for the channel `intensity'.
    """
    # BUG FIX: this previously computed features of the global variable
    # `im` instead of the `image` argument, silently ignoring the
    # parameter. It only worked because __main__ happened to pass `im`.
    fs = features(image = image, channel = intensity)
    return sumNormalizedFeatures(fs)
def gaborConspicuity(image, steps):
    """
    Creates the conspicuity map for the channel `orientations'.

    `steps` Gabor orientations are sampled uniformly over [0, pi) and
    their normalized feature maps are accumulated.
    """
    gaborConspicuity_ = numpy.zeros((dim_cols, dim_rows), numpy.uint8)
    for step in range(steps):
        theta = step * (math.pi/steps)
        gaborFilter = makeGaborFilter(dims=(10,10), lambd=2.5, theta=theta, psi=math.pi/2, sigma=2.5, gamma=.5)
        # BUG FIX: this previously filtered the global variable `im`
        # instead of the `image` argument, ignoring the parameter.
        gaborFeatures = features(image = intensity(image), channel = gaborFilter)
        summedFeatures = sumNormalizedFeatures(gaborFeatures)
        # NOTE(review): accumulating float maps into a uint8 buffer with
        # casting="unsafe" truncates/wraps values -- confirm intended.
        np.add(gaborConspicuity_, N(summedFeatures), out=gaborConspicuity_, casting="unsafe")
    return gaborConspicuity_
def rgConspicuity(image):
    """
    Conspicuity map for the red-green opponency sub-channel of the
    color channel.
    """
    def rg(img):
        # absolute red-green difference of a 4-channel (R,G,B,Y) image
        r, g, _, __ = cv2.split(img)
        return cv2.absdiff(r, g)
    return sumNormalizedFeatures(features(image=image, channel=rg))
def byConspicuity(image):
    """
    Conspicuity map for the blue-yellow opponency sub-channel of the
    color channel.
    """
    def by(img):
        # absolute blue-yellow difference of a 4-channel (R,G,B,Y) image
        _, __, b, y = cv2.split(img)
        return cv2.absdiff(b, y)
    return sumNormalizedFeatures(features(image=image, channel=by))
def sumNormalizedFeatures(features, levels=9, startSize=(dim_rows*8, dim_cols*8)):
    """
    Normalizes the feature maps in argument features and combines them into one.
    Arguments:
        features	: list of feature maps (images)
        levels		: the levels of the Gaussian pyramid used to
                    calculate the feature maps.
        startSize	: the base size of the Gaussian pyramit used to
                    calculate the feature maps.
    returns:
        a combined feature map.
    """
    # Explicit floor division: under Python 2 `/` on ints already floored,
    # but under Python 3 it yields floats, which cv2.resize rejects as a
    # size. `//` keeps the Python 2 result unchanged and is version-safe.
    commonWidth = startSize[0] // 2**(levels//2 - 1)
    commonHeight = startSize[1] // 2**(levels//2 - 1)
    commonSize = commonWidth, commonHeight
    logger.info("Size of conspicuity map: %s", commonSize)
    # Normalize every map to the common size and sum them.
    consp = N(cv2.resize(features[0][1], commonSize))
    for f in features[1:]:
        resized = N(cv2.resize(f[1], commonSize))
        consp = cv2.add(consp, resized)
    return consp
def N(image):
    """
    Normalization parameter as per Itti et al. (1998).
    returns a normalized feature map image.
    """
    M = 8.  # an arbitrary global maximum to which the image is scaled.
            # (When saving saliency maps as images, pixel values may become
            # too large or too small for the chosen image format depending
            # on this constant)
    # Rescale so the map's maximum equals M.
    image = cv2.convertScaleAbs(image, alpha=M/image.max(), beta=0.)
    # NOTE(review): image.shape is (rows, cols), so naming the pair (w, h)
    # is misleading. Also size=(w/10, h/1) makes the maximum filter span
    # the entire second axis -- h/10 may have been intended; confirm.
    w,h = image.shape
    maxima = maximum_filter(image, size=(w/10,h/1))
    maxima = (image == maxima)
    # Number of local maxima found by the filter.
    mnum = maxima.sum()
    logger.debug("Found %d local maxima.", mnum)
    # Keep only the image values at the maxima positions.
    maxima = numpy.multiply(maxima, image)
    # Average height of the local maxima.
    mbar = float(maxima.sum()) / mnum
    logger.debug("Average of local maxima: %f. Global maximum: %f", mbar, M)
    # Promote maps with few strong peaks: (M - mbar)^2 is large when the
    # average maximum is far below the global maximum.
    return image * (M-mbar)**2
def makeNormalizedColorChannels(image, thresholdRatio=10.):
    """
    Creates a version of the (3-channel color) input image in which each of
    the (4) channels is normalized. Implements color opponencies as per
    Itti et al. (1998).
    Arguments:
        image			: input image (3 color channels)
        thresholdRatio	: the threshold below which to set all color values
                        to zero.
    Returns:
        an output image with four normalized color channels for red, green,
        blue and yellow.
    """
    intens = intensity(image)
    threshold = intens.max() / thresholdRatio
    logger.debug("Threshold: %d", threshold)
    r,g,b = cv2.split(image)
    # Zero out pixels below the intensity-derived threshold, in place.
    for plane in (r, g, b):
        cv2.threshold(src=plane, dst=plane, thresh=threshold, maxval=0.0, type=cv2.THRESH_TOZERO)
    # Color opponency channels (Itti et al. 1998).
    R = r - (g + b) / 2
    G = g - (r + b) / 2
    B = b - (g + r) / 2
    Y = (r + g) / 2 - cv2.absdiff(r,g) / 2 - b
    # Negative values are set to zero.
    for plane in (R, G, B, Y):
        cv2.threshold(src=plane, dst=plane, thresh=0., maxval=0.0, type=cv2.THRESH_TOZERO)
    return cv2.merge((R,G,B,Y))
def markMaxima(saliency):
    """
    Mark the maxima in a saliency map (a gray-scale image).

    Maxima are boosted to 255 in the green channel; blue and red carry
    the original map, so marked peaks show up green.
    """
    peaks = maximum_filter(saliency, size=(5, 5))
    peaks = 255 * numpy.array(saliency == peaks, dtype=numpy.float64)
    green = cv2.max(saliency, peaks)
    return cv2.merge((saliency, green, saliency))
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    import argparse
    import sys
    # CLI: exactly one of --fileList / --inputFile / --inputDir selects the
    # input images; --outputDir receives the saliency maps.
    parser = argparse.ArgumentParser(description = "Simple Itti-Koch-style conspicuity.")
    parser.add_argument('--fileList', type=str, dest='fileList', action='store', help='Text file containing input file names, one per line.')
    parser.add_argument('--inputFile', type=str, dest='inputFile', action='store', help='File to compute saliency list for.')
    parser.add_argument('--inputDir', type=str, dest='inputDir', action='store', help='Directory to compute saliency list for. Need --fileList or --inputFile or --inputDir.')
    parser.add_argument('--outputDir', type=str, dest='outputDir', action='store', help="Output directory for all maps.")
    parser.add_argument("--markMaxima", action='store_true', help="Mark maximum saliency in output image.")
    args = parser.parse_args()
    if args.fileList is None and args.inputFile is None and args.inputDir is None:
        logger.error("Need either --fileList or --inputFile or --inputDir cmd line arguments.")
        sys.exit()
    else:
        if args.fileList:
            # we are reading filenames from a file.
            filenames = (filename[:-1] for filename in open(args.fileList)) # remove end-of line character
        elif args.inputFile:
            # filenames were given on the command line.
            filenames = [args.inputFile]
        else:
            # read filenames from directory.
            filenames = [join(args.inputDir, f) for f in listdir(args.inputDir)]
        for filename in filenames:
            # NOTE(review): imread's second argument is a read *flag*, not a
            # color-conversion code; passing cv2.COLOR_BGR2RGB here does not
            # convert BGR to RGB as the comment suggests -- confirm intent.
            im = cv2.imread(filename, cv2.COLOR_BGR2RGB) # assume BGR, convert to RGB---more intuitive code.
            if im is None:
                logger.fatal("Could not load file \"%s.\"", filename)
                sys.exit()
            # Intensity and orientation maps use the raw image; color maps
            # use the 4-channel opponency image (note `im` is rebound).
            intensty = intensityConspicuity(im)
            gabor = gaborConspicuity(im, 4)
            im = makeNormalizedColorChannels(im)
            rg = rgConspicuity(im)
            by = byConspicuity(im)
            c = rg + by
            # Final saliency: mean of the three normalized conspicuity maps.
            saliency = 1./3 * (N(intensty) + N(c) + N(gabor))
            if args.markMaxima:
                saliency = markMaxima(saliency)
            # Write `image` next to the input file's name with a descriptive
            # suffix; a no-op when --outputDir was not given.
            def writeCond(outputDir, image, desc='saliency'):
                name, ext = os.path.splitext(os.path.basename(filename))
                if outputDir:
                    cv2.imwrite(join(outputDir, name + '_' + desc + ext), image)
            '''
            writeCond(args.outputDir, intensty, 'intensity')
            writeCond(args.outputDir, gabor, 'gabor')
            writeCond(args.outputDir, rg, 'rg')
            writeCond(args.outputDir, by, 'by')
            writeCond(args.outputDir, .25 * c, 'c')
            '''
            writeCond(args.outputDir, saliency)
| [
"cv2.max",
"argparse.ArgumentParser",
"cv2.absdiff",
"cv2.pyrDown",
"os.path.join",
"numpy.multiply",
"cv2.filter2D",
"cv2.cvtColor",
"cv2.split",
"math.cos",
"cv2.resize",
"math.sin",
"cv2.merge",
"cv2.add",
"sys.exit",
"os.listdir",
"math.exp",
"scipy.ndimage.filters.maximum_filt... | [((342, 369), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (359, 369), False, 'import logging\n'), ((2071, 2110), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (2083, 2110), False, 'import cv2\n'), ((3404, 3450), 'numpy.zeros', 'numpy.zeros', (['(dim_cols, dim_rows)', 'numpy.uint8'], {}), '((dim_cols, dim_rows), numpy.uint8)\n', (3415, 3450), False, 'import numpy\n'), ((5807, 5850), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['image'], {'size': '(w / 10, h / 1)'}), '(image, size=(w / 10, h / 1))\n', (5821, 5850), False, 'from scipy.ndimage.filters import maximum_filter\n'), ((5951, 5980), 'numpy.multiply', 'numpy.multiply', (['maxima', 'image'], {}), '(maxima, image)\n', (5965, 5980), False, 'import numpy\n'), ((6724, 6740), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (6733, 6740), False, 'import cv2\n'), ((6742, 6828), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'r', 'dst': 'r', 'thresh': 'threshold', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=r, dst=r, thresh=threshold, maxval=0.0, type=cv2.\n THRESH_TOZERO)\n', (6755, 6828), False, 'import cv2\n'), ((6825, 6911), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'g', 'dst': 'g', 'thresh': 'threshold', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=g, dst=g, thresh=threshold, maxval=0.0, type=cv2.\n THRESH_TOZERO)\n', (6838, 6911), False, 'import cv2\n'), ((6908, 6994), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'b', 'dst': 'b', 'thresh': 'threshold', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=b, dst=b, thresh=threshold, maxval=0.0, type=cv2.\n THRESH_TOZERO)\n', (6921, 6994), False, 'import cv2\n'), ((7135, 7210), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'R', 'dst': 'R', 'thresh': '(0.0)', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=R, dst=R, thresh=0.0, maxval=0.0, type=cv2.THRESH_TOZERO)\n', 
(7148, 7210), False, 'import cv2\n'), ((7211, 7286), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'G', 'dst': 'G', 'thresh': '(0.0)', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=G, dst=G, thresh=0.0, maxval=0.0, type=cv2.THRESH_TOZERO)\n', (7224, 7286), False, 'import cv2\n'), ((7287, 7362), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'B', 'dst': 'B', 'thresh': '(0.0)', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=B, dst=B, thresh=0.0, maxval=0.0, type=cv2.THRESH_TOZERO)\n', (7300, 7362), False, 'import cv2\n'), ((7363, 7438), 'cv2.threshold', 'cv2.threshold', ([], {'src': 'Y', 'dst': 'Y', 'thresh': '(0.0)', 'maxval': '(0.0)', 'type': 'cv2.THRESH_TOZERO'}), '(src=Y, dst=Y, thresh=0.0, maxval=0.0, type=cv2.THRESH_TOZERO)\n', (7376, 7438), False, 'import cv2\n'), ((7448, 7471), 'cv2.merge', 'cv2.merge', (['(R, G, B, Y)'], {}), '((R, G, B, Y))\n', (7457, 7471), False, 'import cv2\n'), ((7588, 7625), 'scipy.ndimage.filters.maximum_filter', 'maximum_filter', (['saliency'], {'size': '(5, 5)'}), '(saliency, size=(5, 5))\n', (7602, 7625), False, 'from scipy.ndimage.filters import maximum_filter\n'), ((7700, 7725), 'cv2.max', 'cv2.max', (['saliency', 'maxima'], {}), '(saliency, maxima)\n', (7707, 7725), False, 'import cv2\n'), ((7764, 7784), 'cv2.merge', 'cv2.merge', (['(b, g, r)'], {}), '((b, g, r))\n', (7773, 7784), False, 'import cv2\n'), ((7828, 7868), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (7847, 7868), False, 'import logging\n'), ((7909, 7983), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple Itti-Koch-style conspicuity."""'}), "(description='Simple Itti-Koch-style conspicuity.')\n", (7932, 7983), False, 'import argparse\n'), ((1313, 1348), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': 'start_size'}), '(image, dsize=start_size)\n', (1323, 1348), False, 'import cv2\n'), ((2950, 2999), 'cv2.filter2D', 'cv2.filter2D', ([], {'src': 
'image', 'ddepth': '(-1)', 'kernel': 'kernel'}), '(src=image, ddepth=-1, kernel=kernel)\n', (2962, 2999), False, 'import cv2\n'), ((4067, 4083), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (4076, 4083), False, 'import cv2\n'), ((4093, 4110), 'cv2.absdiff', 'cv2.absdiff', (['r', 'g'], {}), '(r, g)\n', (4104, 4110), False, 'import cv2\n'), ((4355, 4371), 'cv2.split', 'cv2.split', (['image'], {}), '(image)\n', (4364, 4371), False, 'import cv2\n'), ((4381, 4398), 'cv2.absdiff', 'cv2.absdiff', (['b', 'y'], {}), '(b, y)\n', (4392, 4398), False, 'import cv2\n'), ((5212, 5250), 'cv2.resize', 'cv2.resize', (['features[0][1]', 'commonSize'], {}), '(features[0][1], commonSize)\n', (5222, 5250), False, 'import cv2\n'), ((5330, 5353), 'cv2.add', 'cv2.add', (['consp', 'resized'], {}), '(consp, resized)\n', (5337, 5353), False, 'import cv2\n'), ((7636, 7688), 'numpy.array', 'numpy.array', (['(saliency == maxima)'], {'dtype': 'numpy.float64'}), '(saliency == maxima, dtype=numpy.float64)\n', (7647, 7688), False, 'import numpy\n'), ((8846, 8856), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8854, 8856), False, 'import sys\n'), ((1455, 1478), 'cv2.pyrDown', 'cv2.pyrDown', (['scales[-1]'], {}), '(scales[-1])\n', (1466, 1478), False, 'import cv2\n'), ((1825, 1861), 'cv2.resize', 'cv2.resize', ([], {'src': 'small', 'dsize': 'dstsize'}), '(src=small, dsize=dstsize)\n', (1835, 1861), False, 'import cv2\n'), ((2673, 2733), 'math.exp', 'math.exp', (['(-(xp ** 2 + gamma ** 2 * yp ** 2) / 2 * sigma ** 2)'], {}), '(-(xp ** 2 + gamma ** 2 * yp ** 2) / 2 * sigma ** 2)\n', (2681, 2733), False, 'import math\n'), ((2722, 2762), 'math.cos', 'math.cos', (['(2 * math.pi * xp / lambd + psi)'], {}), '(2 * math.pi * xp / lambd + psi)\n', (2730, 2762), False, 'import math\n'), ((5290, 5318), 'cv2.resize', 'cv2.resize', (['f[1]', 'commonSize'], {}), '(f[1], commonSize)\n', (5300, 5318), False, 'import cv2\n'), ((9281, 9320), 'cv2.imread', 'cv2.imread', (['filename', 'cv2.COLOR_BGR2RGB'], {}), 
'(filename, cv2.COLOR_BGR2RGB)\n', (9291, 9320), False, 'import cv2\n'), ((2516, 2531), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2524, 2531), False, 'import math\n'), ((2536, 2551), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2544, 2551), False, 'import math\n'), ((2579, 2594), 'math.sin', 'math.sin', (['theta'], {}), '(theta)\n', (2587, 2594), False, 'import math\n'), ((2599, 2614), 'math.cos', 'math.cos', (['theta'], {}), '(theta)\n', (2607, 2614), False, 'import math\n'), ((7072, 7089), 'cv2.absdiff', 'cv2.absdiff', (['r', 'g'], {}), '(r, g)\n', (7083, 7089), False, 'import cv2\n'), ((9454, 9464), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9462, 9464), False, 'import sys\n'), ((1892, 1916), 'cv2.absdiff', 'cv2.absdiff', (['big', 'scaled'], {}), '(big, scaled)\n', (1903, 1916), False, 'import cv2\n'), ((9187, 9209), 'os.path.join', 'join', (['args.inputDir', 'f'], {}), '(args.inputDir, f)\n', (9191, 9209), False, 'from os.path import join\n'), ((9219, 9241), 'os.listdir', 'listdir', (['args.inputDir'], {}), '(args.inputDir)\n', (9226, 9241), False, 'from os import listdir\n'), ((9912, 9952), 'os.path.join', 'join', (['outputDir', "(name + '_' + desc + ext)"], {}), "(outputDir, name + '_' + desc + ext)\n", (9916, 9952), False, 'from os.path import join\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import os
from data import mnist_iterator
import mxnet as mx
import numpy as np
import logging
class Softmax(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
x = in_data[0].asnumpy()
y = np.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
y /= y.sum(axis=1).reshape((x.shape[0], 1))
self.assign(out_data[0], req[0], mx.nd.array(y))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
l = in_data[1].asnumpy().ravel().astype(np.int)
y = out_data[0].asnumpy()
y[np.arange(l.shape[0]), l] -= 1.0
self.assign(in_grad[0], req[0], mx.nd.array(y))
@mx.operator.register("softmax")
class SoftmaxProp(mx.operator.CustomOpProp):
def __init__(self):
super(SoftmaxProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['data', 'label']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
data_shape = in_shape[0]
label_shape = (in_shape[0][0],)
output_shape = in_shape[0]
return [data_shape, label_shape], [output_shape], []
def infer_type(self, in_type):
return in_type, [in_type[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Softmax()
# define mlp
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
#mlp = mx.symbol.Softmax(data = fc3, name = 'softmax')
mlp = mx.symbol.Custom(data=fc3, name='softmax', op_type='softmax')
# data
train, val = mnist_iterator(batch_size=100, input_shape = (784,))
# train
logging.basicConfig(level=logging.DEBUG)
# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
model = mx.model.FeedForward(
ctx = mx.cpu(0), symbol = mlp, num_epoch = 20,
learning_rate = 0.1, momentum = 0.9, wd = 0.00001)
model.fit(X=train, eval_data=val,
batch_end_callback=mx.callback.Speedometer(100,100))
| [
"mxnet.callback.Speedometer",
"mxnet.symbol.Activation",
"mxnet.symbol.FullyConnected",
"mxnet.symbol.Custom",
"logging.basicConfig",
"mxnet.operator.register",
"mxnet.symbol.Variable",
"numpy.arange",
"mxnet.cpu",
"mxnet.nd.array",
"data.mnist_iterator"
] | [((1471, 1502), 'mxnet.operator.register', 'mx.operator.register', (['"""softmax"""'], {}), "('softmax')\n", (1491, 1502), True, 'import mxnet as mx\n'), ((2138, 2164), 'mxnet.symbol.Variable', 'mx.symbol.Variable', (['"""data"""'], {}), "('data')\n", (2156, 2164), True, 'import mxnet as mx\n'), ((2171, 2234), 'mxnet.symbol.FullyConnected', 'mx.symbol.FullyConnected', ([], {'data': 'data', 'name': '"""fc1"""', 'num_hidden': '(128)'}), "(data=data, name='fc1', num_hidden=128)\n", (2195, 2234), True, 'import mxnet as mx\n'), ((2244, 2305), 'mxnet.symbol.Activation', 'mx.symbol.Activation', ([], {'data': 'fc1', 'name': '"""relu1"""', 'act_type': '"""relu"""'}), "(data=fc1, name='relu1', act_type='relu')\n", (2264, 2305), True, 'import mxnet as mx\n'), ((2314, 2376), 'mxnet.symbol.FullyConnected', 'mx.symbol.FullyConnected', ([], {'data': 'act1', 'name': '"""fc2"""', 'num_hidden': '(64)'}), "(data=act1, name='fc2', num_hidden=64)\n", (2338, 2376), True, 'import mxnet as mx\n'), ((2390, 2451), 'mxnet.symbol.Activation', 'mx.symbol.Activation', ([], {'data': 'fc2', 'name': '"""relu2"""', 'act_type': '"""relu"""'}), "(data=fc2, name='relu2', act_type='relu')\n", (2410, 2451), True, 'import mxnet as mx\n'), ((2460, 2522), 'mxnet.symbol.FullyConnected', 'mx.symbol.FullyConnected', ([], {'data': 'act2', 'name': '"""fc3"""', 'num_hidden': '(10)'}), "(data=act2, name='fc3', num_hidden=10)\n", (2484, 2522), True, 'import mxnet as mx\n'), ((2586, 2647), 'mxnet.symbol.Custom', 'mx.symbol.Custom', ([], {'data': 'fc3', 'name': '"""softmax"""', 'op_type': '"""softmax"""'}), "(data=fc3, name='softmax', op_type='softmax')\n", (2602, 2647), True, 'import mxnet as mx\n'), ((2670, 2720), 'data.mnist_iterator', 'mnist_iterator', ([], {'batch_size': '(100)', 'input_shape': '(784,)'}), '(batch_size=100, input_shape=(784,))\n', (2684, 2720), False, 'from data import mnist_iterator\n'), ((2733, 2773), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), 
'(level=logging.DEBUG)\n', (2752, 2773), False, 'import logging\n'), ((2895, 2904), 'mxnet.cpu', 'mx.cpu', (['(0)'], {}), '(0)\n', (2901, 2904), True, 'import mxnet as mx\n'), ((3055, 3088), 'mxnet.callback.Speedometer', 'mx.callback.Speedometer', (['(100)', '(100)'], {}), '(100, 100)\n', (3078, 3088), True, 'import mxnet as mx\n'), ((1191, 1205), 'mxnet.nd.array', 'mx.nd.array', (['y'], {}), '(y)\n', (1202, 1205), True, 'import mxnet as mx\n'), ((1453, 1467), 'mxnet.nd.array', 'mx.nd.array', (['y'], {}), '(y)\n', (1464, 1467), True, 'import mxnet as mx\n'), ((1380, 1401), 'numpy.arange', 'np.arange', (['l.shape[0]'], {}), '(l.shape[0])\n', (1389, 1401), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
from keras.datasets import mnist
from keras.layers import BatchNormalization
from keras.layers import Concatenate
from keras.layers import Input, Dense, Flatten, Dropout
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras_contrib.layers.normalization import InstanceNormalization
from .gan_base import GANBase
class CCGAN(GANBase):
def __init__(self, *args, **kwargs):
super(CCGAN, self).__init__(*args, **kwargs)
self.img_rows = 32
self.img_cols = 32
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.mask_height = 10
self.mask_width = 10
self.num_classes = 10
# Number of filters in first layer of generator and discriminator
self.gf = 32
self.df = 32
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss=['mse', 'categorical_crossentropy'],
loss_weights=[0.5, 0.5],
optimizer=self.get_optimizer(),
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generates imgs
masked_img = Input(shape=self.img_shape)
gen_img = self.generator(masked_img)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The valid takes generated images as input and determines validity
valid, _ = self.discriminator(gen_img)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model(masked_img, valid)
self.combined.compile(loss=['mse'],
optimizer=self.get_optimizer())
def build_generator(self):
"""U-Net Generator"""
def conv2d(layer_input, filters, f_size=4, bn=True):
"""Layers used during downsampling"""
d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
d = LeakyReLU(alpha=0.2)(d)
if bn:
d = BatchNormalization(momentum=0.8)(d)
return d
def deconv2d(layer_input, skip_input, filters, f_size=4, dropout_rate=0):
"""Layers used during upsampling"""
u = UpSampling2D(size=2)(layer_input)
u = Conv2D(filters, kernel_size=f_size, strides=1, padding='same', activation='relu')(u)
if dropout_rate:
u = Dropout(dropout_rate)(u)
u = BatchNormalization(momentum=0.8)(u)
u = Concatenate()([u, skip_input])
return u
img = Input(shape=self.img_shape)
# Downsampling
d1 = conv2d(img, self.gf, bn=False)
d2 = conv2d(d1, self.gf * 2)
d3 = conv2d(d2, self.gf * 4)
d4 = conv2d(d3, self.gf * 8)
# Upsampling
u1 = deconv2d(d4, d3, self.gf * 4)
u2 = deconv2d(u1, d2, self.gf * 2)
u3 = deconv2d(u2, d1, self.gf)
u4 = UpSampling2D(size=2)(u3)
output_img = Conv2D(self.channels, kernel_size=4, strides=1, padding='same', activation='tanh')(u4)
return Model(img, output_img)
def build_discriminator(self):
img = Input(shape=self.img_shape)
model = Sequential()
model.add(Conv2D(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape))
model.add(LeakyReLU(alpha=0.8))
model.add(Conv2D(128, kernel_size=4, strides=2, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(InstanceNormalization())
model.add(Conv2D(256, kernel_size=4, strides=2, padding='same'))
model.add(LeakyReLU(alpha=0.2))
model.add(InstanceNormalization())
model.summary()
img = Input(shape=self.img_shape)
features = model(img)
validity = Conv2D(1, kernel_size=4, strides=1, padding='same')(features)
label = Flatten()(features)
label = Dense(self.num_classes + 1, activation="softmax")(label)
return Model(img, [validity, label])
def mask_randomly(self, imgs):
y1 = np.random.randint(0, self.img_rows - self.mask_height, imgs.shape[0])
y2 = y1 + self.mask_height
x1 = np.random.randint(0, self.img_rows - self.mask_width, imgs.shape[0])
x2 = x1 + self.mask_width
masked_imgs = np.empty_like(imgs)
for i, img in enumerate(imgs):
masked_img = img.copy()
_y1, _y2, _x1, _x2 = y1[i], y2[i], x1[i], x2[i],
masked_img[_y1:_y2, _x1:_x2, :] = 0
masked_imgs[i] = masked_img
return masked_imgs
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
(X_train, y_train), (_, _) = mnist.load_data()
# Rescale MNIST to 32x32
X_train = np.array([scipy.misc.imresize(x, [self.img_rows, self.img_cols]) for x in X_train])
# Rescale -1 to 1
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
y_train = y_train.reshape(-1, 1)
# Adversarial ground truths
valid = np.ones((batch_size, 4, 4, 1))
fake = np.zeros((batch_size, 4, 4, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Sample half batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
labels = y_train[idx]
masked_imgs = self.mask_randomly(imgs)
# Generate a half batch of new images
gen_imgs = self.generator.predict(masked_imgs)
# One-hot encoding of labels
labels = to_categorical(labels, num_classes=self.num_classes + 1)
fake_labels = to_categorical(np.full((batch_size, 1), self.num_classes), num_classes=self.num_classes + 1)
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(imgs, [valid, labels])
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, [fake, fake_labels])
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
# Train the generator
g_loss = self.combined.train_on_batch(masked_imgs, valid)
# Plot the progress
print("%d [D loss: %f, op_acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[4], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
# Select a random half batch of images
idx = np.random.randint(0, X_train.shape[0], 6)
imgs = X_train[idx]
self.sample_images(epoch, imgs)
self.save_model()
def sample_images(self, epoch, imgs):
r, c = 3, 6
masked_imgs = self.mask_randomly(imgs)
gen_imgs = self.generator.predict(masked_imgs)
imgs = (imgs + 1.0) * 0.5
masked_imgs = (masked_imgs + 1.0) * 0.5
gen_imgs = (gen_imgs + 1.0) * 0.5
gen_imgs = np.where(gen_imgs < 0, 0, gen_imgs)
fig, axs = plt.subplots(r, c)
for i in range(c):
axs[0, i].imshow(imgs[i, :, :, 0], cmap='gray')
axs[0, i].axis('off')
axs[1, i].imshow(masked_imgs[i, :, :, 0], cmap='gray')
axs[1, i].axis('off')
axs[2, i].imshow(gen_imgs[i, :, :, 0], cmap='gray')
axs[2, i].axis('off')
fig.savefig("images/%d.png" % epoch)
plt.close()
def save_model(self):
def save(model, model_name):
model_path = "saved_model/%s.json" % model_name
weights_path = "saved_model/%s_weights.hdf5" % model_name
options = {"file_arch": model_path,
"file_weight": weights_path}
json_string = model.to_json()
open(options['file_arch'], 'w').write(json_string)
model.save_weights(options['file_weight'])
save(self.generator, "ccgan_generator")
save(self.discriminator, "ccgan_discriminator")
if __name__ == '__main__':
ccgan = CCGAN()
ccgan.train(epochs=20000, batch_size=32, sample_interval=200)
| [
"numpy.ones",
"keras.models.Model",
"numpy.random.randint",
"keras_contrib.layers.normalization.InstanceNormalization",
"keras.layers.Input",
"numpy.full",
"keras.layers.convolutional.UpSampling2D",
"matplotlib.pyplot.close",
"numpy.empty_like",
"keras.layers.Flatten",
"numpy.add",
"matplotlib... | [((1619, 1646), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (1624, 1646), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((2078, 2102), 'keras.models.Model', 'Model', (['masked_img', 'valid'], {}), '(masked_img, valid)\n', (2083, 2102), False, 'from keras.models import Sequential, Model\n'), ((3102, 3129), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (3107, 3129), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((3619, 3641), 'keras.models.Model', 'Model', (['img', 'output_img'], {}), '(img, output_img)\n', (3624, 3641), False, 'from keras.models import Sequential, Model\n'), ((3693, 3720), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (3698, 3720), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((3738, 3750), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3748, 3750), False, 'from keras.models import Sequential, Model\n'), ((4243, 4270), 'keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), '(shape=self.img_shape)\n', (4248, 4270), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((4509, 4538), 'keras.models.Model', 'Model', (['img', '[validity, label]'], {}), '(img, [validity, label])\n', (4514, 4538), False, 'from keras.models import Sequential, Model\n'), ((4588, 4657), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.img_rows - self.mask_height)', 'imgs.shape[0]'], {}), '(0, self.img_rows - self.mask_height, imgs.shape[0])\n', (4605, 4657), True, 'import numpy as np\n'), ((4706, 4774), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.img_rows - self.mask_width)', 'imgs.shape[0]'], {}), '(0, self.img_rows - self.mask_width, imgs.shape[0])\n', (4723, 4774), True, 'import numpy as np\n'), ((4832, 4851), 'numpy.empty_like', 'np.empty_like', (['imgs'], {}), '(imgs)\n', 
(4845, 4851), True, 'import numpy as np\n'), ((5235, 5252), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (5250, 5252), False, 'from keras.datasets import mnist\n'), ((5497, 5528), 'numpy.expand_dims', 'np.expand_dims', (['X_train'], {'axis': '(3)'}), '(X_train, axis=3)\n', (5511, 5528), True, 'import numpy as np\n'), ((5623, 5653), 'numpy.ones', 'np.ones', (['(batch_size, 4, 4, 1)'], {}), '((batch_size, 4, 4, 1))\n', (5630, 5653), True, 'import numpy as np\n'), ((5669, 5700), 'numpy.zeros', 'np.zeros', (['(batch_size, 4, 4, 1)'], {}), '((batch_size, 4, 4, 1))\n', (5677, 5700), True, 'import numpy as np\n'), ((7712, 7747), 'numpy.where', 'np.where', (['(gen_imgs < 0)', '(0)', 'gen_imgs'], {}), '(gen_imgs < 0, 0, gen_imgs)\n', (7720, 7747), True, 'import numpy as np\n'), ((7768, 7786), 'matplotlib.pyplot.subplots', 'plt.subplots', (['r', 'c'], {}), '(r, c)\n', (7780, 7786), True, 'import matplotlib.pyplot as plt\n'), ((8160, 8171), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8169, 8171), True, 'import matplotlib.pyplot as plt\n'), ((3470, 3490), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (3482, 3490), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3516, 3603), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['self.channels'], {'kernel_size': '(4)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(self.channels, kernel_size=4, strides=1, padding='same', activation=\n 'tanh')\n", (3522, 3603), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3769, 3854), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(4)', 'strides': '(2)', 'padding': '"""same"""', 'input_shape': 'self.img_shape'}), "(64, kernel_size=4, strides=2, padding='same', input_shape=self.img_shape\n )\n", (3775, 3854), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3869, 3889), 
'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.8)'}), '(alpha=0.8)\n', (3878, 3889), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((3909, 3962), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(4)', 'strides': '(2)', 'padding': '"""same"""'}), "(128, kernel_size=4, strides=2, padding='same')\n", (3915, 3962), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((3982, 4002), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (3991, 4002), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4022, 4045), 'keras_contrib.layers.normalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (4043, 4045), False, 'from keras_contrib.layers.normalization import InstanceNormalization\n'), ((4065, 4118), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(256)'], {'kernel_size': '(4)', 'strides': '(2)', 'padding': '"""same"""'}), "(256, kernel_size=4, strides=2, padding='same')\n", (4071, 4118), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4138, 4158), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (4147, 4158), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((4178, 4201), 'keras_contrib.layers.normalization.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (4199, 4201), False, 'from keras_contrib.layers.normalization import InstanceNormalization\n'), ((4321, 4372), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['(1)'], {'kernel_size': '(4)', 'strides': '(1)', 'padding': '"""same"""'}), "(1, kernel_size=4, strides=1, padding='same')\n", (4327, 4372), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((4400, 4409), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4407, 4409), False, 'from keras.layers import Input, Dense, Flatten, 
Dropout\n'), ((4436, 4485), 'keras.layers.Dense', 'Dense', (['(self.num_classes + 1)'], {'activation': '"""softmax"""'}), "(self.num_classes + 1, activation='softmax')\n", (4441, 4485), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n'), ((5907, 5957), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]', 'batch_size'], {}), '(0, X_train.shape[0], batch_size)\n', (5924, 5957), True, 'import numpy as np\n'), ((6249, 6305), 'keras.utils.to_categorical', 'to_categorical', (['labels'], {'num_classes': '(self.num_classes + 1)'}), '(labels, num_classes=self.num_classes + 1)\n', (6263, 6305), False, 'from keras.utils import to_categorical\n'), ((2399, 2461), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'f_size', 'strides': '(2)', 'padding': '"""same"""'}), "(filters, kernel_size=f_size, strides=2, padding='same')\n", (2405, 2461), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2491, 2511), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (2500, 2511), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((2758, 2778), 'keras.layers.convolutional.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (2770, 2778), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2808, 2894), 'keras.layers.convolutional.Conv2D', 'Conv2D', (['filters'], {'kernel_size': 'f_size', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""'}), "(filters, kernel_size=f_size, strides=1, padding='same', activation=\n 'relu')\n", (2814, 2894), False, 'from keras.layers.convolutional import UpSampling2D, Conv2D\n'), ((2983, 3015), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (3001, 3015), False, 'from keras.layers import BatchNormalization\n'), ((3035, 3048), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (3046, 
3048), False, 'from keras.layers import Concatenate\n'), ((5315, 5369), 'scipy.misc.imresize', 'scipy.misc.imresize', (['x', '[self.img_rows, self.img_cols]'], {}), '(x, [self.img_rows, self.img_cols])\n', (5334, 5369), False, 'import scipy\n'), ((6347, 6389), 'numpy.full', 'np.full', (['(batch_size, 1)', 'self.num_classes'], {}), '((batch_size, 1), self.num_classes)\n', (6354, 6389), True, 'import numpy as np\n'), ((6665, 6697), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (6671, 6697), True, 'import numpy as np\n'), ((7241, 7282), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]', '(6)'], {}), '(0, X_train.shape[0], 6)\n', (7258, 7282), True, 'import numpy as np\n'), ((2554, 2586), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (2572, 2586), False, 'from keras.layers import BatchNormalization\n'), ((2942, 2963), 'keras.layers.Dropout', 'Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2949, 2963), False, 'from keras.layers import Input, Dense, Flatten, Dropout\n')] |
from tensorflow.keras.layers import Conv1D as _Conv1D
from tensorflow.keras.layers import Conv2D as _Conv2D
from tensorflow.keras.layers import MaxPooling2D as _MaxPooling2D
from tensorflow.keras.layers import Dense as _Dense
from tensorflow.keras.layers import Embedding as _Embedding
from tensorflow.keras.layers import LSTM as _LSTM
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Dropout as _Dropout
from tensorflow.keras.layers import BatchNormalization as _BatchNormalization
from tensorflow.keras.layers import TimeDistributed as _TimeDistributed
from tensorflow.keras.layers import Activation as _Activation
from tensorflow.keras.layers import Flatten as _Flatten
from tensorflow.keras.layers import Reshape as _Reshape
from tensorflow.keras import regularizers
from numpy import log2
from autogoal.grammar import (
BooleanValue,
CategoricalValue,
DiscreteValue,
ContinuousValue,
)
from autogoal.utils import nice_repr
@nice_repr
class Seq2SeqLSTM(_LSTM):
def __init__(
self,
units: DiscreteValue(32, 1024),
activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
dropout: ContinuousValue(0, 0.5),
recurrent_dropout: ContinuousValue(0, 0.5),
**kwargs
):
super().__init__(
units=units,
activation=activation_fn,
recurrent_activation=recurrent_activation_fn,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
return_sequences=True,
**kwargs
)
self.activation_fn = activation_fn
self.recurrent_activation_fn = recurrent_activation_fn
def get_config(self):
config = super().get_config()
config["activation_fn"] = self.activation_fn
config["recurrent_activation_fn"] = self.recurrent_activation_fn
config.pop("return_sequences", None)
config.pop("activation", None)
config.pop("recurrent_activation", None)
return config
@nice_repr
class Seq2VecLSTM(_LSTM):
def __init__(
self,
units: DiscreteValue(32, 1024),
activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
dropout: ContinuousValue(0, 0.5),
recurrent_dropout: ContinuousValue(0, 0.5),
**kwargs
):
super().__init__(
units=units,
activation=activation_fn,
recurrent_activation=recurrent_activation_fn,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
return_sequences=False,
**kwargs
)
self.activation_fn = activation_fn
self.recurrent_activation_fn = recurrent_activation_fn
def get_config(self):
config = super().get_config()
config["activation_fn"] = self.activation_fn
config["recurrent_activation_fn"] = self.recurrent_activation_fn
config.pop("return_sequences", None)
config.pop("activation", None)
config.pop("recurrent_activation", None)
return config
@nice_repr
class Seq2SeqBiLSTM(Bidirectional):
def __init__(
self,
merge_mode: CategoricalValue("sum", "mul", "concat", "ave"),
units: DiscreteValue(32, 1024),
activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
dropout: ContinuousValue(0, 0.5),
recurrent_dropout: ContinuousValue(0, 0.5),
**kwargs
):
super().__init__(
layer=_LSTM(
units=units,
activation=activation_fn,
recurrent_activation=recurrent_activation_fn,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
return_sequences=True,
),
merge_mode=merge_mode,
**kwargs
)
self.activation_fn = activation_fn
self.recurrent_activation_fn = recurrent_activation_fn
def get_config(self):
config = super().get_config()
config["units"] = self.layer.units
config["activation_fn"] = self.activation_fn
config["recurrent_activation_fn"] = self.recurrent_activation_fn
config["dropout"] = self.layer.dropout
config["recurrent_dropout"] = self.layer.recurrent_dropout
config.pop("layer", None)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
@nice_repr
class Seq2VecBiLSTM(Bidirectional):
def __init__(
self,
merge_mode: CategoricalValue("sum", "mul", "concat", "ave"),
units: DiscreteValue(32, 1024),
activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
recurrent_activation_fn: CategoricalValue("tanh", "sigmoid", "relu", "linear"),
dropout: ContinuousValue(0, 0.5),
recurrent_dropout: ContinuousValue(0, 0.5),
**kwargs
):
super().__init__(
layer=_LSTM(
units=units,
activation=activation_fn,
recurrent_activation=recurrent_activation_fn,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
return_sequences=False,
),
merge_mode=merge_mode,
**kwargs
)
self.activation_fn = activation_fn
self.recurrent_activation_fn = recurrent_activation_fn
def get_config(self):
config = super().get_config()
config["units"] = self.layer.units
config["activation_fn"] = self.activation_fn
config["recurrent_activation_fn"] = self.recurrent_activation_fn
config["dropout"] = self.layer.dropout
config["recurrent_dropout"] = self.layer.recurrent_dropout
config.pop("layer", None)
return config
@classmethod
def from_config(cls, config):
return cls(**config)
@nice_repr
class Reshape2D(_Reshape):
def __init__(self, **kwargs):
super().__init__(target_shape=(-1, 1), **kwargs)
def get_config(self):
config = super().get_config()
config.pop("target_shape", None)
return config
@nice_repr
class Embedding(_Embedding):
def __init__(self, output_dim: DiscreteValue(32, 128), **kwargs):
super().__init__(input_dim=1000, output_dim=output_dim, **kwargs)
def get_config(self):
config = super().get_config()
config.pop("input_dim", None)
return config
@nice_repr
class Dense(_Dense):
def __init__(self, units: DiscreteValue(128, 1024), **kwargs):
super().__init__(units=units, **kwargs)
@nice_repr
class Conv1D(_Conv1D):
def __init__(
self,
filters: DiscreteValue(2, 8),
kernel_size: CategoricalValue(3, 5, 7),
**kwargs
):
super().__init__(
filters=2 ** filters, kernel_size=kernel_size, padding="causal", **kwargs
)
def get_config(self):
config = super().get_config()
config["filters"] = log2(config["filters"])
config.pop("padding", None)
return config
@nice_repr
class Conv2D(_Conv2D):
def __init__(
self,
filters: DiscreteValue(2, 8),
kernel_size: CategoricalValue(3, 5, 7),
l1: ContinuousValue(0, 1e-3),
l2: ContinuousValue(0, 1e-3),
**kwargs
):
self.l1 = l1
self.l2 = l2
super().__init__(
filters=2 ** filters,
kernel_size=(kernel_size, kernel_size),
kernel_regularizer=regularizers.l1_l2(l1=l1, l2=l2),
padding="same",
data_format="channels_last",
**kwargs
)
def get_config(self):
config = super().get_config()
config["l1"] = self.l1
config["l2"] = self.l2
config["kernel_size"] = self.kernel_size[0]
config["filters"] = log2(config["filters"])
config.pop("kernel_regularizer", None)
config.pop("padding", None)
config.pop("data_format", None)
return config
@nice_repr
class MaxPooling2D(_MaxPooling2D):
def __init__(self, **kwargs):
super().__init__(data_format="channels_last", padding="same", **kwargs)
def get_config(self):
config = super().get_config()
config.pop("data_format", None)
config.pop("padding", None)
return config
@nice_repr
class TimeDistributed(_TimeDistributed):
def __init__(self, layer: Dense, **kwargs):
super().__init__(layer, **kwargs)
@nice_repr
class Dropout(_Dropout):
def __init__(self, rate: ContinuousValue(0, 0.5), **kwargs):
super().__init__(rate=rate, **kwargs)
@nice_repr
class BatchNormalization(_BatchNormalization):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@nice_repr
class Activation(_Activation):
def __init__(
self,
function: CategoricalValue(
"elu",
"selu",
"relu",
"tanh",
"sigmoid",
"hard_sigmoid",
"exponential",
"linear",
),
**kwargs
):
self.function = function
super().__init__(activation=function, **kwargs)
def get_config(self):
config = super().get_config()
config["function"] = config["activation"]
config.pop("activation", None)
return config
@nice_repr
class Flatten(_Flatten):
    """Flatten layer exposed to the search space with default arguments."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
| [
"numpy.log2",
"autogoal.grammar.ContinuousValue",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.regularizers.l1_l2",
"autogoal.grammar.CategoricalValue",
"autogoal.grammar.DiscreteValue"
] | [((7287, 7310), 'numpy.log2', 'log2', (["config['filters']"], {}), "(config['filters'])\n", (7291, 7310), False, 'from numpy import log2\n'), ((8149, 8172), 'numpy.log2', 'log2', (["config['filters']"], {}), "(config['filters'])\n", (8153, 8172), False, 'from numpy import log2\n'), ((1069, 1092), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(32)', '(1024)'], {}), '(32, 1024)\n', (1082, 1092), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((1117, 1170), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (1133, 1170), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((1205, 1258), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (1221, 1258), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((1277, 1300), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (1292, 1300), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((1329, 1352), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (1344, 1352), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((2208, 2231), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(32)', '(1024)'], {}), '(32, 1024)\n', (2221, 2231), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((2256, 2309), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (2272, 2309), 
False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((2344, 2397), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (2360, 2397), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((2416, 2439), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (2431, 2439), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((2468, 2491), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (2483, 2491), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3363, 3410), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""sum"""', '"""mul"""', '"""concat"""', '"""ave"""'], {}), "('sum', 'mul', 'concat', 'ave')\n", (3379, 3410), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3427, 3450), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(32)', '(1024)'], {}), '(32, 1024)\n', (3440, 3450), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3475, 3528), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (3491, 3528), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3563, 3616), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (3579, 3616), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3635, 
3658), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (3650, 3658), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3687, 3710), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (3702, 3710), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((4819, 4866), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""sum"""', '"""mul"""', '"""concat"""', '"""ave"""'], {}), "('sum', 'mul', 'concat', 'ave')\n", (4835, 4866), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((4883, 4906), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(32)', '(1024)'], {}), '(32, 1024)\n', (4896, 4906), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((4931, 4984), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (4947, 4984), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((5019, 5072), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""tanh"""', '"""sigmoid"""', '"""relu"""', '"""linear"""'], {}), "('tanh', 'sigmoid', 'relu', 'linear')\n", (5035, 5072), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((5091, 5114), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (5106, 5114), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((5143, 5166), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (5158, 5166), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, 
DiscreteValue, ContinuousValue\n'), ((6511, 6533), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(32)', '(128)'], {}), '(32, 128)\n', (6524, 6533), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((6809, 6833), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(128)', '(1024)'], {}), '(128, 1024)\n', (6822, 6833), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((6979, 6998), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(2)', '(8)'], {}), '(2, 8)\n', (6992, 6998), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((7021, 7046), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['(3)', '(5)', '(7)'], {}), '(3, 5, 7)\n', (7037, 7046), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((7454, 7473), 'autogoal.grammar.DiscreteValue', 'DiscreteValue', (['(2)', '(8)'], {}), '(2, 8)\n', (7467, 7473), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((7496, 7521), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['(3)', '(5)', '(7)'], {}), '(3, 5, 7)\n', (7512, 7521), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((7535, 7560), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.001)'], {}), '(0, 0.001)\n', (7550, 7560), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((7573, 7598), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.001)'], {}), '(0, 0.001)\n', (7588, 7598), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((8854, 8877), 'autogoal.grammar.ContinuousValue', 'ContinuousValue', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (8869, 8877), False, 'from 
autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((9159, 9262), 'autogoal.grammar.CategoricalValue', 'CategoricalValue', (['"""elu"""', '"""selu"""', '"""relu"""', '"""tanh"""', '"""sigmoid"""', '"""hard_sigmoid"""', '"""exponential"""', '"""linear"""'], {}), "('elu', 'selu', 'relu', 'tanh', 'sigmoid', 'hard_sigmoid',\n 'exponential', 'linear')\n", (9175, 9262), False, 'from autogoal.grammar import BooleanValue, CategoricalValue, DiscreteValue, ContinuousValue\n'), ((3780, 3957), 'tensorflow.keras.layers.LSTM', '_LSTM', ([], {'units': 'units', 'activation': 'activation_fn', 'recurrent_activation': 'recurrent_activation_fn', 'dropout': 'dropout', 'recurrent_dropout': 'recurrent_dropout', 'return_sequences': '(True)'}), '(units=units, activation=activation_fn, recurrent_activation=\n recurrent_activation_fn, dropout=dropout, recurrent_dropout=\n recurrent_dropout, return_sequences=True)\n', (3785, 3957), True, 'from tensorflow.keras.layers import LSTM as _LSTM\n'), ((5236, 5414), 'tensorflow.keras.layers.LSTM', '_LSTM', ([], {'units': 'units', 'activation': 'activation_fn', 'recurrent_activation': 'recurrent_activation_fn', 'dropout': 'dropout', 'recurrent_dropout': 'recurrent_dropout', 'return_sequences': '(False)'}), '(units=units, activation=activation_fn, recurrent_activation=\n recurrent_activation_fn, dropout=dropout, recurrent_dropout=\n recurrent_dropout, return_sequences=False)\n', (5241, 5414), True, 'from tensorflow.keras.layers import LSTM as _LSTM\n'), ((7808, 7840), 'tensorflow.keras.regularizers.l1_l2', 'regularizers.l1_l2', ([], {'l1': 'l1', 'l2': 'l2'}), '(l1=l1, l2=l2)\n', (7826, 7840), False, 'from tensorflow.keras import regularizers\n')] |
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wfa_cardinality_estimation_evaluation_framework.simulations.set_generator."""
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.common.analysis import relative_error
import wfa_cardinality_estimation_evaluation_framework.common.random
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import ExactMultiSet
from wfa_cardinality_estimation_evaluation_framework.estimators.exact_set import LosslessEstimator
from wfa_cardinality_estimation_evaluation_framework.simulations import frequency_set_generator
from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator
# Fixture parameters shared by the parameterized scenarios below.
TEST_UNIVERSE_SIZE = 200  # size of the ID universe generators draw from
TEST_NUM_SETS = 3  # number of sets generated per scenario
TEST_SET_SIZE = 50  # size of each generated set
TEST_SET_SIZE_LIST = [TEST_SET_SIZE] * TEST_NUM_SETS  # one size entry per set
class SetGeneratorTest(parameterized.TestCase):
  """Tests for the set generators in set_generator / frequency_set_generator.

  The cases cover: equivalence between factory-built and directly-built
  generators (same seed must yield the same sets), generated set sizes,
  overlap structure between correlated sets, and constructor error handling.
  """
  @parameterized.parameters(
    (set_generator.IndependentSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE}),
    (set_generator.ExponentialBowSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'user_activity_association': 'independent'}),
    (set_generator.ExponentialBowSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'user_activity_association': 'identical'}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'original', 'correlated_sets': 'all', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'reversed', 'correlated_sets': 'all', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'random', 'correlated_sets': 'all', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'original', 'correlated_sets': 'one', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'reversed', 'correlated_sets': 'one', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'random', 'correlated_sets': 'one', 'shared_prop': 0.2}),
    (set_generator.DisjointSetGenerator, {}),
    (frequency_set_generator.HomogeneousMultiSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'freq_rates': np.ones_like(TEST_SET_SIZE_LIST), 'freq_cap': 2}),
    (frequency_set_generator.HeterogeneousMultiSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'gamma_params': [(1,1) for i in range(TEST_NUM_SETS)],
      'freq_cap': 2})
  )
  def test_set_generator_factory_with_num_and_size_corresponding_to_list(
      self, set_generator_class, kwargs):
    """Test generator_factory_with_num_and_size for partial set_generators.
    These set_generators take set_size_list as an argument. We test whether
    generator_factory_with_num_and_size gives the same result as directly
    specifying set_size_list in set_generator.
    Args:
      set_generator_class: class name of different set_generators.
      kwargs: kwargs expect universe_size, set_size_list and random_state, in
        each set_generator. In this test, universe_size and set_sizes are
        given by the global variables TEST_UNIVERSE_SIZE and TEST_SET_SIZE_LIST.
    """
    factory = set_generator_class.get_generator_factory_with_num_and_size(
        num_sets=TEST_NUM_SETS,
        set_size=TEST_SET_SIZE, **kwargs)
    # Both generators are seeded identically, so they must emit the same sets.
    gen_from_factory = factory(np.random.RandomState(1))
    gen_from_class = set_generator_class(
        set_sizes=TEST_SET_SIZE_LIST,
        **kwargs, random_state=np.random.RandomState(1))
    set_list_gen_from_factory = []
    for ids in gen_from_factory:
      set_list_gen_from_factory.append(list(ids))
    set_list_gen_from_class = []
    for ids in gen_from_class:
      set_list_gen_from_class.append(list(ids))
    self.assertSameElements(set_list_gen_from_factory, set_list_gen_from_class)
  @parameterized.parameters(
    (set_generator.FullyOverlapSetGenerator,
     {'num_sets': 10, 'set_size': 5}),
    (set_generator.SubSetGenerator,
     {'order': 'original', 'num_large_sets': 2, 'num_small_sets': 3,
      'large_set_size': 5, 'small_set_size': 2}),
    (set_generator.SubSetGenerator,
     {'order': 'reversed', 'num_large_sets': 2, 'num_small_sets': 3,
      'large_set_size': 5, 'small_set_size': 2}),
    (set_generator.SubSetGenerator,
     {'order': 'random', 'num_large_sets': 2, 'num_small_sets': 3,
      'large_set_size': 5, 'small_set_size': 2}),
  )
  def test_set_generator_factory_with_num_and_size_just_testing_random_state(
      self, set_generator_class, kwargs):
    """Test generator_factory_with_num_and_size for other set_generators.
    Unlike the previous test, this test is for the set_generators that
    do not take set_size_list as an argument. So here we just test whether
    the random state is correctly processed in the factory.
    Args:
      set_generator_class: class name of different set_generators.
      kwargs: kwargs expect universe_size and random_state in each
        set_generator. In this test, universe_size is given by the global
        variable TEST_UNIVERSE_SIZE.
    """
    factory = set_generator_class.get_generator_factory_with_num_and_size(
        universe_size=TEST_UNIVERSE_SIZE, **kwargs)
    gen_from_factory = factory(np.random.RandomState(1))
    gen_from_class = set_generator_class(
        universe_size=TEST_UNIVERSE_SIZE, **kwargs,
        random_state=np.random.RandomState(1))
    set_list_gen_from_factory = []
    for ids in gen_from_factory:
      set_list_gen_from_factory.append(list(ids))
    set_list_gen_from_class = []
    for ids in gen_from_class:
      set_list_gen_from_class.append(list(ids))
    self.assertSameElements(set_list_gen_from_factory, set_list_gen_from_class)
  @parameterized.parameters(
    (set_generator.IndependentSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE}),
    (set_generator.ExponentialBowSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'user_activity_association': 'independent'}),
    (set_generator.ExponentialBowSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'user_activity_association': 'identical'}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'original', 'correlated_sets': 'all', 'shared_prop': 0.2}),
    (set_generator.SequentiallyCorrelatedSetGenerator,
     {'order': 'original', 'correlated_sets': 'one', 'shared_prop': 0.2}),
    (frequency_set_generator.HomogeneousMultiSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'freq_rates': np.ones_like(TEST_SET_SIZE_LIST), 'freq_cap': 2}),
    (frequency_set_generator.HeterogeneousMultiSetGenerator,
     {'universe_size': TEST_UNIVERSE_SIZE,
      'gamma_params': [(1,1) for i in range(TEST_NUM_SETS)],
      'freq_cap': 2})
  )
  def test_set_generator_factory_with_set_size_list(self, set_generator_class,
                                                    kwargs):
    # Same equivalence check as above, but for the set_size_list factory.
    factory = set_generator_class.get_generator_factory_with_set_size_list(
        set_size_list=TEST_SET_SIZE_LIST, **kwargs)
    gen_from_factory = factory(np.random.RandomState(1))
    gen_from_class = set_generator_class(
        set_sizes=TEST_SET_SIZE_LIST,
        **kwargs, random_state=np.random.RandomState(1))
    set_list_gen_from_factory = []
    for ids in gen_from_factory:
      set_list_gen_from_factory.append(list(ids))
    set_list_gen_from_class = []
    for ids in gen_from_class:
      set_list_gen_from_class.append(list(ids))
    self.assertSameElements(set_list_gen_from_factory, set_list_gen_from_class)
  # --- IndependentSetGenerator: generated set sizes ---
  def test_independent_generator_constructor(self):
    rs = np.random.RandomState(1)
    ind_gen = set_generator.IndependentSetGenerator(
        universe_size=10000, set_sizes=[100] * 5, random_state=rs)
    for campaign_id in ind_gen:
      self.assertLen(campaign_id, 100)
  def test_independent_generator_constructor_different_sizes(self):
    rs = np.random.RandomState(1)
    ind_gen = set_generator.IndependentSetGenerator(
        universe_size=10000, set_sizes=[10, 10, 10, 20, 20],
        random_state=rs)
    set_ids_list = []
    for set_ids in ind_gen:
      set_ids_list.append(set_ids)
    for set_ids in set_ids_list[:3]:
      self.assertLen(set_ids, 10)
    for set_ids in set_ids_list[3:]:
      self.assertLen(set_ids, 20)
  def test_independent_generator_single_universe(self):
    # With a universe of one ID, the only possible member is ID 0.
    rs = np.random.RandomState(1)
    ind_gen = set_generator.IndependentSetGenerator(
        universe_size=1, set_sizes=[1], random_state=rs)
    for campaign_id in ind_gen:
      self.assertLen(campaign_id, 1)
      self.assertEqual(campaign_id[0], 0)
  # --- ExponentialBowSetGenerator: approximate reach sizes ---
  def test_exponential_bow_generator_constructor(self):
    rs = np.random.RandomState(1)
    # Low reach case, actual set size should be close to input set size
    eb_gen = set_generator.ExponentialBowSetGenerator(
        user_activity_association='independent',
        universe_size=10000, set_sizes=[1000] * 5, random_state=rs)
    for set_ids in eb_gen:
      re = relative_error(len(set_ids), 1000)
      self.assertLess(abs(re), 0.01,
                      msg='relative error > 0.01 in the low reach case')
    # High reach case, allow actual size to be more different from input size
    eb_gen = set_generator.ExponentialBowSetGenerator(
        user_activity_association='independent',
        universe_size=10000, set_sizes=[5000] * 5, random_state=rs)
    for set_ids in eb_gen:
      re = relative_error(len(set_ids), 5000)
      self.assertLess(abs(re), 0.2,
                      msg='relative error > 0.2 in the high reach case')
  def test_exponential_bow_generator_constructor_different_sizes(self):
    rs = np.random.RandomState(1)
    # Low reach case, actual set size should be close to input set size
    low_set_size_list = [600, 800, 1000]
    eb_gen = set_generator.ExponentialBowSetGenerator(
        user_activity_association='independent',
        universe_size=10000, set_sizes=low_set_size_list, random_state=rs)
    actual_set_size = iter(low_set_size_list)
    for set_ids in eb_gen:
      re = relative_error(len(set_ids), next(actual_set_size))
      self.assertLess(abs(re), 0.01,
                      msg='relative error > 0.01 in the low reach case')
    # High reach case, allow actual size to be more different from input size
    high_set_size_list = [4000, 5000, 6000]
    eb_gen = set_generator.ExponentialBowSetGenerator(
        user_activity_association='independent',
        universe_size=10000, set_sizes=high_set_size_list, random_state=rs)
    actual_set_size = iter(high_set_size_list)
    for set_ids in eb_gen:
      re = relative_error(len(set_ids), next(actual_set_size))
      self.assertLess(abs(re), 0.2,
                      msg='relative error > 0.2 in the high reach case')
  def test_exponential_bow_generator_raise_error(self):
    rs = np.random.RandomState(1)
    # invalid user_activity_association
    with self.assertRaises(ValueError):
      _ = set_generator.ExponentialBowSetGenerator(
          user_activity_association=0.5,
          universe_size=10000, set_sizes=[1000] * 3, random_state=rs)
    # Two small set size
    with self.assertRaises(ValueError):
      _ = set_generator.ExponentialBowSetGenerator(
          user_activity_association='independent',
          universe_size=10000, set_sizes=[10] * 3, random_state=rs)
  # --- FullyOverlapSetGenerator: every set contains the same IDs ---
  def test_fully_overlap_generator_constructor(self):
    rs = np.random.RandomState(1)
    fo_gen = set_generator.FullyOverlapSetGenerator(
        universe_size=10000, num_sets=5, set_size=100, random_state=rs)
    for set_ids in fo_gen:
      self.assertLen(set_ids, 100)
  def test_fully_overlap_generator_single_universe(self):
    rs = np.random.RandomState(1)
    fo_gen = set_generator.FullyOverlapSetGenerator(
        universe_size=1, num_sets=1, set_size=1, random_state=rs)
    for set_ids in fo_gen:
      self.assertLen(set_ids, 1)
      self.assertEqual(set_ids[0], 0)
  def test_fully_overlap_generator_same_ids(self):
    rs = np.random.RandomState(1)
    fo_gen = set_generator.FullyOverlapSetGenerator(
        universe_size=10, num_sets=10, set_size=5, random_state=rs)
    set_ids_list = []
    for set_ids in fo_gen:
      set_ids_list.append(set_ids)
    # All sets must contain exactly the same members as the first one.
    for set_ids in set_ids_list[1:]:
      self.assertSameElements(set_ids_list[0], set_ids)
      self.assertLen(set_ids, 5)
  # --- SubSetGenerator: large sets followed by contained small sets ---
  def test_subset_generator_constructor_original_order(self):
    rs = np.random.RandomState(1)
    ss_gen = set_generator.SubSetGenerator(
        order='original', universe_size=10000, num_large_sets=2,
        num_small_sets=8, large_set_size=100, small_set_size=50,
        random_state=rs)
    set_ids_list = []
    for set_ids in ss_gen:
      set_ids_list.append(set_ids)
    for set_ids in set_ids_list[:2]:
      self.assertLen(set_ids, 100)
    for set_ids in set_ids_list[2:]:
      self.assertLen(set_ids, 50)
  def test_subset_generator_constructor_reversed_order(self):
    rs = np.random.RandomState(1)
    ss_gen = set_generator.SubSetGenerator(
        order='reversed', universe_size=10000, num_large_sets=2,
        num_small_sets=8, large_set_size=100, small_set_size=50,
        random_state=rs)
    set_ids_list = []
    for set_ids in ss_gen:
      set_ids_list.append(set_ids)
    for set_ids in set_ids_list[:8]:
      self.assertLen(set_ids, 50)
    for set_ids in set_ids_list[8:]:
      self.assertLen(set_ids, 100)
  def test_subset_generator_constructor_random_order(self):
    rs = np.random.RandomState(1)
    ss_gen = set_generator.SubSetGenerator(
        order='random', universe_size=10000, num_large_sets=2,
        num_small_sets=8, large_set_size=100, small_set_size=50,
        random_state=rs)
    set_ids_list = []
    for set_ids in ss_gen:
      set_ids_list.append(set_ids)
    # Order is shuffled, so count sets of each size instead of indexing.
    actual_num_large_sets = 0
    actual_num_small_sets = 0
    for set_ids in set_ids_list:
      if len(set_ids) == 100:
        actual_num_large_sets += 1
      if len(set_ids) == 50:
        actual_num_small_sets += 1
    self.assertEqual(actual_num_large_sets, 2,
                     msg='Number of large sets is not correct.')
    self.assertEqual(actual_num_small_sets, 8,
                     msg='Number of small sets is not correct.')
  def test_subset_generator_single_universe(self):
    rs = np.random.RandomState(1)
    ss_gen = set_generator.SubSetGenerator(
        order='original', universe_size=1, num_large_sets=1,
        num_small_sets=1, large_set_size=1, small_set_size=1,
        random_state=rs)
    for set_ids in ss_gen:
      self.assertLen(set_ids, 1)
      self.assertEqual(set_ids[0], 0)
  def test_subset_generator_same_ids(self):
    rs = np.random.RandomState(1)
    ss_gen = set_generator.SubSetGenerator(
        order='original', universe_size=10, num_large_sets=3,
        num_small_sets=7, large_set_size=5, small_set_size=3,
        random_state=rs)
    set_ids_list = []
    for set_ids in ss_gen:
      set_ids_list.append(set_ids)
    # First 3 sets are the identical large sets, the rest the identical small sets.
    for set_ids in set_ids_list[1:3]:
      self.assertSameElements(set_ids_list[0], set_ids)
      self.assertLen(set_ids, 5)
    for set_ids in set_ids_list[4:]:
      self.assertSameElements(set_ids_list[3], set_ids)
      self.assertLen(set_ids, 3)
  def test_subset_generator_generator_raise_error(self):
    rs = np.random.RandomState(1)
    with self.assertRaises(ValueError):
      _ = set_generator.SubSetGenerator(
          order='not_implemented', universe_size=10, num_large_sets=3,
          num_small_sets=7, large_set_size=5, small_set_size=3,
          random_state=rs)
  # --- SequentiallyCorrelatedSetGenerator ---
  # shared_prop=0.2 of size-10 sets => 2 IDs shared with the correlated set(s).
  def test_sequentially_correlated_all_previous_generator_original(self):
    rs = np.random.RandomState(1)
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order='original', correlated_sets='all',
        shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
    set_ids_list = [set_ids for set_ids in sc_gen]
    previous_set_ids = set(set_ids_list[0])
    for set_ids in set_ids_list[1:]:
      shared_ids = previous_set_ids.intersection(set_ids)
      self.assertLen(shared_ids, 2)
      previous_set_ids.update(set_ids)
  def test_sequentially_correlated_all_previous_generator_different_sizes(self):
    rs = np.random.RandomState(1)
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order='original', correlated_sets='all',
        shared_prop=0.2, set_sizes=[10, 15, 20, 20], random_state=rs)
    # Overlap is shared_prop * current set size: 0.2 * [15, 20, 20].
    expected_overlap_size = iter([3, 4, 4])
    set_ids_list = [set_ids for set_ids in sc_gen]
    previous_set_ids = set(set_ids_list[0])
    for set_ids in set_ids_list[1:]:
      shared_ids = previous_set_ids.intersection(set_ids)
      self.assertLen(shared_ids, next(expected_overlap_size))
      previous_set_ids.update(set_ids)
  def test_sequentially_correlated_all_previous_generator_reversed(self):
    rs = np.random.RandomState(1)
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order='reversed', correlated_sets='all',
        shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
    # Reversed emission order: undo it before checking the correlation chain.
    set_ids_list = [set_ids for set_ids in sc_gen][::-1]
    previous_set_ids = set(set_ids_list[0])
    for set_ids in set_ids_list[1:]:
      shared_ids = previous_set_ids.intersection(set_ids)
      self.assertLen(shared_ids, 2)
      previous_set_ids.update(set_ids)
  def test_sequentially_correlated_one_previous_generator_original(self):
    rs = np.random.RandomState(1)
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order='original', correlated_sets='one',
        shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
    set_ids_list = [set_ids for set_ids in sc_gen]
    previous_set_ids = set(set_ids_list[0])
    union_set_ids = set(set_ids_list[0])
    for set_ids in set_ids_list[1:]:
      # Overlap with the immediately previous set AND with the union is 2,
      # i.e. the sharing comes only from the one previous set.
      self.assertLen(previous_set_ids.intersection(set_ids), 2)
      self.assertLen(union_set_ids.intersection(set_ids), 2)
      previous_set_ids = set(set_ids)
      union_set_ids.update(set_ids)
  def test_sequentially_correlated_one_previous_generator_reversed(self):
    rs = np.random.RandomState(1)
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order='reversed', correlated_sets='one',
        shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
    set_ids_list = [set_ids for set_ids in sc_gen][::-1]
    previous_set_ids = set(set_ids_list[0])
    union_set_ids = set(set_ids_list[0])
    for set_ids in set_ids_list[1:]:
      self.assertLen(previous_set_ids.intersection(set_ids), 2)
      self.assertLen(union_set_ids.intersection(set_ids), 2)
      previous_set_ids = set(set_ids)
      union_set_ids.update(set_ids)
  def test_sequentially_correlated_all_previous_generator_raise_error(self):
    rs = np.random.RandomState(1)
    with self.assertRaises(ValueError):
      _ = set_generator.SequentiallyCorrelatedSetGenerator(
          order='not_implemented', correlated_sets='all',
          shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
    with self.assertRaises(ValueError):
      _ = set_generator.SequentiallyCorrelatedSetGenerator(
          order='random', correlated_sets='not_implemented',
          shared_prop=0.2, set_sizes=[10] * 3, random_state=rs)
  @parameterized.parameters(
    (set_generator.CORRELATED_SETS_ALL,),
    (set_generator.CORRELATED_SETS_ONE,))
  def test_sequentially_correlated_generator_overlap_size_not_enough(
      self, correlation_type):
    # First set has only 1 ID, so the requested 0.5 overlap rounds down to 1.
    rs = np.random.RandomState(1)
    set_sizes = [1, 10]
    sc_gen = set_generator.SequentiallyCorrelatedSetGenerator(
        order=set_generator.ORDER_ORIGINAL, correlated_sets=correlation_type,
        shared_prop=0.5,
        set_sizes=set_sizes,
        random_state=rs)
    set_ids_list = [set_ids for set_ids in sc_gen]
    self.assertLen(set_ids_list[0], set_sizes[0],
                   f'{correlation_type}: First set size not correct.')
    self.assertLen(set_ids_list[1], set_sizes[1],
                   f'{correlation_type}: Second set size not correct.')
    self.assertLen(np.intersect1d(set_ids_list[0], set_ids_list[1]),
                   1,
                   f'{correlation_type}: Overlap set size not correct.')
  def test_disjoint_set_generator(self):
    # Disjoint sets partition consecutive IDs: [0], then [1, 2].
    gen = set_generator.DisjointSetGenerator(set_sizes=[1, 2])
    set_ids_list = [set_ids for set_ids in gen]
    expected = [np.array(ids) for ids in [[0], [1, 2]]]
    self.assertEqual(len(set_ids_list), len(expected))
    for x, y in zip(set_ids_list, expected):
      self.assertTrue(all(x == y))
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
  absltest.main()
| [
"absl.testing.absltest.main",
"wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.IndependentSetGenerator",
"wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator",
"numpy.ones_like",
"wfa_cardinality_estimation_evaluation_framewo... | [((4539, 5088), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["(set_generator.FullyOverlapSetGenerator, {'num_sets': 10, 'set_size': 5})", "(set_generator.SubSetGenerator, {'order': 'original', 'num_large_sets': 2,\n 'num_small_sets': 3, 'large_set_size': 5, 'small_set_size': 2})", "(set_generator.SubSetGenerator, {'order': 'reversed', 'num_large_sets': 2,\n 'num_small_sets': 3, 'large_set_size': 5, 'small_set_size': 2})", "(set_generator.SubSetGenerator, {'order': 'random', 'num_large_sets': 2,\n 'num_small_sets': 3, 'large_set_size': 5, 'small_set_size': 2})"], {}), "((set_generator.FullyOverlapSetGenerator, {\n 'num_sets': 10, 'set_size': 5}), (set_generator.SubSetGenerator, {\n 'order': 'original', 'num_large_sets': 2, 'num_small_sets': 3,\n 'large_set_size': 5, 'small_set_size': 2}), (set_generator.\n SubSetGenerator, {'order': 'reversed', 'num_large_sets': 2,\n 'num_small_sets': 3, 'large_set_size': 5, 'small_set_size': 2}), (\n set_generator.SubSetGenerator, {'order': 'random', 'num_large_sets': 2,\n 'num_small_sets': 3, 'large_set_size': 5, 'small_set_size': 2}))\n", (4563, 5088), False, 'from absl.testing import parameterized\n'), ((19849, 19954), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (['(set_generator.CORRELATED_SETS_ALL,)', '(set_generator.CORRELATED_SETS_ONE,)'], {}), '((set_generator.CORRELATED_SETS_ALL,), (\n set_generator.CORRELATED_SETS_ONE,))\n', (19873, 19954), False, 'from absl.testing import parameterized\n'), ((21175, 21190), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (21188, 21190), False, 'from absl.testing import absltest\n'), ((8334, 8358), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (8355, 8358), True, 'import numpy as np\n'), ((8373, 8473), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.IndependentSetGenerator', 
'set_generator.IndependentSetGenerator', ([], {'universe_size': '(10000)', 'set_sizes': '([100] * 5)', 'random_state': 'rs'}), '(universe_size=10000, set_sizes=[100] *\n 5, random_state=rs)\n', (8410, 8473), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((8628, 8652), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (8649, 8652), True, 'import numpy as np\n'), ((8667, 8779), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.IndependentSetGenerator', 'set_generator.IndependentSetGenerator', ([], {'universe_size': '(10000)', 'set_sizes': '[10, 10, 10, 20, 20]', 'random_state': 'rs'}), '(universe_size=10000, set_sizes=[10, \n 10, 10, 20, 20], random_state=rs)\n', (8704, 8779), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((9085, 9109), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (9106, 9109), True, 'import numpy as np\n'), ((9124, 9214), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.IndependentSetGenerator', 'set_generator.IndependentSetGenerator', ([], {'universe_size': '(1)', 'set_sizes': '[1]', 'random_state': 'rs'}), '(universe_size=1, set_sizes=[1],\n random_state=rs)\n', (9161, 9214), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((9397, 9421), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (9418, 9421), True, 'import numpy as np\n'), ((9507, 9653), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '"""independent"""', 'universe_size': '(10000)', 'set_sizes': '([1000] * 5)', 'random_state': 'rs'}), "(user_activity_association=\n 'independent', universe_size=10000, set_sizes=[1000] * 5, random_state=rs)\n", (9547, 9653), False, 'from 
wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((9940, 10086), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '"""independent"""', 'universe_size': '(10000)', 'set_sizes': '([5000] * 5)', 'random_state': 'rs'}), "(user_activity_association=\n 'independent', universe_size=10000, set_sizes=[5000] * 5, random_state=rs)\n", (9980, 10086), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((10363, 10387), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (10384, 10387), True, 'import numpy as np\n'), ((10514, 10671), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '"""independent"""', 'universe_size': '(10000)', 'set_sizes': 'low_set_size_list', 'random_state': 'rs'}), "(user_activity_association=\n 'independent', universe_size=10000, set_sizes=low_set_size_list,\n random_state=rs)\n", (10554, 10671), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((11061, 11219), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '"""independent"""', 'universe_size': '(10000)', 'set_sizes': 'high_set_size_list', 'random_state': 'rs'}), "(user_activity_association=\n 'independent', universe_size=10000, set_sizes=high_set_size_list,\n random_state=rs)\n", (11101, 11219), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((11540, 11564), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (11561, 11564), True, 'import numpy as np\n'), ((12108, 12132), 
'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (12129, 12132), True, 'import numpy as np\n'), ((12146, 12252), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.FullyOverlapSetGenerator', 'set_generator.FullyOverlapSetGenerator', ([], {'universe_size': '(10000)', 'num_sets': '(5)', 'set_size': '(100)', 'random_state': 'rs'}), '(universe_size=10000, num_sets=5,\n set_size=100, random_state=rs)\n', (12184, 12252), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((12388, 12412), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (12409, 12412), True, 'import numpy as np\n'), ((12426, 12526), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.FullyOverlapSetGenerator', 'set_generator.FullyOverlapSetGenerator', ([], {'universe_size': '(1)', 'num_sets': '(1)', 'set_size': '(1)', 'random_state': 'rs'}), '(universe_size=1, num_sets=1,\n set_size=1, random_state=rs)\n', (12464, 12526), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((12691, 12715), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (12712, 12715), True, 'import numpy as np\n'), ((12729, 12831), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.FullyOverlapSetGenerator', 'set_generator.FullyOverlapSetGenerator', ([], {'universe_size': '(10)', 'num_sets': '(10)', 'set_size': '(5)', 'random_state': 'rs'}), '(universe_size=10, num_sets=10,\n set_size=5, random_state=rs)\n', (12767, 12831), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((13119, 13143), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (13140, 13143), True, 'import numpy as np\n'), ((13157, 13326), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 
'set_generator.SubSetGenerator', ([], {'order': '"""original"""', 'universe_size': '(10000)', 'num_large_sets': '(2)', 'num_small_sets': '(8)', 'large_set_size': '(100)', 'small_set_size': '(50)', 'random_state': 'rs'}), "(order='original', universe_size=10000,\n num_large_sets=2, num_small_sets=8, large_set_size=100, small_set_size=\n 50, random_state=rs)\n", (13186, 13326), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((13642, 13666), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (13663, 13666), True, 'import numpy as np\n'), ((13680, 13849), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 'set_generator.SubSetGenerator', ([], {'order': '"""reversed"""', 'universe_size': '(10000)', 'num_large_sets': '(2)', 'num_small_sets': '(8)', 'large_set_size': '(100)', 'small_set_size': '(50)', 'random_state': 'rs'}), "(order='reversed', universe_size=10000,\n num_large_sets=2, num_small_sets=8, large_set_size=100, small_set_size=\n 50, random_state=rs)\n", (13709, 13849), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((14163, 14187), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (14184, 14187), True, 'import numpy as np\n'), ((14201, 14368), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 'set_generator.SubSetGenerator', ([], {'order': '"""random"""', 'universe_size': '(10000)', 'num_large_sets': '(2)', 'num_small_sets': '(8)', 'large_set_size': '(100)', 'small_set_size': '(50)', 'random_state': 'rs'}), "(order='random', universe_size=10000,\n num_large_sets=2, num_small_sets=8, large_set_size=100, small_set_size=\n 50, random_state=rs)\n", (14230, 14368), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((14976, 15000), 'numpy.random.RandomState', 
'np.random.RandomState', (['(1)'], {}), '(1)\n', (14997, 15000), True, 'import numpy as np\n'), ((15014, 15175), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 'set_generator.SubSetGenerator', ([], {'order': '"""original"""', 'universe_size': '(1)', 'num_large_sets': '(1)', 'num_small_sets': '(1)', 'large_set_size': '(1)', 'small_set_size': '(1)', 'random_state': 'rs'}), "(order='original', universe_size=1,\n num_large_sets=1, num_small_sets=1, large_set_size=1, small_set_size=1,\n random_state=rs)\n", (15043, 15175), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((15345, 15369), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (15366, 15369), True, 'import numpy as np\n'), ((15383, 15545), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 'set_generator.SubSetGenerator', ([], {'order': '"""original"""', 'universe_size': '(10)', 'num_large_sets': '(3)', 'num_small_sets': '(7)', 'large_set_size': '(5)', 'small_set_size': '(3)', 'random_state': 'rs'}), "(order='original', universe_size=10,\n num_large_sets=3, num_small_sets=7, large_set_size=5, small_set_size=3,\n random_state=rs)\n", (15412, 15545), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((15967, 15991), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (15988, 15991), True, 'import numpy as np\n'), ((16319, 16343), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (16340, 16343), True, 'import numpy as np\n'), ((16357, 16509), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""original"""', 'correlated_sets': '"""all"""', 'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), 
"(order='original',\n correlated_sets='all', shared_prop=0.2, set_sizes=[10] * 3, random_state=rs\n )\n", (16405, 16509), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((16874, 16898), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (16895, 16898), True, 'import numpy as np\n'), ((16912, 17071), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""original"""', 'correlated_sets': '"""all"""', 'shared_prop': '(0.2)', 'set_sizes': '[10, 15, 20, 20]', 'random_state': 'rs'}), "(order='original',\n correlated_sets='all', shared_prop=0.2, set_sizes=[10, 15, 20, 20],\n random_state=rs)\n", (16960, 17071), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((17500, 17524), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (17521, 17524), True, 'import numpy as np\n'), ((17538, 17690), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""reversed"""', 'correlated_sets': '"""all"""', 'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(order='reversed',\n correlated_sets='all', shared_prop=0.2, set_sizes=[10] * 3, random_state=rs\n )\n", (17586, 17690), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((18054, 18078), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (18075, 18078), True, 'import numpy as np\n'), ((18092, 18244), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""original"""', 'correlated_sets': '"""one"""', 
'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(order='original',\n correlated_sets='one', shared_prop=0.2, set_sizes=[10] * 3, random_state=rs\n )\n", (18140, 18244), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((18709, 18733), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (18730, 18733), True, 'import numpy as np\n'), ((18747, 18899), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""reversed"""', 'correlated_sets': '"""one"""', 'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(order='reversed',\n correlated_sets='one', shared_prop=0.2, set_sizes=[10] * 3, random_state=rs\n )\n", (18795, 18899), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((19373, 19397), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (19394, 19397), True, 'import numpy as np\n'), ((20073, 20097), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (20094, 20097), True, 'import numpy as np\n'), ((20135, 20317), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': 'set_generator.ORDER_ORIGINAL', 'correlated_sets': 'correlation_type', 'shared_prop': '(0.5)', 'set_sizes': 'set_sizes', 'random_state': 'rs'}), '(order=set_generator.\n ORDER_ORIGINAL, correlated_sets=correlation_type, shared_prop=0.5,\n set_sizes=set_sizes, random_state=rs)\n', (20183, 20317), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((20852, 20904), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.DisjointSetGenerator', 
'set_generator.DisjointSetGenerator', ([], {'set_sizes': '[1, 2]'}), '(set_sizes=[1, 2])\n', (20886, 20904), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((4062, 4086), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (4083, 4086), True, 'import numpy as np\n'), ((5960, 5984), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (5981, 5984), True, 'import numpy as np\n'), ((7799, 7823), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (7820, 7823), True, 'import numpy as np\n'), ((11655, 11790), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '(0.5)', 'universe_size': '(10000)', 'set_sizes': '([1000] * 3)', 'random_state': 'rs'}), '(user_activity_association=0.5,\n universe_size=10000, set_sizes=[1000] * 3, random_state=rs)\n', (11695, 11790), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((11883, 12027), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.ExponentialBowSetGenerator', 'set_generator.ExponentialBowSetGenerator', ([], {'user_activity_association': '"""independent"""', 'universe_size': '(10000)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(user_activity_association=\n 'independent', universe_size=10000, set_sizes=[10] * 3, random_state=rs)\n", (11923, 12027), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((16042, 16211), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SubSetGenerator', 'set_generator.SubSetGenerator', ([], {'order': '"""not_implemented"""', 'universe_size': '(10)', 'num_large_sets': '(3)', 'num_small_sets': '(7)', 'large_set_size': '(5)', 'small_set_size': '(3)', 'random_state': 'rs'}), 
"(order='not_implemented', universe_size=10,\n num_large_sets=3, num_small_sets=7, large_set_size=5, small_set_size=3,\n random_state=rs)\n", (16071, 16211), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((19448, 19607), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""not_implemented"""', 'correlated_sets': '"""all"""', 'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(order='not_implemented',\n correlated_sets='all', shared_prop=0.2, set_sizes=[10] * 3, random_state=rs\n )\n", (19496, 19607), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((19670, 19831), 'wfa_cardinality_estimation_evaluation_framework.simulations.set_generator.SequentiallyCorrelatedSetGenerator', 'set_generator.SequentiallyCorrelatedSetGenerator', ([], {'order': '"""random"""', 'correlated_sets': '"""not_implemented"""', 'shared_prop': '(0.2)', 'set_sizes': '([10] * 3)', 'random_state': 'rs'}), "(order='random',\n correlated_sets='not_implemented', shared_prop=0.2, set_sizes=[10] * 3,\n random_state=rs)\n", (19718, 19831), False, 'from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator\n'), ((20655, 20703), 'numpy.intersect1d', 'np.intersect1d', (['set_ids_list[0]', 'set_ids_list[1]'], {}), '(set_ids_list[0], set_ids_list[1])\n', (20669, 20703), True, 'import numpy as np\n'), ((20969, 20982), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (20977, 20982), True, 'import numpy as np\n'), ((4199, 4223), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (4220, 4223), True, 'import numpy as np\n'), ((2923, 2955), 'numpy.ones_like', 'np.ones_like', (['TEST_SET_SIZE_LIST'], {}), '(TEST_SET_SIZE_LIST)\n', (2935, 2955), True, 'import numpy as np\n'), ((6101, 6125), 
'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (6122, 6125), True, 'import numpy as np\n'), ((7936, 7960), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (7957, 7960), True, 'import numpy as np\n'), ((7250, 7282), 'numpy.ones_like', 'np.ones_like', (['TEST_SET_SIZE_LIST'], {}), '(TEST_SET_SIZE_LIST)\n', (7262, 7282), True, 'import numpy as np\n')] |
from my_pybullet_envs.inmoov_shadow_hand_v2 import InmoovShadowNew
import pybullet as p
import time
import gym, gym.utils.seeding, gym.spaces
import numpy as np
import math
import pickle
import random
from typing import *
import sys
import os
import inspect
currentdir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe()))
)
from ns_vqa_dart.bullet.dash_object import DashObject, DashTable
from ns_vqa_dart.bullet.renderer import BulletRenderer
# from ns_vqa_dart.bullet.state_saver import StateSaver
from ns_vqa_dart.bullet.vision_inference import VisionInference
import ns_vqa_dart.bullet.util
# p = ns_vqa_dart.bullet.util.create_bullet_client(mode="direct")
class InmoovShadowHandPlaceEnvV8(gym.Env):
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 50,
}
    def __init__(
        self,
        renders=False,
        init_noise=True,  # variation during reset
        up=True,
        random_shape=False,
        random_size=True,
        default_box=True,  # if not random shape, false: cylinder as default
        place_floor=False,
        use_gt_6d=True,
        gt_only_init=False,
        grasp_pi_name=None,
        exclude_hard=False,
        vision_skip=3,
        control_skip=4,
        obs_noise=False,  # noisy (imperfect) observation
        use_vision_obs=False,
        save_states=False,
        states_dir=None,  # NOTE(review): currently unused in this constructor
    ):
        """Build the placing environment.

        Loads the saved grasp-policy final states (pickle chosen by
        ``grasp_pi_name``), creates the Inmoov/Shadow robot, takes one
        observation to size the gym spaces, and optionally initializes the
        vision-inference module used as an observation source.

        NOTE(review): ``StateSaver`` is referenced below but its import is
        commented out at the top of this file — confirm it is provided
        elsewhere or re-enable the import.
        """
        # Plain flag copies; see the constructor signature for meanings.
        self.renders = renders
        self.init_noise = init_noise
        self.up = up
        self.random_shape = random_shape
        self.random_size = random_size
        self.default_box = default_box
        self.place_floor = place_floor
        self.use_gt_6d = use_gt_6d
        self.gt_only_init = gt_only_init
        self.exclude_hard = exclude_hard
        self.obs_noise = obs_noise
        self.use_vision_obs = use_vision_obs
        self.save_states = save_states
        # Object-related configurations.
        self.obj_id = None
        self.bottom_obj_id = None
        self.colors = ["red", "blue", "yellow", "green"]
        # Bottom (support) cylinder description; position is set in reset().
        self.btm_object = DashObject(
            shape="cylinder",
            color=None,
            radius=0.05,
            height=0.18,
            position=[0.0, 0.0, 0.0],  # To be overridden.
        )
        self.table_object = DashTable(position=[0.2, 0.2, 0.0])
        top_shape = "box" if self.default_box else "cyl"
        # Vision-related configurations.
        self.vision_skip = vision_skip
        self.vision_counter = 0
        self.state_saver = StateSaver(
            p=p,
            dataset_dir=f"/home/michelle/datasets/delay_{top_shape}_states",
        )
        # If user indicates wanting pose source from vision, initialize vision
        # inference module.
        if self.use_vision_obs:
            self.vision_module = VisionInference(
                # p=p,
                state_saver=self.state_saver,
                # checkpoint_path=f"/home/michelle/outputs/stacking_v002_{top_shape}/checkpoint_best.pt",
                # camera_position=[
                #     -0.2237938867122504,
                #     0.03198004185028341,
                #     0.5425,
                # ],
                checkpoint_path=f"/home/michelle/outputs/stacking_v003/checkpoint_best.pt",
                camera_position=[-0.2237938867122504, 0.0, 0.5425],
                camera_offset=[0.0, self.table_object.position[1], 0.0],
                apply_offset_to_preds=False,
                # html_dir="/home/michelle/html/enjoy_v8_stacking_v002_{top_shape}",
                html_dir=f"/home/michelle/html/enjoy_v8_stacking_v003_{top_shape}",
            )
        # Threshold on the up-vector dot product used to reject "hard"
        # (tilted) init states when exclude_hard is set.
        self.hard_orn_thres = 0.9
        self.obj_mass = 3.5
        self.half_height = -1  # dummy, to be overwritten
        # TODO: hardcoded here
        if grasp_pi_name is None:
            if not random_shape:
                if default_box:
                    self.grasp_pi_name = "0302_box_20_n_80_99"
                else:
                    self.grasp_pi_name = "0302_cyl_4_n_80_100"  # TODO: ball
            else:
                pass  # TODO
        else:
            self.grasp_pi_name = grasp_pi_name
        # self.half_obj_height = 0.065 if self.is_small else 0.09
        self.start_clearance = 0.14
        self.btm_obj_height = 0.18  # always place on larger one
        # Candidate palm yaw angles tried by IK when choosing the init pose.
        self.cand_angles = [
            0.0,
            3.14 / 3,
            6.28 / 3,
            3.14,
            -6.28 / 3,
            -3.14 / 3,
        ]  # TODO: finer grid?
        self.cand_quats = [
            p.getQuaternionFromEuler([0, 0, cand_angle])
            for cand_angle in self.cand_angles
        ]
        self._timeStep = 1.0 / 240.0
        # if self.renders:
        #     p.connect(p.GUI)
        # else:
        #     p.connect(p.DIRECT)
        self.np_random = None
        self.robot = None
        self.viewer = None
        self.timer = 0
        self.control_skip = int(control_skip)
        # shadow hand is 22-5=17dof
        self.action_scale = np.array(
            [0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17
        )
        # Target xy (tx, ty), support-top z (tz), and the noisy actual target.
        self.tx = -1  # dummy
        self.ty = -1  # dummy
        self.tz = -1  # dummy
        self.tx_act = -1
        self.ty_act = -1
        self.desired_obj_pos_final = None
        # Load the saved grasp-policy terminal states used to seed reset().
        self.saved_file = None
        with open(
            os.path.join(
                currentdir,
                "assets/place_init_dist/final_states_"
                + self.grasp_pi_name
                + ".pickle",
            ),
            "rb",
        ) as handle:
            self.saved_file = pickle.load(handle)
        assert self.saved_file is not None
        self.o_pos_pf_ave = self.saved_file["ave_obj_pos_in_palm"]
        self.o_quat_pf_ave = self.saved_file["ave_obj_quat_in_palm"]
        self.o_quat_pf_ave /= np.linalg.norm(
            self.o_quat_pf_ave
        )  # in case not normalized
        self.init_states = self.saved_file["init_states"]  # a list of dicts
        # print(self.o_pos_pf_ave)
        # print(self.o_quat_pf_ave)
        # print(self.init_states[10])
        # print(self.init_states[51])
        # print(self.init_states[89])
        self.seed(
            0
        )  # used once temporarily, will be overwritten outside by env
        self.robot = InmoovShadowNew(
            init_noise=False, timestep=self._timeStep, np_random=self.np_random
        )
        self.state_saver.set_robot_id(self.robot.arm_id)
        # One observation to determine obs_dim; sizes the gym spaces below.
        self.observation = self.getExtendedObservation()
        action_dim = len(self.action_scale)
        self.act = self.action_scale * 0.0
        self.action_space = gym.spaces.Box(
            low=np.array([-1.0] * action_dim),
            high=np.array([+1.0] * action_dim),
        )
        obs_dim = len(self.observation)
        obs_dummy = np.array([1.12234567] * obs_dim)
        self.observation_space = gym.spaces.Box(
            low=-np.inf * obs_dummy, high=np.inf * obs_dummy
        )
        #
        # input("press enter")
def perturb(self, arr, r=0.02):
r = np.abs(r)
return np.copy(
np.array(arr)
+ self.np_random.uniform(low=-r, high=r, size=len(arr))
)
def create_prim_2_grasp(
self, shape, dim, init_xyz, init_quat=(0, 0, 0, 1)
):
# shape: p.GEOM_SPHERE or p.GEOM_BOX or p.GEOM_CYLINDER
# dim: halfExtents (vec3) for box, (radius, length)vec2 for cylinder
# init_xyz vec3 of obj location
visual_shape_id = None
collision_shape_id = None
if shape == p.GEOM_BOX:
visual_shape_id = p.createVisualShape(
shapeType=shape, halfExtents=dim
)
collision_shape_id = p.createCollisionShape(
shapeType=shape, halfExtents=dim
)
elif shape == p.GEOM_CYLINDER:
# visual_shape_id = p.createVisualShape(shapeType=shape, radius=dim[0], length=dim[1])
visual_shape_id = p.createVisualShape(
shape, dim[0], [1, 1, 1], dim[1]
)
# collision_shape_id = p.createCollisionShape(shapeType=shape, radius=dim[0], length=dim[1])
collision_shape_id = p.createCollisionShape(
shape, dim[0], [1, 1, 1], dim[1]
)
elif shape == p.GEOM_SPHERE:
pass
# visual_shape_id = p.createVisualShape(shape, radius=dim[0])
# collision_shape_id = p.createCollisionShape(shape, radius=dim[0])
sid = p.createMultiBody(
baseMass=self.obj_mass,
baseInertialFramePosition=[0, 0, 0],
baseCollisionShapeIndex=collision_shape_id,
baseVisualShapeIndex=visual_shape_id,
basePosition=init_xyz,
baseOrientation=init_quat,
)
return sid
    def reset_robot_object_from_sample(self, state, arm_q):
        """Reset the robot to *arm_q* and spawn the grasped object from a
        saved grasp state *state*.

        Returns False (and spawns nothing) when exclude_hard is set and the
        sampled in-palm orientation is too tilted; True on success.
        """
        # Object pose expressed in the palm frame, optionally perturbed.
        o_pos_pf = state["obj_pos_in_palm"]
        o_quat_pf = state["obj_quat_in_palm"]
        if self.init_noise:
            o_pos_pf = list(self.perturb(o_pos_pf, 0.005))
            o_quat_pf = list(self.perturb(o_quat_pf, 0.005))
        all_fin_q_init = state["all_fin_q"]
        tar_fin_q_init = state["fin_tar_q"]
        self.robot.reset_with_certain_arm_q_finger_states(
            arm_q, all_fin_q_init, tar_fin_q_init
        )
        # World pose of the object = palm pose composed with in-palm pose.
        p_pos, p_quat = self.robot.get_link_pos_quat(self.robot.ee_id)
        o_pos, o_quat = p.multiplyTransforms(
            p_pos, p_quat, o_pos_pf, o_quat_pf
        )
        z_axis, _ = p.multiplyTransforms(
            [0, 0, 0], o_quat, [0, 0, 1], [0, 0, 0, 1]
        )  # R_cl * unitz[0,0,1]
        # Uprightness: dot of the object's up axis with world z.
        rotMetric = np.array(z_axis).dot(np.array([0, 0, 1]))
        # print(rotMetric, rotMetric)
        if self.exclude_hard and rotMetric < self.hard_orn_thres:
            return False
        self.is_box = (
            True if state["obj_shape"] == p.GEOM_BOX else False
        )  # TODO: ball
        self.dim = state["obj_dim"]
        # Box dims are half-extents; cylinder dims are (radius, full length).
        if self.is_box:
            self.half_height = self.dim[-1]
        else:
            self.half_height = self.dim[-1] / 2.0  # TODO: ball
        # `o_pos` is the position of the COM; compute the position of the base
        # because the renderer (below) expects base position.
        base_position = list(o_pos).copy()
        base_position[2] -= self.half_height
        self.obj_id = self.create_prim_2_grasp(
            state["obj_shape"], self.dim, o_pos, o_quat
        )
        self.state_saver.track_object(
            oid=self.obj_id,
            shape="box" if self.is_box else "cylinder",
            radius=self.dim[0],
            height=self.half_height * 2,
        )
        # Randomize object friction for robustness.
        mu_obj = self.np_random.uniform(0.8, 1.2)
        p.changeDynamics(self.obj_id, -1, lateralFriction=mu_obj)
        return True
def get_optimal_init_arm_q(self, desired_obj_pos):
# TODO: desired obj init pos -> should add clearance to z.
# uses (self.o_pos_pf_ave, self.o_quat_pf_ave), so set mean stats to load properly
arm_q = None
cost = 1e30
ref = np.array([0.0] * 3 + [-1.57] + [0.0] * 3)
for ind, cand_quat in enumerate(self.cand_quats):
p_pos_of_ave, p_quat_of_ave = p.invertTransform(
self.o_pos_pf_ave, self.o_quat_pf_ave
)
p_pos, p_quat = p.multiplyTransforms(
desired_obj_pos, cand_quat, p_pos_of_ave, p_quat_of_ave
)
cand_arm_q = self.robot.solve_arm_IK(p_pos, p_quat)
if cand_arm_q is not None:
this_cost = np.sum(
np.abs(np.array(cand_arm_q) - ref)
) # change to l1
if this_cost < cost:
arm_q = cand_arm_q
cost = this_cost
return arm_q
def sample_valid_arm_q(self):
self.tz = self.btm_obj_height if not self.place_floor else 0.0
while True:
if self.up:
self.tx = self.np_random.uniform(low=0, high=0.3)
self.ty = self.np_random.uniform(low=-0.1, high=0.5)
# self.tx = self.np_random.uniform(low=0, high=0.2)
# self.ty = self.np_random.uniform(low=-0.2, high=0.0)
else:
self.tx = 0.0
self.ty = 0.0
desired_obj_pos = [
self.tx,
self.ty,
self.start_clearance + self.tz,
]
self.desired_obj_pos_final = [
self.tx,
self.ty,
self.half_height + self.tz,
]
arm_q = self.get_optimal_init_arm_q(desired_obj_pos)
if arm_q is None:
continue
else:
return arm_q
    def reset(self):
        """Rebuild the whole simulation for a new episode and return the
        initial observation as a numpy array.

        Order matters: physics params -> robot -> target/IK sampling ->
        grasped object from a saved state -> support object(s) -> one
        physics step -> initial pose bookkeeping.
        """
        p.resetSimulation()
        p.setPhysicsEngineParameter(numSolverIterations=200)
        p.setTimeStep(self._timeStep)
        p.setGravity(0, 0, -10)
        self.timer = 0
        self.vision_counter = 0
        self.state_saver.reset()
        self.robot = InmoovShadowNew(
            init_noise=False, timestep=self._timeStep, np_random=self.np_random
        )
        self.state_saver.set_robot_id(self.robot.arm_id)
        arm_q = self.sample_valid_arm_q()  # reset done during solving IK
        # Keep sampling saved grasp states until one passes the
        # exclude_hard orientation filter.
        init_done = False
        while not init_done:
            init_state = self.sample_init_state()
            init_done = self.reset_robot_object_from_sample(init_state, arm_q)
        if self.place_floor:
            # Place directly on the (fixed) tabletop.
            self.bottom_obj_id = p.loadURDF(
                os.path.join(currentdir, "assets/tabletop.urdf"),
                self.table_object.position,
                useFixedBase=1,
            )
            mu_f = self.np_random.uniform(0.8, 1.2)
            p.changeDynamics(self.bottom_obj_id, -1, lateralFriction=mu_f)
        else:
            # Place on a free-standing bottom cylinder; its actual xy may be
            # perturbed relative to the commanded target (tx, ty).
            self.tx_act = self.tx
            self.ty_act = self.ty
            if self.init_noise:
                self.tx_act += self.np_random.uniform(low=-0.015, high=0.015)
                self.ty_act += self.np_random.uniform(low=-0.015, high=0.015)
            com_position = np.array([self.tx_act, self.ty_act, self.tz / 2.0])
            self.btm_object.position = [com_position[0], com_position[1], 0.0]
            self.btm_object.color = random.choice(self.colors)
            self.bottom_obj_id = p.loadURDF(
                os.path.join(currentdir, "assets/cylinder.urdf"),
                com_position,
                useFixedBase=0,
            )
            self.floor_id = p.loadURDF(
                os.path.join(currentdir, "assets/tabletop.urdf"),
                self.table_object.position,
                useFixedBase=1,
            )
            self.btm_object.oid = self.bottom_obj_id
            self.state_saver.track_object(
                oid=self.bottom_obj_id,
                shape="cylinder",
                radius=self.btm_object.radius,
                height=self.btm_object.height,
            )
            # Randomize frictions of floor and bottom object independently.
            mu_f = self.np_random.uniform(0.8, 1.2)
            mu_b = self.np_random.uniform(0.8, 1.2)
            p.changeDynamics(self.bottom_obj_id, -1, lateralFriction=mu_b)
            p.changeDynamics(self.floor_id, -1, lateralFriction=mu_f)
        p.stepSimulation()  # TODO
        # init obj pose
        self.t_pos, t_orn = p.getBasePositionAndOrientation(self.obj_id)
        self.b_pos, b_orn = p.getBasePositionAndOrientation(self.bottom_obj_id)
        self.t_up = self.orn_to_upv(orn=t_orn)
        self.b_up = self.orn_to_upv(orn=b_orn)
        # Remember last poses (used by the delayed/vision observation path).
        self.last_t_pos, self.last_t_up = self.t_pos, self.t_up
        self.last_b_pos, self.last_b_up = self.b_pos, self.b_up
        self.observation = self.getExtendedObservation()
        return np.array(self.observation)
def sample_init_state(self):
ran_ind = int(
self.np_random.uniform(low=0, high=len(self.init_states) - 0.1)
)
return self.init_states[ran_ind]
    def __del__(self):
        # Best-effort cleanup: disconnect this client from the pybullet
        # physics server when the env object is garbage collected.
        p.disconnect()
    def step(self, action):
        """Apply *action* for control_skip physics steps and compute the
        shaped placing reward.

        Returns (obs, reward, done, info); done is always False here (the
        episode length is managed externally).

        Reward terms: object uprightness, xy distance to the final target,
        low object velocity, meaningful contact force on the support, low
        support-object motion, small distance to the robot's joint targets,
        few hand links in contact, plus bonuses when close to a clean place.
        """
        for _ in range(self.control_skip):
            # action is not in -1,1
            if action is not None:
                # action = np.clip(np.array(action), -1, 1)  # TODO
                self.act = action
                self.robot.apply_action(self.act * self.action_scale)
            p.stepSimulation()
            if self.renders:
                time.sleep(self._timeStep * 0.5)
            self.timer += 1
        # if upright, reward
        # if no contact, reward
        # if use small force, reward
        reward = 0.0
        clPos, clQuat = p.getBasePositionAndOrientation(self.obj_id)
        clVels = p.getBaseVelocity(self.obj_id)
        clLinV = np.array(clVels[0])
        clAngV = np.array(clVels[1])
        # we only care about the upright(z) direction
        z_axis, _ = p.multiplyTransforms(
            [0, 0, 0], clQuat, [0, 0, 1], [0, 0, 0, 1]
        )  # R_cl * unitz[0,0,1]
        rotMetric = np.array(z_axis).dot(np.array([0, 0, 1]))
        # enlarge 0.15 -> 0.45
        # xyzMetric = 1 - (np.minimum(np.linalg.norm(np.array(self.desired_obj_pos_final) - np.array(clPos)), 0.45) / 0.15)
        # TODO:tmp change to xy metric, allow it to free drop
        xyzMetric = 1 - (
            np.minimum(
                np.linalg.norm(
                    np.array(self.desired_obj_pos_final[:2])
                    - np.array(clPos[:2])
                ),
                0.45,
            )
            / 0.15
        )
        linV_R = np.linalg.norm(clLinV)
        angV_R = np.linalg.norm(clAngV)
        velMetric = 1 - np.minimum(linV_R + angV_R / 2.0, 5.0) / 5.0
        # Uprightness bonus only kicks in above rotMetric = 0.75.
        reward += np.maximum(rotMetric * 20 - 15, 0.0)
        # print(np.maximum(rotMetric * 20 - 15, 0.))
        reward += xyzMetric * 5
        # print(xyzMetric * 5)
        reward += velMetric * 5
        # print(velMetric * 5)
        # Sum normal contact force (index 9) between object and support.
        total_nf = 0
        cps_floor = p.getContactPoints(self.obj_id, self.bottom_obj_id, -1, -1)
        for cp in cps_floor:
            total_nf += cp[9]
        if np.abs(total_nf) > (
            self.obj_mass * 4.0
        ):  # mg # TODO:tmp contact force hack
            meaningful_c = True
            reward += 5.0
        else:
            meaningful_c = False
        # # reward += np.abs(total_nf) / 10.
        # not used when placing on floor
        # Penalize any motion of the support object (knocking it over).
        btm_vels = p.getBaseVelocity(self.bottom_obj_id)
        btm_linv = np.array(btm_vels[0])
        btm_angv = np.array(btm_vels[1])
        reward += (
            np.maximum(
                -np.linalg.norm(btm_linv) - np.linalg.norm(btm_angv), -10.0
            )
            * 0.3
        )
        # print(np.maximum(-np.linalg.norm(btm_linv) - np.linalg.norm(btm_angv), -10.0) * 0.3)
        # Reward tracking the robot's own joint targets.
        diff_norm = self.robot.get_norm_diff_tar()
        reward += 15.0 / (diff_norm + 1.0)
        # print(15. / (diff_norm + 1.))
        # Count hand links NOT touching the object; fewer contacts is better.
        anyHandContact = False
        hand_r = 0
        for i in range(self.robot.ee_id, p.getNumJoints(self.robot.arm_id)):
            cps = p.getContactPoints(self.obj_id, self.robot.arm_id, -1, i)
            if len(cps) == 0:
                hand_r += 1.0  # the fewer links in contact, the better
            else:
                anyHandContact = True
        # print(hand_r)
        reward += hand_r - 15
        if (
            rotMetric > 0.9
            and xyzMetric > 0.8
            and velMetric > 0.8
            and meaningful_c
        ):  # close to placing
            reward += 5.0
            # print("upright")
            if not anyHandContact:
                reward += 20
                # print("no hand con")
        # print("r_total", reward)
        obs = self.getExtendedObservation()
        if self.save_states:
            self.state_saver.save()
        return obs, reward, False, {}
def obj6DtoObs_UpVec(self, o_pos, o_orn):
objObs = []
o_pos = np.array(o_pos)
if self.up: # TODO: center o_pos
if self.obs_noise:
o_pos -= [self.tx, self.ty, 0]
else:
o_pos -= [self.tx_act, self.ty_act, 0]
# TODO: scale up since we do not have obs normalization
if self.obs_noise:
o_pos = self.perturb(o_pos, r=0.02) * 3.0
else:
o_pos = o_pos * 3.0
o_upv = self.orn_to_upv(orn=o_orn)
if self.obs_noise:
o_upv = self.perturb(o_upv, r=0.03)
else:
o_upv = o_upv
objObs.extend(list(self.perturb(o_pos)))
objObs.extend(list(self.perturb(o_upv)))
# o_pos = o_pos * 3.0
# o_rotmat = np.array(p.getMatrixFromQuaternion(o_orn))
# o_upv = [o_rotmat[2], o_rotmat[5], o_rotmat[8]]
#
# objObs.extend(list(self.perturb(o_pos, r=0.10)))
# objObs.extend(list(self.perturb(o_pos, r=0.10)))
# objObs.extend(list(self.perturb(o_upv, r=0.04)))
# objObs.extend(list(self.perturb(o_upv, r=0.04)))
return objObs
def obj_pos_up_to_obs(self, pos, upv):
objObs = []
pos = np.array(pos)
if self.up: # TODO: center pos
if self.obs_noise:
pos -= [self.tx, self.ty, 0]
else:
pos -= [self.tx_act, self.ty_act, 0]
# TODO: scale up since we do not have obs normalization
if self.obs_noise:
pos = self.perturb(pos, r=0.02) * 3.0
else:
pos = pos * 3.0
if self.obs_noise:
upv = self.perturb(upv, r=0.03)
objObs.extend(list(self.perturb(pos)))
objObs.extend(list(self.perturb(upv)))
# o_pos = o_pos * 3.0
# o_rotmat = np.array(p.getMatrixFromQuaternion(o_orn))
# o_upv = [o_rotmat[2], o_rotmat[5], o_rotmat[8]]
#
# objObs.extend(list(self.perturb(o_pos, r=0.10)))
# objObs.extend(list(self.perturb(o_pos, r=0.10)))
# objObs.extend(list(self.perturb(o_upv, r=0.04)))
# objObs.extend(list(self.perturb(o_upv, r=0.04)))
return objObs
def orn_to_upv(self, orn: List[float]) -> List[float]:
rotmat = np.array(p.getMatrixFromQuaternion(orn))
upv = [rotmat[2], rotmat[5], rotmat[8]]
return upv
# change to tar pos fin pos diff
# change to tx ty diff
    def getExtendedObservation(self):
        """Assemble the full policy observation vector.

        Concatenates: robot proprioception (diff to target), one +/-1
        contact flag per hand link, the target xy (plus half-height
        estimate when size is randomized), and — when ``use_gt_6d`` —
        pose encodings of the object (and of the bottom object when
        stacking), with simulated vision delay / low update frequency.

        Returns:
            list: the observation, also stored in ``self.observation``.
        """
        self.observation = self.robot.get_robot_observation(diff_tar=True)
        # Per-link contact flags for links from the end-effector onward:
        # 1.0 if the link touches anything other than the robot itself.
        curContact = []
        for i in range(self.robot.ee_id, p.getNumJoints(self.robot.arm_id)):
            cps = p.getContactPoints(bodyA=self.robot.arm_id, linkIndexA=i)
            con_this_link = False
            for cp in cps:
                if cp[1] != cp[2]:  # not self-collision of the robot
                    con_this_link = True
                    break
            if con_this_link:
                curContact.extend([1.0])
            else:
                curContact.extend([-1.0])
        self.observation.extend(curContact)
        if self.up:
            xy = np.array([self.tx, self.ty])
            self.observation.extend(list(xy))
            # NOTE(review): the target xy is appended a second time below
            # (noisy copy, or the actual target when noise is off) — looks
            # like a deliberate duplicated feature, but confirm.
            if self.obs_noise:
                self.observation.extend(list(xy))
            else:
                self.observation.extend([self.tx_act, self.ty_act])
            if self.random_size:
                # Height estimate gets +/-1cm observation noise when enabled.
                if self.obs_noise:
                    self.half_height_est = (
                        self.half_height
                        + self.np_random.uniform(low=-0.01, high=0.01)
                    )
                else:
                    self.half_height_est = self.half_height
                self.observation.extend([self.half_height_est])
        # TODO: if random_shape
        if self.use_gt_6d:
            self.vision_counter += 1
            oid2pose = self.get_pose_for_oids(
                oids=[self.obj_id, self.bottom_obj_id]
            )
            pos = oid2pose[self.obj_id]["position"]
            upv = oid2pose[self.obj_id]["up_vector"]
            if self.obj_id is not None:
                if self.gt_only_init:
                    # Use the pose captured at episode start only.
                    pos, upv = self.t_pos, self.t_up
                else:
                    # model both delayed and low-freq vision input
                    # every vision_skip steps, update cur 6D
                    # but feed policy with last-time updated 6D
                    if self.vision_counter % self.vision_skip == 0:
                        self.last_t_pos, self.last_t_up = (
                            self.t_pos,
                            self.t_up,
                        )
                        self.t_pos = pos
                        self.t_up = upv
                    pos, upv = self.last_t_pos, self.last_t_up
            self.observation.extend(self.obj_pos_up_to_obs(pos, upv))
            # if stacking & real-time, include bottom 6D
            if not self.place_floor and not self.gt_only_init:
                pos = oid2pose[self.bottom_obj_id]["position"]
                upv = oid2pose[self.bottom_obj_id]["up_vector"]
                if self.bottom_obj_id is not None:
                    # model both delayed and low-freq vision input
                    # every vision_skip steps, update cur 6D
                    # but feed policy with last-time updated 6D
                    if self.vision_counter % self.vision_skip == 0:
                        self.last_b_pos, self.last_b_up = (
                            self.b_pos,
                            self.b_up,
                        )
                        bottom_pose = oid2pose[self.bottom_obj_id]
                        self.b_pos = bottom_pose["position"]
                        self.b_up = bottom_pose["up_vector"]
                    pos, upv = self.last_b_pos, self.last_b_up
                self.observation.extend(self.obj_pos_up_to_obs(pos, upv))
        return self.observation
def get_pose_for_oids(
self, oids: List[int]
) -> Tuple[List[float], List[float]]:
"""Gets the observations for an object.
Args:
oids: Object IDs to get pose for.
Returns:
oid2pose: A dictionary mapping from oid to pose.
"""
oid2pose = {}
loaded_oids = []
for oid in oids:
if oid is None:
oid2pose[oid] = {
"position": [0, 0, 0],
"up_vector": self.orn_to_upv(orn=[0, 0, 0, 1]),
}
else:
loaded_oids.append(oid)
if self.use_vision_obs:
if len(self.state_saver.oid2attr) == 0:
for oid in loaded_oids:
oid2pose[oid] = {
"position": [0, 0, 0],
"up_vector": self.orn_to_upv(orn=[0, 0, 0, 1]),
}
else:
odicts = self.vision_module.predict(client_oids=loaded_oids)
for i in range(len(loaded_oids)):
oid = loaded_oids[i]
odict = odicts[i]
oid2pose[oid] = {
"position": odict["position"],
"up_vector": odict["up_vector"],
}
else:
for oid in loaded_oids:
pos, orn = p.getBasePositionAndOrientation(oid)
up_vector = self.orn_to_upv(orn=orn)
oid2pose[oid] = {"position": pos, "up_vector": up_vector}
return oid2pose
def seed(self, seed=None):
random.seed(seed)
self.np_random, seed = gym.utils.seeding.np_random(seed)
if self.robot is not None:
self.robot.np_random = (
self.np_random
) # use the same np_randomizer for robot as for env
return [seed]
def getSourceCode(self):
s = inspect.getsource(type(self))
s = s + inspect.getsource(type(self.robot))
return s
if __name__ == "__main__":
env = InmoovShadowHandPlaceEnvV8()
p.setPhysicsEngineParameter(numSolverIterations=200)
env.seed(303)
for _ in range(20):
env.reset()
env.robot.tar_fin_q = env.robot.get_q_dq(env.robot.fin_actdofs)[0]
for test_t in range(300):
thumb_pose = [
-0.84771132,
0.60768666,
-0.13419822,
0.52214954,
0.25141182,
]
open_up_q = np.array([0.0, 0.0, 0.0] * 4 + thumb_pose)
devi = open_up_q - env.robot.get_q_dq(env.robot.fin_actdofs)[0]
if test_t < 200:
env.robot.apply_action(
np.array([0.0] * 7 + list(devi / 150.0))
)
p.stepSimulation()
# input("press enter")
if env.renders:
time.sleep(env._timeStep * 2.0)
print(env.robot.get_q_dq(env.robot.fin_actdofs))
# input("press enter")
p.disconnect()
| [
"my_pybullet_envs.inmoov_shadow_hand_v2.InmoovShadowNew",
"numpy.abs",
"pybullet.resetSimulation",
"numpy.maximum",
"pybullet.createVisualShape",
"ns_vqa_dart.bullet.vision_inference.VisionInference",
"pickle.load",
"numpy.linalg.norm",
"pybullet.getBaseVelocity",
"os.path.join",
"pybullet.creat... | [((27951, 28003), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': '(200)'}), '(numSolverIterations=200)\n', (27978, 28003), True, 'import pybullet as p\n'), ((28879, 28893), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (28891, 28893), True, 'import pybullet as p\n'), ((2103, 2200), 'ns_vqa_dart.bullet.dash_object.DashObject', 'DashObject', ([], {'shape': '"""cylinder"""', 'color': 'None', 'radius': '(0.05)', 'height': '(0.18)', 'position': '[0.0, 0.0, 0.0]'}), "(shape='cylinder', color=None, radius=0.05, height=0.18, position\n =[0.0, 0.0, 0.0])\n", (2113, 2200), False, 'from ns_vqa_dart.bullet.dash_object import DashObject, DashTable\n'), ((2316, 2351), 'ns_vqa_dart.bullet.dash_object.DashTable', 'DashTable', ([], {'position': '[0.2, 0.2, 0.0]'}), '(position=[0.2, 0.2, 0.0])\n', (2325, 2351), False, 'from ns_vqa_dart.bullet.dash_object import DashObject, DashTable\n'), ((5043, 5119), 'numpy.array', 'np.array', (['([0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17)'], {}), '([0.009 / self.control_skip] * 7 + [0.024 / self.control_skip] * 17)\n', (5051, 5119), True, 'import numpy as np\n'), ((5865, 5899), 'numpy.linalg.norm', 'np.linalg.norm', (['self.o_quat_pf_ave'], {}), '(self.o_quat_pf_ave)\n', (5879, 5899), True, 'import numpy as np\n'), ((6337, 6426), 'my_pybullet_envs.inmoov_shadow_hand_v2.InmoovShadowNew', 'InmoovShadowNew', ([], {'init_noise': '(False)', 'timestep': 'self._timeStep', 'np_random': 'self.np_random'}), '(init_noise=False, timestep=self._timeStep, np_random=self.\n np_random)\n', (6352, 6426), False, 'from my_pybullet_envs.inmoov_shadow_hand_v2 import InmoovShadowNew\n'), ((6855, 6887), 'numpy.array', 'np.array', (['([1.12234567] * obs_dim)'], {}), '([1.12234567] * obs_dim)\n', (6863, 6887), True, 'import numpy as np\n'), ((6921, 6985), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(-np.inf * obs_dummy)', 'high': '(np.inf * obs_dummy)'}), 
'(low=-np.inf * obs_dummy, high=np.inf * obs_dummy)\n', (6935, 6985), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((7098, 7107), 'numpy.abs', 'np.abs', (['r'], {}), '(r)\n', (7104, 7107), True, 'import numpy as np\n'), ((8544, 8764), 'pybullet.createMultiBody', 'p.createMultiBody', ([], {'baseMass': 'self.obj_mass', 'baseInertialFramePosition': '[0, 0, 0]', 'baseCollisionShapeIndex': 'collision_shape_id', 'baseVisualShapeIndex': 'visual_shape_id', 'basePosition': 'init_xyz', 'baseOrientation': 'init_quat'}), '(baseMass=self.obj_mass, baseInertialFramePosition=[0, 0, \n 0], baseCollisionShapeIndex=collision_shape_id, baseVisualShapeIndex=\n visual_shape_id, basePosition=init_xyz, baseOrientation=init_quat)\n', (8561, 8764), True, 'import pybullet as p\n'), ((9460, 9516), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['p_pos', 'p_quat', 'o_pos_pf', 'o_quat_pf'], {}), '(p_pos, p_quat, o_pos_pf, o_quat_pf)\n', (9480, 9516), True, 'import pybullet as p\n'), ((9560, 9624), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', 'o_quat', '[0, 0, 1]', '[0, 0, 0, 1]'], {}), '([0, 0, 0], o_quat, [0, 0, 1], [0, 0, 0, 1])\n', (9580, 9624), True, 'import pybullet as p\n'), ((10769, 10826), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.obj_id', '(-1)'], {'lateralFriction': 'mu_obj'}), '(self.obj_id, -1, lateralFriction=mu_obj)\n', (10785, 10826), True, 'import pybullet as p\n'), ((11117, 11158), 'numpy.array', 'np.array', (['([0.0] * 3 + [-1.57] + [0.0] * 3)'], {}), '([0.0] * 3 + [-1.57] + [0.0] * 3)\n', (11125, 11158), True, 'import numpy as np\n'), ((12839, 12858), 'pybullet.resetSimulation', 'p.resetSimulation', ([], {}), '()\n', (12856, 12858), True, 'import pybullet as p\n'), ((12867, 12919), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'numSolverIterations': '(200)'}), '(numSolverIterations=200)\n', (12894, 12919), True, 'import pybullet as p\n'), ((12928, 12957), 'pybullet.setTimeStep', 
'p.setTimeStep', (['self._timeStep'], {}), '(self._timeStep)\n', (12941, 12957), True, 'import pybullet as p\n'), ((12966, 12989), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (12978, 12989), True, 'import pybullet as p\n'), ((13101, 13190), 'my_pybullet_envs.inmoov_shadow_hand_v2.InmoovShadowNew', 'InmoovShadowNew', ([], {'init_noise': '(False)', 'timestep': 'self._timeStep', 'np_random': 'self.np_random'}), '(init_noise=False, timestep=self._timeStep, np_random=self.\n np_random)\n', (13116, 13190), False, 'from my_pybullet_envs.inmoov_shadow_hand_v2 import InmoovShadowNew\n'), ((15298, 15316), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (15314, 15316), True, 'import pybullet as p\n'), ((15378, 15422), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.obj_id'], {}), '(self.obj_id)\n', (15409, 15422), True, 'import pybullet as p\n'), ((15451, 15502), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.bottom_obj_id'], {}), '(self.bottom_obj_id)\n', (15482, 15502), True, 'import pybullet as p\n'), ((15798, 15824), 'numpy.array', 'np.array', (['self.observation'], {}), '(self.observation)\n', (15806, 15824), True, 'import numpy as np\n'), ((16041, 16055), 'pybullet.disconnect', 'p.disconnect', ([], {}), '()\n', (16053, 16055), True, 'import pybullet as p\n'), ((16654, 16698), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.obj_id'], {}), '(self.obj_id)\n', (16685, 16698), True, 'import pybullet as p\n'), ((16716, 16746), 'pybullet.getBaseVelocity', 'p.getBaseVelocity', (['self.obj_id'], {}), '(self.obj_id)\n', (16733, 16746), True, 'import pybullet as p\n'), ((16764, 16783), 'numpy.array', 'np.array', (['clVels[0]'], {}), '(clVels[0])\n', (16772, 16783), True, 'import numpy as np\n'), ((16801, 16820), 'numpy.array', 'np.array', (['clVels[1]'], {}), '(clVels[1])\n', (16809, 16820), True, 'import numpy 
as np\n'), ((16896, 16960), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['[0, 0, 0]', 'clQuat', '[0, 0, 1]', '[0, 0, 0, 1]'], {}), '([0, 0, 0], clQuat, [0, 0, 1], [0, 0, 0, 1])\n', (16916, 16960), True, 'import pybullet as p\n'), ((17572, 17594), 'numpy.linalg.norm', 'np.linalg.norm', (['clLinV'], {}), '(clLinV)\n', (17586, 17594), True, 'import numpy as np\n'), ((17612, 17634), 'numpy.linalg.norm', 'np.linalg.norm', (['clAngV'], {}), '(clAngV)\n', (17626, 17634), True, 'import numpy as np\n'), ((17723, 17759), 'numpy.maximum', 'np.maximum', (['(rotMetric * 20 - 15)', '(0.0)'], {}), '(rotMetric * 20 - 15, 0.0)\n', (17733, 17759), True, 'import numpy as np\n'), ((17981, 18040), 'pybullet.getContactPoints', 'p.getContactPoints', (['self.obj_id', 'self.bottom_obj_id', '(-1)', '(-1)'], {}), '(self.obj_id, self.bottom_obj_id, -1, -1)\n', (17999, 18040), True, 'import pybullet as p\n'), ((18433, 18470), 'pybullet.getBaseVelocity', 'p.getBaseVelocity', (['self.bottom_obj_id'], {}), '(self.bottom_obj_id)\n', (18450, 18470), True, 'import pybullet as p\n'), ((18490, 18511), 'numpy.array', 'np.array', (['btm_vels[0]'], {}), '(btm_vels[0])\n', (18498, 18511), True, 'import numpy as np\n'), ((18531, 18552), 'numpy.array', 'np.array', (['btm_vels[1]'], {}), '(btm_vels[1])\n', (18539, 18552), True, 'import numpy as np\n'), ((19956, 19971), 'numpy.array', 'np.array', (['o_pos'], {}), '(o_pos)\n', (19964, 19971), True, 'import numpy as np\n'), ((21114, 21127), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (21122, 21127), True, 'import numpy as np\n'), ((27465, 27482), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (27476, 27482), False, 'import random\n'), ((27514, 27547), 'gym.utils.seeding.np_random', 'gym.utils.seeding.np_random', (['seed'], {}), '(seed)\n', (27541, 27547), False, 'import gym, gym.utils.seeding, gym.spaces\n'), ((327, 349), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (347, 349), False, 'import inspect\n'), 
((2837, 3181), 'ns_vqa_dart.bullet.vision_inference.VisionInference', 'VisionInference', ([], {'state_saver': 'self.state_saver', 'checkpoint_path': 'f"""/home/michelle/outputs/stacking_v003/checkpoint_best.pt"""', 'camera_position': '[-0.2237938867122504, 0.0, 0.5425]', 'camera_offset': '[0.0, self.table_object.position[1], 0.0]', 'apply_offset_to_preds': '(False)', 'html_dir': 'f"""/home/michelle/html/enjoy_v8_stacking_v003_{top_shape}"""'}), "(state_saver=self.state_saver, checkpoint_path=\n f'/home/michelle/outputs/stacking_v003/checkpoint_best.pt',\n camera_position=[-0.2237938867122504, 0.0, 0.5425], camera_offset=[0.0,\n self.table_object.position[1], 0.0], apply_offset_to_preds=False,\n html_dir=f'/home/michelle/html/enjoy_v8_stacking_v003_{top_shape}')\n", (2852, 3181), False, 'from ns_vqa_dart.bullet.vision_inference import VisionInference\n'), ((4578, 4622), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, cand_angle]'], {}), '([0, 0, cand_angle])\n', (4602, 4622), True, 'import pybullet as p\n'), ((5635, 5654), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (5646, 5654), False, 'import pickle\n'), ((7640, 7693), 'pybullet.createVisualShape', 'p.createVisualShape', ([], {'shapeType': 'shape', 'halfExtents': 'dim'}), '(shapeType=shape, halfExtents=dim)\n', (7659, 7693), True, 'import pybullet as p\n'), ((7757, 7813), 'pybullet.createCollisionShape', 'p.createCollisionShape', ([], {'shapeType': 'shape', 'halfExtents': 'dim'}), '(shapeType=shape, halfExtents=dim)\n', (7779, 7813), True, 'import pybullet as p\n'), ((9711, 9730), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (9719, 9730), True, 'import numpy as np\n'), ((11259, 11315), 'pybullet.invertTransform', 'p.invertTransform', (['self.o_pos_pf_ave', 'self.o_quat_pf_ave'], {}), '(self.o_pos_pf_ave, self.o_quat_pf_ave)\n', (11276, 11315), True, 'import pybullet as p\n'), ((11374, 11451), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', 
(['desired_obj_pos', 'cand_quat', 'p_pos_of_ave', 'p_quat_of_ave'], {}), '(desired_obj_pos, cand_quat, p_pos_of_ave, p_quat_of_ave)\n', (11394, 11451), True, 'import pybullet as p\n'), ((13821, 13883), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.bottom_obj_id', '(-1)'], {'lateralFriction': 'mu_f'}), '(self.bottom_obj_id, -1, lateralFriction=mu_f)\n', (13837, 13883), True, 'import pybullet as p\n'), ((14181, 14232), 'numpy.array', 'np.array', (['[self.tx_act, self.ty_act, self.tz / 2.0]'], {}), '([self.tx_act, self.ty_act, self.tz / 2.0])\n', (14189, 14232), True, 'import numpy as np\n'), ((14348, 14374), 'random.choice', 'random.choice', (['self.colors'], {}), '(self.colors)\n', (14361, 14374), False, 'import random\n'), ((15156, 15218), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.bottom_obj_id', '(-1)'], {'lateralFriction': 'mu_b'}), '(self.bottom_obj_id, -1, lateralFriction=mu_b)\n', (15172, 15218), True, 'import pybullet as p\n'), ((15231, 15288), 'pybullet.changeDynamics', 'p.changeDynamics', (['self.floor_id', '(-1)'], {'lateralFriction': 'mu_f'}), '(self.floor_id, -1, lateralFriction=mu_f)\n', (15247, 15288), True, 'import pybullet as p\n'), ((16384, 16402), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (16400, 16402), True, 'import pybullet as p\n'), ((17047, 17066), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (17055, 17066), True, 'import numpy as np\n'), ((18111, 18127), 'numpy.abs', 'np.abs', (['total_nf'], {}), '(total_nf)\n', (18117, 18127), True, 'import numpy as np\n'), ((19037, 19070), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.robot.arm_id'], {}), '(self.robot.arm_id)\n', (19051, 19070), True, 'import pybullet as p\n'), ((19091, 19148), 'pybullet.getContactPoints', 'p.getContactPoints', (['self.obj_id', 'self.robot.arm_id', '(-1)', 'i'], {}), '(self.obj_id, self.robot.arm_id, -1, i)\n', (19109, 19148), True, 'import pybullet as p\n'), ((22173, 22203), 
'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['orn'], {}), '(orn)\n', (22198, 22203), True, 'import pybullet as p\n'), ((22517, 22550), 'pybullet.getNumJoints', 'p.getNumJoints', (['self.robot.arm_id'], {}), '(self.robot.arm_id)\n', (22531, 22550), True, 'import pybullet as p\n'), ((22571, 22628), 'pybullet.getContactPoints', 'p.getContactPoints', ([], {'bodyA': 'self.robot.arm_id', 'linkIndexA': 'i'}), '(bodyA=self.robot.arm_id, linkIndexA=i)\n', (22589, 22628), True, 'import pybullet as p\n'), ((23040, 23068), 'numpy.array', 'np.array', (['[self.tx, self.ty]'], {}), '([self.tx, self.ty])\n', (23048, 23068), True, 'import numpy as np\n'), ((28382, 28424), 'numpy.array', 'np.array', (['([0.0, 0.0, 0.0] * 4 + thumb_pose)'], {}), '([0.0, 0.0, 0.0] * 4 + thumb_pose)\n', (28390, 28424), True, 'import numpy as np\n'), ((28661, 28679), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (28677, 28679), True, 'import pybullet as p\n'), ((5388, 5490), 'os.path.join', 'os.path.join', (['currentdir', "('assets/place_init_dist/final_states_' + self.grasp_pi_name + '.pickle')"], {}), "(currentdir, 'assets/place_init_dist/final_states_' + self.\n grasp_pi_name + '.pickle')\n", (5400, 5490), False, 'import os\n'), ((6706, 6735), 'numpy.array', 'np.array', (['([-1.0] * action_dim)'], {}), '([-1.0] * action_dim)\n', (6714, 6735), True, 'import numpy as np\n'), ((6754, 6783), 'numpy.array', 'np.array', (['([+1.0] * action_dim)'], {}), '([+1.0] * action_dim)\n', (6762, 6783), True, 'import numpy as np\n'), ((7144, 7157), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (7152, 7157), True, 'import numpy as np\n'), ((8012, 8065), 'pybullet.createVisualShape', 'p.createVisualShape', (['shape', 'dim[0]', '[1, 1, 1]', 'dim[1]'], {}), '(shape, dim[0], [1, 1, 1], dim[1])\n', (8031, 8065), True, 'import pybullet as p\n'), ((8234, 8290), 'pybullet.createCollisionShape', 'p.createCollisionShape', (['shape', 'dim[0]', '[1, 1, 1]', 'dim[1]'], {}), '(shape, 
dim[0], [1, 1, 1], dim[1])\n', (8256, 8290), True, 'import pybullet as p\n'), ((9690, 9706), 'numpy.array', 'np.array', (['z_axis'], {}), '(z_axis)\n', (9698, 9706), True, 'import numpy as np\n'), ((13617, 13665), 'os.path.join', 'os.path.join', (['currentdir', '"""assets/tabletop.urdf"""'], {}), "(currentdir, 'assets/tabletop.urdf')\n", (13629, 13665), False, 'import os\n'), ((14437, 14485), 'os.path.join', 'os.path.join', (['currentdir', '"""assets/cylinder.urdf"""'], {}), "(currentdir, 'assets/cylinder.urdf')\n", (14449, 14485), False, 'import os\n'), ((14619, 14667), 'os.path.join', 'os.path.join', (['currentdir', '"""assets/tabletop.urdf"""'], {}), "(currentdir, 'assets/tabletop.urdf')\n", (14631, 14667), False, 'import os\n'), ((16448, 16480), 'time.sleep', 'time.sleep', (['(self._timeStep * 0.5)'], {}), '(self._timeStep * 0.5)\n', (16458, 16480), False, 'import time\n'), ((17026, 17042), 'numpy.array', 'np.array', (['z_axis'], {}), '(z_axis)\n', (17034, 17042), True, 'import numpy as np\n'), ((17659, 17697), 'numpy.minimum', 'np.minimum', (['(linV_R + angV_R / 2.0)', '(5.0)'], {}), '(linV_R + angV_R / 2.0, 5.0)\n', (17669, 17697), True, 'import numpy as np\n'), ((27237, 27273), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['oid'], {}), '(oid)\n', (27268, 27273), True, 'import pybullet as p\n'), ((28759, 28790), 'time.sleep', 'time.sleep', (['(env._timeStep * 2.0)'], {}), '(env._timeStep * 2.0)\n', (28769, 28790), False, 'import time\n'), ((18641, 18665), 'numpy.linalg.norm', 'np.linalg.norm', (['btm_angv'], {}), '(btm_angv)\n', (18655, 18665), True, 'import numpy as np\n'), ((18614, 18638), 'numpy.linalg.norm', 'np.linalg.norm', (['btm_linv'], {}), '(btm_linv)\n', (18628, 18638), True, 'import numpy as np\n'), ((11648, 11668), 'numpy.array', 'np.array', (['cand_arm_q'], {}), '(cand_arm_q)\n', (11656, 11668), True, 'import numpy as np\n'), ((17388, 17428), 'numpy.array', 'np.array', (['self.desired_obj_pos_final[:2]'], {}), 
'(self.desired_obj_pos_final[:2])\n', (17396, 17428), True, 'import numpy as np\n'), ((17451, 17470), 'numpy.array', 'np.array', (['clPos[:2]'], {}), '(clPos[:2])\n', (17459, 17470), True, 'import numpy as np\n')] |
"""
run the Coding Potential Assessment Tool (CPAT)
http://nar.oxfordjournals.org/content/early/2013/01/17/nar.gkt006.full
"""
import numpy
import shutil
import tempfile
import os
from bcbio import utils
from bcbio.rnaseq import gtf
from bcbio.utils import file_exists, safe_makedir
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.bam import fasta
from bcbio.pipeline import config_utils
def classify_with_cpat(assembled_gtf, ref_gtf, ref_fasta, data):
    """Classify assembled transcripts as protein_coding, lncRNA or ncRNA.

    Estimates a coding-probability cutoff from the reference annotation,
    scores the assembled transcripts with CPAT, and labels each
    transcript: above the cutoff -> "protein_coding"; otherwise
    "lncRNA" if longer than 200 bp, else "ncRNA".

    Returns an empty dict if cpat.py is not installed or the reference
    GTF is not compatible with CPAT.
    """
    cpat_cmd = config_utils.get_program("cpat.py", data)
    if not cpat_cmd:
        return {}
    if not gtf.is_cpat_compatible(ref_gtf):
        return {}
    cutoff, hexamer, logit = get_coding_potential_cutoff(ref_gtf, ref_fasta, data)
    assembled_fasta = gtf.gtf_to_fasta(assembled_gtf, ref_fasta)
    cpat_fn = cpat(assembled_fasta, hexamer, logit, data)
    coding_probabilities = load_cpat_coding_prob(cpat_fn)
    lengths = fasta.sequence_length(assembled_fasta)
    classification = {}
    for transcript, prob in coding_probabilities.items():
        if prob > cutoff:
            classification[transcript] = "protein_coding"
        # bugfix: this was previously a separate `if`, so the length check
        # unconditionally overwrote the "protein_coding" label above
        elif lengths[transcript] > 200:
            classification[transcript] = "lncRNA"
        else:
            classification[transcript] = "ncRNA"
    return classification
def cpat(assembled_fasta, hexamer, logit, data, out_file=None):
    """Run cpat.py to score the coding potential of a set of transcripts.

    Args:
        assembled_fasta: FASTA file of transcript sequences to score
        hexamer: hexamer frequency table (from make_hexamer_tab.py)
        logit: logistic model file (from make_logitModel.py)
        data: bcbio sample configuration
        out_file: optional output path; a temp file is used if omitted

    Returns:
        Path to the CPAT output table (returned as-is if it already exists).
    """
    if out_file and file_exists(out_file):
        return out_file
    if not out_file:
        out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".cpat").name
    cpat_cmd = config_utils.get_program("cpat.py", data)
    r_setup = utils.get_R_exports()
    # NOTE: cmd is interpolated via locals(); the local variable names
    # above (and tx_out_file below) must stay in sync with the template.
    cmd = ("{r_setup} && {cpat_cmd} --gene={assembled_fasta} --hex={hexamer} "
           "--logitModel={logit} --outfile={tx_out_file}")
    # bugfix: message previously misspelled "Predicting" as "Predicing"
    message = "Predicting coding potential of %s." % (assembled_fasta)
    with file_transaction(out_file) as tx_out_file:
        do.run(cmd.format(**locals()), message)
    return out_file
def load_cpat_coding_prob(cpat_file):
    """Parse a CPAT output table into {transcript_id: coding_probability}.

    The first line is a header; the coding probability is the sixth
    whitespace-separated column of each data row.
    """
    probs = {}
    with open(cpat_file) as handle:
        next(handle)  # skip header row
        for row in handle:
            fields = row.split()
            probs[fields[0]] = float(fields[5])
    return probs
def load_cpat_orf_size(cpat_file):
    """Parse a CPAT output table into {transcript_id: ORF_size}.

    The first line is a header; the ORF size is the third
    whitespace-separated column of each data row.
    """
    sizes = {}
    with open(cpat_file) as handle:
        next(handle)  # skip header row
        for row in handle:
            fields = row.split()
            sizes[fields[0]] = float(fields[2])
    return sizes
def grade_cpat(coding_transcripts, noncoding_transcripts, cpat, cutoff):
    """Score a coding-probability cutoff against known transcript labels.

    A transcript is called coding when its probability is >= cutoff.

    Args:
        coding_transcripts: ids of transcripts known to be coding
        noncoding_transcripts: ids of transcripts known to be noncoding
        cpat: dict of {transcript_id: coding_probability}
        cutoff: probability threshold to evaluate

    Returns:
        dict with "sensitivity", "specificity", "accuracy" and
        "precision". A metric whose denominator is zero is reported as
        -1 (robustness fix: previously only precision was guarded and
        empty inputs raised ZeroDivisionError).
    """
    tp = fp = tn = fn = 0.0
    for transcript in coding_transcripts:
        if cpat[transcript] >= cutoff:
            tp += 1
        else:
            fn += 1
    for transcript in noncoding_transcripts:
        if cpat[transcript] >= cutoff:
            fp += 1
        else:
            tn += 1

    def _ratio(num, den):
        # -1 marks an undefined metric, matching the original precision
        # convention
        return num / den if den > 0 else -1

    return {"sensitivity": _ratio(tp, tp + fn),
            "specificity": _ratio(tn, tn + fp),
            "accuracy": _ratio(tp + tn, tp + tn + fp + fn),
            "precision": _ratio(tp, tp + fp)}
def make_logit_model(coding_fasta, noncoding_fasta, hexamers, data, out_dir=None):
    """Build a CPAT logistic-regression model from training FASTA files.

    Args:
        coding_fasta: FASTA of known coding transcript sequences
        noncoding_fasta: FASTA of known noncoding transcript sequences
        hexamers: hexamer frequency table (from hexamer_table)
        data: bcbio sample configuration
        out_dir: directory to write the model into (created if missing)

    Returns:
        Path to the ".logit.RData" model file (returned as-is if it
        already exists).
    """
    safe_makedir(out_dir)
    out_prefix = os.path.join(out_dir, "logit")
    out_file = out_prefix + ".logit.RData"
    if file_exists(out_file):
        return out_file
    # make_logitModel.py appends ".logit.RData" to the prefix it is given
    tx_prefix = tempfile.NamedTemporaryFile(delete=False).name
    tx_out_file = tx_prefix + ".logit.RData"
    logit_cmd = config_utils.get_program("make_logitModel.py", data)
    r_setup = utils.get_R_exports()
    # NOTE: cmd is interpolated via locals(); the local variable names
    # above must stay in sync with the template placeholders.
    cmd = ("{r_setup} && {logit_cmd} --cgene={coding_fasta} --ngene={noncoding_fasta} "
           "--hex={hexamers} --outfile={tx_prefix}")
    message = "Building coding/noncoding logistical model."
    do.run(cmd.format(**locals()), message)
    shutil.move(tx_out_file, out_file)
    return out_file
def get_coding_potential_cutoff(ref_gtf, ref_fasta, data):
    """
    estimate the coding potential cutoff that best classifies
    coding/noncoding transcripts by splitting the reference
    annotation into a test and training set and determining
    the cutoff where the sensitivity and specificity meet

    Returns:
        (best_cutoff, hexamer_table_file, logit_model_file)
    """
    train_gtf, test_gtf = gtf.split_gtf(ref_gtf, sample_size=2000)
    coding_gtf = gtf.partition_gtf(train_gtf, coding=True)
    noncoding_gtf = gtf.partition_gtf(train_gtf)
    noncoding_fasta = gtf.gtf_to_fasta(noncoding_gtf, ref_fasta)
    cds_fasta = gtf.gtf_to_fasta(coding_gtf, ref_fasta, cds=True)
    hexamer_content = hexamer_table(cds_fasta, noncoding_fasta, data)
    coding_fasta = gtf.gtf_to_fasta(coding_gtf, ref_fasta)
    logit_model = make_logit_model(coding_fasta, noncoding_fasta,
                                   hexamer_content, data, "test_gtf")
    # score the held-out test set with the trained model
    test_fasta = gtf.gtf_to_fasta(test_gtf, ref_fasta)
    cpat_fn = cpat(test_fasta, hexamer_content, logit_model, data)
    cpat_prob = load_cpat_coding_prob(cpat_fn)
    coding, noncoding = gtf.get_coding_noncoding_transcript_ids(test_gtf)
    # sweep cutoffs, keeping the one where sensitivity and specificity
    # are closest (cleanup: dropped the unused best_sensitivity /
    # best_specificity locals and the needless list() around arange)
    best_score = 1
    best_cutoff = 0
    for cutoff in numpy.arange(0.1, 1, 0.01):
        grade = grade_cpat(coding, noncoding, cpat_prob, cutoff)
        score = abs(grade["sensitivity"] - grade["specificity"])
        if score < best_score:
            best_score = score
            best_cutoff = cutoff
    return best_cutoff, hexamer_content, logit_model
def hexamer_table(cds_fasta, noncoding_fasta, data, out_file=None):
    """Run make_hexamer_tab.py to compute a hexamer frequency table.

    Args:
        cds_fasta: FASTA of coding (CDS) sequences
        noncoding_fasta: FASTA of noncoding sequences
        data: bcbio sample configuration
        out_file: optional output path; a temp file is used if omitted

    Returns:
        Path to the hexamer table (returned as-is if it already exists).
    """
    if out_file and file_exists(out_file):
        return out_file
    if not out_file:
        out_file = tempfile.NamedTemporaryFile(delete=False, suffix=".hexamers").name
    hex_cmd = config_utils.get_program("make_hexamer_tab.py", data)
    # NOTE: cmd is interpolated via locals(); tx_out_file is bound by the
    # file_transaction context manager below.
    cmd = ("{hex_cmd} --cod={cds_fasta} --noncod={noncoding_fasta} "
           "> {tx_out_file}")
    with file_transaction(out_file) as tx_out_file:
        message = ("Calculating hexamer content in %s and %s."
                   % (cds_fasta, noncoding_fasta))
        do.run(cmd.format(**locals()), message)
    return out_file
| [
"bcbio.utils.file_exists",
"bcbio.rnaseq.gtf.split_gtf",
"tempfile.NamedTemporaryFile",
"bcbio.rnaseq.gtf.is_cpat_compatible",
"os.path.join",
"bcbio.rnaseq.gtf.get_coding_noncoding_transcript_ids",
"bcbio.rnaseq.gtf.partition_gtf",
"bcbio.bam.fasta.sequence_length",
"bcbio.utils.safe_makedir",
"b... | [((524, 565), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""cpat.py"""', 'data'], {}), "('cpat.py', data)\n", (548, 565), False, 'from bcbio.pipeline import config_utils\n'), ((772, 814), 'bcbio.rnaseq.gtf.gtf_to_fasta', 'gtf.gtf_to_fasta', (['assembled_gtf', 'ref_fasta'], {}), '(assembled_gtf, ref_fasta)\n', (788, 814), False, 'from bcbio.rnaseq import gtf\n'), ((945, 983), 'bcbio.bam.fasta.sequence_length', 'fasta.sequence_length', (['assembled_fasta'], {}), '(assembled_fasta)\n', (966, 983), False, 'from bcbio.bam import fasta\n'), ((1577, 1618), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""cpat.py"""', 'data'], {}), "('cpat.py', data)\n", (1601, 1618), False, 'from bcbio.pipeline import config_utils\n'), ((1633, 1654), 'bcbio.utils.get_R_exports', 'utils.get_R_exports', ([], {}), '()\n', (1652, 1654), False, 'from bcbio import utils\n'), ((3311, 3332), 'bcbio.utils.safe_makedir', 'safe_makedir', (['out_dir'], {}), '(out_dir)\n', (3323, 3332), False, 'from bcbio.utils import file_exists, safe_makedir\n'), ((3350, 3380), 'os.path.join', 'os.path.join', (['out_dir', '"""logit"""'], {}), "(out_dir, 'logit')\n", (3362, 3380), False, 'import os\n'), ((3431, 3452), 'bcbio.utils.file_exists', 'file_exists', (['out_file'], {}), '(out_file)\n', (3442, 3452), False, 'from bcbio.utils import file_exists, safe_makedir\n'), ((3602, 3654), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""make_logitModel.py"""', 'data'], {}), "('make_logitModel.py', data)\n", (3626, 3654), False, 'from bcbio.pipeline import config_utils\n'), ((3669, 3690), 'bcbio.utils.get_R_exports', 'utils.get_R_exports', ([], {}), '()\n', (3688, 3690), False, 'from bcbio import utils\n'), ((3940, 3974), 'shutil.move', 'shutil.move', (['tx_out_file', 'out_file'], {}), '(tx_out_file, out_file)\n', (3951, 3974), False, 'import shutil\n'), ((4337, 4377), 'bcbio.rnaseq.gtf.split_gtf', 'gtf.split_gtf', (['ref_gtf'], 
{'sample_size': '(2000)'}), '(ref_gtf, sample_size=2000)\n', (4350, 4377), False, 'from bcbio.rnaseq import gtf\n'), ((4395, 4436), 'bcbio.rnaseq.gtf.partition_gtf', 'gtf.partition_gtf', (['train_gtf'], {'coding': '(True)'}), '(train_gtf, coding=True)\n', (4412, 4436), False, 'from bcbio.rnaseq import gtf\n'), ((4457, 4485), 'bcbio.rnaseq.gtf.partition_gtf', 'gtf.partition_gtf', (['train_gtf'], {}), '(train_gtf)\n', (4474, 4485), False, 'from bcbio.rnaseq import gtf\n'), ((4508, 4550), 'bcbio.rnaseq.gtf.gtf_to_fasta', 'gtf.gtf_to_fasta', (['noncoding_gtf', 'ref_fasta'], {}), '(noncoding_gtf, ref_fasta)\n', (4524, 4550), False, 'from bcbio.rnaseq import gtf\n'), ((4567, 4616), 'bcbio.rnaseq.gtf.gtf_to_fasta', 'gtf.gtf_to_fasta', (['coding_gtf', 'ref_fasta'], {'cds': '(True)'}), '(coding_gtf, ref_fasta, cds=True)\n', (4583, 4616), False, 'from bcbio.rnaseq import gtf\n'), ((4706, 4745), 'bcbio.rnaseq.gtf.gtf_to_fasta', 'gtf.gtf_to_fasta', (['coding_gtf', 'ref_fasta'], {}), '(coding_gtf, ref_fasta)\n', (4722, 4745), False, 'from bcbio.rnaseq import gtf\n'), ((4903, 4940), 'bcbio.rnaseq.gtf.gtf_to_fasta', 'gtf.gtf_to_fasta', (['test_gtf', 'ref_fasta'], {}), '(test_gtf, ref_fasta)\n', (4919, 4940), False, 'from bcbio.rnaseq import gtf\n'), ((5079, 5128), 'bcbio.rnaseq.gtf.get_coding_noncoding_transcript_ids', 'gtf.get_coding_noncoding_transcript_ids', (['test_gtf'], {}), '(test_gtf)\n', (5118, 5128), False, 'from bcbio.rnaseq import gtf\n'), ((5909, 5962), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""make_hexamer_tab.py"""', 'data'], {}), "('make_hexamer_tab.py', data)\n", (5933, 5962), False, 'from bcbio.pipeline import config_utils\n'), ((616, 647), 'bcbio.rnaseq.gtf.is_cpat_compatible', 'gtf.is_cpat_compatible', (['ref_gtf'], {}), '(ref_gtf)\n', (638, 647), False, 'from bcbio.rnaseq import gtf\n'), ((1412, 1433), 'bcbio.utils.file_exists', 'file_exists', (['out_file'], {}), '(out_file)\n', (1423, 1433), False, 'from bcbio.utils import 
file_exists, safe_makedir\n'), ((1872, 1898), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['out_file'], {}), '(out_file)\n', (1888, 1898), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((3494, 3535), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (3521, 3535), False, 'import tempfile\n'), ((5241, 5267), 'numpy.arange', 'numpy.arange', (['(0.1)', '(1)', '(0.01)'], {}), '(0.1, 1, 0.01)\n', (5253, 5267), False, 'import numpy\n'), ((5741, 5762), 'bcbio.utils.file_exists', 'file_exists', (['out_file'], {}), '(out_file)\n', (5752, 5762), False, 'from bcbio.utils import file_exists, safe_makedir\n'), ((6071, 6097), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['out_file'], {}), '(out_file)\n', (6087, 6097), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((1499, 1556), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".cpat"""'}), "(delete=False, suffix='.cpat')\n", (1526, 1556), False, 'import tempfile\n'), ((5828, 5889), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".hexamers"""'}), "(delete=False, suffix='.hexamers')\n", (5855, 5889), False, 'import tempfile\n')] |
import os
import json
import random
from tqdm import tqdm, trange
import numpy as np
import torch
from apex import amp
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers.models.bert import BertTokenizer
from transformers import AutoTokenizer, AutoModelForMultipleChoice
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from data_processor import DataProcessor
from model import BertForClassification
def accuracy(out, labels):
    """Return the number of rows in *out* whose argmax matches *labels*."""
    predictions = np.argmax(out, axis=1)
    return np.sum(predictions == labels)
def set_seed(seed):
    """Seed every RNG used during training (python, numpy, torch CPU and all GPUs)."""
    for seeder in (random.seed, np.random.seed,
                   torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def train(config, model, train_dataset, eval_dataset=None):
    """Fine-tune *model* on *train_dataset*; evaluate and checkpoint once per epoch.

    Parameters
    ----------
    config : dict
        Hyper-parameters: "train_batch_size", "num_train_epochs",
        "gradient_accumulation_steps", "learning_rate", "num_warmup_steps",
        "fp16" (1 enables apex O1 mixed precision), "max_grad_norm",
        "seed", "save_dir".
    model : torch.nn.Module
        Classification model; batches are moved to GPU via ``.cuda()``, so the
        model is expected to already live on the GPU.
    train_dataset, eval_dataset :
        Tensor datasets yielding (input_ids, attention_mask, token_type_ids, labels).

    Returns
    -------
    tuple
        (global_step, mean training loss per optimizer step).
    """
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, batch_size=config["train_batch_size"], sampler=train_sampler)
    # Total number of optimizer steps (batches are accumulated before each step).
    t_total = len(train_dataloader) // config["gradient_accumulation_steps"] * config["num_train_epochs"]
    no_decay = ['bias', 'gamma', 'beta']
    # NOTE(review): `n not in no_decay` compares the *full* parameter name
    # (e.g. "bert.encoder...bias") against the list, so the no-decay split only
    # triggers for parameters literally named 'bias'/'gamma'/'beta'. The groups
    # are also unused: the active AdamW call below uses model.parameters().
    optimizer_parameters = [
        {'params': [p for n, p in model.named_parameters() if n not in no_decay], 'weight_decay_rate': 0.01},
        {'params': [p for n, p in model.named_parameters() if n in no_decay], 'weight_decay_rate': 0.0}
        ]
    # optimizer = AdamW(optimizer_parameters, lr=config["learning_rate"], eps=1e-8)
    optimizer = AdamW(model.parameters(), lr=config["learning_rate"], eps=1e-8)
    # Linear warmup followed by linear decay over the full training run.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=config["num_warmup_steps"], num_training_steps=t_total)
    if config["fp16"] == 1:
        # apex O1 mixed precision: patches model/optimizer for fp16 compute.
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    nb_tr_examples, nb_tr_steps = 0, 0
    model.zero_grad()
    train_iterator = trange(int(config["num_train_epochs"]), desc="Epoch", disable=True)
    set_seed(config["seed"])
    for epoch in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", leave=False)
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.cuda() for t in batch)
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2],
                      'labels': batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]
            if config["gradient_accumulation_steps"] > 1:
                # Scale the loss so accumulated gradients average over the group.
                loss = loss / config["gradient_accumulation_steps"]
            if config["fp16"] == 1:
                # fp16 path: scale the loss to avoid underflow, clip the fp32
                # master gradients held by apex.
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), config["max_grad_norm"])
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), config["max_grad_norm"])
            tr_loss += loss.item()
            logging_loss += loss.item()
            nb_tr_examples += batch[0].size(0)
            nb_tr_steps += 1
            if (step + 1) % config["gradient_accumulation_steps"] == 0:
                # One optimizer step per accumulation group.
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                stat = 'epoch {} | step {} | lr {:.6f} | loss {:.6f}'.format(epoch, global_step, scheduler.get_last_lr()[0], logging_loss)
                epoch_iterator.set_postfix_str(str(stat))
                logging_loss = 0.0
        # Save model checkpoint
        eval_loss, eval_metric, eval_logits = evaluate(config, model, eval_dataset)
        print("epoch: {}, eval_result: {:.6f}, eval_loss: {:.6f}".format(epoch, eval_metric, eval_loss))
        save_dir = os.path.join(config["save_dir"], 'checkpoint-{}'.format(epoch))
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
        torch.save(model_to_save.state_dict(), os.path.join(save_dir, 'model.bin'))
        torch.save(config, os.path.join(save_dir, 'training_args.bin'))
        print("Saving model checkpoint to {}".format(save_dir))
    return global_step, tr_loss / global_step
def evaluate(config, model, eval_dataset):
    """Run *model* over *eval_dataset* and report loss, accuracy and raw logits.

    Parameters
    ----------
    config : dict
        Must provide "eval_batch_size".
    model : torch.nn.Module
        Model on GPU; batches are moved via ``.cuda()``.
    eval_dataset :
        Tensor dataset of (input_ids, attention_mask, token_type_ids[, labels]).

    Returns
    -------
    tuple
        (mean loss per batch, accuracy over all examples, list of per-example
        numpy logit vectors).
    """
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, batch_size=config["eval_batch_size"], sampler=eval_sampler)
    eval_loss, eval_accuracy = 0.0, 0.0
    nb_eval_steps, nb_eval_examples = 0, 0
    logits_all = []
    for _, batch in enumerate(tqdm(eval_dataloader, desc="Evaluating", leave=False)):
        model.eval()
        batch = tuple(t.cuda() for t in batch)
        with torch.no_grad():
            # Labels are optional; without them the model would not return a loss.
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2],
                      'labels': batch[3] if len(batch) == 4 else None}
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
        logits = logits.detach().cpu().numpy()
        label_ids = batch[3].cpu().numpy()
        # Collect per-example logit rows for the caller.
        for i in range(len(logits)):
            logits_all += [logits[i]]
        # accuracy() returns the *count* of correct predictions in this batch.
        tmp_eval_accuracy = accuracy(logits, label_ids.reshape(-1))
        eval_loss += tmp_eval_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += batch[0].size(0)
        nb_eval_steps += 1
    # Loss is averaged per batch; accuracy per example.
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    return eval_loss, eval_accuracy, logits_all
def main():
    """End-to-end run: load config.json, build model/data, train, then test.

    Side effects: creates "output_dir"/"save_dir" if missing, writes epoch
    checkpoints during training, prints the final (loss, accuracy) pair.
    """
    config = json.load(open('config.json', 'r'))
    set_seed(config["seed"])
    if not os.path.exists(config["output_dir"]):
        os.makedirs(config["output_dir"])
    if not os.path.exists(config["save_dir"]):
        os.makedirs(config["save_dir"])
    # model_config = transformers.BertConfig.from_pretrained(config["model_name"])
    # tokenizer = AutoTokenizer.from_pretrained(config["model_name"])
    tokenizer = BertTokenizer.from_pretrained(config["model_name"])
    model = BertForClassification(config["model_name"])
    # model = AutoModelForMultipleChoice.from_pretrained(config["model_name"])
    model.cuda()
    # Build train/dev/test tensors from the raw data directory.
    processor = DataProcessor(config["data_dir"])
    train_examples = processor.get_train_examples()
    train_dataset = processor.get_dataset(train_examples, tokenizer, config["max_length"])
    valid_examples = processor.get_dev_examples()
    valid_dataset = processor.get_dataset(valid_examples, tokenizer, config["max_length"])
    test_examples = processor.get_test_examples()
    test_dataset = processor.get_dataset(test_examples, tokenizer, config["max_length"])
    train(config, model, train_dataset, valid_dataset)
    # Final evaluation on the held-out test split; drop the raw logits.
    result = evaluate(config, model, test_dataset)
    print(result[:2])
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"numpy.sum",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"data_processor.DataProcessor",
"transformers.optimization.get_linear_schedule_with_warmup",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"apex.amp.master_params",
"os.path.exists",
"apex.amp... | [((538, 560), 'numpy.argmax', 'np.argmax', (['out'], {'axis': '(1)'}), '(out, axis=1)\n', (547, 560), True, 'import numpy as np\n'), ((573, 598), 'numpy.sum', 'np.sum', (['(outputs == labels)'], {}), '(outputs == labels)\n', (579, 598), True, 'import numpy as np\n'), ((625, 642), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (636, 642), False, 'import random\n'), ((648, 668), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (662, 668), True, 'import numpy as np\n'), ((674, 697), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (691, 697), False, 'import torch\n'), ((703, 735), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (729, 735), False, 'import torch\n'), ((820, 848), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (833, 848), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((873, 965), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': "config['train_batch_size']", 'sampler': 'train_sampler'}), "(train_dataset, batch_size=config['train_batch_size'], sampler=\n train_sampler)\n", (883, 965), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((1552, 1672), 'transformers.optimization.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': "config['num_warmup_steps']", 'num_training_steps': 't_total'}), "(optimizer, num_warmup_steps=config[\n 'num_warmup_steps'], num_training_steps=t_total)\n", (1583, 1672), False, 'from transformers.optimization import AdamW, get_linear_schedule_with_warmup\n'), ((4539, 4570), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (4556, 4570), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((4594, 4683), 'torch.utils.data.DataLoader', 
'DataLoader', (['eval_dataset'], {'batch_size': "config['eval_batch_size']", 'sampler': 'eval_sampler'}), "(eval_dataset, batch_size=config['eval_batch_size'], sampler=\n eval_sampler)\n", (4604, 4683), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((6308, 6359), 'transformers.models.bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (["config['model_name']"], {}), "(config['model_name'])\n", (6337, 6359), False, 'from transformers.models.bert import BertTokenizer\n'), ((6373, 6416), 'model.BertForClassification', 'BertForClassification', (["config['model_name']"], {}), "(config['model_name'])\n", (6394, 6416), False, 'from model import BertForClassification\n'), ((6536, 6569), 'data_processor.DataProcessor', 'DataProcessor', (["config['data_dir']"], {}), "(config['data_dir'])\n", (6549, 6569), False, 'from data_processor import DataProcessor\n'), ((1727, 1775), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {'opt_level': '"""O1"""'}), "(model, optimizer, opt_level='O1')\n", (1741, 1775), False, 'from apex import amp\n'), ((2080, 2133), 'tqdm.tqdm', 'tqdm', (['train_dataloader'], {'desc': '"""Iteration"""', 'leave': '(False)'}), "(train_dataloader, desc='Iteration', leave=False)\n", (2084, 2133), False, 'from tqdm import tqdm, trange\n'), ((4820, 4873), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""', 'leave': '(False)'}), "(eval_dataloader, desc='Evaluating', leave=False)\n", (4824, 4873), False, 'from tqdm import tqdm, trange\n'), ((5964, 6000), 'os.path.exists', 'os.path.exists', (["config['output_dir']"], {}), "(config['output_dir'])\n", (5978, 6000), False, 'import os\n'), ((6011, 6044), 'os.makedirs', 'os.makedirs', (["config['output_dir']"], {}), "(config['output_dir'])\n", (6022, 6044), False, 'import os\n'), ((6057, 6091), 'os.path.exists', 'os.path.exists', (["config['save_dir']"], {}), "(config['save_dir'])\n", (6071, 6091), False, 'import os\n'), 
((6102, 6133), 'os.makedirs', 'os.makedirs', (["config['save_dir']"], {}), "(config['save_dir'])\n", (6113, 6133), False, 'import os\n'), ((4015, 4039), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4029, 4039), False, 'import os\n'), ((4054, 4075), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4065, 4075), False, 'import os\n'), ((4247, 4282), 'os.path.join', 'os.path.join', (['save_dir', '"""model.bin"""'], {}), "(save_dir, 'model.bin')\n", (4259, 4282), False, 'import os\n'), ((4312, 4355), 'os.path.join', 'os.path.join', (['save_dir', '"""training_args.bin"""'], {}), "(save_dir, 'training_args.bin')\n", (4324, 4355), False, 'import os\n'), ((4962, 4977), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4975, 4977), False, 'import torch\n'), ((2745, 2776), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (2759, 2776), False, 'from apex import amp\n'), ((2885, 2913), 'apex.amp.master_params', 'amp.master_params', (['optimizer'], {}), '(optimizer)\n', (2902, 2913), False, 'from apex import amp\n')] |
import numpy as np
import tensorly as tl
def random_matrix_generator(n, k, typ="g", target='col'):
    """Draw an n-by-k random sketching matrix.

    Intended for products such as ``A @ Omega`` or ``Omega.T @ x`` with n >> k.

    :param n: number of rows; a list is collapsed to the product of its entries
    :param k: number of columns
    :param typ: distribution of the entries — 'g' gaussian, 'u' scaled uniform,
        'sp0' fixed-density sparse signs, 'sp1' n-dependent sparse signs
    :param target: 'col' returns Omega as drawn (column preservation, no
        standardization needed); 'vec' returns Omega.T / sqrt(k)
        (length preservation)
    :return: array of shape (n, k) for 'col', (k, n) for 'vec'
    """
    if isinstance(n, list):
        n = np.prod(n)
    assert typ in {'g', 'u', 'sp0', 'sp1'}, "please aset your type of random variable correctly"
    assert target in ['col', 'vec'], "target can only be col or vec"
    if typ == 'g':
        mat = np.random.normal(0, 1, size=(n, k))
    elif typ == 'u':
        mat = np.random.uniform(low=-1, high=1, size=(n, k)) * np.sqrt(3)
    elif typ == 'sp0':
        mat = np.random.choice([-1, 0, 1], size=(n, k), p=[1 / 6, 2 / 3, 1 / 6]) * np.sqrt(3)
    else:  # 'sp1'
        root = np.sqrt(n)
        mat = np.random.choice([-1, 0, 1], size=(n, k),
                               p=[1 / (2 * root), 1 - 1 / root, 1 / (2 * root)]) * np.sqrt(root)
    if target == 'vec':
        return mat.transpose() / np.sqrt(k)
    return mat
def tensor_random_matrix_generator(n_arr, k, typ="g", target='col'):
    """Draw a Khatri-Rao structured random sketching matrix.

    One factor of shape (n_i, k) is drawn per entry of *n_arr*
    (n1 * ... * n_N rows in the product), then the factors are combined
    with a column-wise Khatri-Rao product.

    :param n_arr: list of per-mode row counts n1, ..., nN
    :param k: number of columns
    :param typ: distribution of the entries — 'g' gaussian, 'u' scaled uniform,
        'sp0' fixed-density sparse signs, 'sp1' n-dependent sparse signs
    :param target: 'col' returns the product as drawn (column preservation);
        'vec' returns its transpose divided by sqrt(k) (length preservation)
    :return: array of shape (prod(n_arr), k) for 'col', (k, prod(n_arr)) for 'vec'
    """
    if not isinstance(n_arr, list):
        raise Exception("type of first parameter must be list")
    assert typ in {'g', 'u', 'sp0', 'sp1'}, "please aset your type of random variable correctly"
    assert target in ['col', 'vec'], "target can only be col or vec"
    factors = []
    for n in n_arr:
        if typ == 'g':
            block = np.random.normal(0, 1, size=(n, k))
        elif typ == 'u':
            block = np.random.uniform(low=-1, high=1, size=(n, k)) * np.sqrt(3)
        elif typ == 'sp0':
            block = np.random.choice([-1, 0, 1], size=(n, k), p=[1 / 6, 2 / 3, 1 / 6]) * np.sqrt(3)
        else:  # 'sp1'
            root = np.sqrt(n)
            block = np.random.choice([-1, 0, 1], size=(n, k),
                                     p=[1 / (2 * root), 1 - 1 / root, 1 / (2 * root)]) * np.sqrt(root)
        factors.append(block)
    kr = tl.tenalg.khatri_rao(factors)
    if target == 'col':
        return kr
    return kr.transpose() / np.sqrt(k)
# Ad-hoc smoke tests, run only when the module is executed directly.
if __name__ == "__main__":
    def test_khatri_rao():
        # Sanity-check the Khatri-Rao product on a tiny hand-written matrix.
        A = np.asarray([[1,2],[2,3],[1,2]])
        print(A)
        print(tl.tenalg.khatri_rao([A, A]))
    # test_khatri_rao()
    def test_dimension():
        # Output shapes: 'col' -> (n, k); 'vec' -> (k, n).
        omega = random_matrix_generator(100, 3, typ="g", target='col')
        print(omega.shape)
        omega = random_matrix_generator(100, 3, typ="g", target='vec')
        print(omega.shape)
        omega = tensor_random_matrix_generator([20, 20], 3, typ="g", target='col')
        print(omega.shape)
        omega = tensor_random_matrix_generator([20, 20], 3, typ="g", target='vec')
        print(omega.shape)
    test_dimension()
    def test_normalization():
        # Two independent draws should differ (nonzero norm of the difference).
        omega1 = random_matrix_generator(100, 3, typ="g", target='col')
        omega2 = random_matrix_generator(100, 3, typ="g", target='col')
        print("difference for two generated random matrix is {}".format(np.linalg.norm(omega1-omega2)))
        # test vector
        # A 'vec' projection should preserve vector length on average.
        print("test normal random projection")
        x = np.random.uniform(0, 1, 100)
        original_norm = np.linalg.norm(x)
        res = []
        for _ in range(100):
            omega = random_matrix_generator(100, 20, typ="g", target='vec')
            res.append(np.linalg.norm(omega@x))
        ave_norm = np.mean(res)
        print(f"original norm:{original_norm}, {ave_norm}")
        res = []
        print("test tensor random projection")
        for _ in range(100):
            omega = tensor_random_matrix_generator([10, 10], 20, typ="g", target='vec')
            res.append(np.linalg.norm(omega@x))
        ave_norm = np.mean(res)
        print(f"original norm:{original_norm}, {ave_norm}")
    test_normalization()
test_normalization() | [
"numpy.random.uniform",
"tensorly.tenalg.khatri_rao",
"numpy.asarray",
"numpy.mean",
"numpy.linalg.norm",
"numpy.random.normal",
"numpy.random.choice",
"numpy.prod",
"numpy.sqrt"
] | [((501, 511), 'numpy.prod', 'np.prod', (['n'], {}), '(n)\n', (508, 511), True, 'import numpy as np\n'), ((737, 772), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n, k)'}), '(0, 1, size=(n, k))\n', (753, 772), True, 'import numpy as np\n'), ((1249, 1259), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (1256, 1259), True, 'import numpy as np\n'), ((2633, 2661), 'tensorly.tenalg.khatri_rao', 'tl.tenalg.khatri_rao', (['Omegas'], {}), '(Omegas)\n', (2653, 2661), True, 'import tensorly as tl\n'), ((2714, 2724), 'numpy.sqrt', 'np.sqrt', (['k'], {}), '(k)\n', (2721, 2724), True, 'import numpy as np\n'), ((2798, 2834), 'numpy.asarray', 'np.asarray', (['[[1, 2], [2, 3], [1, 2]]'], {}), '([[1, 2], [2, 3], [1, 2]])\n', (2808, 2834), True, 'import numpy as np\n'), ((3743, 3771), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3760, 3771), True, 'import numpy as np\n'), ((3796, 3813), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (3810, 3813), True, 'import numpy as np\n'), ((4003, 4015), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (4010, 4015), True, 'import numpy as np\n'), ((4325, 4337), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (4332, 4337), True, 'import numpy as np\n'), ((2099, 2134), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(n, k)'}), '(0, 1, size=(n, k))\n', (2115, 2134), True, 'import numpy as np\n'), ((2861, 2889), 'tensorly.tenalg.khatri_rao', 'tl.tenalg.khatri_rao', (['[A, A]'], {}), '([A, A])\n', (2881, 2889), True, 'import tensorly as tl\n'), ((810, 856), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(n, k)'}), '(low=-1, high=1, size=(n, k))\n', (827, 856), True, 'import numpy as np\n'), ((859, 869), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (866, 869), True, 'import numpy as np\n'), ((2673, 2701), 'tensorly.tenalg.khatri_rao', 'tl.tenalg.khatri_rao', (['Omegas'], {}), 
'(Omegas)\n', (2693, 2701), True, 'import tensorly as tl\n'), ((3629, 3660), 'numpy.linalg.norm', 'np.linalg.norm', (['(omega1 - omega2)'], {}), '(omega1 - omega2)\n', (3643, 3660), True, 'import numpy as np\n'), ((3959, 3984), 'numpy.linalg.norm', 'np.linalg.norm', (['(omega @ x)'], {}), '(omega @ x)\n', (3973, 3984), True, 'import numpy as np\n'), ((4281, 4306), 'numpy.linalg.norm', 'np.linalg.norm', (['(omega @ x)'], {}), '(omega @ x)\n', (4295, 4306), True, 'import numpy as np\n'), ((909, 975), 'numpy.random.choice', 'np.random.choice', (['[-1, 0, 1]'], {'size': '(n, k)', 'p': '[1 / 6, 2 / 3, 1 / 6]'}), '([-1, 0, 1], size=(n, k), p=[1 / 6, 2 / 3, 1 / 6])\n', (925, 975), True, 'import numpy as np\n'), ((978, 988), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (985, 988), True, 'import numpy as np\n'), ((2180, 2226), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(n, k)'}), '(low=-1, high=1, size=(n, k))\n', (2197, 2226), True, 'import numpy as np\n'), ((2229, 2239), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2236, 2239), True, 'import numpy as np\n'), ((2287, 2353), 'numpy.random.choice', 'np.random.choice', (['[-1, 0, 1]'], {'size': '(n, k)', 'p': '[1 / 6, 2 / 3, 1 / 6]'}), '([-1, 0, 1], size=(n, k), p=[1 / 6, 2 / 3, 1 / 6])\n', (2303, 2353), True, 'import numpy as np\n'), ((2356, 2366), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2363, 2366), True, 'import numpy as np\n'), ((1163, 1173), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1170, 1173), True, 'import numpy as np\n'), ((2553, 2563), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2560, 2563), True, 'import numpy as np\n'), ((1097, 1107), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1104, 1107), True, 'import numpy as np\n'), ((1118, 1128), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1125, 1128), True, 'import numpy as np\n'), ((1139, 1149), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (1146, 1149), True, 'import numpy as np\n'), 
((2487, 2497), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2494, 2497), True, 'import numpy as np\n'), ((2508, 2518), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2515, 2518), True, 'import numpy as np\n'), ((2529, 2539), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2536, 2539), True, 'import numpy as np\n')] |
import math
import numpy as np
import random,pickle
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
Nc = 2  # number of cycles
cyclelength = 14  # cycle length (days)

# The nine simulation cases, loaded in order from case1.p ... case9.p:
#  1 no butyrate (baseline)
#  2 with butyrate + carley
#  3 + butyrate increases osteoblast proliferation
#  4 + butyrate increases pre-osteoblast to osteoblast differentiation
#  5 + butyrate increases wnt10b dependent pre-osteoblast to osteoblast differentiation
#  6 + butyrate inhibits osteoclast by osteoclast death
#  7 + butyrate inhibits osteoclast differentiation
#  8 + butyrate increases osteoblast mediated bone formation
#  9 + butyrate reduces osteoclast mediated bone resorption
results = []
for case in range(1, 10):
    # Use a context manager so each pickle file is closed after loading.
    with open('case{}.p'.format(case), 'rb') as f:
        results.append(np.array(pickle.load(f)))

# Common simulation time grid (days).
T1 = np.arange(0.0, Nc * cyclelength, 0.001)

# State-vector column indices (meanings taken from the plot labels below).
WNT10B, OSTEOCYTE, PRE_OSTEOBLAST, OSTEOBLAST, OSTEOCLAST, BONE_VOLUME = 10, 11, 12, 13, 14, 15


def _auc(res, col):
    """Area under the curve of state column *col* over the whole simulation."""
    return np.trapz(y=res[:, col], x=T1)


def _case_bar_chart(values, ylabel):
    """Show a bar chart with one bar per case (labelled 1..9), cycling colors."""
    plt.rcParams.update({'font.size': 25})
    palette = iter(plt.rcParams['axes.prop_cycle'])
    for pos, value in enumerate(values):
        plt.bar(pos, value, color=next(palette)['color'])
    plt.xticks(range(len(values)), [str(i + 1) for i in range(len(values))])
    plt.ylabel(ylabel)
    plt.show()


# Osteoblast/Osteoclast AUC ratio per case.
_case_bar_chart([_auc(r, OSTEOBLAST) / _auc(r, OSTEOCLAST) for r in results],
                'Osteoblast/Osteoclast AUC')

# Pre-Osteoblast/Osteoblast AUC ratio per case.
_case_bar_chart([_auc(r, PRE_OSTEOBLAST) / _auc(r, OSTEOBLAST) for r in results],
                'Pre-Osteoblast/Osteoblast AUC')

# Percent change in osteoblast / osteoclast AUC relative to case 1 (no butyrate);
# case 1's own bar is zero by construction.
for col, label in ((OSTEOBLAST, 'Change in Osteoblast AUC'),
                   (OSTEOCLAST, 'Change in Osteoclast AUC')):
    baseline = _auc(results[0], col)
    _case_bar_chart([(_auc(r, col) - baseline) / baseline * 100 for r in results], label)


def _plot_time_courses(col, ylabel):
    """Overlay the time course of one state column for all nine cases."""
    plt.xticks(np.arange(0, 30, step=7))
    series = []
    for r in results:
        series += [T1, r[:, col]]
    plt.plot(*series, linewidth=3)
    plt.xlabel('Time (days)')
    plt.ylabel(ylabel)


# Relative bone volume, with a reference point at day 28
# (presumably experimental data with its error bar — confirm source).
_plot_time_courses(BONE_VOLUME, 'Relative bone volume (%)')
plt.errorbar([28], [136.6], yerr=[6.47], fmt='o', color='r', elinewidth=3, markersize=10, capsize=6, capthick=3, barsabove=False)
plt.show()

# Cell-population time courses, one figure per population.
for col, label in ((OSTEOCYTE, 'Osteocyte cells'),
                   (PRE_OSTEOBLAST, 'Pre-osteoblast cells'),
                   (OSTEOBLAST, 'Osteoblast cells'),
                   (OSTEOCLAST, 'Osteoclast cells')):
    _plot_time_courses(col, label)
    plt.show()

# Wnt10b fold change: only baseline (case 1) vs butyrate (case 2).
plt.xticks(np.arange(0, 30, step=7))
plt.plot(T1, results[0][:, WNT10B], '--', color='magenta', linewidth=3)
plt.plot(T1, results[1][:, WNT10B], color='purple', linewidth=3)
plt.xlabel('Time (days)')
plt.ylabel('Wnt10b fold change')
plt.legend(['without butyrate', 'with butyrate'])
plt.show()
"numpy.trapz",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rcParams.update",
"matplotlib.use",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.errorbar"
] | [((70, 93), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (84, 93), False, 'import matplotlib\n'), ((1354, 1393), 'numpy.arange', 'np.arange', (['(0.0)', '(Nc * cyclelength)', '(0.001)'], {}), '(0.0, Nc * cyclelength, 0.001)\n', (1363, 1393), True, 'import numpy as np\n'), ((2208, 2246), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (2227, 2246), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2264), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (2261, 2264), True, 'import numpy as np\n'), ((2481, 2520), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osteoblast/Osteoclast AUC"""'], {}), "('Osteoblast/Osteoclast AUC')\n", (2491, 2520), True, 'import matplotlib.pyplot as plt\n'), ((2521, 2531), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2529, 2531), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3390), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (3371, 3390), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3408), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (3405, 3408), True, 'import numpy as np\n'), ((3625, 3668), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pre-Osteoblast/Osteoblast AUC"""'], {}), "('Pre-Osteoblast/Osteoblast AUC')\n", (3635, 3668), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3679), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3677, 3679), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4870), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (4851, 4870), True, 'import matplotlib.pyplot as plt\n'), ((4876, 4888), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (4885, 4888), True, 'import numpy as np\n'), ((5109, 5147), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Change in Osteoblast AUC"""'], {}), "('Change in Osteoblast AUC')\n", (5119, 
5147), True, 'import matplotlib.pyplot as plt\n'), ((5148, 5158), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5156, 5158), True, 'import matplotlib.pyplot as plt\n'), ((6312, 6350), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 25}"], {}), "({'font.size': 25})\n", (6331, 6350), True, 'import matplotlib.pyplot as plt\n'), ((6356, 6368), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (6365, 6368), True, 'import numpy as np\n'), ((6589, 6627), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Change in Osteoclast AUC"""'], {}), "('Change in Osteoclast AUC')\n", (6599, 6627), True, 'import matplotlib.pyplot as plt\n'), ((6628, 6638), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6636, 6638), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6905), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 15]', 'T1', 'result_f2[:, 15]', 'T1', 'result_f3[:, 15]', 'T1', 'result_f4[:, 15]', 'T1', 'result_f5[:, 15]', 'T1', 'result_f6[:, 15]', 'T1', 'result_f7[:, 15]', 'T1', 'result_f8[:, 15]', 'T1', 'result_f9[:, 15]'], {'linewidth': '(3)'}), '(T1, result_f1[:, 15], T1, result_f2[:, 15], T1, result_f3[:, 15],\n T1, result_f4[:, 15], T1, result_f5[:, 15], T1, result_f6[:, 15], T1,\n result_f7[:, 15], T1, result_f8[:, 15], T1, result_f9[:, 15], linewidth=3)\n', (6686, 6905), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6907), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (6892, 6907), True, 'import matplotlib.pyplot as plt\n'), ((6908, 6946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative bone volume (%)"""'], {}), "('Relative bone volume (%)')\n", (6918, 6946), True, 'import matplotlib.pyplot as plt\n'), ((6947, 7080), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['[28]', '[136.6]'], {'yerr': '[6.47]', 'fmt': '"""o"""', 'color': '"""r"""', 'elinewidth': '(3)', 'markersize': '(10)', 'capsize': '(6)', 'capthick': '(3)', 'barsabove': '(False)'}), 
"([28], [136.6], yerr=[6.47], fmt='o', color='r', elinewidth=3,\n markersize=10, capsize=6, capthick=3, barsabove=False)\n", (6959, 7080), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7086), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7084, 7086), True, 'import matplotlib.pyplot as plt\n'), ((7125, 7352), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 11]', 'T1', 'result_f2[:, 11]', 'T1', 'result_f3[:, 11]', 'T1', 'result_f4[:, 11]', 'T1', 'result_f5[:, 11]', 'T1', 'result_f6[:, 11]', 'T1', 'result_f7[:, 11]', 'T1', 'result_f8[:, 11]', 'T1', 'result_f9[:, 11]'], {'linewidth': '(3)'}), '(T1, result_f1[:, 11], T1, result_f2[:, 11], T1, result_f3[:, 11],\n T1, result_f4[:, 11], T1, result_f5[:, 11], T1, result_f6[:, 11], T1,\n result_f7[:, 11], T1, result_f8[:, 11], T1, result_f9[:, 11], linewidth=3)\n', (7133, 7352), True, 'import matplotlib.pyplot as plt\n'), ((7329, 7354), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (7339, 7354), True, 'import matplotlib.pyplot as plt\n'), ((7355, 7384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osteocyte cells"""'], {}), "('Osteocyte cells')\n", (7365, 7384), True, 'import matplotlib.pyplot as plt\n'), ((7385, 7395), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7393, 7395), True, 'import matplotlib.pyplot as plt\n'), ((7434, 7661), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 12]', 'T1', 'result_f2[:, 12]', 'T1', 'result_f3[:, 12]', 'T1', 'result_f4[:, 12]', 'T1', 'result_f5[:, 12]', 'T1', 'result_f6[:, 12]', 'T1', 'result_f7[:, 12]', 'T1', 'result_f8[:, 12]', 'T1', 'result_f9[:, 12]'], {'linewidth': '(3)'}), '(T1, result_f1[:, 12], T1, result_f2[:, 12], T1, result_f3[:, 12],\n T1, result_f4[:, 12], T1, result_f5[:, 12], T1, result_f6[:, 12], T1,\n result_f7[:, 12], T1, result_f8[:, 12], T1, result_f9[:, 12], linewidth=3)\n', (7442, 7661), True, 'import matplotlib.pyplot as plt\n'), ((7638, 7663), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (7648, 7663), True, 'import matplotlib.pyplot as plt\n'), ((7664, 7698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Pre-osteoblast cells"""'], {}), "('Pre-osteoblast cells')\n", (7674, 7698), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7709), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7707, 7709), True, 'import matplotlib.pyplot as plt\n'), ((7748, 7975), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 13]', 'T1', 'result_f2[:, 13]', 'T1', 'result_f3[:, 13]', 'T1', 'result_f4[:, 13]', 'T1', 'result_f5[:, 13]', 'T1', 'result_f6[:, 13]', 'T1', 'result_f7[:, 13]', 'T1', 'result_f8[:, 13]', 'T1', 'result_f9[:, 13]'], {'linewidth': '(3)'}), '(T1, result_f1[:, 13], T1, result_f2[:, 13], T1, result_f3[:, 13],\n T1, result_f4[:, 13], T1, result_f5[:, 13], T1, result_f6[:, 13], T1,\n result_f7[:, 13], T1, result_f8[:, 13], T1, result_f9[:, 13], linewidth=3)\n', (7756, 7975), True, 'import matplotlib.pyplot as plt\n'), ((7952, 7977), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (7962, 7977), True, 'import matplotlib.pyplot as plt\n'), ((7978, 8008), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osteoblast cells"""'], {}), "('Osteoblast cells')\n", (7988, 8008), True, 'import matplotlib.pyplot as plt\n'), ((8009, 8019), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8017, 8019), True, 'import matplotlib.pyplot as plt\n'), ((8058, 8285), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 14]', 'T1', 'result_f2[:, 14]', 'T1', 'result_f3[:, 14]', 'T1', 'result_f4[:, 14]', 'T1', 'result_f5[:, 14]', 'T1', 'result_f6[:, 14]', 'T1', 'result_f7[:, 14]', 'T1', 'result_f8[:, 14]', 'T1', 'result_f9[:, 14]'], {'linewidth': '(3)'}), '(T1, result_f1[:, 14], T1, result_f2[:, 14], T1, result_f3[:, 14],\n T1, result_f4[:, 14], T1, result_f5[:, 14], T1, result_f6[:, 14], T1,\n result_f7[:, 14], T1, 
result_f8[:, 14], T1, result_f9[:, 14], linewidth=3)\n', (8066, 8285), True, 'import matplotlib.pyplot as plt\n'), ((8262, 8287), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (8272, 8287), True, 'import matplotlib.pyplot as plt\n'), ((8288, 8318), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Osteoclast cells"""'], {}), "('Osteoclast cells')\n", (8298, 8318), True, 'import matplotlib.pyplot as plt\n'), ((8319, 8329), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8327, 8329), True, 'import matplotlib.pyplot as plt\n'), ((8368, 8434), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f1[:, 10]', '"""--"""'], {'color': '"""magenta"""', 'linewidth': '(3)'}), "(T1, result_f1[:, 10], '--', color='magenta', linewidth=3)\n", (8376, 8434), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8493), 'matplotlib.pyplot.plot', 'plt.plot', (['T1', 'result_f2[:, 10]'], {'color': '"""purple"""', 'linewidth': '(3)'}), "(T1, result_f2[:, 10], color='purple', linewidth=3)\n", (8442, 8493), True, 'import matplotlib.pyplot as plt\n'), ((8495, 8520), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (days)"""'], {}), "('Time (days)')\n", (8505, 8520), True, 'import matplotlib.pyplot as plt\n'), ((8521, 8553), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Wnt10b fold change"""'], {}), "('Wnt10b fold change')\n", (8531, 8553), True, 'import matplotlib.pyplot as plt\n'), ((8554, 8603), 'matplotlib.pyplot.legend', 'plt.legend', (["['without butyrate', 'with butyrate']"], {}), "(['without butyrate', 'with butyrate'])\n", (8564, 8603), True, 'import matplotlib.pyplot as plt\n'), ((8603, 8613), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8611, 8613), True, 'import matplotlib.pyplot as plt\n'), ((1443, 1477), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (1451, 1477), True, 'import numpy as np\n'), ((1479, 1513), 'numpy.trapz', 'np.trapz', ([], {'y': 
'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (1487, 1513), True, 'import numpy as np\n'), ((1522, 1556), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 13]', 'x': 'T1'}), '(y=result_f2[:, 13], x=T1)\n', (1530, 1556), True, 'import numpy as np\n'), ((1558, 1592), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 14]', 'x': 'T1'}), '(y=result_f2[:, 14], x=T1)\n', (1566, 1592), True, 'import numpy as np\n'), ((1601, 1635), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 13]', 'x': 'T1'}), '(y=result_f3[:, 13], x=T1)\n', (1609, 1635), True, 'import numpy as np\n'), ((1637, 1671), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 14]', 'x': 'T1'}), '(y=result_f3[:, 14], x=T1)\n', (1645, 1671), True, 'import numpy as np\n'), ((1680, 1714), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 13]', 'x': 'T1'}), '(y=result_f4[:, 13], x=T1)\n', (1688, 1714), True, 'import numpy as np\n'), ((1716, 1750), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 14]', 'x': 'T1'}), '(y=result_f4[:, 14], x=T1)\n', (1724, 1750), True, 'import numpy as np\n'), ((1759, 1793), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 13]', 'x': 'T1'}), '(y=result_f5[:, 13], x=T1)\n', (1767, 1793), True, 'import numpy as np\n'), ((1795, 1829), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 14]', 'x': 'T1'}), '(y=result_f5[:, 14], x=T1)\n', (1803, 1829), True, 'import numpy as np\n'), ((1838, 1872), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 13]', 'x': 'T1'}), '(y=result_f6[:, 13], x=T1)\n', (1846, 1872), True, 'import numpy as np\n'), ((1874, 1908), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 14]', 'x': 'T1'}), '(y=result_f6[:, 14], x=T1)\n', (1882, 1908), True, 'import numpy as np\n'), ((1917, 1951), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 13]', 'x': 'T1'}), '(y=result_f7[:, 13], x=T1)\n', (1925, 1951), True, 'import numpy as np\n'), ((1953, 1987), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 14]', 'x': 'T1'}), 
'(y=result_f7[:, 14], x=T1)\n', (1961, 1987), True, 'import numpy as np\n'), ((1996, 2030), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 13]', 'x': 'T1'}), '(y=result_f8[:, 13], x=T1)\n', (2004, 2030), True, 'import numpy as np\n'), ((2032, 2066), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 14]', 'x': 'T1'}), '(y=result_f8[:, 14], x=T1)\n', (2040, 2066), True, 'import numpy as np\n'), ((2075, 2109), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 13]', 'x': 'T1'}), '(y=result_f9[:, 13], x=T1)\n', (2083, 2109), True, 'import numpy as np\n'), ((2111, 2145), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 14]', 'x': 'T1'}), '(y=result_f9[:, 14], x=T1)\n', (2119, 2145), True, 'import numpy as np\n'), ((2587, 2621), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 12]', 'x': 'T1'}), '(y=result_f1[:, 12], x=T1)\n', (2595, 2621), True, 'import numpy as np\n'), ((2623, 2657), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (2631, 2657), True, 'import numpy as np\n'), ((2666, 2700), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 12]', 'x': 'T1'}), '(y=result_f2[:, 12], x=T1)\n', (2674, 2700), True, 'import numpy as np\n'), ((2702, 2736), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 13]', 'x': 'T1'}), '(y=result_f2[:, 13], x=T1)\n', (2710, 2736), True, 'import numpy as np\n'), ((2745, 2779), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 12]', 'x': 'T1'}), '(y=result_f3[:, 12], x=T1)\n', (2753, 2779), True, 'import numpy as np\n'), ((2781, 2815), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 13]', 'x': 'T1'}), '(y=result_f3[:, 13], x=T1)\n', (2789, 2815), True, 'import numpy as np\n'), ((2824, 2858), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 12]', 'x': 'T1'}), '(y=result_f4[:, 12], x=T1)\n', (2832, 2858), True, 'import numpy as np\n'), ((2860, 2894), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 13]', 'x': 'T1'}), '(y=result_f4[:, 13], x=T1)\n', (2868, 
2894), True, 'import numpy as np\n'), ((2903, 2937), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 12]', 'x': 'T1'}), '(y=result_f5[:, 12], x=T1)\n', (2911, 2937), True, 'import numpy as np\n'), ((2939, 2973), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 13]', 'x': 'T1'}), '(y=result_f5[:, 13], x=T1)\n', (2947, 2973), True, 'import numpy as np\n'), ((2982, 3016), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 12]', 'x': 'T1'}), '(y=result_f6[:, 12], x=T1)\n', (2990, 3016), True, 'import numpy as np\n'), ((3018, 3052), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 13]', 'x': 'T1'}), '(y=result_f6[:, 13], x=T1)\n', (3026, 3052), True, 'import numpy as np\n'), ((3061, 3095), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 12]', 'x': 'T1'}), '(y=result_f7[:, 12], x=T1)\n', (3069, 3095), True, 'import numpy as np\n'), ((3097, 3131), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 13]', 'x': 'T1'}), '(y=result_f7[:, 13], x=T1)\n', (3105, 3131), True, 'import numpy as np\n'), ((3140, 3174), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 12]', 'x': 'T1'}), '(y=result_f8[:, 12], x=T1)\n', (3148, 3174), True, 'import numpy as np\n'), ((3176, 3210), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 13]', 'x': 'T1'}), '(y=result_f8[:, 13], x=T1)\n', (3184, 3210), True, 'import numpy as np\n'), ((3219, 3253), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 12]', 'x': 'T1'}), '(y=result_f9[:, 12], x=T1)\n', (3227, 3253), True, 'import numpy as np\n'), ((3255, 3289), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 13]', 'x': 'T1'}), '(y=result_f9[:, 13], x=T1)\n', (3263, 3289), True, 'import numpy as np\n'), ((3800, 3834), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (3808, 3834), True, 'import numpy as np\n'), ((3917, 3951), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (3925, 3951), True, 'import numpy as np\n'), 
((4034, 4068), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4042, 4068), True, 'import numpy as np\n'), ((4151, 4185), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4159, 4185), True, 'import numpy as np\n'), ((4268, 4302), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4276, 4302), True, 'import numpy as np\n'), ((4385, 4419), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4393, 4419), True, 'import numpy as np\n'), ((4502, 4536), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4510, 4536), True, 'import numpy as np\n'), ((4619, 4653), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4627, 4653), True, 'import numpy as np\n'), ((4736, 4770), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4744, 4770), True, 'import numpy as np\n'), ((5280, 5314), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5288, 5314), True, 'import numpy as np\n'), ((5397, 5431), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5405, 5431), True, 'import numpy as np\n'), ((5514, 5548), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5522, 5548), True, 'import numpy as np\n'), ((5631, 5665), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5639, 5665), True, 'import numpy as np\n'), ((5748, 5782), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5756, 5782), True, 'import numpy as np\n'), ((5865, 5899), 'numpy.trapz', 'np.trapz', 
([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5873, 5899), True, 'import numpy as np\n'), ((5982, 6016), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5990, 6016), True, 'import numpy as np\n'), ((6099, 6133), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (6107, 6133), True, 'import numpy as np\n'), ((6216, 6250), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (6224, 6250), True, 'import numpy as np\n'), ((6652, 6676), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (6661, 6676), True, 'import numpy as np\n'), ((7099, 7123), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (7108, 7123), True, 'import numpy as np\n'), ((7408, 7432), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (7417, 7432), True, 'import numpy as np\n'), ((7722, 7746), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (7731, 7746), True, 'import numpy as np\n'), ((8032, 8056), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (8041, 8056), True, 'import numpy as np\n'), ((8342, 8366), 'numpy.arange', 'np.arange', (['(0)', '(30)'], {'step': '(7)'}), '(0, 30, step=7)\n', (8351, 8366), True, 'import numpy as np\n'), ((3727, 3761), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (3735, 3761), True, 'import numpy as np\n'), ((3763, 3797), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (3771, 3797), True, 'import numpy as np\n'), ((3844, 3878), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 13]', 'x': 'T1'}), '(y=result_f2[:, 13], x=T1)\n', (3852, 3878), True, 'import numpy as np\n'), ((3880, 3914), 
'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (3888, 3914), True, 'import numpy as np\n'), ((3961, 3995), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 13]', 'x': 'T1'}), '(y=result_f3[:, 13], x=T1)\n', (3969, 3995), True, 'import numpy as np\n'), ((3997, 4031), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4005, 4031), True, 'import numpy as np\n'), ((4078, 4112), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 13]', 'x': 'T1'}), '(y=result_f4[:, 13], x=T1)\n', (4086, 4112), True, 'import numpy as np\n'), ((4114, 4148), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4122, 4148), True, 'import numpy as np\n'), ((4195, 4229), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 13]', 'x': 'T1'}), '(y=result_f5[:, 13], x=T1)\n', (4203, 4229), True, 'import numpy as np\n'), ((4231, 4265), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4239, 4265), True, 'import numpy as np\n'), ((4312, 4346), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 13]', 'x': 'T1'}), '(y=result_f6[:, 13], x=T1)\n', (4320, 4346), True, 'import numpy as np\n'), ((4348, 4382), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4356, 4382), True, 'import numpy as np\n'), ((4429, 4463), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 13]', 'x': 'T1'}), '(y=result_f7[:, 13], x=T1)\n', (4437, 4463), True, 'import numpy as np\n'), ((4465, 4499), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4473, 4499), True, 'import numpy as np\n'), ((4546, 4580), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 13]', 'x': 'T1'}), '(y=result_f8[:, 13], x=T1)\n', (4554, 4580), True, 'import numpy as np\n'), ((4582, 4616), 'numpy.trapz', 'np.trapz', ([], {'y': 
'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4590, 4616), True, 'import numpy as np\n'), ((4663, 4697), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 13]', 'x': 'T1'}), '(y=result_f9[:, 13], x=T1)\n', (4671, 4697), True, 'import numpy as np\n'), ((4699, 4733), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 13]', 'x': 'T1'}), '(y=result_f1[:, 13], x=T1)\n', (4707, 4733), True, 'import numpy as np\n'), ((5207, 5241), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5215, 5241), True, 'import numpy as np\n'), ((5243, 5277), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5251, 5277), True, 'import numpy as np\n'), ((5324, 5358), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f2[:, 14]', 'x': 'T1'}), '(y=result_f2[:, 14], x=T1)\n', (5332, 5358), True, 'import numpy as np\n'), ((5360, 5394), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5368, 5394), True, 'import numpy as np\n'), ((5441, 5475), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f3[:, 14]', 'x': 'T1'}), '(y=result_f3[:, 14], x=T1)\n', (5449, 5475), True, 'import numpy as np\n'), ((5477, 5511), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5485, 5511), True, 'import numpy as np\n'), ((5558, 5592), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f4[:, 14]', 'x': 'T1'}), '(y=result_f4[:, 14], x=T1)\n', (5566, 5592), True, 'import numpy as np\n'), ((5594, 5628), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5602, 5628), True, 'import numpy as np\n'), ((5675, 5709), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f5[:, 14]', 'x': 'T1'}), '(y=result_f5[:, 14], x=T1)\n', (5683, 5709), True, 'import numpy as np\n'), ((5711, 5745), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), 
'(y=result_f1[:, 14], x=T1)\n', (5719, 5745), True, 'import numpy as np\n'), ((5792, 5826), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f6[:, 14]', 'x': 'T1'}), '(y=result_f6[:, 14], x=T1)\n', (5800, 5826), True, 'import numpy as np\n'), ((5828, 5862), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5836, 5862), True, 'import numpy as np\n'), ((5909, 5943), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f7[:, 14]', 'x': 'T1'}), '(y=result_f7[:, 14], x=T1)\n', (5917, 5943), True, 'import numpy as np\n'), ((5945, 5979), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (5953, 5979), True, 'import numpy as np\n'), ((6026, 6060), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f8[:, 14]', 'x': 'T1'}), '(y=result_f8[:, 14], x=T1)\n', (6034, 6060), True, 'import numpy as np\n'), ((6062, 6096), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (6070, 6096), True, 'import numpy as np\n'), ((6143, 6177), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f9[:, 14]', 'x': 'T1'}), '(y=result_f9[:, 14], x=T1)\n', (6151, 6177), True, 'import numpy as np\n'), ((6179, 6213), 'numpy.trapz', 'np.trapz', ([], {'y': 'result_f1[:, 14]', 'x': 'T1'}), '(y=result_f1[:, 14], x=T1)\n', (6187, 6213), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for running distributed actor/learner tests."""
import functools
from absl import logging
import numpy as np
import reverb
import tensorflow.compat.v2 as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_gym
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import sequential
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.specs import tensor_spec
from tf_agents.train import actor
from tf_agents.train.utils import replay_buffer_utils
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
def configure_logical_cpus():
  """Configures exactly 4 logical CPUs for the first physical CPU.

  Assumes no logical configuration exists or it was configured the same way.

  **Note**: The reason why the number of logical CPUs fixed is because
  reconfiguring the number of logical CPUs once the underlying runtime has been
  initialized is not supported (raises `RuntimeError`). So, with this choice it
  is ensured that tests running in the same process calling this function
  multiple times do not break.
  """
  first_cpu = tf.config.list_physical_devices('CPU')[0]
  try:
    logical_devices = [
        tf.config.experimental.VirtualDeviceConfiguration() for _ in range(4)
    ]
    tf.config.experimental.set_virtual_device_configuration(
        first_cpu, logical_devices=logical_devices)
    logging.info(
        'No current virtual device configuration. Defining 4 virtual CPUs on '
        'the first physical one.')
  except RuntimeError:
    # The runtime was already initialized with a virtual device configuration;
    # that configuration cannot be changed, which is fine as long as it was
    # set up the same way (see the docstring above).
    current_config = tf.config.experimental.get_virtual_device_configuration(
        first_cpu)
    # `logging.warn` is a deprecated alias; use `logging.warning` instead.
    logging.warning(
        'The following virtual device configuration already exists: %s which '
        'resulted this call to fail with `RuntimeError` since it is not '
        'possible to reconfigure it after runtime initialization. It is '
        'probably safe to ignore.', current_config)
def get_cartpole_env_and_specs():
  """Loads a CartPole-v0 environment and returns it along with its specs.

  Returns:
    A `(environment, action_tensor_spec, time_step_tensor_spec)` tuple.
  """
  environment = suite_gym.load('CartPole-v0')
  _, action_tensor_spec, time_step_tensor_spec = spec_utils.get_tensor_specs(
      environment)
  return environment, action_tensor_spec, time_step_tensor_spec
def build_dummy_sequential_net(fc_layer_params, action_spec):
  """Build a dummy sequential network.

  Args:
    fc_layer_params: Iterable of ints, one hidden Dense layer per entry.
    action_spec: Bounded action spec; the number of output logits is derived
      from its `[minimum, maximum]` range.

  Returns:
    A `sequential.Sequential` network mapping observations to action logits.
  """
  num_actions = action_spec.maximum - action_spec.minimum + 1

  def _hidden_layer(units):
    # Hidden layers: ReLU with truncated-normal variance-scaling initializer.
    return tf.keras.layers.Dense(
        units,
        activation=tf.keras.activations.relu,
        kernel_initializer=tf.compat.v1.variance_scaling_initializer(
            scale=2.0, mode='fan_in', distribution='truncated_normal'))

  def _logits_layer(units):
    # Output layer: linear activation, small uniform weights, negative bias.
    return tf.keras.layers.Dense(
        units,
        activation=None,
        kernel_initializer=tf.random_uniform_initializer(
            minval=-0.03, maxval=0.03),
        bias_initializer=tf.constant_initializer(-0.2))

  layers = [_hidden_layer(num_units) for num_units in fc_layer_params]
  layers.append(_logits_layer(num_actions))
  return sequential.Sequential(layers)
def create_ppo_agent_and_dataset_fn(action_spec, time_step_spec, train_step,
                                    batch_size):
  """Builds and returns a dummy PPO Agent, dataset and dataset function.

  Args:
    action_spec: Unused; the agent is built with a fixed internal action spec.
    time_step_spec: Unused; the agent is built with a fixed internal spec.
    train_step: Train step counter variable passed to the agent.
    batch_size: Unused.

  Returns:
    A `(agent, dataset, dataset_fn, training_data_spec)` tuple.
  """
  del action_spec  # Unused.
  del time_step_spec  # Unused.
  del batch_size  # Unused.

  # No arbitrary spec supported: PPO here is wired against fixed dummy specs
  # (observation of shape [2], scalar continuous action bounded in [-1, 1]).
  obs_spec = tensor_spec.TensorSpec([2], tf.float32)
  ts_spec = ts.time_step_spec(obs_spec)
  act_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)

  actor_net = actor_distribution_network.ActorDistributionNetwork(
      obs_spec,
      act_spec,
      fc_layer_params=(100,),
      activation_fn=tf.keras.activations.tanh)

  value_net = value_network.ValueNetwork(
      obs_spec, fc_layer_params=(100,), activation_fn=tf.keras.activations.tanh)

  # Most PPO features (GAE, normalization, entropy bonus) are disabled so the
  # agent stays as simple and deterministic as possible for tests.
  agent = ppo_clip_agent.PPOClipAgent(
      ts_spec,
      act_spec,
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      actor_net=actor_net,
      value_net=value_net,
      entropy_regularization=0.0,
      importance_ratio_clipping=0.2,
      normalize_observations=False,
      normalize_rewards=False,
      use_gae=False,
      use_td_lambda_return=False,
      num_epochs=1,
      debug_summaries=False,
      summarize_grads_and_vars=False,
      train_step_counter=train_step,
      compute_value_and_advantage_in_train=False)

  def _create_experience(_):
    # Builds one fixed batch of experience: batch size 2, 3 time steps each.
    observations = tf.constant([
        [[1, 2], [3, 4], [5, 6]],
        [[1, 2], [3, 4], [5, 6]],
    ],
                               dtype=tf.float32)
    mid_time_step_val = ts.StepType.MID.tolist()
    time_steps = ts.TimeStep(
        step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
        reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
        discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
        observation=observations)
    actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)

    # Normal-distribution parameters of the collect policy at each step.
    action_distribution_parameters = {
        'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
        'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
    }
    value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
                              dtype=tf.float32)
    policy_info = {
        'dist_params': action_distribution_parameters,
    }
    policy_info['value_prediction'] = value_preds
    experience = trajectory.Trajectory(time_steps.step_type, observations,
                                      actions, policy_info,
                                      time_steps.step_type, time_steps.reward,
                                      time_steps.discount)
    # PPO requires advantages/returns to be precomputed before training.
    return agent._preprocess(experience)  # pylint: disable=protected-access

  dataset = tf.data.Dataset.from_tensor_slices([[i] for i in range(100)
                                      ]).map(_create_experience)
  # Pair each element with a monotonically increasing sample id, matching the
  # (experience, sample_info) structure a Reverb dataset would yield.
  dataset = tf.data.Dataset.zip((dataset, tf.data.experimental.Counter()))
  dataset_fn = lambda: dataset

  return agent, dataset, dataset_fn, agent.training_data_spec
def create_dqn_agent_and_dataset_fn(action_spec, time_step_spec, train_step,
                                    batch_size):
  """Builds and returns a dataset function for DQN Agent.

  Args:
    action_spec: Action spec used to size the Q-network and build the agent.
    time_step_spec: Time step spec of the environment.
    train_step: Train step counter variable passed to the agent.
    batch_size: Batch size applied by the returned `dataset_fn`.

  Returns:
    A `(agent, dataset, dataset_fn, collect_data_spec)` tuple.
  """
  q_net = build_dummy_sequential_net(fc_layer_params=(100,),
                                     action_spec=action_spec)

  agent = dqn_agent.DqnAgent(
      time_step_spec,
      action_spec,
      q_network=q_net,
      optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
      train_step_counter=train_step)
  agent.initialize()

  def make_item(_):
    # Deterministic dummy trajectory (fixed seed) with outer batch dim of 2.
    traj = tensor_spec.sample_spec_nest(
        agent.collect_data_spec, seed=123, outer_dims=[2])

    def scale_observation_only(item):
      # Scale float values in the sampled item by large value to avoid NaNs.
      if item.dtype == tf.float32:
        return tf.math.divide(item, 1.e+22)
      else:
        return item

    return tf.nest.map_structure(scale_observation_only, traj)

  # Build the 100-element index list with a comprehension instead of a manual
  # append loop. Pair each trajectory with a monotonically increasing sample
  # id, matching the (experience, sample_info) structure of a Reverb dataset.
  dataset = tf.data.Dataset.zip(
      (tf.data.Dataset.from_tensor_slices([[i] for i in range(100)]).map(
          make_item),
       tf.data.experimental.Counter()))
  dataset_fn = lambda: dataset.batch(batch_size)

  return agent, dataset, dataset_fn, agent.collect_data_spec
def build_actor(root_dir, env, agent, rb_observer, train_step):
  """Builds the Actor.

  Args:
    root_dir: Base directory; summaries are written under `root_dir + 'actor'`.
    env: Environment the actor steps.
    agent: Agent whose collect policy drives the actor.
    rb_observer: Replay-buffer observer receiving collected experience.
    train_step: Train step counter variable.

  Returns:
    A configured `actor.Actor` running one step per `run()` call.
  """
  # Wrap the TF collect policy so it can be stepped eagerly from Python.
  eager_collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
      agent.collect_policy, use_tf_function=True)
  summary_dir = root_dir + 'actor'
  return actor.Actor(
      env,
      eager_collect_policy,
      train_step,
      steps_per_run=1,
      metrics=actor.collect_metrics(10),
      summary_dir=summary_dir,
      observers=[rb_observer])
def get_actor_thread(test_case, reverb_server_port, num_iterations=10):
  """Returns a thread that runs an Actor.

  The thread builds a CartPole environment, a DQN agent, a Reverb replay
  buffer observer and a variable container — all talking to the Reverb server
  at `localhost:reverb_server_port` — then runs the actor `num_iterations`
  times. The thread is returned unstarted.

  Args:
    test_case: A test case instance providing `create_tempdir()` and
      `checkedThread()` (e.g. a `tf.test.TestCase`).
    reverb_server_port: Port of an already-running Reverb server hosting both
      the replay buffer table and the variable container table.
    num_iterations: Number of times to call `test_actor.run()`.

  Returns:
    An unstarted checked thread that runs the actor loop when started.
  """
  def build_and_run_actor():
    root_dir = test_case.create_tempdir().full_path
    env, action_tensor_spec, time_step_tensor_spec = (
        get_cartpole_env_and_specs())

    train_step = train_utils.create_train_step()

    q_net = build_dummy_sequential_net(fc_layer_params=(100,),
                                       action_spec=action_tensor_spec)

    agent = dqn_agent.DqnAgent(
        time_step_tensor_spec,
        action_tensor_spec,
        q_network=q_net,
        optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
        train_step_counter=train_step)

    # Observer that writes collected trajectories into the Reverb replay
    # buffer table on the shared server.
    _, rb_observer = (
        replay_buffer_utils.get_reverb_buffer_and_observer(
            agent.collect_data_spec,
            table_name=reverb_replay_buffer.DEFAULT_TABLE,
            sequence_length=2,
            reverb_server_address='localhost:{}'.format(reverb_server_port)))

    variable_container = reverb_variable_container.ReverbVariableContainer(
        server_address='localhost:{}'.format(reverb_server_port),
        table_names=[reverb_variable_container.DEFAULT_TABLE])

    test_actor = build_actor(
        root_dir, env, agent, rb_observer, train_step)

    # Seed the variable container with the initial policy weights and train
    # step so a learner/actor pair starts from a consistent state.
    variables_dict = {
        reverb_variable_container.POLICY_KEY: agent.collect_policy.variables(),
        reverb_variable_container.TRAIN_STEP_KEY: train_step
    }
    variable_container.update(variables_dict)

    for _ in range(num_iterations):
      test_actor.run()

  actor_thread = test_case.checkedThread(target=build_and_run_actor)
  return actor_thread
def check_variables_different(test_case, old_vars_numpy, new_vars_numpy):
  """Tests whether the two sets of variables are different.

  Useful for checking if variables were updated, i.e. a train step was run.

  Args:
    test_case: an instande of tf.test.TestCase for assertions
    old_vars_numpy: numpy representation of old variables
    new_vars_numpy: numpy representation of new variables
  """
  # A leaf counts as changed unless every element compares equal.
  differs = lambda lhs, rhs: not np.equal(lhs, rhs).all()
  per_leaf_changed = tf.nest.flatten(
      tf.nest.map_structure(differs, old_vars_numpy, new_vars_numpy))
  # Assert that at least one leaf changed.
  test_case.assertTrue(np.any(per_leaf_changed))
def check_variables_same(test_case, old_vars_numpy, new_vars_numpy):
  """Tests whether the two sets of variables are the same.

  Useful for checking if variables were not updated, i.e. a loss step was run.

  Args:
    test_case: an instande of tf.test.TestCase for assertions
    old_vars_numpy: numpy representation of old variables
    new_vars_numpy: numpy representation of new variables
  """
  # A leaf counts as unchanged only when every element compares equal.
  identical = lambda lhs, rhs: np.equal(lhs, rhs).all()
  per_leaf_same = tf.nest.flatten(
      tf.nest.map_structure(identical, old_vars_numpy, new_vars_numpy))
  # Assert that every single leaf is unchanged.
  test_case.assertTrue(np.all(per_leaf_same))
def create_reverb_server_for_replay_buffer_and_variable_container(
    collect_policy, train_step, replay_buffer_capacity, port):
    """Sets up one Reverb server hosting both the replay buffer and the
    variable container.

    Args:
        collect_policy: policy whose variables define the container signature.
        train_step: train step variable stored alongside the policy weights.
        replay_buffer_capacity: maximum number of items in the replay table.
        port: port the Reverb server listens on.

    Returns:
        The started `reverb.Server` with the two tables registered.
    """
    # Signature for the variable container holding the policy weights.
    variables = {
        reverb_variable_container.POLICY_KEY: collect_policy.variables(),
        reverb_variable_container.TRAIN_STEP_KEY: train_step
    }
    variable_container_signature = tf.nest.map_structure(
        lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
        variables)
    # Signature for the replay buffer holding observed experience.
    replay_buffer_signature = tensor_spec.from_spec(
        collect_policy.collect_data_spec)
    # TODO(b/188427258) Add time dimension when using Reverb.TrajectoryWriters.
    # replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)

    # Replay buffer table storing experience.
    experience_table = reverb.Table(
        name=reverb_replay_buffer.DEFAULT_TABLE,
        sampler=reverb.selectors.Uniform(),
        remover=reverb.selectors.Fifo(),
        # TODO(b/159073060): Set rate limiter for SAC properly.
        rate_limiter=reverb.rate_limiters.MinSize(1),
        max_size=replay_buffer_capacity,
        max_times_sampled=0,
        signature=replay_buffer_signature,
    )
    # Variable container table storing the policy parameters.
    variable_table = reverb.Table(
        name=reverb_variable_container.DEFAULT_TABLE,
        sampler=reverb.selectors.Uniform(),
        remover=reverb.selectors.Fifo(),
        rate_limiter=reverb.rate_limiters.MinSize(1),
        max_size=1,
        max_times_sampled=0,
        signature=variable_container_signature,
    )
    # Create and start the server hosting both tables.
    return reverb.Server(tables=[experience_table, variable_table], port=port)
| [
"tensorflow.compat.v2.config.list_physical_devices",
"tensorflow.compat.v2.nest.map_structure",
"tf_agents.specs.tensor_spec.TensorSpec",
"tf_agents.environments.suite_gym.load",
"tf_agents.policies.py_tf_eager_policy.PyTFEagerPolicy",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.keras.optimiz... | [((3022, 3051), 'tf_agents.environments.suite_gym.load', 'suite_gym.load', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (3036, 3051), False, 'from tf_agents.environments import suite_gym\n'), ((3110, 3142), 'tf_agents.train.utils.spec_utils.get_tensor_specs', 'spec_utils.get_tensor_specs', (['env'], {}), '(env)\n', (3137, 3142), False, 'from tf_agents.train.utils import spec_utils\n'), ((4303, 4342), 'tf_agents.specs.tensor_spec.TensorSpec', 'tensor_spec.TensorSpec', (['[2]', 'tf.float32'], {}), '([2], tf.float32)\n', (4325, 4342), False, 'from tf_agents.specs import tensor_spec\n'), ((4355, 4382), 'tf_agents.trajectories.time_step.time_step_spec', 'ts.time_step_spec', (['obs_spec'], {}), '(obs_spec)\n', (4372, 4382), True, 'from tf_agents.trajectories import time_step as ts\n'), ((4396, 4449), 'tf_agents.specs.tensor_spec.BoundedTensorSpec', 'tensor_spec.BoundedTensorSpec', (['[1]', 'tf.float32', '(-1)', '(1)'], {}), '([1], tf.float32, -1, 1)\n', (4425, 4449), False, 'from tf_agents.specs import tensor_spec\n'), ((4464, 4604), 'tf_agents.networks.actor_distribution_network.ActorDistributionNetwork', 'actor_distribution_network.ActorDistributionNetwork', (['obs_spec', 'act_spec'], {'fc_layer_params': '(100,)', 'activation_fn': 'tf.keras.activations.tanh'}), '(obs_spec, act_spec,\n fc_layer_params=(100,), activation_fn=tf.keras.activations.tanh)\n', (4515, 4604), False, 'from tf_agents.networks import actor_distribution_network\n'), ((4641, 4747), 'tf_agents.networks.value_network.ValueNetwork', 'value_network.ValueNetwork', (['obs_spec'], {'fc_layer_params': '(100,)', 'activation_fn': 'tf.keras.activations.tanh'}), '(obs_spec, fc_layer_params=(100,), activation_fn=\n tf.keras.activations.tanh)\n', (4667, 4747), False, 'from tf_agents.networks import value_network\n'), ((8368, 8443), 'tf_agents.policies.py_tf_eager_policy.PyTFEagerPolicy', 'py_tf_eager_policy.PyTFEagerPolicy', (['tf_collect_policy'], {'use_tf_function': 
'(True)'}), '(tf_collect_policy, use_tf_function=True)\n', (8402, 8443), False, 'from tf_agents.policies import py_tf_eager_policy\n'), ((12419, 12474), 'tf_agents.specs.tensor_spec.from_spec', 'tensor_spec.from_spec', (['collect_policy.collect_data_spec'], {}), '(collect_policy.collect_data_spec)\n', (12440, 12474), False, 'from tf_agents.specs import tensor_spec\n'), ((2159, 2197), 'tensorflow.compat.v2.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""CPU"""'], {}), "('CPU')\n", (2190, 2197), True, 'import tensorflow.compat.v2 as tf\n'), ((2320, 2423), 'tensorflow.compat.v2.config.experimental.set_virtual_device_configuration', 'tf.config.experimental.set_virtual_device_configuration', (['first_cpu'], {'logical_devices': 'logical_devices'}), '(first_cpu,\n logical_devices=logical_devices)\n', (2375, 2423), True, 'import tensorflow.compat.v2 as tf\n'), ((2433, 2550), 'absl.logging.info', 'logging.info', (['"""No current virtual device configuration. Defining 4 virtual CPUs on the first physical one."""'], {}), "(\n 'No current virtual device configuration. 
Defining 4 virtual CPUs on the first physical one.'\n )\n", (2445, 2550), False, 'from absl import logging\n'), ((5354, 5442), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4], [5, 6]]]'], {'dtype': 'tf.float32'}), '([[[1, 2], [3, 4], [5, 6]], [[1, 2], [3, 4], [5, 6]]], dtype=tf.\n float32)\n', (5365, 5442), True, 'import tensorflow.compat.v2 as tf\n'), ((5516, 5540), 'tf_agents.trajectories.time_step.StepType.MID.tolist', 'ts.StepType.MID.tolist', ([], {}), '()\n', (5538, 5540), True, 'from tf_agents.trajectories import time_step as ts\n'), ((5821, 5886), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[0], [1], [1]], [[0], [1], [1]]]'], {'dtype': 'tf.float32'}), '([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)\n', (5832, 5886), True, 'import tensorflow.compat.v2 as tf\n'), ((6083, 6152), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[9.0, 15.0, 21.0], [9.0, 15.0, 21.0]]'], {'dtype': 'tf.float32'}), '([[9.0, 15.0, 21.0], [9.0, 15.0, 21.0]], dtype=tf.float32)\n', (6094, 6152), True, 'import tensorflow.compat.v2 as tf\n'), ((6326, 6471), 'tf_agents.trajectories.trajectory.Trajectory', 'trajectory.Trajectory', (['time_steps.step_type', 'observations', 'actions', 'policy_info', 'time_steps.step_type', 'time_steps.reward', 'time_steps.discount'], {}), '(time_steps.step_type, observations, actions,\n policy_info, time_steps.step_type, time_steps.reward, time_steps.discount)\n', (6347, 6471), False, 'from tf_agents.trajectories import trajectory\n'), ((7538, 7617), 'tf_agents.specs.tensor_spec.sample_spec_nest', 'tensor_spec.sample_spec_nest', (['agent.collect_data_spec'], {'seed': '(123)', 'outer_dims': '[2]'}), '(agent.collect_data_spec, seed=123, outer_dims=[2])\n', (7566, 7617), False, 'from tf_agents.specs import tensor_spec\n'), ((7866, 7917), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['scale_observation_only', 'traj'], {}), '(scale_observation_only, traj)\n', (7887, 7917), 
True, 'import tensorflow.compat.v2 as tf\n'), ((9018, 9049), 'tf_agents.train.utils.train_utils.create_train_step', 'train_utils.create_train_step', ([], {}), '()\n', (9047, 9049), False, 'from tf_agents.train.utils import train_utils\n'), ((10893, 10955), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['changed', 'old_vars_numpy', 'new_vars_numpy'], {}), '(changed, old_vars_numpy, new_vars_numpy)\n', (10914, 10955), True, 'import tensorflow.compat.v2 as tf\n'), ((11024, 11044), 'numpy.any', 'np.any', (['vars_changed'], {}), '(vars_changed)\n', (11030, 11044), True, 'import numpy as np\n'), ((11573, 11632), 'tensorflow.compat.v2.nest.map_structure', 'tf.nest.map_structure', (['same', 'old_vars_numpy', 'new_vars_numpy'], {}), '(same, old_vars_numpy, new_vars_numpy)\n', (11594, 11632), True, 'import tensorflow.compat.v2 as tf\n'), ((11707, 11724), 'numpy.all', 'np.all', (['vars_same'], {}), '(vars_same)\n', (11713, 11724), True, 'import numpy as np\n'), ((2240, 2291), 'tensorflow.compat.v2.config.experimental.VirtualDeviceConfiguration', 'tf.config.experimental.VirtualDeviceConfiguration', ([], {}), '()\n', (2289, 2291), True, 'import tensorflow.compat.v2 as tf\n'), ((2605, 2671), 'tensorflow.compat.v2.config.experimental.get_virtual_device_configuration', 'tf.config.experimental.get_virtual_device_configuration', (['first_cpu'], {}), '(first_cpu)\n', (2660, 2671), True, 'import tensorflow.compat.v2 as tf\n'), ((2685, 2945), 'absl.logging.warn', 'logging.warn', (['"""The following virtual device configuration already exists: %s which resulted this call to fail with `RuntimeError` since it is not possible to reconfigure it after runtime initialization. It is probably safe to ignore."""', 'current_config'], {}), "(\n 'The following virtual device configuration already exists: %s which resulted this call to fail with `RuntimeError` since it is not possible to reconfigure it after runtime initialization. 
It is probably safe to ignore.'\n , current_config)\n", (2697, 2945), False, 'from absl import logging\n'), ((3477, 3533), 'tensorflow.compat.v2.random_uniform_initializer', 'tf.random_uniform_initializer', ([], {'minval': '(-0.03)', 'maxval': '(0.03)'}), '(minval=-0.03, maxval=0.03)\n', (3506, 3533), True, 'import tensorflow.compat.v2 as tf\n'), ((3569, 3598), 'tensorflow.compat.v2.constant_initializer', 'tf.constant_initializer', (['(-0.2)'], {}), '(-0.2)\n', (3592, 3598), True, 'import tensorflow.compat.v2 as tf\n'), ((3728, 3832), 'tensorflow.compat.v2.compat.v1.variance_scaling_initializer', 'tf.compat.v1.variance_scaling_initializer', ([], {'scale': '(2.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""'}), "(scale=2.0, mode='fan_in',\n distribution='truncated_normal')\n", (3769, 3832), True, 'import tensorflow.compat.v2 as tf\n'), ((4837, 4882), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (4861, 4882), True, 'import tensorflow.compat.v2 as tf\n'), ((5942, 5990), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[[0.0]] * 3] * 2)'], {'dtype': 'tf.float32'}), '([[[0.0]] * 3] * 2, dtype=tf.float32)\n', (5953, 5990), True, 'import tensorflow.compat.v2 as tf\n'), ((6009, 6057), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[[1.0]] * 3] * 2)'], {'dtype': 'tf.float32'}), '([[[1.0]] * 3] * 2, dtype=tf.float32)\n', (6020, 6057), True, 'import tensorflow.compat.v2 as tf\n'), ((6851, 6881), 'tensorflow.compat.v2.data.experimental.Counter', 'tf.data.experimental.Counter', ([], {}), '()\n', (6879, 6881), True, 'import tensorflow.compat.v2 as tf\n'), ((7401, 7446), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (7425, 7446), True, 'import tensorflow.compat.v2 as tf\n'), ((8070, 8100), 'tensorflow.compat.v2.data.experimental.Counter', 'tf.data.experimental.Counter', 
([], {}), '()\n', (8098, 8100), True, 'import tensorflow.compat.v2 as tf\n'), ((8599, 8624), 'tf_agents.train.actor.collect_metrics', 'actor.collect_metrics', (['(10)'], {}), '(10)\n', (8620, 8624), False, 'from tf_agents.train import actor\n'), ((12244, 12295), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['variable.shape'], {'dtype': 'variable.dtype'}), '(variable.shape, dtype=variable.dtype)\n', (12257, 12295), True, 'import tensorflow.compat.v2 as tf\n'), ((5589, 5647), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[mid_time_step_val] * 3] * 2)'], {'dtype': 'tf.int32'}), '([[mid_time_step_val] * 3] * 2, dtype=tf.int32)\n', (5600, 5647), True, 'import tensorflow.compat.v2 as tf\n'), ((5664, 5708), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[1] * 3] * 2)'], {'dtype': 'tf.float32'}), '([[1] * 3] * 2, dtype=tf.float32)\n', (5675, 5708), True, 'import tensorflow.compat.v2 as tf\n'), ((5727, 5771), 'tensorflow.compat.v2.constant', 'tf.constant', (['([[1] * 3] * 2)'], {'dtype': 'tf.float32'}), '([[1] * 3] * 2, dtype=tf.float32)\n', (5738, 5771), True, 'import tensorflow.compat.v2 as tf\n'), ((7793, 7820), 'tensorflow.compat.v2.math.divide', 'tf.math.divide', (['item', '(1e+22)'], {}), '(item, 1e+22)\n', (7807, 7820), True, 'import tensorflow.compat.v2 as tf\n'), ((9320, 9365), 'tensorflow.compat.v2.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (9344, 9365), True, 'import tensorflow.compat.v2 as tf\n'), ((11514, 11528), 'numpy.equal', 'np.equal', (['a', 'b'], {}), '(a, b)\n', (11522, 11528), True, 'import numpy as np\n'), ((8009, 8046), 'tensorflow.compat.v2.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['l'], {}), '(l)\n', (8043, 8046), True, 'import tensorflow.compat.v2 as tf\n'), ((10831, 10845), 'numpy.equal', 'np.equal', (['a', 'b'], {}), '(a, b)\n', (10839, 10845), True, 'import numpy as np\n'), ((12890, 12916), 'reverb.selectors.Uniform', 
'reverb.selectors.Uniform', ([], {}), '()\n', (12914, 12916), False, 'import reverb\n'), ((12940, 12963), 'reverb.selectors.Fifo', 'reverb.selectors.Fifo', ([], {}), '()\n', (12961, 12963), False, 'import reverb\n'), ((13062, 13093), 'reverb.rate_limiters.MinSize', 'reverb.rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (13090, 13093), False, 'import reverb\n'), ((13394, 13420), 'reverb.selectors.Uniform', 'reverb.selectors.Uniform', ([], {}), '()\n', (13418, 13420), False, 'import reverb\n'), ((13444, 13467), 'reverb.selectors.Fifo', 'reverb.selectors.Fifo', ([], {}), '()\n', (13465, 13467), False, 'import reverb\n'), ((13496, 13527), 'reverb.rate_limiters.MinSize', 'reverb.rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (13524, 13527), False, 'import reverb\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
=====================================
4 - Sampling methods: particle filter
=====================================
"""
# %%
# In the previous tutorials we encountered some shortcomings in describing distributions as
# Gaussians, albeit with considerable flexibility in coping with the non-linear transforms.
#
# Sampling methods offer an attractive alternative to such parametric methods in that there is
# no need for complicated though approximate covariance calculations. In this tutorial we look at a
# class of *sequential Monte Carlo sampling* methods, and in particular, the *particle filter*.
#
# Colloquially we can think of a particle filter as a series of point samples being recursed
# through the predict-update stages of a Bayesian filter. The diversity of samples compensates for
# the lack of a covariance estimate, though often at the expense of increased computation
# requirements.
#
# Background
# ----------
#
# In more detail, we seek to approximate the posterior state estimate as a sum of samples, or
# particles,
#
# .. math::
# p(\textbf{x}_{k}|\textbf{z}_{1:k}) \approx
# \sum_{i} w_{k}^i \delta (\textbf{x}_{k} - \textbf{x}_{k}^i)
#
# where :math:`w_{k}^i` are weights such that :math:`\sum\limits_{i} w_{k}^i = 1`. This posterior
# can be calculated, and subsequently maintained, by successive applications of the
# Chapman-Kolmogorov equation and Bayes rule in an analogous manner to the Kalman family of
# filters of previous tutorials. There is considerable flexibility in how to sample from these
# various distributions and the interested reader can refer to [#]_ for more detail.
#
# The present tutorial focuses on a so-called *sequential importance resampling* filter. This is
# facilitated by a number of Stone Soup classes. The weight-update equation is,
#
# .. math::
# w^i_k = w^i_{k-1}
# \frac{p(\mathbf{z}_k|\mathbf{x}^i_k) p(\mathbf{x}^i_k|\mathbf{x}^1_{k-1})}
# {q(\mathbf{x}^i_k|\mathbf{x}^1_{k-1},\mathbf{z}^i_{1:k})}
#
# where :math:`p(\mathbf{z}_k | \mathbf{x}^i_k)` is the likelihood distribution (as defined by the
# :class:`~.MeasurementModel`) and :math:`p(\mathbf{x}^i_k|\mathbf{x}^1_{k-1})` is the transition
# probability distribution (:class:`~.TransitionModel`). The :math:`q(\cdot)` distribution -- the
# importance density -- should approximate the posterior distribution, while still being easy to
# sample from.
#
# A common occurrence in such methods is that of *sample impoverishment*. After a few iterations,
# all but a small number of the particles will have negligible weight. This affects accuracy and
# wastes computation on particles with little effect on the estimate. Many resampling schemes
# exist and are designed to redistribute particles to areas where the posterior probability is
# higher. In Stone Soup such resampling is accomplished by a :class:`~.Resampler`. More detail is
# provided in the
# example below.
# %%
#
# Nearly-constant velocity example
# --------------------------------
# We continue in the same vein as the previous tutorials.
#
# Ground truth
# ^^^^^^^^^^^^
# Import the necessary libraries
import numpy as np
from datetime import datetime
from datetime import timedelta
# Reference wall-clock time used as the timestamp of the initial state.
start_time = datetime.now()
# %%
# Fix the NumPy RNG seed so the simulated noise (and hence the tutorial
# output) is reproducible from run to run.
np.random.seed(1991)
# %%
# Initialise Stone Soup ground-truth and transition models.
from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, \
    ConstantVelocity
from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState
# Nearly-constant-velocity motion in 2D: one ConstantVelocity component per
# spatial dimension, each parameterised by 0.05.
transition_model = CombinedLinearGaussianTransitionModel([ConstantVelocity(0.05),
                                                          ConstantVelocity(0.05)])
# State vector layout is [x, vx, y, vy]; start at the origin with unit velocity.
truth = GroundTruthPath([GroundTruthState([0, 1, 0, 1], timestamp=start_time)])
# %%
# Create the truth path: propagate the previous state through the (noisy)
# transition model at one-second intervals for 20 steps.
for k in range(1, 21):
    truth.append(GroundTruthState(
        transition_model.function(truth[k-1], noise=True, time_interval=timedelta(seconds=1)),
        timestamp=start_time+timedelta(seconds=k)))
# %%
# Plot the ground truth.
from stonesoup.plotter import Plotter
plotter = Plotter()
# Plot x vs y (state-vector indices 0 and 2).
plotter.plot_ground_truths(truth, [0, 2])
# %%
# Initialise the bearing, range sensor using the appropriate measurement model.
from stonesoup.models.measurement.nonlinear import CartesianToBearingRange
from stonesoup.types.detection import Detection
# Fixed sensor position, offset from the target's path.
sensor_x = 50
sensor_y = 0
measurement_model = CartesianToBearingRange(
    ndim_state=4,
    mapping=(0, 2),  # positions live at indices 0 and 2 of the 4-D state
    noise_covar=np.diag([np.radians(0.2), 1]),  # bearing / range noise covariance diagonal
    translation_offset=np.array([[sensor_x], [sensor_y]])  # sensor location
)
# %%
# Populate the measurement array: one noisy (bearing, range) Detection per
# ground-truth state, stamped with that state's time.
measurements = []
for state in truth:
    measurement = measurement_model.function(state, noise=True)
    measurements.append(Detection(measurement, timestamp=state.timestamp,
                                  measurement_model=measurement_model))
# %%
# Plot those measurements
plotter.plot_measurements(measurements, [0, 2])
plotter.fig
plotter.fig
# %%
# Set up the particle filter
# ^^^^^^^^^^^^^^^^^^^^^^^^^^
# Analogously to the Kalman family, we create a :class:`~.ParticlePredictor` and a
# :class:`~.ParticleUpdater` which take responsibility for the predict and update steps
# respectively. These require a :class:`~.TransitionModel` and :class:`~.MeasurementModel` as
# before.
# To cope with sample sparsity we also include a resampler, in this instance
# :class:`~.SystematicResampler`, which is passed to the updater. It should be noted that there are
# many resampling schemes, and almost as many choices as to when to undertake resampling. The
# systematic resampler is described in [#]_, and in what follows below resampling is undertaken
# at each time-step.
from stonesoup.predictor.particle import ParticlePredictor
predictor = ParticlePredictor(transition_model)
from stonesoup.resampler.particle import SystematicResampler
resampler = SystematicResampler()
from stonesoup.updater.particle import ParticleUpdater
# The updater is handed the resampler, so the particle set is resampled as
# part of every update step.
updater = ParticleUpdater(measurement_model, resampler)
# %%
# Initialise a prior
# ^^^^^^^^^^^^^^^^^^
# To start we create a prior estimate. This is a :class:`~.ParticleState` which describes
# the state as a distribution of particles using :class:`~.StateVectors` and weights.
# This is sampled from the Gaussian distribution (using the same parameters we
# had in the previous examples).
from scipy.stats import multivariate_normal
from stonesoup.types.numeric import Probability  # Similar to a float type
from stonesoup.types.state import ParticleState
from stonesoup.types.array import StateVectors
number_particles = 1000
# Sample from the prior Gaussian distribution: mean [0, 1, 0, 1] with a
# diagonal covariance -- one sample row per particle.
samples = multivariate_normal.rvs(np.array([0, 1, 0, 1]),
                        np.diag([1.5, 0.5, 1.5, 0.5]),
                        size=number_particles)
# Create prior particle state. StateVectors expects the state dimension
# first, hence the transpose; all particles start equally weighted so the
# weights sum to 1.
prior = ParticleState(state_vector=StateVectors(samples.T),
                      weight=np.array([Probability(1/number_particles)]*number_particles),
                      timestamp=start_time)
# %%
# Run the tracker
# ^^^^^^^^^^^^^^^
# We now run the predict and update steps, propagating the collection of particles and resampling
# when told to (at every step).
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.track import Track
track = Track()
# Recursive predict / update over the measurement sequence; the posterior of
# each step becomes the prior of the next.
for measurement in measurements:
    prediction = predictor.predict(prior, timestamp=measurement.timestamp)
    # Pair the single prediction with the single measurement (no data
    # association needed in this single-target example).
    hypothesis = SingleHypothesis(prediction, measurement)
    post = updater.update(hypothesis)
    track.append(post)
    prior = track[-1]
# %%
# Plot the resulting track with the sample points at each iteration.
plotter.plot_tracks(track, [0, 2], particle=True)
plotter.fig
# %%
# Key points
# ----------
# 1. Sampling methods offer an attractive alternative to Kalman-based filtering for recursive
# state estimation.
# 2. The particle filter trades off a more subtle quantification of a non-Gaussian
# estimate against increased computational effort.
# 3. Very often particle filters encounter sample impoverishment and require a resampling step.
# %%
# References
# ----------
# .. [#] <NAME>., <NAME>., <NAME>., <NAME>. 2002, Tutorial on Particle Filters
# for Online Nonlinear/Non-Gaussian Bayesian Tracking, IEEE transactions on signal
# processing, vol. 50, no. 2
#
# .. [#] <NAME>., <NAME>., <NAME>. 1999, An improved particle filter for non-linear
# problems, IEE Proc., Radar Sonar Navigation, 146:2–7
# sphinx_gallery_thumbnail_number = 3
| [
"numpy.radians",
"stonesoup.types.hypothesis.SingleHypothesis",
"numpy.random.seed",
"stonesoup.types.groundtruth.GroundTruthState",
"stonesoup.resampler.particle.SystematicResampler",
"stonesoup.plotter.Plotter",
"stonesoup.predictor.particle.ParticlePredictor",
"stonesoup.updater.particle.ParticleUp... | [((3279, 3293), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3291, 3293), False, 'from datetime import datetime\n'), ((3301, 3321), 'numpy.random.seed', 'np.random.seed', (['(1991)'], {}), '(1991)\n', (3315, 3321), True, 'import numpy as np\n'), ((4132, 4141), 'stonesoup.plotter.Plotter', 'Plotter', ([], {}), '()\n', (4139, 4141), False, 'from stonesoup.plotter import Plotter\n'), ((5791, 5826), 'stonesoup.predictor.particle.ParticlePredictor', 'ParticlePredictor', (['transition_model'], {}), '(transition_model)\n', (5808, 5826), False, 'from stonesoup.predictor.particle import ParticlePredictor\n'), ((5900, 5921), 'stonesoup.resampler.particle.SystematicResampler', 'SystematicResampler', ([], {}), '()\n', (5919, 5921), False, 'from stonesoup.resampler.particle import SystematicResampler\n'), ((5987, 6032), 'stonesoup.updater.particle.ParticleUpdater', 'ParticleUpdater', (['measurement_model', 'resampler'], {}), '(measurement_model, resampler)\n', (6002, 6032), False, 'from stonesoup.updater.particle import ParticleUpdater\n'), ((7340, 7347), 'stonesoup.types.track.Track', 'Track', ([], {}), '()\n', (7345, 7347), False, 'from stonesoup.types.track import Track\n'), ((6691, 6713), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (6699, 6713), True, 'import numpy as np\n'), ((6749, 6778), 'numpy.diag', 'np.diag', (['[1.5, 0.5, 1.5, 0.5]'], {}), '([1.5, 0.5, 1.5, 0.5])\n', (6756, 6778), True, 'import numpy as np\n'), ((7473, 7514), 'stonesoup.types.hypothesis.SingleHypothesis', 'SingleHypothesis', (['prediction', 'measurement'], {}), '(prediction, measurement)\n', (7489, 7514), False, 'from stonesoup.types.hypothesis import SingleHypothesis\n'), ((3630, 3652), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.05)'], {}), '(0.05)\n', (3646, 3652), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, 
ConstantVelocity\n'), ((3712, 3734), 'stonesoup.models.transition.linear.ConstantVelocity', 'ConstantVelocity', (['(0.05)'], {}), '(0.05)\n', (3728, 3734), False, 'from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity\n'), ((3762, 3814), 'stonesoup.types.groundtruth.GroundTruthState', 'GroundTruthState', (['[0, 1, 0, 1]'], {'timestamp': 'start_time'}), '([0, 1, 0, 1], timestamp=start_time)\n', (3778, 3814), False, 'from stonesoup.types.groundtruth import GroundTruthPath, GroundTruthState\n'), ((4576, 4610), 'numpy.array', 'np.array', (['[[sensor_x], [sensor_y]]'], {}), '([[sensor_x], [sensor_y]])\n', (4584, 4610), True, 'import numpy as np\n'), ((4778, 4869), 'stonesoup.types.detection.Detection', 'Detection', (['measurement'], {'timestamp': 'state.timestamp', 'measurement_model': 'measurement_model'}), '(measurement, timestamp=state.timestamp, measurement_model=\n measurement_model)\n', (4787, 4869), False, 'from stonesoup.types.detection import Detection\n'), ((6904, 6927), 'stonesoup.types.array.StateVectors', 'StateVectors', (['samples.T'], {}), '(samples.T)\n', (6916, 6927), False, 'from stonesoup.types.array import StateVectors\n'), ((4531, 4546), 'numpy.radians', 'np.radians', (['(0.2)'], {}), '(0.2)\n', (4541, 4546), True, 'import numpy as np\n'), ((3977, 3997), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3986, 3997), False, 'from datetime import timedelta\n'), ((4029, 4049), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'k'}), '(seconds=k)\n', (4038, 4049), False, 'from datetime import timedelta\n'), ((6968, 7001), 'stonesoup.types.numeric.Probability', 'Probability', (['(1 / number_particles)'], {}), '(1 / number_particles)\n', (6979, 7001), False, 'from stonesoup.types.numeric import Probability\n')] |
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import numpy as np
from .tools import k_adjacency, normalize_adjacency_matrix
from .mlp import MLP
from .activation import activation_factory
class MultiScale_GraphConv(nn.Layer):
    """Multi-scale graph convolution.

    Aggregates node features over ``num_scales`` neighbourhood scales of the
    graph described by the binary adjacency matrix ``A_binary``, then mixes
    the concatenated per-scale features with an MLP. Input is laid out as
    (N, C, T, V) -- batch, channels, time, vertices (see ``forward``).
    """

    def __init__(self,
                 num_scales,
                 in_channels,
                 out_channels,
                 A_binary,
                 disentangled_agg=True,
                 use_mask=True,
                 dropout=0,
                 activation='relu'):
        """
        Args:
            num_scales: number of adjacency scales to aggregate over.
            in_channels: channels of the input features (per scale).
            out_channels: channels produced by the output MLP.
            A_binary: binary adjacency matrix of the graph (V x V).
            disentangled_agg: if True, build one k-adjacency matrix per scale;
                otherwise use matrix powers of the self-loop-augmented adjacency.
            use_mask: if True, add a learnable residual mask to the adjacency.
            dropout: dropout rate passed to the MLP.
            activation: activation name passed to the MLP.
        """
        super().__init__()
        self.num_scales = num_scales
        if disentangled_agg:
            # One k-hop adjacency per scale, each row-normalized, stacked
            # along the first axis -> shape (num_scales * V, V).
            A_powers = [k_adjacency(A_binary, k, with_self=True) for k in range(num_scales)]
            A_powers = np.concatenate([normalize_adjacency_matrix(g) for g in A_powers])
        else:
            # Scale k uses the k-th matrix power of the normalized (A + I).
            A_powers = [A_binary + np.eye(len(A_binary)) for k in range(num_scales)]
            A_powers = [normalize_adjacency_matrix(g) for g in A_powers]
            A_powers = [np.linalg.matrix_power(g, k) for k, g in enumerate(A_powers)]
            A_powers = np.concatenate(A_powers)
        self.A_powers = paddle.to_tensor(A_powers)
        self.use_mask = use_mask
        if use_mask:
            # NOTE: the inclusion of residual mask appears to slow down training noticeably
            self.A_res = self.create_parameter(
                shape=self.A_powers.shape,
                default_initializer=nn.initializer.Uniform(low=-1e-6, high=1e-6))
            # torch equivalent kept for reference:
            # self.A_res = nn.init.uniform_(nn.Parameter(torch.Tensor(self.A_powers.shape)), -1e-6, 1e-6)
        self.mlp = MLP(in_channels * num_scales, [out_channels], dropout=dropout, activation=activation)

    def forward(self, x):
        """Applies the multi-scale graph convolution.

        Args:
            x: tensor of shape (N, C, T, V).

        Returns:
            Tensor of shape (N, out_channels, T, V), assuming the MLP maps
            the folded channels to ``out_channels``.
        """
        N, C, T, V = x.shape
        # self.A_powers = self.A_powers.to(x.device)
        A = self.A_powers.astype(x.dtype)
        if self.use_mask:
            # Learnable residual mask on top of the fixed adjacency stack.
            A = A + self.A_res.astype(x.dtype)
        # Equivalent to einsum('vu,nctu->nctv', A, x); since A stacks all
        # scales row-wise, the last axis becomes num_scales * V.
        support = einsum(x,A)
        # Split the stacked scales back out, then fold them into channels.
        support = support.reshape((N, C, T, self.num_scales, V))
        support = support.transpose((0,3,1,2,4)).reshape((N, self.num_scales*C, T, V))
        # support = torch.einsum('vu,nctu->nctv', A, x)
        # support = support.view(N, C, T, self.num_scales, V)
        # support = support.permute(0,3,1,2,4).contiguous().view(N, self.num_scales*C, T, V)
        out = self.mlp(support)
        return out
def einsum(x, A):
    """Computes the contraction 'vu,nctu->nctv' with a matrix product.

    paddle.einsum will only be implemented in release/2.2, so the
    contraction is expressed as matmul(x, A^T) instead.
    """
    _, _, _, u = x.shape
    v, u2 = A.shape
    assert u == u2, "Args of einsum not match!"
    # Right-multiplying by A^T contracts the trailing 'u' axis into 'v'.
    return paddle.matmul(x, A.transpose((1, 0)))
# if __name__ == "__main__":
# from graph.ntu_rgb_d import AdjMatrixGraph
# graph = AdjMatrixGraph()
# A_binary = graph.A_binary
# msgcn = MultiScale_GraphConv(num_scales=15, in_channels=3, out_channels=64, A_binary=A_binary)
# msgcn.forward(torch.randn(16,3,30,25))
| [
"paddle.nn.initializer.Uniform",
"paddle.matmul",
"numpy.linalg.matrix_power",
"paddle.to_tensor",
"numpy.concatenate"
] | [((2654, 2673), 'paddle.matmul', 'paddle.matmul', (['x', 'A'], {}), '(x, A)\n', (2667, 2673), False, 'import paddle\n'), ((1171, 1197), 'paddle.to_tensor', 'paddle.to_tensor', (['A_powers'], {}), '(A_powers)\n', (1187, 1197), False, 'import paddle\n'), ((1119, 1143), 'numpy.concatenate', 'np.concatenate', (['A_powers'], {}), '(A_powers)\n', (1133, 1143), True, 'import numpy as np\n'), ((1033, 1061), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['g', 'k'], {}), '(g, k)\n', (1055, 1061), True, 'import numpy as np\n'), ((1485, 1531), 'paddle.nn.initializer.Uniform', 'nn.initializer.Uniform', ([], {'low': '(-1e-06)', 'high': '(1e-06)'}), '(low=-1e-06, high=1e-06)\n', (1507, 1531), True, 'import paddle.nn as nn\n')] |
'''
TensorFlow Dataset API Example
Reinitializable Iterator
<NAME>
Department of Computer Science
University of Chicago
<EMAIL>
'''
import os
import time

import numpy as np
import tensorflow as tf
class Preprocessor(object):
    """Maps raw uint8 image/label tensors to float model inputs.

    Images are scaled into [0, 1]; labels are one-hot encoded.
    """

    def __init__(self, num_classes):
        # Depth of the one-hot label encoding.
        self.num_classes = num_classes

    def preprocess(self, images, labels):
        """Casts, scales and one-hot encodes one (images, labels) element.

        Args:
            images: tensor, dtype tf.uint8.
            labels: tensor, dtype tf.uint8.

        Returns:
            (images, labels) both as tf.float32 tensors.
        """
        scaled_images = tf.cast(images, tf.float32) / 255
        one_hot_labels = tf.cast(
            tf.one_hot(indices=labels, depth=self.num_classes), tf.float32)
        return scaled_images, one_hot_labels
return images, labels
def conv_net(x, num_classes, dropout, reuse, is_training):
    """Builds the ConvNet graph inside the shared 'ConvNet' variable scope.

    Args:
        x: input batch; reshaped internally to [batch, 28, 28, 1].
        num_classes: size of the output layer.
        dropout: dropout rate applied before the output layer.
        reuse: whether to reuse the 'ConvNet' variable scope (lets the test
            graph share weights with the training graph).
        is_training: enables dropout; when False a final softmax is applied.

    Returns:
        Logits when training (the loss applies softmax itself), class
        probabilities otherwise.
    """
    with tf.variable_scope('ConvNet', reuse=reuse):
        # [batch, 28, 28, 1] image tensor.
        net = tf.reshape(x, shape=[-1, 28, 28, 1])
        # Two conv + max-pool stages.
        net = tf.layers.conv2d(inputs=net, filters=32, kernel_size=5, activation=tf.nn.relu)
        net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
        net = tf.layers.conv2d(inputs=net, filters=64, kernel_size=3, activation=tf.nn.relu)
        net = tf.layers.max_pooling2d(inputs=net, pool_size=2, strides=2)
        # Flatten and run the fully connected head.
        net = tf.layers.flatten(inputs=net)
        net = tf.layers.dense(inputs=net, units=1024)
        # Dropout is a no-op when is_training is False.
        net = tf.layers.dropout(inputs=net, rate=dropout, training=is_training)
        logits = tf.layers.dense(inputs=net, units=num_classes)
        # 'softmax_cross_entropy_with_logits' already applies softmax, so
        # only the inference graph gets an explicit softmax.
        if is_training:
            return logits
        return tf.nn.softmax(logits)
class CNN(object):
    """CNN classifier driven by a single reinitializable tf.data Iterator.

    One iterator is built from the dataset structure; train() and test()
    re-point it at the supplied dataset before running their loops, so the
    same input pipeline feeds both the training and the testing graph.
    """

    def __init__(self, dataset_output_types, dataset_output_shapes, num_classes=10, batch_size=16, dropout=0.5, learning_rate=0.001):
        """Builds both graphs, the optimizer, and starts a session.

        Args:
            dataset_output_types: output_types of the datasets to consume.
            dataset_output_shapes: output_shapes of the datasets to consume.
            num_classes: number of target classes.
            batch_size: mini-batch size (kept for reference; batching is done
                by the datasets themselves).
            dropout: dropout rate used by the training network.
            learning_rate: Adam learning rate.
        """
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.dropout = dropout
        self.learning_rate = learning_rate
        # Reinitializable iterator: one iterator, re-initialized per dataset.
        self.iterator = tf.data.Iterator.from_structure(
            output_types=dataset_output_types, output_shapes=dataset_output_shapes)
        self.images, self.labels = self.iterator.get_next()
        self.model_initializer()
        self.optimizer_initializer()
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def model_initializer(self):
        """Builds train/test networks (shared weights) and the accuracy op."""
        self.outputs_train = conv_net(x=self.images, num_classes=self.num_classes,
                                       dropout=self.dropout, reuse=False, is_training=True)
        # The test graph reuses the training variables and disables dropout.
        self.outputs_test = conv_net(x=self.images, num_classes=self.num_classes,
                                      dropout=0, reuse=True, is_training=False)
        correct_pred = tf.equal(tf.argmax(self.outputs_test, 1), tf.argmax(self.labels, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    def optimizer_initializer(self):
        """Builds the cross-entropy loss and its Adam minimizer."""
        # softmax_cross_entropy_with_logits_v2 expects *unscaled* logits,
        # which is why the training network skips the final softmax.
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.outputs_train, labels=self.labels))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)

    def train(self, train_dataset, num_iterations):
        """Runs num_iterations optimization steps on train_dataset.

        Returns:
            Mean training accuracy over the iterations.
        """
        train_init_operator = self.iterator.make_initializer(train_dataset)
        self.sess.run(train_init_operator)
        train_accuracies = []
        for _ in range(num_iterations):
            _, train_accuracy = self.sess.run([self.optimizer, self.accuracy])
            train_accuracies.append(train_accuracy)
        return np.mean(train_accuracies)

    def test(self, test_dataset, num_iterations):
        """Evaluates num_iterations batches of test_dataset.

        Returns:
            Mean test accuracy over the iterations.
        """
        test_init_operator = self.iterator.make_initializer(test_dataset)
        self.sess.run(test_init_operator)
        test_accuracies = []
        for _ in range(num_iterations):
            test_accuracies.append(self.sess.run(self.accuracy))
        return np.mean(test_accuracies)

    def save(self, directory, filename):
        """Saves a checkpoint under `directory`, creating it if needed.

        Returns:
            The checkpoint path (directory joined with filename).
        """
        # BUG FIX: this method used `os` without the module ever importing
        # it, so save() raised NameError; `import os` is now in the imports.
        os.makedirs(directory, exist_ok=True)
        checkpoint_path = os.path.join(directory, filename)
        self.saver.save(self.sess, checkpoint_path)
        return checkpoint_path

    def load(self, filepath):
        """Restores variables from a previously saved checkpoint."""
        self.saver.restore(self.sess, filepath)
def dataset_generation(images, labels, preprocess, buffer_size = 100000, batch_size = 16, repeat = False, shuffle = False):
    """Build a batched tf.data.Dataset from numpy arrays.

    images / labels: numpy arrays with matching first dimension.
    preprocess: per-example mapping function.
    repeat / shuffle: optionally repeat indefinitely and/or shuffle with
    the given buffer_size before batching.
    """
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    ds = ds.map(preprocess)
    if repeat:
        ds = ds.repeat()
    if shuffle:
        ds = ds.shuffle(buffer_size=buffer_size)
    return ds.batch(batch_size)
def main():
    """Train the CNN on MNIST and print per-epoch train/test accuracy."""
    # Hyper-parameters.
    batch_size, num_classes = 16, 10
    dropout, learning_rate = 0.5, 0.001
    random_seed = 0
    num_epochs = 20
    num_iterations = 20
    # Seed both TF and NumPy for reproducibility.
    tf.set_random_seed(random_seed)
    np.random.seed(random_seed)
    (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
    preprocessor = Preprocessor(num_classes=num_classes)
    train_dataset = dataset_generation(images=train_images, labels=train_labels, preprocess=preprocessor.preprocess, batch_size=16, repeat=True, shuffle=False)
    test_dataset = dataset_generation(images=test_images, labels=test_labels, preprocess=preprocessor.preprocess, batch_size=16, repeat=False, shuffle=False)
    model = CNN(dataset_output_types=train_dataset.output_types, dataset_output_shapes=train_dataset.output_shapes, num_classes=num_classes, batch_size=batch_size, dropout=dropout, learning_rate=learning_rate)
    for epoch in range(num_epochs):
        train_acc = model.train(train_dataset=train_dataset, num_iterations=num_iterations)
        test_acc = model.test(test_dataset=test_dataset, num_iterations=num_iterations)
        print('Epoch: {:03d} | Train Accuracy: {:.2f} | Test Accuracy: {:.2f}'.format(epoch, train_acc, test_acc))
# Script entry point: run training and report wall-clock time.
if __name__ == '__main__':
    # NOTE(review): relies on a `time` import not visible in this chunk — confirm.
    time_start = time.time()
    main()
    time_end = time.time()
    time_elapsed = time_end - time_start
    # Format elapsed seconds as HH:MM:SS.
    print("Time Elapsed: %02d:%02d:%02d" % (time_elapsed // 3600, (time_elapsed % 3600 // 60), (time_elapsed % 60 // 1)))
| [
"numpy.random.seed",
"tensorflow.reshape",
"numpy.mean",
"tensorflow.layers.max_pooling2d",
"tensorflow.nn.softmax",
"tensorflow.one_hot",
"tensorflow.variable_scope",
"tensorflow.set_random_seed",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.cast",
"tensorflow.data.Iterator... | [((5449, 5501), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(images, labels)'], {}), '((images, labels))\n', (5483, 5501), True, 'import tensorflow as tf\n'), ((5897, 5928), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['random_seed'], {}), '(random_seed)\n', (5915, 5928), True, 'import tensorflow as tf\n'), ((5933, 5960), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (5947, 5960), True, 'import numpy as np\n'), ((6025, 6060), 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), '()\n', (6058, 6060), True, 'import tensorflow as tf\n'), ((7110, 7121), 'time.time', 'time.time', ([], {}), '()\n', (7119, 7121), False, 'import time\n'), ((7154, 7165), 'time.time', 'time.time', ([], {}), '()\n', (7163, 7165), False, 'import time\n'), ((557, 584), 'tensorflow.cast', 'tf.cast', (['images', 'tf.float32'], {}), '(images, tf.float32)\n', (564, 584), True, 'import tensorflow as tf\n'), ((682, 732), 'tensorflow.one_hot', 'tf.one_hot', ([], {'indices': 'labels', 'depth': 'self.num_classes'}), '(indices=labels, depth=self.num_classes)\n', (692, 732), True, 'import tensorflow as tf\n'), ((796, 823), 'tensorflow.cast', 'tf.cast', (['labels', 'tf.float32'], {}), '(labels, tf.float32)\n', (803, 823), True, 'import tensorflow as tf\n'), ((925, 966), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""ConvNet"""'], {'reuse': 'reuse'}), "('ConvNet', reuse=reuse)\n", (942, 966), True, 'import tensorflow as tf\n'), ((1055, 1091), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, 28, 28, 1]'}), '(x, shape=[-1, 28, 28, 1])\n', (1065, 1091), True, 'import tensorflow as tf\n'), ((1176, 1252), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'x', 'filters': '(32)', 'kernel_size': '(5)', 'activation': 'tf.nn.relu'}), '(inputs=x, filters=32, kernel_size=5, activation=tf.nn.relu)\n', (1192, 1252), True, 'import 
tensorflow as tf\n'), ((1354, 1415), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv1', 'pool_size': '(2)', 'strides': '(2)'}), '(inputs=conv1, pool_size=2, strides=2)\n', (1377, 1415), True, 'import tensorflow as tf\n'), ((1506, 1591), 'tensorflow.layers.conv2d', 'tf.layers.conv2d', ([], {'inputs': 'conv1', 'filters': '(64)', 'kernel_size': '(3)', 'activation': 'tf.nn.relu'}), '(inputs=conv1, filters=64, kernel_size=3, activation=tf.nn.relu\n )\n', (1522, 1591), True, 'import tensorflow as tf\n'), ((1686, 1747), 'tensorflow.layers.max_pooling2d', 'tf.layers.max_pooling2d', ([], {'inputs': 'conv2', 'pool_size': '(2)', 'strides': '(2)'}), '(inputs=conv2, pool_size=2, strides=2)\n', (1709, 1747), True, 'import tensorflow as tf\n'), ((1842, 1873), 'tensorflow.layers.flatten', 'tf.layers.flatten', ([], {'inputs': 'conv2'}), '(inputs=conv2)\n', (1859, 1873), True, 'import tensorflow as tf\n'), ((1951, 1990), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'fc1', 'units': '(1024)'}), '(inputs=fc1, units=1024)\n', (1966, 1990), True, 'import tensorflow as tf\n'), ((2083, 2148), 'tensorflow.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'fc1', 'rate': 'dropout', 'training': 'is_training'}), '(inputs=fc1, rate=dropout, training=is_training)\n', (2100, 2148), True, 'import tensorflow as tf\n'), ((2214, 2260), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'fc1', 'units': 'num_classes'}), '(inputs=fc1, units=num_classes)\n', (2229, 2260), True, 'import tensorflow as tf\n'), ((2822, 2929), 'tensorflow.data.Iterator.from_structure', 'tf.data.Iterator.from_structure', ([], {'output_types': 'dataset_output_types', 'output_shapes': 'dataset_output_shapes'}), '(output_types=dataset_output_types,\n output_shapes=dataset_output_shapes)\n', (2853, 2929), True, 'import tensorflow as tf\n'), ((3083, 3099), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3097, 3099), True, 'import tensorflow as tf\n'), 
((3120, 3132), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3130, 3132), True, 'import tensorflow as tf\n'), ((4351, 4376), 'numpy.mean', 'np.mean', (['train_accuracies'], {}), '(train_accuracies)\n', (4358, 4376), True, 'import numpy as np\n'), ((4796, 4820), 'numpy.mean', 'np.mean', (['test_accuracies'], {}), '(test_accuracies)\n', (4803, 4820), True, 'import numpy as np\n'), ((2410, 2431), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['output'], {}), '(output)\n', (2423, 2431), True, 'import tensorflow as tf\n'), ((3155, 3188), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3186, 3188), True, 'import tensorflow as tf\n'), ((3538, 3569), 'tensorflow.argmax', 'tf.argmax', (['self.outputs_test', '(1)'], {}), '(self.outputs_test, 1)\n', (3547, 3569), True, 'import tensorflow as tf\n'), ((3571, 3596), 'tensorflow.argmax', 'tf.argmax', (['self.labels', '(1)'], {}), '(self.labels, 1)\n', (3580, 3596), True, 'import tensorflow as tf\n'), ((3637, 3670), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (3644, 3670), True, 'import tensorflow as tf\n'), ((3746, 3839), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'self.outputs_train', 'labels': 'self.labels'}), '(logits=self.outputs_train,\n labels=self.labels)\n', (3788, 3839), True, 'import tensorflow as tf\n'), ((3866, 3922), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'self.learning_rate'}), '(learning_rate=self.learning_rate)\n', (3888, 3922), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def any_above_alpha(l, alpha):
    """Return True if any element of *l* has absolute value >= *alpha*.

    Used as the gradient-descent convergence test: iteration stops once
    every gradient component is below the threshold.
    """
    # any() with a generator short-circuits; the original built a full
    # filtered list just to test its length.
    return any(abs(x) >= alpha for x in l)
def compute_theta(params, theta_list, y):
    """Per-sample gradient terms for linear-regression gradient descent.

    params: feature values of one sample (without the bias feature).
    theta_list: current weights; the first entry is the bias weight.
    y: target value for this sample.

    Returns a list [h * x_i for each weight], where h = theta . x - y is
    the signed prediction error and x includes the constant bias feature.
    """
    import numpy as np
    # Prepend the constant bias feature x_0 = 1.
    s = [1] + list(params)
    # Signed prediction error of this sample.
    h = sum(np.asarray(s) * np.asarray(theta_list)) - y
    # Gradient contribution per weight (comprehension replaces the manual
    # append loop of the original; values are identical).
    return [h * s[i] for i in range(len(theta_list))]
def compute_cost(params, theta_list, y):
    """Signed prediction error theta . x - y for one sample.

    The constant bias feature x_0 = 1 is prepended to *params* before
    the dot product with *theta_list*.
    """
    import numpy as np
    features = [1] + list(params)
    products = np.asarray(features) * np.asarray(theta_list).T
    return sum(products) - y
def gcd_m(m, alpha):
    """Batch gradient descent for linear regression on matrix *m*.

    m: numpy matrix where each row is one sample; the last column is the
       target y and the remaining columns are features.
    alpha: learning rate, also (re)used as the convergence threshold —
       iteration stops once every averaged gradient component is below it.

    Returns the fitted weight list (bias weight first).
    """
    import numpy as np
    # NOTE(review): np.shape returns (rows, cols); the names here are swapped
    # relative to that convention but are used consistently below.
    col, row = np.shape(m)
    # print("col:{col}, row:{row}".format(col=col, row=row))
    theta_list = [100] * row
    ret = [100.0] * len(theta_list)
    while any_above_alpha(ret, alpha):
        ret = [0.0] * len(theta_list)
        # Accumulate the gradient over every sample (batch gradient descent).
        for i in range(0, col):
            y = m.item(i, -1)
            s = m[i, 0:row - 1]
            ret_temp = compute_theta(s, theta_list, y)
            # cost = compute_cost(s, theta_list, y)
            for r in range(0, row):
                ret[r] += ret_temp[r]
        for i in range(0, row):
            ret[i] = ret[i] / col
        # print(ret)
        # The update must be a separate loop: all gradient components are
        # computed from the *old* weights before any weight is changed.
        for i in range(0, row):
            theta_list[i] = theta_list[i] - alpha * ret[i]
        # print(theta_list)
    return theta_list
| [
"numpy.shape",
"numpy.asarray"
] | [((691, 702), 'numpy.shape', 'np.shape', (['m'], {}), '(m)\n', (699, 702), True, 'import numpy as np\n'), ((308, 321), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (318, 321), True, 'import numpy as np\n'), ((567, 580), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (577, 580), True, 'import numpy as np\n'), ((324, 346), 'numpy.asarray', 'np.asarray', (['theta_list'], {}), '(theta_list)\n', (334, 346), True, 'import numpy as np\n'), ((583, 605), 'numpy.asarray', 'np.asarray', (['theta_list'], {}), '(theta_list)\n', (593, 605), True, 'import numpy as np\n')] |
# TestEvaluator.py
# Written <NAME> January 2021
#
#
import numpy as np
import matplotlib.pyplot as plt
import rdml_graph as gr
import pdb
# Smoke test for gr.MaskedEvaluator: build a simple path over a 40x40
# two-channel info field, score it with a budget equal to the path length,
# and visualize the resulting mask.
x_axis = 40
y_axis = 40
# Waypoints of the test path (x, y).
path = np.array([[3.1,4.2], [8, 5], [13,12], [-10,13], [-15, -5], [15, -5]])
# Budget = total Euclidean length of the path, so the whole path is affordable.
budget = 0
for i in range(1, path.shape[0]):
    budget += np.linalg.norm(path[i] - path[i-1], ord=2)
# Constant two-channel field: channel values 2 and 3 everywhere.
info_field = np.ones((x_axis,y_axis, 2)) * np.array([2,3])
# Ticks center the grid on the origin.
x_ticks = np.arange(-x_axis/2,x_axis/2)
y_ticks = np.arange(-y_axis/2,y_axis/2)
# NOTE(review): `eval` shadows the builtin of the same name.
eval = gr.MaskedEvaluator(info_field, \
            x_ticks, y_ticks,
            radius=2, \
            budget=budget)
score, mask = eval.getScore(path, return_mask=True)
#plt.matshow(mask)
print(mask[np.newaxis,:,:].shape)
gr.plot_multi(mask[:,:, np.newaxis], [path], x_ticks=x_ticks, y_ticks=y_ticks)
print(score)
plt.show()
#
# info_field = np.ones((x_axis, y_axis))
# step_size = 3
# x_ticks = np.arange(25.5, 25.5+step_size*x_axis, step_size)
# y_ticks = np.arange(3.7, 3.7+step_size*y_axis, step_size)
#
#
# eval = gr.MaskedEvaluator(info_field, x_ticks, y_ticks, radius=4)
#
# path = np.array([[34.5, 14.2], [56.3, 23], [56.3, 50], [101, 26.3]])
#
# score = eval.getScore(path)
# print(score)
| [
"matplotlib.pyplot.show",
"numpy.ones",
"rdml_graph.MaskedEvaluator",
"rdml_graph.plot_multi",
"numpy.array",
"numpy.arange",
"numpy.linalg.norm"
] | [((175, 247), 'numpy.array', 'np.array', (['[[3.1, 4.2], [8, 5], [13, 12], [-10, 13], [-15, -5], [15, -5]]'], {}), '([[3.1, 4.2], [8, 5], [13, 12], [-10, 13], [-15, -5], [15, -5]])\n', (183, 247), True, 'import numpy as np\n'), ((418, 452), 'numpy.arange', 'np.arange', (['(-x_axis / 2)', '(x_axis / 2)'], {}), '(-x_axis / 2, x_axis / 2)\n', (427, 452), True, 'import numpy as np\n'), ((458, 492), 'numpy.arange', 'np.arange', (['(-y_axis / 2)', '(y_axis / 2)'], {}), '(-y_axis / 2, y_axis / 2)\n', (467, 492), True, 'import numpy as np\n'), ((495, 568), 'rdml_graph.MaskedEvaluator', 'gr.MaskedEvaluator', (['info_field', 'x_ticks', 'y_ticks'], {'radius': '(2)', 'budget': 'budget'}), '(info_field, x_ticks, y_ticks, radius=2, budget=budget)\n', (513, 568), True, 'import rdml_graph as gr\n'), ((742, 821), 'rdml_graph.plot_multi', 'gr.plot_multi', (['mask[:, :, np.newaxis]', '[path]'], {'x_ticks': 'x_ticks', 'y_ticks': 'y_ticks'}), '(mask[:, :, np.newaxis], [path], x_ticks=x_ticks, y_ticks=y_ticks)\n', (755, 821), True, 'import rdml_graph as gr\n'), ((836, 846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (844, 846), True, 'import matplotlib.pyplot as plt\n'), ((305, 349), 'numpy.linalg.norm', 'np.linalg.norm', (['(path[i] - path[i - 1])'], {'ord': '(2)'}), '(path[i] - path[i - 1], ord=2)\n', (319, 349), True, 'import numpy as np\n'), ((362, 390), 'numpy.ones', 'np.ones', (['(x_axis, y_axis, 2)'], {}), '((x_axis, y_axis, 2))\n', (369, 390), True, 'import numpy as np\n'), ((392, 408), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (400, 408), True, 'import numpy as np\n')] |
# coding: utf-8
"""Tools to compute equations of states with different models."""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import pymatgen.core.units as units
from monty.functools import return_none_if_raise
from pymatgen.core.units import FloatWithUnit
from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt
import logging
logger = logging.getLogger(__file__)
__all__ = [
"EOS",
]
def quadratic(V, a, b, c):
    """Second-order polynomial model: E(V) = a*V**2 + b*V + c."""
    energy = a*V**2 + b*V + c
    return energy
def murnaghan(V, E0, B0, B1, V0):
    """Murnaghan equation of state, PRB 28, 5480 (1983).

    V: volume; E0: equilibrium energy; B0: bulk modulus;
    B1: pressure derivative of B0; V0: equilibrium volume.
    """
    compression_term = B0*V/B1*(((V0/V)**B1)/(B1-1)+1)
    reference_term = V0*B0/(B1-1)
    return E0 + compression_term - reference_term
def birch(V, E0, B0, B1, V0):
    """Birch equation of state (n=0 case).

    From "Intermetallic compounds: Principles and Practice, Vol. I:
    Principles", Chapter 9, pages 195-210.
    """
    # Finite-strain-like variable; zero at V == V0.
    strain = (V0/V)**(2.0/3.0) - 1.0
    return (E0
            + 9.0/8.0*B0*V0*strain**2
            + 9.0/16.0*B0*V0*(B1-4.)*strain**3)
def birch_murnaghan(V, E0, B0, B1, V0):
    """Birch-Murnaghan equation of state, PRB 70, 224107."""
    eta = (V/V0)**(1./3.)
    # (eta**2 - 1) vanishes at V == V0, giving E = E0 there.
    correction = 9.*B0*V0/16.*(eta**2-1)**2*(6 + B1*(eta**2-1.) - 4.*eta**2)
    return E0 + correction
def pourier_tarantola(V, E0, B0, B1, V0):
    """Pourier-Tarantola (logarithmic) equation of state, PRB 70, 224107."""
    eta = (V/V0)**(1./3.)
    # Logarithmic strain variable; zero at V == V0.
    squiggle = -3.*np.log(eta)
    return E0 + B0*V0*squiggle**2/6.*(3. + squiggle*(B1 - 2))
def vinet(V, E0, B0, B1, V0):
    """Vinet equation of state, PRB 70, 224107."""
    eta = (V/V0)**(1./3.)
    prefactor = 2.*B0*V0/(B1-1.)**2
    # The bracket vanishes at eta == 1 (V == V0), so E(V0) == E0.
    bracket = 2. - (5. +3.*B1*(eta-1.)-3.*eta)*np.exp(-3.*(B1-1.)*(eta-1.)/2.)
    return E0 + prefactor * bracket
def deltafactor_polyfit(volumes, energies):
    """
    This is the routine used to compute V0, B0, B1 in the deltafactor code.
    Taken from deltafactor/eosfit.py

    Fits E as a cubic polynomial in V**(-2/3) and extracts the equilibrium
    volume v0, bulk modulus b0 and its pressure derivative b1 from the
    polynomial derivatives. Raises EOSError when no physical minimum exists.
    """
    # Cubic fit of E vs x = V**(-2/3).
    fitdata = np.polyfit(volumes**(-2./3.), energies, 3, full=True)
    ssr = fitdata[1]
    sst = np.sum((energies - np.average(energies))**2.)
    # Relative residual of the fit (computed but not returned).
    residuals0 = ssr/sst
    deriv0 = np.poly1d(fitdata[0])
    deriv1 = np.polyder(deriv0, 1)
    deriv2 = np.polyder(deriv1, 1)
    deriv3 = np.polyder(deriv2, 1)
    v0 = 0
    x = 0
    # Pick the root of E'(x) that is a true minimum (positive x, E'' > 0);
    # the for/else raises when no such root exists.
    for x in np.roots(deriv1):
        if x > 0 and deriv2(x) > 0:
            v0 = x**(-3./2.)
            break
    else:
        raise EOSError("No minimum could be found")
    # Chain-rule conversion of derivatives in x back to derivatives in V.
    derivV2 = 4./9. * x**5. * deriv2(x)
    derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. * x**(15./2.) * deriv3(x))
    b0 = derivV2 / x**(3./2.)
    b1 = -1 - x**(-3./2.) * derivV3 / derivV2
    #print('deltafactor polyfit:')
    #print('e0, b0, b1, v0')
    #print(fitdata[0], b0, b1, v0)
    n = collections.namedtuple("DeltaFitResults", "v0 b0 b1 poly1d")
    return n(v0, b0, b1, fitdata[0])
class EOSError(Exception):
    """Raised when an equation-of-state fit cannot be performed or fails."""
class EOS(object):
    """
    Fit equation of state for bulk systems.
    The following equation is used::
        murnaghan
            PRB 28, 5480 (1983)
        birch
            Intermetallic compounds: Principles and Practice, Vol I: Principles. pages 195-210
        birchmurnaghan
            PRB 70, 224107
        pouriertarantola
            PRB 70, 224107
        vinet
            PRB 70, 224107
    Use::
        eos = EOS(eos_name='murnaghan')
        fit = eos.fit(volumes, energies)
        print(fit)
        fit.plot()
    """
    # Exception type raised by fits, exposed so callers can catch EOS.Error.
    Error = EOSError
    #: Models available.
    MODELS = {
        "quadratic": quadratic,
        "murnaghan": murnaghan,
        "birch": birch,
        "birch_murnaghan": birch_murnaghan,
        "pourier_tarantola": pourier_tarantola,
        "vinet": vinet,
        "deltafactor": deltafactor_polyfit,
    }
    def __init__(self, eos_name='murnaghan'):
        # Raises KeyError if eos_name is not one of MODELS.
        self._eos_name = eos_name
        self._func = self.MODELS[eos_name]
    # Named constructors, one per supported model.
    @staticmethod
    def Quadratic():
        return EOS(eos_name="quadratic")
    @staticmethod
    def Murnaghan():
        return EOS(eos_name='murnaghan')
    @staticmethod
    def Birch():
        return EOS(eos_name='birch')
    @staticmethod
    def Birch_Murnaghan():
        return EOS(eos_name='birch_murnaghan')
    @staticmethod
    def Pourier_Tarantola():
        return EOS(eos_name='pourier_tarantola')
    @staticmethod
    def Vinet():
        return EOS(eos_name='vinet')
    @staticmethod
    def DeltaFactor():
        return EOS(eos_name='deltafactor')
    def fit(self, volumes, energies, vol_unit="ang^3", ene_unit="eV"):
        """
        Fit energies [eV] as function of volumes [Angstrom**3].
        Returns `EosFit` instance that gives access to the optimal volume,
        the minumum energy, and the bulk modulus.
        Notice that the units for the bulk modulus is eV/Angstrom^3.
        """
        # Convert volumes to Ang**3 and energies to eV (if needed).
        volumes = units.ArrayWithUnit(volumes, vol_unit).to("ang^3")
        energies = units.EnergyArray(energies, ene_unit).to("eV")
        return EOS_Fit(volumes, energies, self._func, self._eos_name)
class EOS_Fit(object):
    """Performs the fit of E(V) and provides method to access the results of the fit."""
    def __init__(self, volumes, energies, func, eos_name):
        """
        args:
            energies: list of energies in eV
            volumes: list of volumes in Angstrom^3
            func: callable function

        After construction the fit results are available as attributes:
        e0 (minimum energy), v0 (equilibrium volume), b0 (bulk modulus),
        b1 (pressure derivative of b0), eos_params (raw fit parameters).
        """
        self.volumes = np.array(volumes)
        self.energies = np.array(energies)
        assert len(self.volumes) == len(self.energies)
        self.func = func
        self.eos_name = eos_name
        # Exceptions raised during fitting are collected here for inspection.
        self.exceptions = []
        self.ierr = 0
        if eos_name == "deltafactor":
            # Deltafactor: polynomial fit in V**(-2/3); has no explicit e0.
            try:
                results = deltafactor_polyfit(self.volumes, self.energies)
                self.e0 = None
                self.v0 = results.v0
                self.b0 = results.b0
                self.b1 = results.b1
                self.p0 = results.poly1d
                self.eos_params = results.poly1d
            except EOSError as exc:
                self.ierr = 1
                logger.critical(str(exc))
                self.exceptions.append(exc)
                raise
        elif eos_name == "quadratic":
            # Quadratic fit
            a, b, c = np.polyfit(self.volumes, self.energies, 2)
            self.v0 = v0 = -b/(2*a)
            self.e0 = a*v0**2 + b*v0 + c
            self.b0 = 2*a*v0
            # A parabola has no pressure derivative of B; flagged as inf.
            self.b1 = np.inf
            self.p0 = [a, b, c]
            self.eos_params = [a, b, c]
            vmin, vmax = self.volumes.min(), self.volumes.max()
            # NOTE(review): this condition looks inverted/mis-parenthesized;
            # it likely intends `not (vmin < v0 < vmax)` — confirm.
            if not vmin < v0 and v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes\n.')
                logger.critical(str(exc))
                self.exceptions.append(exc)
        else:
            # Nonlinear models: least-squares fit seeded by a quadratic fit.
            # Objective function that will be minimized
            def objective(pars, x, y):
                return y - self.func(x, *pars)
            # Quadratic fit to get an initial guess for the parameters
            a, b, c = np.polyfit(self.volumes, self.energies, 2)
            v0 = -b/(2*a)
            e0 = a*v0**2 + b*v0 + c
            b0 = 2*a*v0
            b1 = 4  # b1 is usually a small number like 4
            vmin, vmax = self.volumes.min(), self.volumes.max()
            # NOTE(review): same suspicious condition as the quadratic branch.
            if not vmin < v0 and v0 < vmax:
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes\n.')
                logger.critical(str(exc))
                self.exceptions.append(exc)
            # Initial guesses for the parameters
            self.p0 = [e0, b0, b1, v0]
            from scipy.optimize import leastsq
            self.eos_params, self.ierr = leastsq(objective, self.p0, args=(self.volumes, self.energies))
            # leastsq returns ierr in 1..4 on success.
            if self.ierr not in [1, 2, 3, 4]:
                exc = EOSError("Optimal parameters not found")
                logger.critical(str(exc))
                self.exceptions.append(exc)
                raise exc
            self.e0 = self.eos_params[0]
            self.b0 = self.eos_params[1]
            self.b1 = self.eos_params[2]
            self.v0 = self.eos_params[3]
            print('EOS_fit:', func)
            print('e0, b0, b1, v0')
            print(self.eos_params)
    def __str__(self):
        lines = []
        app = lines.append
        app("Equation of State: %s" % self.name)
        app("Minimum volume = %1.2f Ang^3" % self.v0)
        app("Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa, b1 = %1.2f" % (self.b0, self.b0_GPa, self.b1))
        return "\n".join(lines)
    @property
    def name(self):
        # Name of the model function used for the fit.
        return self.func.__name__
    @property
    def b0_GPa(self):
        # Bulk modulus converted from eV/Ang^3 to GPa.
        return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
    @property
    @return_none_if_raise(AttributeError)
    def results(self):
        """Dictionary with the results. None if results are not available"""
        return dict(v0=self.v0, e0=self.e0, b0=self.b0, b1=self.b1)
    @add_fig_kwargs
    def plot(self, ax=None, **kwargs):
        """
        Uses Matplotlib to plot the energy curve.
        Args:
            ax: :class:`Axes` object. If ax is None, a new figure is produced.
        ================ ==============================================================
        kwargs           Meaning
        ================ ==============================================================
        style
        color
        text
        label
        ================ ==============================================================
        Returns:
            Matplotlib figure.
        """
        ax, fig, plt = get_ax_fig_plt(ax)
        # Pad the data range by 1% on each side.
        vmin, vmax = self.volumes.min(), self.volumes.max()
        emin, emax = self.energies.min(), self.energies.max()
        vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
        emin, emax = (emin - 0.01 * abs(emin), emax + 0.01 * abs(emax))
        color = kwargs.pop("color", "r")
        label = kwargs.pop("label", None)
        # Plot input data.
        ax.plot(self.volumes, self.energies, linestyle="None", marker="o", color=color) #, label="Input Data")
        # Plot EOS.
        vfit = np.linspace(vmin, vmax, 100)
        if label is None:
            label = self.name + ' fit'
        if self.eos_name == "deltafactor":
            # Deltafactor stores a polynomial in V**(-2/3), not model params.
            xx = vfit**(-2./3.)
            ax.plot(vfit, np.polyval(self.eos_params, xx), linestyle="dashed", color=color, label=label)
        else:
            ax.plot(vfit, self.func(vfit, *self.eos_params), linestyle="dashed", color=color, label=label)
        # Set xticks and labels.
        ax.grid(True)
        ax.set_xlabel("Volume $\AA^3$")
        ax.set_ylabel("Energy (eV)")
        ax.legend(loc="best", shadow=True)
        # Add text with fit parameters.
        if kwargs.pop("text", True):
            text = []; app = text.append
            app("Min Volume = %1.2f $\AA^3$" % self.v0)
            app("Bulk modulus = %1.2f eV/$\AA^3$ = %1.2f GPa" % (self.b0, self.b0_GPa))
            app("B1 = %1.2f" % self.b1)
            fig.text(0.4, 0.5, "\n".join(text), transform=ax.transAxes)
        return fig
| [
"numpy.roots",
"numpy.poly1d",
"pymatgen.core.units.FloatWithUnit",
"numpy.average",
"numpy.log",
"numpy.polyfit",
"numpy.polyval",
"numpy.polyder",
"pymatgen.core.units.EnergyArray",
"scipy.optimize.leastsq",
"pymatgen.util.plotting_utils.get_ax_fig_plt",
"pymatgen.core.units.ArrayWithUnit",
... | [((416, 443), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (433, 443), False, 'import logging\n'), ((1931, 1990), 'numpy.polyfit', 'np.polyfit', (['(volumes ** (-2.0 / 3.0))', 'energies', '(3)'], {'full': '(True)'}), '(volumes ** (-2.0 / 3.0), energies, 3, full=True)\n', (1941, 1990), True, 'import numpy as np\n'), ((2100, 2121), 'numpy.poly1d', 'np.poly1d', (['fitdata[0]'], {}), '(fitdata[0])\n', (2109, 2121), True, 'import numpy as np\n'), ((2135, 2156), 'numpy.polyder', 'np.polyder', (['deriv0', '(1)'], {}), '(deriv0, 1)\n', (2145, 2156), True, 'import numpy as np\n'), ((2170, 2191), 'numpy.polyder', 'np.polyder', (['deriv1', '(1)'], {}), '(deriv1, 1)\n', (2180, 2191), True, 'import numpy as np\n'), ((2205, 2226), 'numpy.polyder', 'np.polyder', (['deriv2', '(1)'], {}), '(deriv2, 1)\n', (2215, 2226), True, 'import numpy as np\n'), ((2262, 2278), 'numpy.roots', 'np.roots', (['deriv1'], {}), '(deriv1)\n', (2270, 2278), True, 'import numpy as np\n'), ((2736, 2796), 'collections.namedtuple', 'collections.namedtuple', (['"""DeltaFitResults"""', '"""v0 b0 b1 poly1d"""'], {}), "('DeltaFitResults', 'v0 b0 b1 poly1d')\n", (2758, 2796), False, 'import collections\n'), ((8843, 8879), 'monty.functools.return_none_if_raise', 'return_none_if_raise', (['AttributeError'], {}), '(AttributeError)\n', (8863, 8879), False, 'from monty.functools import return_none_if_raise\n'), ((1429, 1440), 'numpy.log', 'np.log', (['eta'], {}), '(eta)\n', (1435, 1440), True, 'import numpy as np\n'), ((5464, 5481), 'numpy.array', 'np.array', (['volumes'], {}), '(volumes)\n', (5472, 5481), True, 'import numpy as np\n'), ((5506, 5524), 'numpy.array', 'np.array', (['energies'], {}), '(energies)\n', (5514, 5524), True, 'import numpy as np\n'), ((9718, 9736), 'pymatgen.util.plotting_utils.get_ax_fig_plt', 'get_ax_fig_plt', (['ax'], {}), '(ax)\n', (9732, 9736), False, 'from pymatgen.util.plotting_utils import add_fig_kwargs, get_ax_fig_plt\n'), ((10264, 10292), 
'numpy.linspace', 'np.linspace', (['vmin', 'vmax', '(100)'], {}), '(vmin, vmax, 100)\n', (10275, 10292), True, 'import numpy as np\n'), ((2035, 2055), 'numpy.average', 'np.average', (['energies'], {}), '(energies)\n', (2045, 2055), True, 'import numpy as np\n'), ((4908, 4946), 'pymatgen.core.units.ArrayWithUnit', 'units.ArrayWithUnit', (['volumes', 'vol_unit'], {}), '(volumes, vol_unit)\n', (4927, 4946), True, 'import pymatgen.core.units as units\n'), ((4978, 5015), 'pymatgen.core.units.EnergyArray', 'units.EnergyArray', (['energies', 'ene_unit'], {}), '(energies, ene_unit)\n', (4995, 5015), True, 'import pymatgen.core.units as units\n'), ((6318, 6360), 'numpy.polyfit', 'np.polyfit', (['self.volumes', 'self.energies', '(2)'], {}), '(self.volumes, self.energies, 2)\n', (6328, 6360), True, 'import numpy as np\n'), ((7121, 7163), 'numpy.polyfit', 'np.polyfit', (['self.volumes', 'self.energies', '(2)'], {}), '(self.volumes, self.energies, 2)\n', (7131, 7163), True, 'import numpy as np\n'), ((7788, 7851), 'scipy.optimize.leastsq', 'leastsq', (['objective', 'self.p0'], {'args': '(self.volumes, self.energies)'}), '(objective, self.p0, args=(self.volumes, self.energies))\n', (7795, 7851), False, 'from scipy.optimize import leastsq\n'), ((8777, 8812), 'pymatgen.core.units.FloatWithUnit', 'FloatWithUnit', (['self.b0', '"""eV ang^-3"""'], {}), "(self.b0, 'eV ang^-3')\n", (8790, 8812), False, 'from pymatgen.core.units import FloatWithUnit\n'), ((10460, 10491), 'numpy.polyval', 'np.polyval', (['self.eos_params', 'xx'], {}), '(self.eos_params, xx)\n', (10470, 10491), True, 'import numpy as np\n'), ((1694, 1739), 'numpy.exp', 'np.exp', (['(-3.0 * (B1 - 1.0) * (eta - 1.0) / 2.0)'], {}), '(-3.0 * (B1 - 1.0) * (eta - 1.0) / 2.0)\n', (1700, 1739), True, 'import numpy as np\n')] |
import numpy as np
from .base import Operation
from ..utils import as_numpy
class Cast(Operation):
    """ONNX Cast: convert *x* to the element type identified by *to*."""

    def __init__(self, x, to):
        self.x = x
        # Target data-type code from the ONNX "to" attribute.
        self.to = to

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        """Build a Cast from an ONNX node.

        Bug fix: the original passed the *inputs* tuple as ``x`` and used a
        non-existent ``axis`` keyword (copied from Concat), which raised
        TypeError; unpack the inputs and pass the ``to`` attribute instead.
        """
        attributes = {a.name: as_numpy(a) for a in onnx_node.attribute}
        to = attributes.get("to")
        return cls(*inputs, to=to)
class Concat(Operation):
    """ONNX Concat: join the input tensors along *axis*.

    ``x`` holds the tuple of all inputs, since Concat is variadic.
    """

    def __init__(self, x, axis):
        self.x, self.axis = x, axis

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        # The whole inputs tuple is passed as x (variadic operator).
        return cls(inputs, axis=attrs.get("axis"))
class Expand(Operation):
    """ONNX Expand: broadcast *x* to the given *shape*."""

    def __init__(self, x, shape):
        self.x, self.shape = x, shape

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        # No attributes to read; inputs are (x, shape).
        return cls(*inputs)
class Flatten(Operation):
    """ONNX Flatten: collapse *x* into a 2-D matrix, splitting at *axis*."""

    def __init__(self, x, *, axis=1):
        self.x, self.axis = x, axis

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        return cls(*inputs, axis=attrs.get("axis", 1))
class Gather(Operation):
    """ONNX Gather: pick entries of *x* at *indices* along *axis*."""

    def __init__(self, x, indices, *, axis=0):
        self.x = x
        self.indices = indices
        self.axis = axis

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        return cls(*inputs, axis=attrs.get("axis", 0))
class Identity(Operation):
    """ONNX Identity: pass *x* through unchanged."""

    def __init__(self, x):
        self.x = x

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        # Single input, no attributes.
        return cls(*inputs)
class Pad(Operation):
    """ONNX Pad: pad *x* according to *pads*, using *mode* and *value*."""

    def __init__(self, x, pads, *, mode="constant", value=0.0):
        self.x = x
        self.pads = pads
        self.mode = mode
        self.value = value

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        # pads comes from the node attributes, appended after the inputs.
        return cls(
            *inputs,
            attrs.get("pads"),
            mode=attrs.get("mode", "constant"),
            value=attrs.get("value", 0.0),
        )
class Reshape(Operation):
    """ONNX Reshape: reinterpret *x* with the target *shape*."""

    def __init__(self, x, shape):
        self.x, self.shape = x, shape

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        # Inputs are (x, shape); no attributes are read.
        return cls(*inputs)
class Resize(Operation):
    """ONNX Resize: rescale *x* using explicit *scales* or target *sizes*.

    Exactly one of ``scales`` / ``sizes`` must be non-empty (asserted in
    the constructor). The default arrays are module-level constants and
    are never mutated, so sharing them across calls is safe.
    """

    def __init__(
        self,
        x,
        roi=np.array([], dtype=np.float32),
        # Bug fix: the `np.float` alias was removed in NumPy 1.24, which made
        # this class definition raise AttributeError; np.float64 is the type
        # the old alias resolved to for array dtypes.
        scales=np.array([], dtype=np.float64),
        sizes=np.array([], dtype=np.int64),
        *,
        coordinate_transformation_mode="half_pixel",
        cubic_coeff_a=-0.75,
        exclude_outside=0,
        extrapolation_value=0.0,
        mode="nearest",
        nearest_mode="round_prefer_floor"
    ):
        # At least one, but not both, of scales/sizes must be given.
        assert scales.size != 0 or sizes.size != 0
        assert scales.size == 0 or sizes.size == 0
        self.x = x
        self.roi = roi
        self.scales = scales
        self.sizes = sizes
        self.coordinate_transformation_mode = coordinate_transformation_mode
        self.cubic_coeff_a = cubic_coeff_a
        self.exclude_outside = exclude_outside
        self.extrapolation_value = extrapolation_value
        self.mode = mode
        self.nearest_mode = nearest_mode

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        """Build a Resize from an ONNX node, reading the optional attributes."""
        attributes = {a.name: as_numpy(a) for a in onnx_node.attribute}
        coordinate_transformation_mode = attributes.get(
            "coordinate_transformation_mode", "half_pixel"
        )
        cubic_coeff_a = attributes.get("cubic_coeff_a", -0.75)
        exclude_outside = attributes.get("exclude_outside", 0)
        extrapolation_value = attributes.get("extrapolation_value", 0.0)
        mode = attributes.get("mode", "nearest")
        nearest_mode = attributes.get("nearest_mode", "round_prefer_floor")
        return cls(
            *inputs,
            coordinate_transformation_mode=coordinate_transformation_mode,
            cubic_coeff_a=cubic_coeff_a,
            exclude_outside=exclude_outside,
            extrapolation_value=extrapolation_value,
            mode=mode,
            nearest_mode=nearest_mode
        )
class Shape(Operation):
    """ONNX Shape: produce the shape of *x* as a tensor."""

    def __init__(self, x):
        self.x = x

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        # Single input, no attributes.
        return cls(*inputs)
class Tile(Operation):
    """ONNX Tile: repeat *x* along each axis *repeats* times."""

    def __init__(self, x, repeats):
        self.x, self.repeats = x, repeats

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        # Inputs are (x, repeats); no attributes are read.
        return cls(*inputs)
class Transpose(Operation):
    """ONNX Transpose: permute the axes of *x*."""

    def __init__(self, x, *, permutation=None):
        self.x = x
        if permutation is None:
            # Default: reverse all axes (len(shape)-1, ..., 1, 0).
            self.permutation = np.arange(len(self.x.shape) - 1, -1, -1)
        else:
            self.permutation = permutation

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        return cls(*inputs, permutation=attrs.get("perm"))
class Unsqueeze(Operation):
    """ONNX Unsqueeze: insert size-1 dimensions at each axis in *axes*."""

    def __init__(self, x, axes):
        self.x, self.axes = x, axes

    @classmethod
    def from_onnx(cls, onnx_node, *inputs):
        attrs = {a.name: as_numpy(a) for a in onnx_node.attribute}
        axes = attrs.get("axes")
        # Constant folding: a concrete numpy input is expanded eagerly and
        # the resulting array is returned instead of an operation node.
        if isinstance(inputs[0], np.ndarray):
            result = inputs[0]
            for axis in axes:
                result = np.expand_dims(result, axis)
            return result
        return cls(*inputs, axes=axes)
| [
"numpy.array",
"numpy.expand_dims"
] | [((2533, 2563), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (2541, 2563), True, 'import numpy as np\n'), ((2580, 2608), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float'}), '([], dtype=np.float)\n', (2588, 2608), True, 'import numpy as np\n'), ((2624, 2652), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (2632, 2652), True, 'import numpy as np\n'), ((5522, 5545), 'numpy.expand_dims', 'np.expand_dims', (['a', 'axis'], {}), '(a, axis)\n', (5536, 5545), True, 'import numpy as np\n')] |
import aubio
import numpy as np
import pyaudio
import time
import argparse
import queue
import music21 # yes! new favorite library
parser = argparse.ArgumentParser()
parser.add_argument("-input", required=False, type=int, help="Audio Input Device")
args = parser.parse_args()
# BUG FIX: device index 0 is falsy, so the old "if not args.input" treated a
# perfectly valid device 0 as "no device given".  argparse stores None when
# the flag is omitted, so test for None explicitly.
if args.input is None:
    print("No input device specified. Printing list of input devices now: ")
    p = pyaudio.PyAudio()
    for i in range(p.get_device_count()):
        print("Device number (%i): %s" % (i, p.get_device_info_by_index(i).get('name')))
    print("Run this program with -input 1, or the number of the input you'd like to use.")
    exit()
# PyAudio object.
p = pyaudio.PyAudio()
# Open stream: mono, 44.1 kHz float32 from the selected input device.
stream = p.open(format=pyaudio.paFloat32,
    channels=1, rate=44100, input=True,
    input_device_index=args.input, frames_per_buffer=4096)
time.sleep(1)
# Aubio's pitch detection (2048-sample window, 1024-sample hop).
pDetection = aubio.pitch("default", 2048, 2048//2, 44100)
# Report pitch in Hz and ignore frames quieter than -40 dB.
pDetection.set_unit("Hz")
pDetection.set_silence(-40)
# Queue used to hand detected notes to consumers in other threads.
q = queue.Queue()
def get_current_note(volume_thresh=0.01, printOut=False):
    """Continuously read audio and report the note currently being played.

    Runs forever: each detected note is either printed (``printOut=True``) or
    pushed onto the module-level queue ``q`` as a
    ``{'Note': ..., 'Cents': ...}`` dict.

    Keyword arguments:
    volume_thresh -- the volume threshold for input. defaults to 0.01
    printOut -- whether or not to print to the terminal. defaults to False
    """
    current_pitch = music21.pitch.Pitch()
    while True:
        data = stream.read(1024, exception_on_overflow=False)
        # BUG FIX: np.fromstring is deprecated (and removed for binary input in
        # newer NumPy); np.frombuffer reads the raw bytes directly.
        samples = np.frombuffer(data, dtype=aubio.float_type)
        pitch = pDetection(samples)[0]
        # Compute the energy (volume) of the current frame.
        volume = np.sum(samples**2)/len(samples) * 100
        if pitch and volume > volume_thresh:  # adjust with your mic!
            current_pitch.frequency = pitch
        else:
            continue
        if printOut:
            print(current_pitch)
        else:
            current = current_pitch.nameWithOctave
            q.put({'Note': current, 'Cents': current_pitch.microtone.cents})
if __name__ == '__main__':
    # Standalone mode: very low volume threshold, print notes to the terminal.
    get_current_note(volume_thresh=0.001, printOut=True)
| [
"numpy.sum",
"argparse.ArgumentParser",
"time.sleep",
"numpy.fromstring",
"pyaudio.PyAudio",
"music21.pitch.Pitch",
"aubio.pitch",
"queue.Queue"
] | [((145, 170), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (168, 170), False, 'import argparse\n'), ((660, 677), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (675, 677), False, 'import pyaudio\n'), ((859, 872), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (869, 872), False, 'import time\n'), ((914, 960), 'aubio.pitch', 'aubio.pitch', (['"""default"""', '(2048)', '(2048 // 2)', '(44100)'], {}), "('default', 2048, 2048 // 2, 44100)\n", (925, 960), False, 'import aubio\n'), ((1030, 1043), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1041, 1043), False, 'import queue\n'), ((386, 403), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (401, 403), False, 'import pyaudio\n'), ((1385, 1406), 'music21.pitch.Pitch', 'music21.pitch.Pitch', ([], {}), '()\n', (1404, 1406), False, 'import music21\n'), ((1505, 1548), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': 'aubio.float_type'}), '(data, dtype=aubio.float_type)\n', (1518, 1548), True, 'import numpy as np\n'), ((1708, 1728), 'numpy.sum', 'np.sum', (['(samples ** 2)'], {}), '(samples ** 2)\n', (1714, 1728), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# coding=utf-8
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib import cm
# Grid coordinates and a 2x2 sign pattern whose contour is drawn below.
x = np.array([1, 2])
y = np.array([1, 2])
z = np.array([[1, -1], [-1, 1]])
# Discrete colormap with one colour per distinct z value (kept for
# experimenting with cmap-based variants of the contour call).
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
n_levels = len(np.unique(z))
cmap = ListedColormap(colors[:n_levels])
# Draw a single contour level and display the figure.
plt.contour(x, y, z, 1)
plt.show()
| [
"numpy.array",
"matplotlib.pyplot.contour",
"numpy.unique",
"matplotlib.pyplot.show"
] | [((167, 183), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (175, 183), True, 'import numpy as np\n'), ((188, 204), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (196, 204), True, 'import numpy as np\n'), ((209, 237), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {}), '([[1, -1], [-1, 1]])\n', (217, 237), True, 'import numpy as np\n'), ((451, 474), 'matplotlib.pyplot.contour', 'plt.contour', (['x', 'y', 'z', '(1)'], {}), '(x, y, z, 1)\n', (462, 474), True, 'import matplotlib.pyplot as plt\n'), ((475, 485), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (483, 485), True, 'import matplotlib.pyplot as plt\n'), ((361, 373), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (370, 373), True, 'import numpy as np\n')] |
import numpy as np
from multiprocessing import Pool, cpu_count
import statsmodels.api as sm
from scipy.stats import norm
from tqdm import tqdm
from itertools import product
import pandas as pd
from ananke.graphs import ADMG
from ananke.models import LinearGaussianSEM
from statsmodels.stats.proportion import proportion_confint
import os
import sys
# Add the grandparent directory to sys.path so the local wrapper_resampler
# module can be imported; __file__ is undefined in interactive sessions,
# hence the NameError guard.
try: sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
except NameError: print("Cannot load testing module")
from wrapper_resampler import ShiftedTester
np.random.seed(1)


# Help functions
def e(n=1):
    """Draw an (n, 1) column of standard Gaussian noise."""
    return np.random.normal(size=(n, 1))


def cb(*args):
    """Column-bind the given arrays (concatenate along axis 1)."""
    return np.concatenate(args, axis=1)


# Short aliases used throughout the script.
inv = np.linalg.inv
p = norm.pdf


def scm(n, causal_effect=0):
    """Simulate n samples from a Gaussian SCM with hidden confounder H.

    Returns the columns (H, X1, X2, X3, X4) as an (n, 5) array.
    """
    hidden = e(n)
    x1 = e(n)
    x2 = x1 + hidden + e(n)
    x3 = x2 + 2 * e(n)
    x4 = causal_effect * x1 + x3 + hidden + e(n)
    return cb(hidden, x1, x2, x3, x4)


def weight(X):
    """Importance weights: target density N(mean(X3), 1) for X3, divided by
    the true conditional density of X3 given X2 (N(X2, sd=2) in the SCM)."""
    target = X[:, 3]
    numerator = p(target, loc=target.mean(), scale=1.0)
    denominator = p(target, loc=X[:, 2], scale=2)
    return numerator / denominator
# Fitted weight
def weight_fit(X):
    """Importance weights like ``weight``, but with the location of the
    X3 | X2 conditional estimated by OLS instead of taken from the known
    simulation parameters."""
    target = X[:, 3]
    numerator = p(target, loc=target.mean(), scale=1.0)
    fit = sm.OLS(target, X[:, 2]).fit()
    denominator = p(target, loc=fit.fittedvalues, scale=np.sqrt(fit.scale))
    return numerator / denominator
# Test function: Regress X4 ~ X1 and get p-value
def T(X):
    """Level-0.05 test: 1 if the slope of X4 ~ 1 + X1 is significant, else 0."""
    pval = sm.OLS(X[:, 4], sm.tools.add_constant(X[:, 1])).fit().pvalues[1]
    return (pval < 0.05) * 1
# ADMG over (A, B, C, D) = (X1, X2, X3, X4): directed chain A -> B -> C -> D
# with a bidirected (confounded) edge B <-> D.
vertices = ["A", "B", "C", "D"]
di_edges = [("A", "B"), ("B", "C"), ("C", "D")]
bi_edges = [("B", "D")]
G = ADMG(vertices, di_edges=di_edges, bi_edges=bi_edges)
# Alternative graph that additionally allows a direct causal effect A -> D.
G_causal = ADMG(vertices, di_edges=di_edges + [("A", "D")], bi_edges=bi_edges)
def score_test(X):
    """BIC-style score test for the edge A -> D.

    Fits linear Gaussian SEMs for both graphs (without and with A -> D),
    computes each model's Gaussian log-likelihood from the implied covariance,
    and returns 1 if the improvement exceeds the log(n) penalty, else 0.

    :param X: (n, 5) sample array with columns (H, X1, X2, X3, X4); the hidden
        column 0 is not used.
    """
    n = X.shape[0]
    data = pd.DataFrame({"A": X[:,1], "B": X[:,2], "C": X[:,3], "D": X[:,4]})
    # Sample covariance of the observed variables (A, B, C, D).
    S = np.cov(X[:,1:].T)
    # Fit model 1
    model = LinearGaussianSEM(G, method="trust-exact")
    model.fit(data)
    omega_ = model.omega_
    B_ = model.B_
    # Implied covariance: (I - B)^{-1} Omega (I - B)^{-T}.
    Sigma_ = inv(np.eye(4)-B_)@omega_@inv((np.eye(4)-B_).T)
    # Gaussian log-likelihood evaluated at the implied covariance.
    score = -n/2*(np.log(np.linalg.det(2*np.pi*Sigma_))+(n-1)/n*np.trace(inv(Sigma_)@S))
    model = LinearGaussianSEM(G_causal, method="trust-exact")
    model.fit(data)
    omega_ = model.omega_
    B_ = model.B_
    Sigma_ = inv(np.eye(4)-B_)@omega_@inv((np.eye(4)-B_).T)
    score_causal = -n/2*(np.log(np.linalg.det(2*np.pi*Sigma_))+(n-1)/n*np.trace(inv(Sigma_)@S))
    # Accept the extra edge only if it beats a log(n) (BIC-like) penalty.
    return 1*(score_causal - score > np.log(n))
# Parameters for choice-of-m algorithm
tune_m_repeats = 25
# 5% quantile of the minimum of tune_m_repeats independent uniforms; used as
# the p-value cutoff when tuning the resample size m.
cutoff = np.quantile(np.random.uniform(size=(1000, tune_m_repeats)).min(axis=1), 0.05)
# Loop parameters
causal_effects = [0, 0.3]#np.linspace(0, 5, num=21)
# Sample sizes 10^2, 10^2.5, ..., 10^4.5.
n_range = [int(10**(x/2)) for x in range(4, 10)]
tests = {"LinReg": T}
m_choices = ["heuristic", "sqrt"]
methods = ["resampling", "score-based"]
# Full factorial design over all settings.
combinations = list(product(n_range, causal_effects, m_choices, methods))
# n, c_e, m_choice, method = 100, 0.2, "heuristic", "score-based"
## Wrap as function to apply multiprocessing
def conduct_experiment(i=None):
    """Run one replication of every (n, effect, m-choice, method) combination.

    :param i: Dummy argument so the function can be mapped over a range by
        ``Pool.imap_unordered``.
    :return: List with one test decision (0/1, or NaN on failure) per entry
        of the module-level ``combinations``.
    """
    out = []
    for n, c_e, m_choice, method in combinations:
        X = scm(n, causal_effect=c_e)
        # Do not do anything if m < 5 or (m>n and not replacement)
        if method == "resampling":
            try:
                psi = ShiftedTester(weight_fit, tests["LinReg"], replacement="NO-REPL-reject", reject_retries=100)
                m = psi.tune_m(X, j_x = [3], j_y=[2], gaussian=True, cond = [X[:,3].mean()], m_factor=1.3,
                               p_cutoff=cutoff, repeats=tune_m_repeats, replacement=False,
                               m_init=int(np.sqrt(n))) if m_choice == "heuristic" else None
                out.append(psi.test(X, m=m))
            # Catch errors from the test statistic only.  BUG FIX: a bare
            # ``except:`` also swallowed KeyboardInterrupt/SystemExit, which
            # makes the multiprocessing workers impossible to interrupt.
            except Exception:
                print(f"Error occurred {c_e}, {n}, {m_choice}, {method}")
                out.append(np.nan)
        else:
            try:
                out.append(score_test(X))
            except Exception:
                print(f"Error occurred {c_e}, {n}, {m_choice}, {method}")
                out.append(np.nan)
    return(out)
## Conduct multiple experiments with multiprocessing and export to R for plotting:
if __name__ == '__main__':
    repeats = 1000
    # Multiprocess, leaving two cores free for the rest of the system.
    pool = Pool(cpu_count()-2)
    res = np.array(
        list(tqdm(pool.imap_unordered(conduct_experiment, range(repeats)), total=repeats)))
    pool.close()
    # Count non-nas, to be used for binomial confidence intervals
    counts = (~np.isnan(res)).sum(axis=0)
    nans = np.isnan(res).sum(axis=0)
    res = np.nansum(res, axis=0)
    # Rejection rate plus binomial confidence bounds for every combination.
    df = pd.DataFrame(
        [(x/(repeats-n), *v, *proportion_confint(x, repeats-n, method="binom_test"), n) for x, v, n in zip(res, combinations, nans)],
        columns=["alpha", "n", "Causal_Effect", "m_choice", "method","Lower", "Upper", "NoNans"])
    # Export to R for ggplotting
    # BUG FIX: the np.NaN alias was removed in NumPy 2.0; use np.nan, which is
    # also what the rest of this script uses.
    df['alpha'] = df["alpha"].replace(np.nan, "NA")
    df.to_csv("experiment-dormant-continuous.csv")
| [
"numpy.random.seed",
"statsmodels.api.OLS",
"ananke.graphs.ADMG",
"numpy.isnan",
"numpy.random.normal",
"multiprocessing.cpu_count",
"pandas.DataFrame",
"os.path.abspath",
"wrapper_resampler.ShiftedTester",
"itertools.product",
"numpy.linalg.det",
"numpy.cov",
"numpy.nansum",
"statsmodels.... | [((532, 549), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (546, 549), True, 'import numpy as np\n'), ((1548, 1600), 'ananke.graphs.ADMG', 'ADMG', (['vertices'], {'di_edges': 'di_edges', 'bi_edges': 'bi_edges'}), '(vertices, di_edges=di_edges, bi_edges=bi_edges)\n', (1552, 1600), False, 'from ananke.graphs import ADMG\n'), ((1612, 1679), 'ananke.graphs.ADMG', 'ADMG', (['vertices'], {'di_edges': "(di_edges + [('A', 'D')])", 'bi_edges': 'bi_edges'}), "(vertices, di_edges=di_edges + [('A', 'D')], bi_edges=bi_edges)\n", (1616, 1679), False, 'from ananke.graphs import ADMG\n'), ((587, 616), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (603, 616), True, 'import numpy as np\n'), ((657, 685), 'numpy.concatenate', 'np.concatenate', (['args'], {'axis': '(1)'}), '(args, axis=1)\n', (671, 685), True, 'import numpy as np\n'), ((1730, 1800), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': X[:, 1], 'B': X[:, 2], 'C': X[:, 3], 'D': X[:, 4]}"], {}), "({'A': X[:, 1], 'B': X[:, 2], 'C': X[:, 3], 'D': X[:, 4]})\n", (1742, 1800), True, 'import pandas as pd\n'), ((1805, 1823), 'numpy.cov', 'np.cov', (['X[:, 1:].T'], {}), '(X[:, 1:].T)\n', (1811, 1823), True, 'import numpy as np\n'), ((1854, 1896), 'ananke.models.LinearGaussianSEM', 'LinearGaussianSEM', (['G'], {'method': '"""trust-exact"""'}), "(G, method='trust-exact')\n", (1871, 1896), False, 'from ananke.models import LinearGaussianSEM\n'), ((2123, 2172), 'ananke.models.LinearGaussianSEM', 'LinearGaussianSEM', (['G_causal'], {'method': '"""trust-exact"""'}), "(G_causal, method='trust-exact')\n", (2140, 2172), False, 'from ananke.models import LinearGaussianSEM\n'), ((2828, 2880), 'itertools.product', 'product', (['n_range', 'causal_effects', 'm_choices', 'methods'], {}), '(n_range, causal_effects, m_choices, methods)\n', (2835, 2880), False, 'from itertools import product\n'), ((4562, 4584), 'numpy.nansum', 'np.nansum', (['res'], {'axis': '(0)'}), '(res, 
axis=0)\n', (4571, 4584), True, 'import numpy as np\n'), ((1174, 1198), 'statsmodels.api.OLS', 'sm.OLS', (['X[:, 3]', 'X[:, 2]'], {}), '(X[:, 3], X[:, 2])\n', (1180, 1198), True, 'import statsmodels.api as sm\n'), ((1254, 1273), 'numpy.sqrt', 'np.sqrt', (['mod1.scale'], {}), '(mod1.scale)\n', (1261, 1273), True, 'import numpy as np\n'), ((2431, 2440), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (2437, 2440), True, 'import numpy as np\n'), ((2524, 2570), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1000, tune_m_repeats)'}), '(size=(1000, tune_m_repeats))\n', (2541, 2570), True, 'import numpy as np\n'), ((4262, 4273), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (4271, 4273), False, 'from multiprocessing import Pool, cpu_count\n'), ((4526, 4539), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (4534, 4539), True, 'import numpy as np\n'), ((404, 429), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (419, 429), False, 'import os\n'), ((2046, 2079), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * Sigma_)'], {}), '(2 * np.pi * Sigma_)\n', (2059, 2079), True, 'import numpy as np\n'), ((2329, 2362), 'numpy.linalg.det', 'np.linalg.det', (['(2 * np.pi * Sigma_)'], {}), '(2 * np.pi * Sigma_)\n', (2342, 2362), True, 'import numpy as np\n'), ((3269, 3365), 'wrapper_resampler.ShiftedTester', 'ShiftedTester', (['weight_fit', "tests['LinReg']"], {'replacement': '"""NO-REPL-reject"""', 'reject_retries': '(100)'}), "(weight_fit, tests['LinReg'], replacement='NO-REPL-reject',\n reject_retries=100)\n", (3282, 3365), False, 'from wrapper_resampler import ShiftedTester\n'), ((4488, 4501), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (4496, 4501), True, 'import numpy as np\n'), ((1978, 1987), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1984, 1987), True, 'import numpy as np\n'), ((2004, 2013), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2010, 2013), True, 'import numpy as np\n'), ((2254, 2263), 
'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2260, 2263), True, 'import numpy as np\n'), ((2280, 2289), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (2286, 2289), True, 'import numpy as np\n'), ((4640, 4695), 'statsmodels.stats.proportion.proportion_confint', 'proportion_confint', (['x', '(repeats - n)'], {'method': '"""binom_test"""'}), "(x, repeats - n, method='binom_test')\n", (4658, 4695), False, 'from statsmodels.stats.proportion import proportion_confint\n'), ((1380, 1410), 'statsmodels.api.tools.add_constant', 'sm.tools.add_constant', (['X[:, 1]'], {}), '(X[:, 1])\n', (1401, 1410), True, 'import statsmodels.api as sm\n'), ((3602, 3612), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (3609, 3612), True, 'import numpy as np\n')] |
"""Makes animation to explain multivariate convolution."""
import argparse
import numpy
import matplotlib
matplotlib.use('agg')
import matplotlib.colors
from matplotlib.patches import ConnectionPatch
import matplotlib.pyplot as pyplot
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.deep_learning import standalone_utils
from gewittergefahr.plotting import plotting_utils
from gewittergefahr.plotting import imagemagick_utils
# Global matplotlib styling for every panel in the animation.
DEFAULT_FONT_SIZE = 25
DEFAULT_LINE_WIDTH = 4
pyplot.rc('font', size=DEFAULT_FONT_SIZE)
pyplot.rc('axes', titlesize=DEFAULT_FONT_SIZE)
pyplot.rc('axes', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('axes', linewidth=DEFAULT_LINE_WIDTH)
pyplot.rc('xtick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('ytick', labelsize=DEFAULT_FONT_SIZE)
pyplot.rc('legend', fontsize=DEFAULT_FONT_SIZE)
pyplot.rc('figure', titlesize=DEFAULT_FONT_SIZE)
# Two-colour map: white for unhighlighted cells, translucent green for the
# highlighted (kernel-covered) cells.
COLOUR_LIST = [
    numpy.full(3, 1.),
    numpy.array([27, 158, 119], dtype=float) / 255
]
COLOUR_LIST[1] = matplotlib.colors.to_rgba(COLOUR_LIST[1], 0.5)
COLOUR_MAP_OBJECT = matplotlib.colors.ListedColormap(COLOUR_LIST)
DEFAULT_FONT_COLOUR = numpy.array([217, 95, 2], dtype=float) / 255
SPECIAL_FONT_COLOUR = numpy.array([117, 112, 179], dtype=float) / 255
FEATURE_TO_KERNEL_LINE_COLOUR = numpy.full(3, 152. / 255)
PANEL_LETTER_FONT_SIZE = 30
FEATURE_TO_KERNEL_LINE_WIDTH = 2
# Input maps (4 rows x 6 columns) for the three example variables.
TEMPERATURE_MATRIX = numpy.array([
    [-10, -8, -6, -4, -2, 0],
    [-8, -6, -4, -2, 0.5, 1],
    [-6, -4, 1.5, 1.75, 2, 2],
    [-4, 2, 2.25, 2.5, 2.75, 3]
])
U_WIND_MATRIX = numpy.array([
    [1, 1, 1, 1, 1, -2],
    [1, 1, 1, -2, -2, -2],
    [1, -2, -2, -2, -2, -2],
    [-2, -2, -2, -2, -2, -2]
])
V_WIND_MATRIX = numpy.array([
    [-6, -6, -6, -6, -6, -6],
    [-6, -6, -6, -6, -6, 3],
    [-6, -6, -6, 3, 3, 3],
    [-6, -6, 3, 3, 3, 3]
])
# Stack the variables along a trailing channel axis: shape (4, 6, 3).
INPUT_FEATURE_MATRIX = numpy.stack(
    (TEMPERATURE_MATRIX, U_WIND_MATRIX, V_WIND_MATRIX), axis=-1
)
# 3x3 filter weights per variable (edge-detection-style kernels).
TEMPERATURE_KERNEL_MATRIX = numpy.array([
    [0, 1, 0],
    [1, -4, 1],
    [0, 1, 0]
])
U_WIND_KERNEL_MATRIX = numpy.array([
    [1, 0, -1],
    [0, 0, 0],
    [-1, 0, 1]
])
V_WIND_KERNEL_MATRIX = numpy.array([
    [-1, -1, -1],
    [-1, 8, -1],
    [-1, -1, -1]
])
# Combined kernel of shape (3, 3, 3 input channels, 1 output channel).
KERNEL_MATRIX = numpy.stack(
    (TEMPERATURE_KERNEL_MATRIX, U_WIND_KERNEL_MATRIX, V_WIND_KERNEL_MATRIX),
    axis=-1
).astype(float)
KERNEL_MATRIX = numpy.expand_dims(KERNEL_MATRIX, axis=-1)
NUM_PANEL_ROWS = 3
NUM_PANEL_COLUMNS = 3
FIGURE_RESOLUTION_DPI = 300
# FIGURE_CAPTION = (
#     'Schematic for multivariate convolution in two spatial dimensions.\n'
#     '[a-c] Input maps, one for each variable.\n'
#     '[d-f] The convolutional filter, one panel for each input variable. '
#     'Filter weights are\n'
#     'set to values known to be useful for edge detection. In a real CNN these '
#     'weights\n'
#     'are learned during training.\n'
#     '[g] Feature map produced by convolution. At each filter position, the '
#     'highlighted\n'
#     'value in g is the sum of elementwise products of highlighted values in '
#     'a-f. When\n'
#     'the filter runs off the edge of the 2-D grid, the missing input values are'
#     ' assumed\n'
#     'to be zero (the default in Keras).'
# )
FIGURE_CAPTION = (
    'Schematic for multivariate convolution in two spatial dimensions.\n'
    '[a-c] Input maps, one for each variable.\n'
    '[d-f] The convolutional filter, one panel for each input variable.\n'
    '[g] Feature map produced by convolution. At each filter position, the '
    'highlighted\n'
    'value in g is the sum of elementwise products of highlighted values in '
    'a-f. When\n'
    'the filter runs off the edge of the 2-D grid, the missing input values are'
    ' assumed\n'
    'to be zero.'
)
# Command-line interface.
INCLUDE_CAPTION_ARG_NAME = 'include_caption'
OUTPUT_DIR_ARG_NAME = 'output_dir_name'
INCLUDE_CAPTION_HELP_STRING = (
    'Boolean flag. If 1, will include caption in each frame of animation.')
OUTPUT_DIR_HELP_STRING = (
    'Name of output directory (individual frames and full GIF will be saved '
    'here).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + INCLUDE_CAPTION_ARG_NAME, type=int, required=False, default=1,
    help=INCLUDE_CAPTION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
    help=OUTPUT_DIR_HELP_STRING)
def _plot_feature_map(feature_matrix_2d, kernel_row, kernel_column,
                      is_output_map, axes_object):
    """Plots one feature map with the kernel's footprint highlighted.

    M = number of rows in grid
    N = number of columns in grid

    :param feature_matrix_2d: M-by-N numpy array of feature values.
    :param kernel_row: Row index at center of kernel.
    :param kernel_column: Column index at center of kernel.
    :param is_output_map: Boolean flag.  If True, only the single output cell
        is highlighted; otherwise the full (clipped) 3 x 3 receptive field is.
    :param axes_object: Will plot on these axes (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    """

    num_rows, num_columns = feature_matrix_2d.shape

    if is_output_map:
        row_limits = (kernel_row, kernel_row)
        column_limits = (kernel_column, kernel_column)
    else:
        row_limits = (
            max([kernel_row - 1, 0]), min([kernel_row + 1, num_rows - 1])
        )
        column_limits = (
            max([kernel_column - 1, 0]),
            min([kernel_column + 1, num_columns - 1])
        )

    # NaN everywhere except the highlighted cells; masking the NaN values
    # leaves the background white under the two-colour map.
    highlight_matrix = numpy.full(feature_matrix_2d.shape, numpy.nan)
    highlight_matrix[
        row_limits[0]:(row_limits[1] + 1),
        column_limits[0]:(column_limits[1] + 1)
    ] = 0
    highlight_matrix = numpy.ma.masked_where(
        numpy.isnan(highlight_matrix), highlight_matrix
    )

    axes_object.imshow(
        highlight_matrix, cmap=COLOUR_MAP_OBJECT, vmin=-1., vmax=0.,
        origin='upper'
    )
    axes_object.set_xticks([], [])
    axes_object.set_yticks([], [])

    for this_row in range(num_rows):
        for this_column in range(num_columns):
            if (is_output_map and this_row == kernel_row
                    and this_column == kernel_column):
                this_colour = SPECIAL_FONT_COLOUR
            else:
                this_colour = DEFAULT_FONT_COLOUR

            this_label_string = '{0:d}'.format(
                int(numpy.round(feature_matrix_2d[this_row, this_column]))
            )

            axes_object.text(
                this_column, this_row, this_label_string,
                fontsize=DEFAULT_FONT_SIZE, color=this_colour,
                horizontalalignment='center', verticalalignment='center')
def _plot_kernel(kernel_matrix_2d, feature_matrix_2d, feature_row_at_center,
                 feature_column_at_center, axes_object):
    """Plots convolution kernel (filter) for one variable.

    M = number of rows in feature map
    N = number of columns in feature map
    J = number of rows in kernel
    K = number of columns in kernel

    :param kernel_matrix_2d: J-by-K numpy array of weights.
    :param feature_matrix_2d: M-by-N numpy array of feature values.
    :param feature_row_at_center: Feature-map row at center of kernel.
    :param feature_column_at_center: Feature-map column at center of kernel.
    :param axes_object: See doc for `_plot_feature_map`.
    """

    num_kernel_rows, num_kernel_columns = kernel_matrix_2d.shape
    num_feature_rows, num_feature_columns = feature_matrix_2d.shape

    # Distances from the kernel centre to the bottom/right edges of the grid;
    # used to clip the highlighted region when the filter overhangs the grid.
    rows_below_center = num_feature_rows - feature_row_at_center - 1
    columns_right_of_center = (
        num_feature_columns - feature_column_at_center - 1
    )

    top_row = max([1 - feature_row_at_center, 0])
    bottom_row = min([1 + rows_below_center, num_kernel_rows - 1])
    left_column = max([1 - feature_column_at_center, 0])
    right_column = min([
        1 + columns_right_of_center, num_kernel_columns - 1
    ])

    highlight_matrix = numpy.full(kernel_matrix_2d.shape, numpy.nan)
    highlight_matrix[
        top_row:(bottom_row + 1), left_column:(right_column + 1)
    ] = 0
    highlight_matrix = numpy.ma.masked_where(
        numpy.isnan(highlight_matrix), highlight_matrix
    )

    axes_object.imshow(
        highlight_matrix, cmap=COLOUR_MAP_OBJECT, vmin=-1., vmax=0.,
        origin='upper'
    )
    axes_object.set_xticks([], [])
    axes_object.set_yticks([], [])

    for this_row in range(num_kernel_rows):
        for this_column in range(num_kernel_columns):
            this_label_string = '{0:d}'.format(
                int(numpy.round(kernel_matrix_2d[this_row, this_column]))
            )

            axes_object.text(
                this_column, this_row, this_label_string,
                fontsize=DEFAULT_FONT_SIZE, color=DEFAULT_FONT_COLOUR,
                horizontalalignment='center', verticalalignment='center')
def _plot_feature_to_kernel_lines(
        kernel_matrix_2d, feature_matrix_2d, feature_row_at_center,
        feature_column_at_center, kernel_axes_object, feature_axes_object):
    """Plots dashed lines connecting the feature map to the kernel.

    :param kernel_matrix_2d: See doc for `_plot_kernel`.
    :param feature_matrix_2d: Same.
    :param feature_row_at_center: Same.
    :param feature_column_at_center: Same.
    :param kernel_axes_object: Axes on which kernel is plotted (instance of
        `matplotlib.axes._subplots.AxesSubplot`).
    :param feature_axes_object: Axes on which feature map is plotted (instance
        of `matplotlib.axes._subplots.AxesSubplot`).
    """

    num_feature_rows, num_feature_columns = feature_matrix_2d.shape
    num_kernel_rows = kernel_matrix_2d.shape[0]

    # Edges of the highlighted receptive field in feature-map coordinates
    # (half-cell offsets put the lines on cell boundaries).
    first_feature_row = max([feature_row_at_center - 1.5, -0.5])
    last_feature_row = min([
        feature_row_at_center + 1.5, num_feature_rows - 0.5
    ])
    last_feature_column = min([
        feature_column_at_center + 1.5, num_feature_columns - 0.5
    ])

    # Corresponding edges of the highlighted region in kernel coordinates.
    center_row_from_bottom = num_feature_rows - feature_row_at_center - 1
    first_kernel_row = -0.5 + max([1 - feature_row_at_center, 0])
    last_kernel_row = 0.5 + min([
        1 + center_row_from_bottom, num_kernel_rows - 1
    ])
    first_kernel_column = -0.5 + max([1 - feature_column_at_center, 0])

    # One dashed connector along the top edge of the highlighted region and
    # one along the bottom edge.
    edge_row_pairs = [
        (first_kernel_row, first_feature_row),
        (last_kernel_row, last_feature_row)
    ]

    for this_kernel_row, this_feature_row in edge_row_pairs:
        this_connection_object = ConnectionPatch(
            xyA=(first_kernel_column, this_kernel_row),
            xyB=(last_feature_column, this_feature_row),
            coordsA='data', coordsB='data',
            axesA=kernel_axes_object, axesB=feature_axes_object,
            color=FEATURE_TO_KERNEL_LINE_COLOUR,
            linewidth=FEATURE_TO_KERNEL_LINE_WIDTH, linestyle='dashed'
        )
        kernel_axes_object.add_artist(this_connection_object)
def _run(include_caption, output_dir_name):
    """Makes animation to explain multivariate convolution.

    Renders one JPEG frame per kernel position (grid row x grid column), then
    stitches the frames into a GIF.  This is effectively the main method.

    :param include_caption: See documentation at top of file.
    :param output_dir_name: Same.
    """
    file_system_utils.mkdir_recursive_if_necessary(
        directory_name=output_dir_name)
    # Convolve the full input once; each frame only changes the highlighting.
    output_feature_matrix = standalone_utils.do_2d_convolution(
        feature_matrix=INPUT_FEATURE_MATRIX, kernel_matrix=KERNEL_MATRIX,
        pad_edges=True, stride_length_px=1)
    output_feature_matrix = output_feature_matrix[0, ..., 0]
    num_grid_rows = INPUT_FEATURE_MATRIX.shape[0]
    num_grid_columns = INPUT_FEATURE_MATRIX.shape[1]
    num_input_channels = INPUT_FEATURE_MATRIX.shape[2]
    image_file_names = []
    # Ratios used below to shrink the kernel panels relative to map panels.
    kernel_width_ratio = float(KERNEL_MATRIX.shape[1]) / num_grid_columns
    kernel_height_ratio = float(KERNEL_MATRIX.shape[0]) / num_grid_rows
    # One frame per kernel position (i, j).
    for i in range(num_grid_rows):
        for j in range(num_grid_columns):
            this_figure_object, this_axes_object_matrix = (
                plotting_utils.create_paneled_figure(
                    num_rows=NUM_PANEL_ROWS, num_columns=NUM_PANEL_COLUMNS,
                    horizontal_spacing=0.2, vertical_spacing=0.,
                    shared_x_axis=False, shared_y_axis=False,
                    keep_aspect_ratio=True)
            )
            # Right column holds only the output map (middle panel).
            this_axes_object_matrix[0, 2].axis('off')
            this_axes_object_matrix[2, 2].axis('off')
            letter_label = None
            # Column 0: one input map per channel, labelled (a)-(c).
            for k in range(num_input_channels):
                _plot_feature_map(
                    feature_matrix_2d=INPUT_FEATURE_MATRIX[..., k],
                    kernel_row=i, kernel_column=j, is_output_map=False,
                    axes_object=this_axes_object_matrix[k, 0]
                )
                if letter_label is None:
                    letter_label = 'a'
                else:
                    letter_label = chr(ord(letter_label) + 1)
                plotting_utils.label_axes(
                    axes_object=this_axes_object_matrix[k, 0],
                    label_string='({0:s})'.format(letter_label),
                    font_size=PANEL_LETTER_FONT_SIZE,
                    y_coord_normalized=1.04, x_coord_normalized=0.1
                )
            _plot_feature_map(
                feature_matrix_2d=output_feature_matrix,
                kernel_row=i, kernel_column=j, is_output_map=True,
                axes_object=this_axes_object_matrix[1, 2]
            )
            # Column 1: kernel panels, shrunk and re-positioned to kernel size.
            for k in range(num_input_channels):
                this_bbox_object = this_axes_object_matrix[k, 1].get_position()
                this_width = kernel_width_ratio * (
                    this_bbox_object.x1 - this_bbox_object.x0
                )
                this_height = kernel_height_ratio * (
                    this_bbox_object.y1 - this_bbox_object.y0
                )
                this_bbox_object.x0 += 0.5 * this_width
                this_bbox_object.y0 += 0.005
                this_bbox_object.x1 = this_bbox_object.x0 + this_width
                this_bbox_object.y1 = this_bbox_object.y0 + this_height
                this_axes_object_matrix[k, 1].set_position(this_bbox_object)
                _plot_kernel(
                    kernel_matrix_2d=KERNEL_MATRIX[..., k, 0],
                    feature_matrix_2d=INPUT_FEATURE_MATRIX[..., k],
                    feature_row_at_center=i, feature_column_at_center=j,
                    axes_object=this_axes_object_matrix[k, 1]
                )
                letter_label = chr(ord(letter_label) + 1)
                plotting_utils.label_axes(
                    axes_object=this_axes_object_matrix[k, 1],
                    label_string='({0:s})'.format(letter_label),
                    font_size=PANEL_LETTER_FONT_SIZE,
                    y_coord_normalized=1.04, x_coord_normalized=0.2
                )
                _plot_feature_to_kernel_lines(
                    kernel_matrix_2d=KERNEL_MATRIX[..., k, 0],
                    feature_matrix_2d=INPUT_FEATURE_MATRIX[..., k],
                    feature_row_at_center=i, feature_column_at_center=j,
                    kernel_axes_object=this_axes_object_matrix[k, 1],
                    feature_axes_object=this_axes_object_matrix[k, 0]
                )
            # Label the output panel (g).
            letter_label = chr(ord(letter_label) + 1)
            plotting_utils.label_axes(
                axes_object=this_axes_object_matrix[1, 2],
                label_string='({0:s})'.format(letter_label),
                font_size=PANEL_LETTER_FONT_SIZE,
                y_coord_normalized=1.04, x_coord_normalized=0.1
            )
            if include_caption:
                this_figure_object.text(
                    0.5, 0., FIGURE_CAPTION, fontsize=DEFAULT_FONT_SIZE,
                    color='k', horizontalalignment='center',
                    verticalalignment='top')
            image_file_names.append(
                '{0:s}/conv_animation_row{1:d}_column{2:d}.jpg'.format(
                    output_dir_name, i, j)
            )
            print('Saving figure to: "{0:s}"...'.format(image_file_names[-1]))
            this_figure_object.savefig(
                image_file_names[-1], dpi=FIGURE_RESOLUTION_DPI,
                pad_inches=0, bbox_inches='tight'
            )
            pyplot.close(this_figure_object)
            imagemagick_utils.trim_whitespace(
                input_file_name=image_file_names[-1],
                output_file_name=image_file_names[-1]
            )
    animation_file_name = '{0:s}/conv_animation.gif'.format(output_dir_name)
    print('Creating animation: "{0:s}"...'.format(animation_file_name))
    imagemagick_utils.create_gif(
        input_file_names=image_file_names, output_file_name=animation_file_name,
        num_seconds_per_frame=0.5, resize_factor=0.5)
if __name__ == '__main__':
    # Parse CLI flags and hand them to the main routine.
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    _run(
        include_caption=bool(
            getattr(INPUT_ARG_OBJECT, INCLUDE_CAPTION_ARG_NAME)
        ),
        output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
    )
| [
"matplotlib.colors.to_rgba",
"numpy.full",
"numpy.stack",
"argparse.ArgumentParser",
"matplotlib.pyplot.close",
"gewittergefahr.plotting.plotting_utils.create_paneled_figure",
"numpy.expand_dims",
"numpy.isnan",
"gewittergefahr.deep_learning.standalone_utils.do_2d_convolution",
"matplotlib.use",
... | [((107, 128), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (121, 128), False, 'import matplotlib\n'), ((501, 542), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""font"""'], {'size': 'DEFAULT_FONT_SIZE'}), "('font', size=DEFAULT_FONT_SIZE)\n", (510, 542), True, 'import matplotlib.pyplot as pyplot\n'), ((543, 589), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'titlesize': 'DEFAULT_FONT_SIZE'}), "('axes', titlesize=DEFAULT_FONT_SIZE)\n", (552, 589), True, 'import matplotlib.pyplot as pyplot\n'), ((590, 636), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('axes', labelsize=DEFAULT_FONT_SIZE)\n", (599, 636), True, 'import matplotlib.pyplot as pyplot\n'), ((637, 684), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""axes"""'], {'linewidth': 'DEFAULT_LINE_WIDTH'}), "('axes', linewidth=DEFAULT_LINE_WIDTH)\n", (646, 684), True, 'import matplotlib.pyplot as pyplot\n'), ((685, 732), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""xtick"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('xtick', labelsize=DEFAULT_FONT_SIZE)\n", (694, 732), True, 'import matplotlib.pyplot as pyplot\n'), ((733, 780), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""ytick"""'], {'labelsize': 'DEFAULT_FONT_SIZE'}), "('ytick', labelsize=DEFAULT_FONT_SIZE)\n", (742, 780), True, 'import matplotlib.pyplot as pyplot\n'), ((781, 828), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""legend"""'], {'fontsize': 'DEFAULT_FONT_SIZE'}), "('legend', fontsize=DEFAULT_FONT_SIZE)\n", (790, 828), True, 'import matplotlib.pyplot as pyplot\n'), ((829, 877), 'matplotlib.pyplot.rc', 'pyplot.rc', (['"""figure"""'], {'titlesize': 'DEFAULT_FONT_SIZE'}), "('figure', titlesize=DEFAULT_FONT_SIZE)\n", (838, 877), True, 'import matplotlib.pyplot as pyplot\n'), ((989, 1035), 'matplotlib.colors.to_rgba', 'matplotlib.colors.to_rgba', (['COLOUR_LIST[1]', '(0.5)'], {}), '(COLOUR_LIST[1], 0.5)\n', (1014, 1035), False, 'import matplotlib\n'), ((1056, 1101), 
'matplotlib.colors.ListedColormap', 'matplotlib.colors.ListedColormap', (['COLOUR_LIST'], {}), '(COLOUR_LIST)\n', (1088, 1101), False, 'import matplotlib\n'), ((1272, 1298), 'numpy.full', 'numpy.full', (['(3)', '(152.0 / 255)'], {}), '(3, 152.0 / 255)\n', (1282, 1298), False, 'import numpy\n'), ((1382, 1508), 'numpy.array', 'numpy.array', (['[[-10, -8, -6, -4, -2, 0], [-8, -6, -4, -2, 0.5, 1], [-6, -4, 1.5, 1.75, 2,\n 2], [-4, 2, 2.25, 2.5, 2.75, 3]]'], {}), '([[-10, -8, -6, -4, -2, 0], [-8, -6, -4, -2, 0.5, 1], [-6, -4, \n 1.5, 1.75, 2, 2], [-4, 2, 2.25, 2.5, 2.75, 3]])\n', (1393, 1508), False, 'import numpy\n'), ((1539, 1651), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 1, 1, -2], [1, 1, 1, -2, -2, -2], [1, -2, -2, -2, -2, -2], [-2, \n -2, -2, -2, -2, -2]]'], {}), '([[1, 1, 1, 1, 1, -2], [1, 1, 1, -2, -2, -2], [1, -2, -2, -2, -2,\n -2], [-2, -2, -2, -2, -2, -2]])\n', (1550, 1651), False, 'import numpy\n'), ((1683, 1796), 'numpy.array', 'numpy.array', (['[[-6, -6, -6, -6, -6, -6], [-6, -6, -6, -6, -6, 3], [-6, -6, -6, 3, 3, 3],\n [-6, -6, 3, 3, 3, 3]]'], {}), '([[-6, -6, -6, -6, -6, -6], [-6, -6, -6, -6, -6, 3], [-6, -6, -6,\n 3, 3, 3], [-6, -6, 3, 3, 3, 3]])\n', (1694, 1796), False, 'import numpy\n'), ((1835, 1907), 'numpy.stack', 'numpy.stack', (['(TEMPERATURE_MATRIX, U_WIND_MATRIX, V_WIND_MATRIX)'], {'axis': '(-1)'}), '((TEMPERATURE_MATRIX, U_WIND_MATRIX, V_WIND_MATRIX), axis=-1)\n', (1846, 1907), False, 'import numpy\n'), ((1943, 1990), 'numpy.array', 'numpy.array', (['[[0, 1, 0], [1, -4, 1], [0, 1, 0]]'], {}), '([[0, 1, 0], [1, -4, 1], [0, 1, 0]])\n', (1954, 1990), False, 'import numpy\n'), ((2029, 2077), 'numpy.array', 'numpy.array', (['[[1, 0, -1], [0, 0, 0], [-1, 0, 1]]'], {}), '([[1, 0, -1], [0, 0, 0], [-1, 0, 1]])\n', (2040, 2077), False, 'import numpy\n'), ((2116, 2170), 'numpy.array', 'numpy.array', (['[[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])\n', (2127, 2170), False, 'import numpy\n'), ((2337, 
2378), 'numpy.expand_dims', 'numpy.expand_dims', (['KERNEL_MATRIX'], {'axis': '(-1)'}), '(KERNEL_MATRIX, axis=-1)\n', (2354, 2378), False, 'import numpy\n'), ((4063, 4088), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4086, 4088), False, 'import argparse\n'), ((899, 917), 'numpy.full', 'numpy.full', (['(3)', '(1.0)'], {}), '(3, 1.0)\n', (909, 917), False, 'import numpy\n'), ((1125, 1163), 'numpy.array', 'numpy.array', (['[217, 95, 2]'], {'dtype': 'float'}), '([217, 95, 2], dtype=float)\n', (1136, 1163), False, 'import numpy\n'), ((1192, 1233), 'numpy.array', 'numpy.array', (['[117, 112, 179]'], {'dtype': 'float'}), '([117, 112, 179], dtype=float)\n', (1203, 1233), False, 'import numpy\n'), ((5499, 5545), 'numpy.full', 'numpy.full', (['feature_matrix_2d.shape', 'numpy.nan'], {}), '(feature_matrix_2d.shape, numpy.nan)\n', (5509, 5545), False, 'import numpy\n'), ((7963, 8008), 'numpy.full', 'numpy.full', (['kernel_matrix_2d.shape', 'numpy.nan'], {}), '(kernel_matrix_2d.shape, numpy.nan)\n', (7973, 8008), False, 'import numpy\n'), ((10289, 10595), 'matplotlib.patches.ConnectionPatch', 'ConnectionPatch', ([], {'xyA': '(first_kernel_column, first_kernel_row)', 'xyB': '(last_feature_column, first_feature_row)', 'coordsA': '"""data"""', 'coordsB': '"""data"""', 'axesA': 'kernel_axes_object', 'axesB': 'feature_axes_object', 'color': 'FEATURE_TO_KERNEL_LINE_COLOUR', 'linewidth': 'FEATURE_TO_KERNEL_LINE_WIDTH', 'linestyle': '"""dashed"""'}), "(xyA=(first_kernel_column, first_kernel_row), xyB=(\n last_feature_column, first_feature_row), coordsA='data', coordsB='data',\n axesA=kernel_axes_object, axesB=feature_axes_object, color=\n FEATURE_TO_KERNEL_LINE_COLOUR, linewidth=FEATURE_TO_KERNEL_LINE_WIDTH,\n linestyle='dashed')\n", (10304, 10595), False, 'from matplotlib.patches import ConnectionPatch\n'), ((10721, 11025), 'matplotlib.patches.ConnectionPatch', 'ConnectionPatch', ([], {'xyA': '(first_kernel_column, last_kernel_row)', 'xyB': 
'(last_feature_column, last_feature_row)', 'coordsA': '"""data"""', 'coordsB': '"""data"""', 'axesA': 'kernel_axes_object', 'axesB': 'feature_axes_object', 'color': 'FEATURE_TO_KERNEL_LINE_COLOUR', 'linewidth': 'FEATURE_TO_KERNEL_LINE_WIDTH', 'linestyle': '"""dashed"""'}), "(xyA=(first_kernel_column, last_kernel_row), xyB=(\n last_feature_column, last_feature_row), coordsA='data', coordsB='data',\n axesA=kernel_axes_object, axesB=feature_axes_object, color=\n FEATURE_TO_KERNEL_LINE_COLOUR, linewidth=FEATURE_TO_KERNEL_LINE_WIDTH,\n linestyle='dashed')\n", (10736, 11025), False, 'from matplotlib.patches import ConnectionPatch\n'), ((11379, 11457), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'output_dir_name'}), '(directory_name=output_dir_name)\n', (11425, 11457), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((11496, 11636), 'gewittergefahr.deep_learning.standalone_utils.do_2d_convolution', 'standalone_utils.do_2d_convolution', ([], {'feature_matrix': 'INPUT_FEATURE_MATRIX', 'kernel_matrix': 'KERNEL_MATRIX', 'pad_edges': '(True)', 'stride_length_px': '(1)'}), '(feature_matrix=INPUT_FEATURE_MATRIX,\n kernel_matrix=KERNEL_MATRIX, pad_edges=True, stride_length_px=1)\n', (11530, 11636), False, 'from gewittergefahr.deep_learning import standalone_utils\n'), ((16843, 16998), 'gewittergefahr.plotting.imagemagick_utils.create_gif', 'imagemagick_utils.create_gif', ([], {'input_file_names': 'image_file_names', 'output_file_name': 'animation_file_name', 'num_seconds_per_frame': '(0.5)', 'resize_factor': '(0.5)'}), '(input_file_names=image_file_names,\n output_file_name=animation_file_name, num_seconds_per_frame=0.5,\n resize_factor=0.5)\n', (16871, 16998), False, 'from gewittergefahr.plotting import imagemagick_utils\n'), ((922, 962), 'numpy.array', 'numpy.array', (['[27, 158, 119]'], {'dtype': 'float'}), '([27, 158, 119], dtype=float)\n', (933, 962), 
False, 'import numpy\n'), ((2202, 2299), 'numpy.stack', 'numpy.stack', (['(TEMPERATURE_KERNEL_MATRIX, U_WIND_KERNEL_MATRIX, V_WIND_KERNEL_MATRIX)'], {'axis': '(-1)'}), '((TEMPERATURE_KERNEL_MATRIX, U_WIND_KERNEL_MATRIX,\n V_WIND_KERNEL_MATRIX), axis=-1)\n', (2213, 2299), False, 'import numpy\n'), ((5746, 5771), 'numpy.isnan', 'numpy.isnan', (['dummy_matrix'], {}), '(dummy_matrix)\n', (5757, 5771), False, 'import numpy\n'), ((8209, 8234), 'numpy.isnan', 'numpy.isnan', (['dummy_matrix'], {}), '(dummy_matrix)\n', (8220, 8234), False, 'import numpy\n'), ((12198, 12411), 'gewittergefahr.plotting.plotting_utils.create_paneled_figure', 'plotting_utils.create_paneled_figure', ([], {'num_rows': 'NUM_PANEL_ROWS', 'num_columns': 'NUM_PANEL_COLUMNS', 'horizontal_spacing': '(0.2)', 'vertical_spacing': '(0.0)', 'shared_x_axis': '(False)', 'shared_y_axis': '(False)', 'keep_aspect_ratio': '(True)'}), '(num_rows=NUM_PANEL_ROWS, num_columns=\n NUM_PANEL_COLUMNS, horizontal_spacing=0.2, vertical_spacing=0.0,\n shared_x_axis=False, shared_y_axis=False, keep_aspect_ratio=True)\n', (12234, 12411), False, 'from gewittergefahr.plotting import plotting_utils\n'), ((16485, 16517), 'matplotlib.pyplot.close', 'pyplot.close', (['this_figure_object'], {}), '(this_figure_object)\n', (16497, 16517), True, 'import matplotlib.pyplot as pyplot\n'), ((16531, 16645), 'gewittergefahr.plotting.imagemagick_utils.trim_whitespace', 'imagemagick_utils.trim_whitespace', ([], {'input_file_name': 'image_file_names[-1]', 'output_file_name': 'image_file_names[-1]'}), '(input_file_name=image_file_names[-1],\n output_file_name=image_file_names[-1])\n', (16564, 16645), False, 'from gewittergefahr.plotting import imagemagick_utils\n'), ((6302, 6338), 'numpy.round', 'numpy.round', (['feature_matrix_2d[i, j]'], {}), '(feature_matrix_2d[i, j])\n', (6313, 6338), False, 'import numpy\n'), ((8587, 8622), 'numpy.round', 'numpy.round', (['kernel_matrix_2d[i, j]'], {}), '(kernel_matrix_2d[i, j])\n', (8598, 8622), False, 
'import numpy\n')] |
import tensorflow as tf
import numpy as np
from collections import deque
import gym
import random
from Agent import Agent
from Layers import DenseEmbeddingNet, QNet
class DQN(tf.keras.Model):
    """Deep Q-network: a state-embedding stage followed by a Q-value head.

    The two stages are supplied by the caller (e.g. ``DenseEmbeddingNet``
    and ``QNet``), so this class is only responsible for chaining them.
    """

    def __init__(self, embedding_net: tf.keras.layers.Layer, q_net: tf.keras.layers.Layer):
        super().__init__()
        self.embedding_layer = embedding_net
        self.value_layer = q_net

    def call(self, state):
        """Map a batch of states to per-action Q-values."""
        return self.value_layer(self.embedding_layer(state))
class DQNAgent(Agent):
    """Double-DQN agent: the online network selects the next action, a
    periodically-synced target network evaluates it.

    NOTE(review): relies on helpers from the ``Agent`` base class
    (``checkpoint_summary_setting``, ``train_summary``, ``save_checkpoint``,
    ``train_loss``, ``ckpt``/``manager``) defined outside this file —
    confirm their contracts there.
    """
    def __init__(self, env: gym.Env, set_summary=True):
        super().__init__()
        self.env = env
        self.lr = 0.001  # Adam learning rate
        self.gamma = 0.99  # discount factor for bootstrapped targets
        self.epsilon = 0  # exploration rate; re-derived each step in collect_transitions
        self.dqn_net = DQN(embedding_net=DenseEmbeddingNet(),
                           q_net=QNet(env))
        self.target_net = DQN(embedding_net=DenseEmbeddingNet(),
                              q_net=QNet(env))
        self.opt = tf.keras.optimizers.Adam(self.lr)
        self.action_size = self.env.action_space.n
        self.batch_size = 64
        self.maxlen = 2000  # replay-buffer capacity
        self.memory = deque(maxlen=self.maxlen)
        self.step = 0  # total environment steps taken so far
        self.target_net_update_step = 20  # hard-sync target net every N steps
        self.episode = 0
        self.score = 0  # accumulated reward of the current episode
        self.set_summary = set_summary
        if set_summary:
            self.checkpoint_summary_setting("double_dqn", self.opt, self.dqn_net)
    def get_action(self, state):
        """Epsilon-greedy action selection for a single (unbatched) state."""
        q_value = self.dqn_net(np.array([state], dtype=np.float32))[0]
        if np.random.rand() < self.epsilon:
            action = np.random.choice(self.action_size)
        else:
            action = np.argmax(q_value)
        return action
    def collect_transitions(self, start_state):
        """Roll out the environment from ``start_state``, appending
        (s, a, r, s', done) tuples to the replay buffer; returns the state
        to resume from once the buffer holds more than one batch."""
        state = start_state
        while True:
            self.step += 1
            # Epsilon decays as episodes accumulate (0.1 at episode 0).
            self.epsilon = 1 / (self.episode * 0.1 + 10)
            action = self.get_action(state)
            next_state, reward, done, _ = self.env.step(action)
            self.score += reward
            self.memory.append((state, action, reward, next_state, done))
            state = next_state
            if done:
                self.episode += 1
                print("Episode %s, Score %s." % (self.episode, self.score))
                if self.set_summary:
                    self.train_summary(self.score, self.episode)
                    self.save_checkpoint()
                self.score = 0
                state = self.env.reset()
                # Persist the full model every 1000 episodes.
                if self.episode % 1000 == 0:
                    self.dqn_net.save("double_dqn_model/")
            if len(self.memory) > self.batch_size:
                return state
    def sampling(self):
        """Draw a uniform-random minibatch from the replay buffer and unzip
        it into parallel lists (states, actions, rewards, next_states, dones)."""
        mini_batch = random.sample(self.memory, self.batch_size)
        states, actions, rewards, next_states, dones = [], [], [], [], []
        for transition in mini_batch:
            states.append(transition[0])
            actions.append(transition[1])
            rewards.append(transition[2])
            next_states.append(transition[3])
            dones.append(transition[4])
        return states, actions, rewards, next_states, dones
    def update(self):
        """One gradient step on the online network using the Double-DQN
        target: argmax action from the online net, value from the target net."""
        states, actions, rewards, next_states, dones = self.sampling()
        dqn_variable = self.dqn_net.trainable_variables
        with tf.GradientTape() as tape:
            tape.watch(dqn_variable)
            states = np.array(states, dtype=np.float32)
            actions = np.array(actions, dtype=np.int32)
            rewards = np.array(rewards, dtype=np.float32)
            next_states = np.array(next_states, dtype=np.float32)
            dones = np.array(dones, dtype=np.int32)
            # Online net picks the next action; stop_gradient keeps the
            # selection out of the backward pass.
            q = tf.stop_gradient(self.dqn_net(next_states))
            next_actions = tf.argmax(q, axis=1)
            # Target net evaluates the selected action (Double DQN).
            target_q = self.target_net(next_states)
            target_op_value = tf.reduce_sum(tf.one_hot(next_actions, self.action_size) * target_q, axis=1)
            # Terminal transitions (dones == 1) bootstrap to zero.
            td_target = rewards + self.gamma * (1 - dones) * target_op_value
            q = self.dqn_net(states)
            op_value = tf.reduce_sum(tf.one_hot(actions, self.action_size) * q, axis=1)
            loss = tf.reduce_mean(tf.square(op_value - td_target) * 0.5)
            self.train_loss(loss)
        gradients = tape.gradient(loss, dqn_variable)
        self.opt.apply_gradients(zip(gradients, dqn_variable))
        # Periodically hard-copy online weights into the target network.
        if self.step % self.target_net_update_step == 0:
            self.target_net.set_weights(self.dqn_net.get_weights())
    def learn(self, max_step=1000000):
        """Main loop: restore the latest checkpoint, then alternate between
        collecting transitions and gradient updates; updates begin once the
        buffer is half full."""
        self.ckpt.restore(self.manager.latest_checkpoint)
        state = self.env.reset()
        for _ in range(max_step):
            state = self.collect_transitions(state)
            if self.step > self.maxlen/2:
                self.update()
if __name__ == '__main__':
    # Train a Double-DQN agent on CartPole-v1; summaries/checkpoints are
    # written by DQNAgent itself (set_summary defaults to True).
    env = gym.make("CartPole-v1")
    # train
    agent = DQNAgent(env=env)
    agent.learn()
    # play
    # a2c = tf.keras.models.load_model("target_dqn_model/")
    # agent = DQNAgent(env=env)
    # agent.play()
| [
"tensorflow.one_hot",
"tensorflow.square",
"gym.make",
"numpy.argmax",
"random.sample",
"tensorflow.argmax",
"Layers.QNet",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"Layers.DenseEmbeddingNet",
"numpy.random.choice",
"numpy.random.rand",
"tensorflow.GradientTape",
"collections.deq... | [((4808, 4831), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (4816, 4831), False, 'import gym\n'), ((978, 1011), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['self.lr'], {}), '(self.lr)\n', (1002, 1011), True, 'import tensorflow as tf\n'), ((1142, 1167), 'collections.deque', 'deque', ([], {'maxlen': 'self.maxlen'}), '(maxlen=self.maxlen)\n', (1147, 1167), False, 'from collections import deque\n'), ((2714, 2757), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (2727, 2757), False, 'import random\n'), ((1542, 1558), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1556, 1558), True, 'import numpy as np\n'), ((1596, 1630), 'numpy.random.choice', 'np.random.choice', (['self.action_size'], {}), '(self.action_size)\n', (1612, 1630), True, 'import numpy as np\n'), ((1666, 1684), 'numpy.argmax', 'np.argmax', (['q_value'], {}), '(q_value)\n', (1675, 1684), True, 'import numpy as np\n'), ((3306, 3323), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3321, 3323), True, 'import tensorflow as tf\n'), ((3391, 3425), 'numpy.array', 'np.array', (['states'], {'dtype': 'np.float32'}), '(states, dtype=np.float32)\n', (3399, 3425), True, 'import numpy as np\n'), ((3448, 3481), 'numpy.array', 'np.array', (['actions'], {'dtype': 'np.int32'}), '(actions, dtype=np.int32)\n', (3456, 3481), True, 'import numpy as np\n'), ((3504, 3539), 'numpy.array', 'np.array', (['rewards'], {'dtype': 'np.float32'}), '(rewards, dtype=np.float32)\n', (3512, 3539), True, 'import numpy as np\n'), ((3566, 3605), 'numpy.array', 'np.array', (['next_states'], {'dtype': 'np.float32'}), '(next_states, dtype=np.float32)\n', (3574, 3605), True, 'import numpy as np\n'), ((3626, 3657), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.int32'}), '(dones, dtype=np.int32)\n', (3634, 3657), True, 'import numpy as np\n'), ((3746, 3766), 'tensorflow.argmax', 
'tf.argmax', (['q'], {'axis': '(1)'}), '(q, axis=1)\n', (3755, 3766), True, 'import tensorflow as tf\n'), ((781, 800), 'Layers.DenseEmbeddingNet', 'DenseEmbeddingNet', ([], {}), '()\n', (798, 800), False, 'from Layers import DenseEmbeddingNet, QNet\n'), ((835, 844), 'Layers.QNet', 'QNet', (['env'], {}), '(env)\n', (839, 844), False, 'from Layers import DenseEmbeddingNet, QNet\n'), ((890, 909), 'Layers.DenseEmbeddingNet', 'DenseEmbeddingNet', ([], {}), '()\n', (907, 909), False, 'from Layers import DenseEmbeddingNet, QNet\n'), ((947, 956), 'Layers.QNet', 'QNet', (['env'], {}), '(env)\n', (951, 956), False, 'from Layers import DenseEmbeddingNet, QNet\n'), ((1490, 1525), 'numpy.array', 'np.array', (['[state]'], {'dtype': 'np.float32'}), '([state], dtype=np.float32)\n', (1498, 1525), True, 'import numpy as np\n'), ((3863, 3905), 'tensorflow.one_hot', 'tf.one_hot', (['next_actions', 'self.action_size'], {}), '(next_actions, self.action_size)\n', (3873, 3905), True, 'import tensorflow as tf\n'), ((4078, 4115), 'tensorflow.one_hot', 'tf.one_hot', (['actions', 'self.action_size'], {}), '(actions, self.action_size)\n', (4088, 4115), True, 'import tensorflow as tf\n'), ((4164, 4195), 'tensorflow.square', 'tf.square', (['(op_value - td_target)'], {}), '(op_value - td_target)\n', (4173, 4195), True, 'import tensorflow as tf\n')] |
import os
from os.path import basename, join
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import eigency
import numpy as np
__package_name__ = "eigency"

__eigen_dir__ = eigency.__eigen_dir__
__eigen_lib_dir__ = join(basename(__eigen_dir__), 'Eigen')

# Cython extensions wrapping the Eigen C++ headers; both need the numpy
# headers and the bundled Eigen directory on the include path.
extensions = [
    Extension("eigency.conversions", ["eigency/conversions.pyx"],
              include_dirs = [np.get_include(), __eigen_dir__],
              language="c++"
              ),
    Extension("eigency.core", ["eigency/core.pyx"],
              include_dirs = [np.get_include(), __eigen_dir__],
              language="c++"
              )
]

# Prefer a reST long description (converted from Markdown via pypandoc);
# fall back to the raw Markdown when pypandoc is missing or conversion fails.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    # Use a context manager so the file handle is closed deterministically
    # (the previous `open('README.md').read()` leaked the handle until GC).
    with open('README.md') as readme_file:
        long_description = readme_file.read()

# Collect every Eigen header so it ships inside the sdist/wheel.
eigen_data_files = []
for root, dirs, files in os.walk(join(__eigen_dir__, 'Eigen')):
    for f in files:
        if f.endswith('.h'):
            eigen_data_files.append(join(root, f))

dist = setup(
    name = __package_name__,
    description = "Cython interface between the numpy arrays and the Matrix/Array classes of the Eigen C++ library",
    long_description=long_description,
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: C++',
        'Programming Language :: Cython',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    license = "MIT",
    author = "<NAME>",
    author_email = "<EMAIL>",
    url = "https://github.com/wouterboomsma/eigency",
    use_scm_version = True,
    setup_requires = ['setuptools_scm', 'cython'],
    ext_modules = cythonize(extensions),
    packages = find_packages(),
    include_package_data=True,
    package_data = {__package_name__: [
        '*.h', '*.pxd', '*.pyx',
        join(__eigen_lib_dir__, '*'),
    ] + eigen_data_files},
    exclude_package_data = {__package_name__: [join(__eigen_lib_dir__, 'CMakeLists.txt')]},
    install_requires = ['numpy', 'cython']
)
| [
"Cython.Build.cythonize",
"os.path.basename",
"pypandoc.convert",
"numpy.get_include",
"os.path.join",
"setuptools.find_packages"
] | [((295, 318), 'os.path.basename', 'basename', (['__eigen_dir__'], {}), '(__eigen_dir__)\n', (303, 318), False, 'from os.path import basename, join\n'), ((713, 749), 'pypandoc.convert', 'pypandoc.convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (729, 749), False, 'import pypandoc\n'), ((884, 912), 'os.path.join', 'join', (['__eigen_dir__', '"""Eigen"""'], {}), "(__eigen_dir__, 'Eigen')\n", (888, 912), False, 'from os.path import basename, join\n'), ((1884, 1905), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (1893, 1905), False, 'from Cython.Build import cythonize\n'), ((1922, 1937), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1935, 1937), False, 'from setuptools import setup, find_packages\n'), ((441, 457), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (455, 457), True, 'import numpy as np\n'), ((593, 609), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (607, 609), True, 'import numpy as np\n'), ((1000, 1013), 'os.path.join', 'join', (['root', 'f'], {}), '(root, f)\n', (1004, 1013), False, 'from os.path import basename, join\n'), ((2156, 2197), 'os.path.join', 'join', (['__eigen_lib_dir__', '"""CMakeLists.txt"""'], {}), "(__eigen_lib_dir__, 'CMakeLists.txt')\n", (2160, 2197), False, 'from os.path import basename, join\n'), ((2052, 2080), 'os.path.join', 'join', (['__eigen_lib_dir__', '"""*"""'], {}), "(__eigen_lib_dir__, '*')\n", (2056, 2080), False, 'from os.path import basename, join\n')] |
import numpy as np
import time
def sample(basis, random_initial, optimizer, solver, tol=1.0e-14, maxiter=80):
    '''
    Model constrained adaptive sampler to create trial basis for ROM.

    Repeatedly finds the parameter with the largest reduced-model error,
    solves the full model there, and enriches the basis with the snapshot
    until the error falls below ``tol`` or ``maxiter`` is reached.

    Parameters
    ----------
    basis : numpy.ndarray
        Initial reduced basis
    random_initial : callable
        Returns a fresh random initial parameter guess for each optimizer
        restart (called once per iteration)
    optimizer : function
        Function to find param with maximum error; returns (z_star, g_z_star)
    solver : Solver class
        Solver with forward and reduced forward functions
    tol : float
        Misfit error tolerance
    maxiter : int
        Maximum sampling iterations

    Returns
    -------
    numpy.ndarray
        Enriched basis.
    '''
    iterations = 0
    g_z_star = 1e30  # sentinel so the first loop iteration always runs
    while g_z_star > tol and iterations < maxiter:
        # Perform optimization to find parameter with the maximum error
        z_0 = random_initial()
        t_i = time.time()
        prev_g_z_star = g_z_star
        z_star, g_z_star = optimizer(z_0, basis, solver)
        t_f = time.time()
        iterations += 1
        print("Optimizer iteration {} time taken: {}".format(iterations, t_f - t_i))
        #Solve full system with z_star and obtain state vector x(z_star)
        w, y, A, B, C = solver.forward(z_star)
        w = w.vector()[:]
        w = w.reshape(w.shape[0], 1)
        #Enrich basis with generated snapshots
        basis = enrich(basis,w)
        print("Current error: {}, Improv: {}\n".format(g_z_star, prev_g_z_star - g_z_star))
    print("Sampling completed after {} iterations".format(iterations))
    return basis
def enrich(basis, w):
    '''
    Enrich a basis with a new snapshot via Gram-Schmidt orthogonalization.

    Parameters
    ----------
    basis : numpy.ndarray, shape (n, k)
        Existing basis (columns are basis vectors).
    w : numpy.ndarray, shape (n, 1)
        New snapshot column to append.

    Returns
    -------
    numpy.ndarray, shape (n, k + 1)
        Basis with the orthonormalized snapshot appended as the last column.

    TODO: I am throwing away samples during the optimization phase as I am
    using a blackbox scipy optimization tool. Implementing a bound-constrained
    optimizer will expose those to me and I can use those snapshots to produce
    a POD basis.
    '''
    (n, k) = basis.shape
    U = np.hstack((basis, w))
    # Orthogonalize the new column against ALL k existing basis vectors.
    # BUG FIX: the previous loop ran over range(0, k-1) and skipped the last
    # basis column, leaving the new vector non-orthogonal to it.
    for j in range(k):
        U[:,-1] = U[:,-1] - ( U[:,-1].T @ U[:,j] )/( U[:,j].T @ U[:,j] ) * U[:,j]
    # Normalize the new column to unit length.
    U[:,-1] = U[:,-1] / np.sqrt(U[:,-1].T @ U[:,-1])
    return U
| [
"time.time",
"numpy.sqrt",
"numpy.hstack"
] | [((2005, 2026), 'numpy.hstack', 'np.hstack', (['(basis, w)'], {}), '((basis, w))\n', (2014, 2026), True, 'import numpy as np\n'), ((857, 868), 'time.time', 'time.time', ([], {}), '()\n', (866, 868), False, 'import time\n'), ((973, 984), 'time.time', 'time.time', ([], {}), '()\n', (982, 984), False, 'import time\n'), ((2164, 2194), 'numpy.sqrt', 'np.sqrt', (['(U[:, -1].T @ U[:, -1])'], {}), '(U[:, -1].T @ U[:, -1])\n', (2171, 2194), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from PIL import Image
from keras import backend as K
from keras.engine import Layer
from keras import activations
# https://software.intel.com/en-us/articles/keras-implementation-of-siamese-like-networks
class Normalized_Correlation_Layer(Layer):
    """Keras layer computing patch-wise normalized cross-correlation between
    two feature maps (Siamese-style matching).

    Inputs: a list ``[input_1, input_2]`` of 4-D tensors
    (batch, rows, cols, channels).  Each ``patch_size`` patch of ``input_1``
    is zero-mean/unit-std normalized and correlated against a vertical band
    of normalized patches from ``input_2``, yielding
    ``patch_size[0] * rows * channels`` scores per spatial position.
    Only ``border_mode='same'`` is supported.
    """
    def __init__(self, patch_size=(5,5),
                 dim_ordering='tf',
                 border_mode='same',
                 stride=(1, 1),
                 activation=None,
                 **kwargs):
        if border_mode != 'same':
            raise ValueError('Invalid border mode for Correlation Layer (only "same" is supported as of now):', border_mode)
        self.kernel_size = patch_size
        self.subsample = stride
        self.dim_ordering = dim_ordering
        self.border_mode = border_mode
        self.activation = activations.get(activation)
        super(Normalized_Correlation_Layer, self).__init__(**kwargs)
    def compute_output_shape(self, input_shape):
        # Spatial size is preserved ('same' padding); the last dimension holds
        # patch_size[0] * rows * channels correlation scores per position.
        return (input_shape[0][0], input_shape[0][1], input_shape[0][2],
                self.kernel_size[0] * input_shape[0][2] * input_shape[0][-1])
    def get_config(self):
        """Serialize constructor arguments for layer reconstruction."""
        config = {'patch_size': self.kernel_size,
                  'activation': self.activation.__name__,
                  'border_mode': self.border_mode,
                  'stride': self.subsample,
                  'dim_ordering': self.dim_ordering}
        base_config = super(Normalized_Correlation_Layer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def call(self, x, mask=None):
        """Compute the correlation map; ``x`` is [input_1, input_2]."""
        input_1, input_2 = x
        stride_row, stride_col = self.subsample
        inp_shape = input_1._keras_shape
        output_shape = self.compute_output_shape([inp_shape, inp_shape])
        # Half-patch zero padding so every output position has a full patch.
        padding_row = (int(self.kernel_size[0] / 2),int(self.kernel_size[0] / 2))
        padding_col = (int(self.kernel_size[1] / 2),int(self.kernel_size[1] / 2))
        input_1 = K.spatial_2d_padding(input_1, padding =(padding_row,padding_col))
        # input_2 gets double row padding: its patches are sampled over a
        # wider vertical band than input_1's.
        input_2 = K.spatial_2d_padding(input_2, padding = ((padding_row[0]*2, padding_row[1]*2),padding_col))
        output_row = output_shape[1]
        output_col = output_shape[2]
        output = []
        # Process each channel independently.
        for k in range(inp_shape[-1]):
            xc_1 = []  # flattened patches of input_1
            xc_2 = []  # flattened patches of input_2 (extra rows above/below)
            # Extra patch rows from the top padding band of input_2.
            for i in range(padding_row[0]):
                for j in range(output_col):
                    xc_2.append(K.reshape(input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1], k],
                                          (-1, 1, self.kernel_size[0] * self.kernel_size[1])))
            # Aligned patch rows: one patch per output position in both maps.
            for i in range(output_row):
                slice_row = slice(i, i + self.kernel_size[0])
                slice_row2 = slice(i + padding_row[0], i +self.kernel_size[0] + padding_row[0])
                for j in range(output_col):
                    slice_col = slice(j, j + self.kernel_size[1])
                    xc_2.append(K.reshape(input_2[:, slice_row2, slice_col, k],
                                          (-1, 1,self.kernel_size[0]*self.kernel_size[1])))
                    xc_1.append(K.reshape(input_1[:, slice_row, slice_col, k],
                                          (-1, 1,self.kernel_size[0]*self.kernel_size[1])))
            # Extra patch rows from the bottom padding band of input_2.
            for i in range(output_row, output_row + padding_row[1]):
                for j in range(output_col):
                    xc_2.append(K.reshape(input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1], k],
                                          (-1, 1, self.kernel_size[0] * self.kernel_size[1])))
            # Normalize every patch to zero mean / unit std before correlating.
            xc_1_aggregate = K.concatenate(xc_1, axis=1)
            xc_1_mean = K.mean(xc_1_aggregate, axis=-1, keepdims=True)
            xc_1_std = K.std(xc_1_aggregate, axis=-1, keepdims=True)
            xc_1_aggregate = (xc_1_aggregate - xc_1_mean) / xc_1_std
            xc_2_aggregate = K.concatenate(xc_2, axis=1)
            xc_2_mean = K.mean(xc_2_aggregate, axis=-1, keepdims=True)
            xc_2_std = K.std(xc_2_aggregate, axis=-1, keepdims=True)
            xc_2_aggregate = (xc_2_aggregate - xc_2_mean) / xc_2_std
            xc_1_aggregate = K.permute_dimensions(xc_1_aggregate, (0, 2, 1))
            block = []
            len_xc_1 = len(xc_1)
            for i in range(len_xc_1):
                # This for loop is to compute the product of a given patch of feature map 1 and the feature maps on which it is supposed to
                sl1 = slice(int(i / inp_shape[2]) * inp_shape[2],
                            int(i / inp_shape[2]) * inp_shape[2] + inp_shape[2] * self.kernel_size[0])
                # This calculates which are the patches of feature map 2 to be considered for a given patch of first feature map.
                block.append(K.reshape(K.batch_dot(xc_2_aggregate[:, sl1, :],
                                                  xc_1_aggregate[:, :, i]),
                                       (-1, 1, 1, inp_shape[2] * self.kernel_size[0])))
            block = K.concatenate(block, axis=1)
            block = K.reshape(block, (-1, output_row, output_col, inp_shape[2] * self.kernel_size[0]))
            output.append(block)
        output = self.activation(output)
        return output
# Demo script: load two consecutive detection frames, take the per-pixel
# cross product of their RGB vectors, and display the result.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    # BUG FIX: JPEG files are binary — open them in 'rb' mode so f.read()
    # returns bytes (text mode raises UnicodeDecodeError on arbitrary JPEG
    # bytes under Python 3). Context managers also close the handles, which
    # the original code never did.
    with open('/store/datasets/UA-Detrac/train/images/MVI_20035/img00001.jpg', 'rb') as f:
        content = f.read()
    im1 = tf.image.decode_jpeg(content)
    with open('/store/datasets/UA-Detrac/train/images/MVI_20035/img00002.jpg', 'rb') as f:
        content = f.read()
    im2 = tf.image.decode_jpeg(content)
    im1 = tf.image.convert_image_dtype(im1, tf.float32)
    im2 = tf.image.convert_image_dtype(im2, tf.float32)
    # tf.cross operates on the last axis; presumably both images are RGB
    # (innermost dimension 3) — TODO confirm.
    im = tf.cross(im1, im2)
    im = tf.image.convert_image_dtype(im, tf.uint8)
    im = im.eval()
    Image.fromarray((np.asarray(im))).show()
    # Redundant sess.close() removed: the `with` block closes the session.
| [
"keras.backend.concatenate",
"keras.activations.get",
"tensorflow.global_variables_initializer",
"keras.backend.spatial_2d_padding",
"keras.backend.reshape",
"numpy.asarray",
"tensorflow.Session",
"keras.backend.batch_dot",
"keras.backend.mean",
"keras.backend.std",
"tensorflow.cross",
"tensor... | [((5074, 5107), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5105, 5107), True, 'import tensorflow as tf\n'), ((5113, 5125), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (5123, 5125), True, 'import tensorflow as tf\n'), ((5481, 5526), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['im1', 'tf.float32'], {}), '(im1, tf.float32)\n', (5509, 5526), True, 'import tensorflow as tf\n'), ((5537, 5582), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['im2', 'tf.float32'], {}), '(im2, tf.float32)\n', (5565, 5582), True, 'import tensorflow as tf\n'), ((5620, 5638), 'tensorflow.cross', 'tf.cross', (['im1', 'im2'], {}), '(im1, im2)\n', (5628, 5638), True, 'import tensorflow as tf\n'), ((5675, 5717), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['im', 'tf.uint8'], {}), '(im, tf.uint8)\n', (5703, 5717), True, 'import tensorflow as tf\n'), ((801, 828), 'keras.activations.get', 'activations.get', (['activation'], {}), '(activation)\n', (816, 828), False, 'from keras import activations\n'), ((1939, 2004), 'keras.backend.spatial_2d_padding', 'K.spatial_2d_padding', (['input_1'], {'padding': '(padding_row, padding_col)'}), '(input_1, padding=(padding_row, padding_col))\n', (1959, 2004), True, 'from keras import backend as K\n'), ((2023, 2121), 'keras.backend.spatial_2d_padding', 'K.spatial_2d_padding', (['input_2'], {'padding': '((padding_row[0] * 2, padding_row[1] * 2), padding_col)'}), '(input_2, padding=((padding_row[0] * 2, padding_row[1] *\n 2), padding_col))\n', (2043, 2121), True, 'from keras import backend as K\n'), ((3535, 3562), 'keras.backend.concatenate', 'K.concatenate', (['xc_1'], {'axis': '(1)'}), '(xc_1, axis=1)\n', (3548, 3562), True, 'from keras import backend as K\n'), ((3584, 3630), 'keras.backend.mean', 'K.mean', (['xc_1_aggregate'], {'axis': '(-1)', 'keepdims': '(True)'}), '(xc_1_aggregate, axis=-1, 
keepdims=True)\n', (3590, 3630), True, 'from keras import backend as K\n'), ((3650, 3695), 'keras.backend.std', 'K.std', (['xc_1_aggregate'], {'axis': '(-1)', 'keepdims': '(True)'}), '(xc_1_aggregate, axis=-1, keepdims=True)\n', (3655, 3695), True, 'from keras import backend as K\n'), ((3787, 3814), 'keras.backend.concatenate', 'K.concatenate', (['xc_2'], {'axis': '(1)'}), '(xc_2, axis=1)\n', (3800, 3814), True, 'from keras import backend as K\n'), ((3835, 3881), 'keras.backend.mean', 'K.mean', (['xc_2_aggregate'], {'axis': '(-1)', 'keepdims': '(True)'}), '(xc_2_aggregate, axis=-1, keepdims=True)\n', (3841, 3881), True, 'from keras import backend as K\n'), ((3901, 3946), 'keras.backend.std', 'K.std', (['xc_2_aggregate'], {'axis': '(-1)', 'keepdims': '(True)'}), '(xc_2_aggregate, axis=-1, keepdims=True)\n', (3906, 3946), True, 'from keras import backend as K\n'), ((4038, 4085), 'keras.backend.permute_dimensions', 'K.permute_dimensions', (['xc_1_aggregate', '(0, 2, 1)'], {}), '(xc_1_aggregate, (0, 2, 1))\n', (4058, 4085), True, 'from keras import backend as K\n'), ((4841, 4869), 'keras.backend.concatenate', 'K.concatenate', (['block'], {'axis': '(1)'}), '(block, axis=1)\n', (4854, 4869), True, 'from keras import backend as K\n'), ((4886, 4973), 'keras.backend.reshape', 'K.reshape', (['block', '(-1, output_row, output_col, inp_shape[2] * self.kernel_size[0])'], {}), '(block, (-1, output_row, output_col, inp_shape[2] * self.\n kernel_size[0]))\n', (4895, 4973), True, 'from keras import backend as K\n'), ((5284, 5313), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['content'], {}), '(content)\n', (5304, 5313), True, 'import tensorflow as tf\n'), ((5440, 5469), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['content'], {}), '(content)\n', (5460, 5469), True, 'import tensorflow as tf\n'), ((5759, 5773), 'numpy.asarray', 'np.asarray', (['im'], {}), '(im)\n', (5769, 5773), True, 'import numpy as np\n'), ((2918, 3021), 'keras.backend.reshape', 
'K.reshape', (['input_2[:, slice_row2, slice_col, k]', '(-1, 1, self.kernel_size[0] * self.kernel_size[1])'], {}), '(input_2[:, slice_row2, slice_col, k], (-1, 1, self.kernel_size[0] *\n self.kernel_size[1]))\n', (2927, 3021), True, 'from keras import backend as K\n'), ((3076, 3178), 'keras.backend.reshape', 'K.reshape', (['input_1[:, slice_row, slice_col, k]', '(-1, 1, self.kernel_size[0] * self.kernel_size[1])'], {}), '(input_1[:, slice_row, slice_col, k], (-1, 1, self.kernel_size[0] *\n self.kernel_size[1]))\n', (3085, 3178), True, 'from keras import backend as K\n'), ((3339, 3473), 'keras.backend.reshape', 'K.reshape', (['input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1], k]', '(-1, 1, self.kernel_size[0] * self.kernel_size[1])'], {}), '(input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1],\n k], (-1, 1, self.kernel_size[0] * self.kernel_size[1]))\n', (3348, 3473), True, 'from keras import backend as K\n'), ((4628, 4691), 'keras.backend.batch_dot', 'K.batch_dot', (['xc_2_aggregate[:, sl1, :]', 'xc_1_aggregate[:, :, i]'], {}), '(xc_2_aggregate[:, sl1, :], xc_1_aggregate[:, :, i])\n', (4639, 4691), True, 'from keras import backend as K\n'), ((2414, 2548), 'keras.backend.reshape', 'K.reshape', (['input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1], k]', '(-1, 1, self.kernel_size[0] * self.kernel_size[1])'], {}), '(input_2[:, i:i + self.kernel_size[0], j:j + self.kernel_size[1],\n k], (-1, 1, self.kernel_size[0] * self.kernel_size[1]))\n', (2423, 2548), True, 'from keras import backend as K\n')] |
# -*- coding: utf-8 -*-
# file: train.py
# author: yangheng <<EMAIL>>
# Copyright (C) 2019. All Rights Reserved.
import argparse
import json
import logging
import os, sys
import random
from sklearn.metrics import f1_score
from time import strftime, localtime
import numpy as np
import torch
import torch.nn.functional as F
from transformers.optimization import AdamW
from transformers.models.bert.modeling_bert import BertModel
from transformers import BertTokenizer
# from pytorch_transformers.optimization import AdamW
# from pytorch_transformers.tokenization_bert import BertTokenizer
# from pytorch_transformers.modeling_bert import BertModel
from seqeval.metrics import classification_report
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from utils.data_utils import ATEPCProcessor, convert_examples_to_features
from model.lcf_atepc import LCF_ATEPC
# Root logger: mirror all INFO-and-above records to stdout and to a
# timestamped file under logs/.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stdout))
os.makedirs('logs', exist_ok=True)
# NOTE(review): `time` shadows the stdlib module name; harmless here because
# only strftime/localtime are imported from the time module above.
time = '{}'.format(strftime("%y%m%d-%H%M%S", localtime()))
log_file = 'logs/{}.log'.format(time)
logger.addHandler(logging.FileHandler(log_file))
logger.info('log file: {}'.format(log_file))
def main(config):
    """Train and evaluate the LCF-ATEPC model for one experiment configuration.

    Seeds all RNGs, selects the dataset and pretrained BERT backbone from the
    configuration, builds the model/optimizer, then runs training with
    periodic evaluation (APC accuracy/F1 and ATE F1).

    Parameters
    ----------
    config : argparse.Namespace
        One experiment configuration (see parse_experiments); the caller must
        also have set `seed` and `device` on it.

    Returns
    -------
    list
        [max_apc_test_acc, max_apc_test_f1, max_ate_test_f1] observed during
        training.
    """
    args = config
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))
    # effective per-step batch size after gradient accumulation
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    processor = ATEPCProcessor()
    label_list = processor.get_labels()
    num_labels = len(label_list) + 1
    # dataset name -> data directory
    datasets = {
        'camera': "atepc_datasets/camera",
        'car': "atepc_datasets/car",
        'phone': "atepc_datasets/phone",
        'notebook': "atepc_datasets/notebook",
        'laptop': "atepc_datasets/laptop",
        'restaurant': "atepc_datasets/restaurant",
        'twitter': "atepc_datasets/twitter",
        'mixed': "atepc_datasets/mixed",
    }
    # dataset name -> pretrained BERT checkpoint (Chinese vs. English datasets)
    pretrained_bert_models = {
        'camera': "bert-base-chinese",
        'car': "bert-base-chinese",
        'phone': "bert-base-chinese",
        'notebook': "bert-base-chinese",
        'laptop': "bert-base-uncased",
        'restaurant': "bert-base-uncased",
        # for loading domain-adapted BERT
        # 'restaurant': "../bert_pretrained_restaurant",
        'twitter': "bert-base-uncased",
        'mixed': "bert-base-multilingual-uncased",
    }
    args.bert_model = pretrained_bert_models[args.dataset]
    args.data_dir = datasets[args.dataset]
    def convert_polarity(examples):
        # Remap polarity label 2 -> 1 in place (the Chinese datasets are
        # binary, so the label space is collapsed to {0, 1}).
        for i in range(len(examples)):
            polarities = []
            for polarity in examples[i].polarity:
                if polarity == 2:
                    polarities.append(1)
                else:
                    polarities.append(polarity)
            examples[i].polarity = polarities
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
    train_examples = processor.get_train_examples(args.data_dir)
    eval_examples = processor.get_test_examples(args.data_dir)
    num_train_optimization_steps = int(
        len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
    bert_base_model = BertModel.from_pretrained(args.bert_model)
    bert_base_model.config.num_labels = num_labels
    if args.dataset in {'camera', 'car', 'phone', 'notebook'}:
        # binary-polarity (Chinese) datasets
        convert_polarity(train_examples)
        convert_polarity(eval_examples)
        model = LCF_ATEPC(bert_base_model, args=args)
    else:
        model = LCF_ATEPC(bert_base_model, args=args)
    for arg in vars(args):
        logger.info('>>> {0}: {1}'.format(arg, getattr(args, arg)))
    model.to(device)
    # Optimizer: no weight decay on bias / LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.00001},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.00001}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, weight_decay=0.00001)
    # --- build the evaluation set tensors once, up front ---
    eval_features = convert_examples_to_features(eval_examples, label_list, args.max_seq_length,
                                                 tokenizer)
    all_spc_input_ids = torch.tensor([f.input_ids_spc for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
    all_polarities = torch.tensor([f.polarities for f in eval_features], dtype=torch.long)
    all_valid_ids = torch.tensor([f.valid_ids for f in eval_features], dtype=torch.long)
    all_lmask_ids = torch.tensor([f.label_mask for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_spc_input_ids, all_input_mask, all_segment_ids, all_label_ids,
                              all_polarities, all_valid_ids, all_lmask_ids)
    # Run prediction for full data
    eval_sampler = RandomSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
    def evaluate(eval_ATE=True, eval_APC=True):
        """Run the model over eval_dataloader; return (apc_result, ate_result).

        apc_result is {'max_apc_test_acc', 'max_apc_test_f1'} (percentages),
        ate_result is the ATE F1 (percentage) or 0 when eval_ATE is False.
        """
        # evaluate
        apc_result = {'max_apc_test_acc': 0, 'max_apc_test_f1': 0}
        ate_result = 0
        y_true = []
        y_pred = []
        n_test_correct, n_test_total = 0, 0
        test_apc_logits_all, test_polarities_all = None, None
        model.eval()
        # label ids start at 1 (0 is reserved, see num_labels = len(label_list) + 1)
        label_map = {i: label for i, label in enumerate(label_list, 1)}
        for input_ids_spc, input_mask, segment_ids, label_ids, polarities, valid_ids, l_mask in eval_dataloader:
            input_ids_spc = input_ids_spc.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            valid_ids = valid_ids.to(device)
            label_ids = label_ids.to(device)
            polarities = polarities.to(device)
            l_mask = l_mask.to(device)
            with torch.no_grad():
                ate_logits, apc_logits = model(input_ids_spc, segment_ids, input_mask,
                                               valid_ids=valid_ids, polarities=polarities, attention_mask_label=l_mask)
            if eval_APC:
                # accumulate logits/targets over batches for the final F1
                polarities = model.get_batch_polarities(polarities)
                n_test_correct += (torch.argmax(apc_logits, -1) == polarities).sum().item()
                n_test_total += len(polarities)
                if test_polarities_all is None:
                    test_polarities_all = polarities
                    test_apc_logits_all = apc_logits
                else:
                    test_polarities_all = torch.cat((test_polarities_all, polarities), dim=0)
                    test_apc_logits_all = torch.cat((test_apc_logits_all, apc_logits), dim=0)
            if eval_ATE:
                if not args.use_bert_spc:
                    label_ids = model.get_batch_token_labels_bert_base_indices(label_ids)
                ate_logits = torch.argmax(F.log_softmax(ate_logits, dim=2), dim=2)
                ate_logits = ate_logits.detach().cpu().numpy()
                label_ids = label_ids.to('cpu').numpy()
                input_mask = input_mask.to('cpu').numpy()
                # Rebuild per-sentence tag sequences for seqeval, stopping at
                # the sentence-end sentinel (label id == len(label_list)).
                for i, label in enumerate(label_ids):
                    temp_1 = []
                    temp_2 = []
                    for j, m in enumerate(label):
                        if j == 0:
                            continue
                        elif label_ids[i][j] == len(label_list):
                            y_true.append(temp_1)
                            y_pred.append(temp_2)
                            break
                        else:
                            temp_1.append(label_map.get(label_ids[i][j], 'O'))
                            temp_2.append(label_map.get(ate_logits[i][j], 'O'))
        if eval_APC:
            test_acc = n_test_correct / n_test_total
            if args.dataset in {'camera', 'car', 'phone', 'notebook'}:
                # binary-polarity datasets
                test_f1 = f1_score(torch.argmax(test_apc_logits_all, -1).cpu(), test_polarities_all.cpu(),
                                   labels=[0, 1], average='macro')
            else:
                test_f1 = f1_score(torch.argmax(test_apc_logits_all, -1).cpu(), test_polarities_all.cpu(),
                                   labels=[0, 1, 2], average='macro')
            test_acc = round(test_acc * 100, 2)
            test_f1 = round(test_f1 * 100, 2)
            apc_result = {'max_apc_test_acc': test_acc, 'max_apc_test_f1': test_f1}
        if eval_ATE:
            report = classification_report(y_true, y_pred, digits=4)
            tmps = report.split()
            # token 7 of the textual report is the first class's F1 score
            ate_result = round(float(tmps[7]) * 100, 2)
        return apc_result, ate_result
    def save_model(path):
        """Persist model weights, tokenizer and config under `path`."""
        # Save a trained model and the associated configuration,
        # Take care of the storage!
        os.makedirs(path, exist_ok=True)
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
        model_to_save.save_pretrained(path)
        tokenizer.save_pretrained(path)
        label_map = {i : label for i, label in enumerate(label_list,1)}
        model_config = {"bert_model":args.bert_model,"do_lower": True,"max_seq_length":args.max_seq_length,"num_labels":len(label_list)+1,"label_map":label_map}
        json.dump(model_config,open(os.path.join(path,"config.json"),"w"))
        logger.info('save model to: {}'.format(path))
    def train():
        """Training loop; returns the best [APC acc, APC F1, ATE F1] seen."""
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_spc_input_ids = torch.tensor([f.input_ids_spc for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        all_valid_ids = torch.tensor([f.valid_ids for f in train_features], dtype=torch.long)
        all_lmask_ids = torch.tensor([f.label_mask for f in train_features], dtype=torch.long)
        all_polarities = torch.tensor([f.polarities for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_spc_input_ids, all_input_mask, all_segment_ids,
                                   all_label_ids, all_polarities, all_valid_ids, all_lmask_ids)
        train_sampler = SequentialSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        max_apc_test_acc = 0
        max_apc_test_f1 = 0
        max_ate_test_f1 = 0
        global_step = 0
        for epoch in range(int(args.num_train_epochs)):
            logger.info('#' * 80)
            logger.info('Train {} Epoch{}'.format(args.seed, epoch + 1, args.data_dir))
            logger.info('#' * 80)
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(train_dataloader):
                model.train()
                batch = tuple(t.to(device) for t in batch)
                input_ids_spc, input_mask, segment_ids, label_ids, polarities, valid_ids, l_mask = batch
                loss_ate, loss_apc = model(input_ids_spc, segment_ids, input_mask, label_ids, polarities, valid_ids,
                                           l_mask)
                # joint loss: aspect-term extraction + polarity classification
                loss = loss_ate + loss_apc
                loss.backward()
                nb_tr_examples += input_ids_spc.size(0)
                nb_tr_steps += 1
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                if global_step % args.eval_steps == 0:
                    if epoch >= args.num_train_epochs-2 or args.num_train_epochs<=2:
                        # evaluate in last 2 epochs
                        apc_result, ate_result = evaluate(eval_ATE=not args.use_bert_spc)
                        # apc_result, ate_result = evaluate()
                        # path = '{0}/{1}_{2}_apcacc_{3}_apcf1_{4}_atef1_{5}'.format(
                        #     args.output_dir,
                        #     args.dataset,
                        #     args.local_context_focus,
                        #     round(apc_result['max_apc_test_acc'], 2),
                        #     round(apc_result['max_apc_test_f1'], 2),
                        #     round(ate_result, 2)
                        # )
                        # if apc_result['max_apc_test_acc'] > max_apc_test_acc or \
                        #         apc_result['max_apc_test_f1'] > max_apc_test_f1 or \
                        #         ate_result > max_ate_test_f1:
                        #     save_model(path)
                        if apc_result['max_apc_test_acc'] > max_apc_test_acc:
                            max_apc_test_acc = apc_result['max_apc_test_acc']
                        if apc_result['max_apc_test_f1'] > max_apc_test_f1:
                            max_apc_test_f1 = apc_result['max_apc_test_f1']
                        if ate_result > max_ate_test_f1:
                            max_ate_test_f1 = ate_result
                        current_apc_test_acc = apc_result['max_apc_test_acc']
                        current_apc_test_f1 = apc_result['max_apc_test_f1']
                        current_ate_test_f1 = round(ate_result, 2)
                        logger.info('*' * 80)
                        logger.info('Train {} Epoch{}, Evaluate for {}'.format(args.seed, epoch + 1, args.data_dir))
                        logger.info(f'APC_test_acc: {current_apc_test_acc}(max: {max_apc_test_acc}) '
                                    f'APC_test_f1: {current_apc_test_f1}(max: {max_apc_test_f1})')
                        if args.use_bert_spc:
                            logger.info(f'ATE_test_F1: {current_apc_test_f1}(max: {max_apc_test_f1})'
                                        f' (Unreliable since `use_bert_spc` is "True".)')
                        else:
                            logger.info(f'ATE_test_f1: {current_ate_test_f1}(max:{max_ate_test_f1})')
                        logger.info('*' * 80)
        return [max_apc_test_acc, max_apc_test_f1, max_ate_test_f1]
    return train()
def parse_experiments(path):
    """Load the experiments JSON file at `path` and build one argparse
    Namespace per experiment entry.

    Each top-level entry of the JSON maps an experiment id to a dict of
    hyper-parameters; the values are installed as argparse defaults, so
    command-line flags can still override them.

    Parameters
    ----------
    path : str
        Path to the experiments configuration JSON file.

    Returns
    -------
    list of argparse.Namespace
        One parsed configuration per experiment in the file.
    """
    configs = []
    with open(path, "r", encoding='utf-8') as reader:
        json_config = json.loads(reader.read())
    for id, config in json_config.items():
        # Hyper Parameters
        parser = argparse.ArgumentParser()
        parser.add_argument("--dataset", default=config['dataset'], type=str)
        parser.add_argument("--output_dir", default=config['output_dir'], type=str)
        parser.add_argument("--SRD", default=int(config['SRD']), type=int)
        parser.add_argument("--learning_rate", default=float(config['learning_rate']), type=float,
                            help="The initial learning rate for Adam.")
        # NOTE: argparse `type=bool` does not parse "False"/"0" from the
        # command line (bool("False") is True); the defaults below are what
        # actually take effect when no flag is passed.
        parser.add_argument("--use_unique_bert", default=bool(config['use_unique_bert']), type=bool)
        parser.add_argument("--use_bert_spc", default=bool(config['use_bert_spc_for_apc']), type=bool)
        parser.add_argument("--local_context_focus", default=config['local_context_focus'], type=str)
        parser.add_argument("--num_train_epochs", default=float(config['num_train_epochs']), type=float,
                            help="Total number of training epochs to perform.")
        parser.add_argument("--train_batch_size", default=int(config['train_batch_size']), type=int,
                            help="Total batch size for training.")
        # BUG FIX: dropout is a float hyper-parameter; it was declared with
        # type=int, which would truncate a command-line override such as 0.5.
        parser.add_argument("--dropout", default=float(config['dropout']), type=float)
        parser.add_argument("--max_seq_length", default=int(config['max_seq_length']), type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int, help="Total batch size for eval.")
        parser.add_argument("--eval_steps", default=20, help="evaluate per steps")
        parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                            help="Number of updates steps to accumulate before performing a backward/update pass.")
        configs.append(parser.parse_args())
    return configs
if __name__ == "__main__":
    # Parse the path of the experiments config file from the command line.
    experiments = argparse.ArgumentParser()
    experiments.add_argument('--config_path', default='experiments.json', type=str, help='Path of experiments config file')
    experiments = experiments.parse_args()
    from utils.Pytorch_GPUManager import GPUManager
    # Automatically pick the most suitable GPU, falling back to CPU.
    index = GPUManager().auto_choice()
    device = torch.device("cuda:" + str(index) if torch.cuda.is_available() else "cpu")
    exp_configs = parse_experiments(experiments.config_path)
    n = 5  # number of repeated training runs (seeds 1..n) per configuration
    for config in exp_configs:
        logger.info('-'*80)
        logger.info('Config {} (totally {} configs)'.format(exp_configs.index(config)+1,len(exp_configs)))
        results = []
        max_apc_test_acc, max_apc_test_f1, max_ate_test_f1 = 0,0,0
        for i in range(n):
            config.device = device
            config.seed = i + 1
            logger.info('No.{} training process of {}'.format(i + 1, n))
            apc_test_acc, apc_test_f1, ate_test_f1 = main(config)
            # keep the best scores over the n runs
            if apc_test_acc > max_apc_test_acc:
                max_apc_test_acc = apc_test_acc
            if apc_test_f1 > max_apc_test_f1:
                max_apc_test_f1 = apc_test_f1
            if ate_test_f1 > max_ate_test_f1:
                max_ate_test_f1 = ate_test_f1
            logger.info('max_ate_test_f1:{} max_apc_test_acc: {}\tmax_apc_test_f1: {} \t'
                        .format(max_ate_test_f1, max_apc_test_acc, max_apc_test_f1))
| [
"numpy.random.seed",
"utils.data_utils.ATEPCProcessor",
"torch.utils.data.RandomSampler",
"argparse.ArgumentParser",
"torch.argmax",
"model.lcf_atepc.LCF_ATEPC",
"torch.cat",
"logging.getLogger",
"transformers.optimization.AdamW",
"torch.utils.data.TensorDataset",
"torch.no_grad",
"os.path.joi... | [((913, 932), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (930, 932), False, 'import logging\n'), ((1017, 1051), 'os.makedirs', 'os.makedirs', (['"""logs"""'], {'exist_ok': '(True)'}), "('logs', exist_ok=True)\n", (1028, 1051), False, 'import os, sys\n'), ((981, 1014), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1002, 1014), False, 'import logging\n'), ((1167, 1196), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1186, 1196), False, 'import logging\n'), ((1566, 1588), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1577, 1588), False, 'import random\n'), ((1593, 1618), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (1607, 1618), True, 'import numpy as np\n'), ((1623, 1651), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1640, 1651), False, 'import torch\n'), ((1751, 1767), 'utils.data_utils.ATEPCProcessor', 'ATEPCProcessor', ([], {}), '()\n', (1765, 1767), False, 'from utils.data_utils import ATEPCProcessor, convert_examples_to_features\n'), ((3145, 3211), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_model'], {'do_lower_case': '(True)'}), '(args.bert_model, do_lower_case=True)\n', (3174, 3211), False, 'from transformers import BertTokenizer\n'), ((3514, 3556), 'transformers.models.bert.modeling_bert.BertModel.from_pretrained', 'BertModel.from_pretrained', (['args.bert_model'], {}), '(args.bert_model)\n', (3539, 3556), False, 'from transformers.models.bert.modeling_bert import BertModel\n'), ((4394, 4472), 'transformers.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'weight_decay': '(1e-05)'}), '(optimizer_grouped_parameters, lr=args.learning_rate, weight_decay=1e-05)\n', (4399, 4472), False, 'from transformers.optimization import AdamW\n'), ((4495, 4586), 
'utils.data_utils.convert_examples_to_features', 'convert_examples_to_features', (['eval_examples', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(eval_examples, label_list, args.max_seq_length,\n tokenizer)\n', (4523, 4586), False, 'from utils.data_utils import ATEPCProcessor, convert_examples_to_features\n'), ((4656, 4728), 'torch.tensor', 'torch.tensor', (['[f.input_ids_spc for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_ids_spc for f in eval_features], dtype=torch.long)\n', (4668, 4728), False, 'import torch\n'), ((4750, 4819), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in eval_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in eval_features], dtype=torch.long)\n', (4762, 4819), False, 'import torch\n'), ((4842, 4912), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in eval_features], dtype=torch.long)\n', (4854, 4912), False, 'import torch\n'), ((4933, 5000), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in eval_features]'], {'dtype': 'torch.long'}), '([f.label_id for f in eval_features], dtype=torch.long)\n', (4945, 5000), False, 'import torch\n'), ((5022, 5091), 'torch.tensor', 'torch.tensor', (['[f.polarities for f in eval_features]'], {'dtype': 'torch.long'}), '([f.polarities for f in eval_features], dtype=torch.long)\n', (5034, 5091), False, 'import torch\n'), ((5112, 5180), 'torch.tensor', 'torch.tensor', (['[f.valid_ids for f in eval_features]'], {'dtype': 'torch.long'}), '([f.valid_ids for f in eval_features], dtype=torch.long)\n', (5124, 5180), False, 'import torch\n'), ((5201, 5270), 'torch.tensor', 'torch.tensor', (['[f.label_mask for f in eval_features]'], {'dtype': 'torch.long'}), '([f.label_mask for f in eval_features], dtype=torch.long)\n', (5213, 5270), False, 'import torch\n'), ((5287, 5417), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_spc_input_ids', 'all_input_mask', 'all_segment_ids', 
'all_label_ids', 'all_polarities', 'all_valid_ids', 'all_lmask_ids'], {}), '(all_spc_input_ids, all_input_mask, all_segment_ids,\n all_label_ids, all_polarities, all_valid_ids, all_lmask_ids)\n', (5300, 5417), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((5498, 5522), 'torch.utils.data.RandomSampler', 'RandomSampler', (['eval_data'], {}), '(eval_data)\n', (5511, 5522), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((5545, 5621), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (5555, 5621), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((15096, 15121), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15119, 15121), False, 'import argparse\n'), ((17084, 17109), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17107, 17109), False, 'import argparse\n'), ((1097, 1108), 'time.localtime', 'localtime', ([], {}), '()\n', (1106, 1108), False, 'from time import strftime, localtime\n'), ((1664, 1695), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1678, 1695), False, 'import os, sys\n'), ((1705, 1733), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (1716, 1733), False, 'import os, sys\n'), ((3769, 3806), 'model.lcf_atepc.LCF_ATEPC', 'LCF_ATEPC', (['bert_base_model'], {'args': 'args'}), '(bert_base_model, args=args)\n', (3778, 3806), False, 'from model.lcf_atepc import LCF_ATEPC\n'), ((3833, 3870), 'model.lcf_atepc.LCF_ATEPC', 'LCF_ATEPC', (['bert_base_model'], {'args': 'args'}), '(bert_base_model, args=args)\n', (3842, 3870), False, 'from model.lcf_atepc import LCF_ATEPC\n'), ((9390, 9422), 'os.makedirs', 'os.makedirs', (['path'], 
{'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (9401, 9422), False, 'import os, sys\n'), ((10019, 10112), 'utils.data_utils.convert_examples_to_features', 'convert_examples_to_features', (['train_examples', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(train_examples, label_list, args.\n max_seq_length, tokenizer)\n', (10047, 10112), False, 'from utils.data_utils import ATEPCProcessor, convert_examples_to_features\n'), ((10399, 10472), 'torch.tensor', 'torch.tensor', (['[f.input_ids_spc for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_ids_spc for f in train_features], dtype=torch.long)\n', (10411, 10472), False, 'import torch\n'), ((10498, 10568), 'torch.tensor', 'torch.tensor', (['[f.input_mask for f in train_features]'], {'dtype': 'torch.long'}), '([f.input_mask for f in train_features], dtype=torch.long)\n', (10510, 10568), False, 'import torch\n'), ((10595, 10666), 'torch.tensor', 'torch.tensor', (['[f.segment_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.segment_ids for f in train_features], dtype=torch.long)\n', (10607, 10666), False, 'import torch\n'), ((10691, 10759), 'torch.tensor', 'torch.tensor', (['[f.label_id for f in train_features]'], {'dtype': 'torch.long'}), '([f.label_id for f in train_features], dtype=torch.long)\n', (10703, 10759), False, 'import torch\n'), ((10784, 10853), 'torch.tensor', 'torch.tensor', (['[f.valid_ids for f in train_features]'], {'dtype': 'torch.long'}), '([f.valid_ids for f in train_features], dtype=torch.long)\n', (10796, 10853), False, 'import torch\n'), ((10878, 10948), 'torch.tensor', 'torch.tensor', (['[f.label_mask for f in train_features]'], {'dtype': 'torch.long'}), '([f.label_mask for f in train_features], dtype=torch.long)\n', (10890, 10948), False, 'import torch\n'), ((10974, 11044), 'torch.tensor', 'torch.tensor', (['[f.polarities for f in train_features]'], {'dtype': 'torch.long'}), '([f.polarities for f in train_features], dtype=torch.long)\n', (10986, 11044), 
False, 'import torch\n'), ((11066, 11196), 'torch.utils.data.TensorDataset', 'TensorDataset', (['all_spc_input_ids', 'all_input_mask', 'all_segment_ids', 'all_label_ids', 'all_polarities', 'all_valid_ids', 'all_lmask_ids'], {}), '(all_spc_input_ids, all_input_mask, all_segment_ids,\n all_label_ids, all_polarities, all_valid_ids, all_lmask_ids)\n', (11079, 11196), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((11253, 11282), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['train_data'], {}), '(train_data)\n', (11270, 11282), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((11310, 11389), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (11320, 11389), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\n'), ((15311, 15336), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (15334, 15336), False, 'import argparse\n'), ((9078, 9125), 'seqeval.metrics.classification_report', 'classification_report', (['y_true', 'y_pred'], {'digits': '(4)'}), '(y_true, y_pred, digits=4)\n', (9099, 9125), False, 'from seqeval.metrics import classification_report\n'), ((17343, 17355), 'utils.Pytorch_GPUManager.GPUManager', 'GPUManager', ([], {}), '()\n', (17353, 17355), False, 'from utils.Pytorch_GPUManager import GPUManager\n'), ((17420, 17445), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (17443, 17445), False, 'import torch\n'), ((6475, 6490), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6488, 6490), False, 'import torch\n'), ((9883, 9916), 'os.path.join', 'os.path.join', (['path', '"""config.json"""'], {}), "(path, 'config.json')\n", (9895, 9916), False, 'import os, sys\n'), ((7151, 7202), 'torch.cat', 
'torch.cat', (['(test_polarities_all, polarities)'], {'dim': '(0)'}), '((test_polarities_all, polarities), dim=0)\n', (7160, 7202), False, 'import torch\n'), ((7245, 7296), 'torch.cat', 'torch.cat', (['(test_apc_logits_all, apc_logits)'], {'dim': '(0)'}), '((test_apc_logits_all, apc_logits), dim=0)\n', (7254, 7296), False, 'import torch\n'), ((7497, 7529), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['ate_logits'], {'dim': '(2)'}), '(ate_logits, dim=2)\n', (7510, 7529), True, 'import torch.nn.functional as F\n'), ((8523, 8560), 'torch.argmax', 'torch.argmax', (['test_apc_logits_all', '(-1)'], {}), '(test_apc_logits_all, -1)\n', (8535, 8560), False, 'import torch\n'), ((8715, 8752), 'torch.argmax', 'torch.argmax', (['test_apc_logits_all', '(-1)'], {}), '(test_apc_logits_all, -1)\n', (8727, 8752), False, 'import torch\n'), ((6827, 6855), 'torch.argmax', 'torch.argmax', (['apc_logits', '(-1)'], {}), '(apc_logits, -1)\n', (6839, 6855), False, 'import torch\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
import os
import torch.utils.data as data
class NUSCENESHP(data.Dataset):
    """NuScenes keypoint dataset with COCO-format annotations.

    Loads image ids and keypoint annotations from converted COCO json files
    and exposes them through the torch ``Dataset`` interface.
    """
    num_classes = 10
    num_joints = 9
    default_resolution = [896, 1600]
    # ImageNet channel statistics, shaped (1, 1, 3) to broadcast over HWC images.
    mean = np.array([0.485, 0.456, 0.406], np.float32).reshape(1, 1, 3)
    std = np.array([0.229, 0.224, 0.225], np.float32).reshape(1, 1, 3)
    # joint index pairs swapped when an image is horizontally flipped
    flip_idx = [[0, 1], [2, 3], [4, 5], [6, 7]]

    def __init__(self, opt, split):
        # BUG FIX: this previously called super(KITTIHP, self).__init__(),
        # referencing a different (undefined) class name, which raises
        # NameError; super() must name this class.
        super(NUSCENESHP, self).__init__()
        self.edges = [[0, 1], [0, 2], [1, 3], [2, 4],
                      [4, 6], [3, 5], [5, 6],
                      [5, 7]]
        self.acc_idxs = [1, 2, 3, 4, 5, 6, 7, 8]
        self.data_dir = os.path.join(opt.data_dir, 'kitti')
        self.img_dir = os.path.join(self.data_dir, 'image')
        self.calib_dir = os.path.join(self.data_dir, 'calib')
        if split == 'test':
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'image_info_test-dev2017.json').format(split)
        else:
            self.annot_path = os.path.join(
                self.data_dir, 'annotations',
                'kitti_{}_nuscenes.json').format(split)
        self.max_objs = 32
        self._data_rng = np.random.RandomState(123)
        # PCA eigen-decomposition used for lighting/color augmentation.
        self._eig_val = np.array([0.2141788, 0.01817699, 0.00341571],
                                 dtype=np.float32)
        self._eig_vec = np.array([
            [-0.58752847, -0.69563484, 0.41340352],
            [-0.5832747, 0.00994535, -0.81221408],
            [-0.56089297, 0.71832671, 0.41158938]
        ], dtype=np.float32)
        self.split = split
        self.opt = opt
        self.alpha_in_degree = False
        print('==> initializing kitti{} data.'.format(split))
        self.coco = coco.COCO(self.annot_path)
        image_ids = self.coco.getImgIds()
        if split == 'train':
            # keep only images that have at least one annotation
            self.images = []
            for img_id in image_ids:
                idxs = self.coco.getAnnIds(imgIds=[img_id])
                if len(idxs) > 0:
                    self.images.append(img_id)
        else:
            self.images = image_ids
        self.num_samples = len(self.images)
        print('Loaded {} {} samples'.format(split, self.num_samples))

    def _to_float(self, x):
        """Round x to 2 decimal places (used for compact json output)."""
        return float("{:.2f}".format(x))

    def __len__(self):
        return self.num_samples
| [
"numpy.array",
"pycocotools.coco.COCO",
"os.path.join",
"numpy.random.RandomState"
] | [((839, 874), 'os.path.join', 'os.path.join', (['opt.data_dir', '"""kitti"""'], {}), "(opt.data_dir, 'kitti')\n", (851, 874), False, 'import os\n'), ((897, 933), 'os.path.join', 'os.path.join', (['self.data_dir', '"""image"""'], {}), "(self.data_dir, 'image')\n", (909, 933), False, 'import os\n'), ((958, 994), 'os.path.join', 'os.path.join', (['self.data_dir', '"""calib"""'], {}), "(self.data_dir, 'calib')\n", (970, 994), False, 'import os\n'), ((1386, 1412), 'numpy.random.RandomState', 'np.random.RandomState', (['(123)'], {}), '(123)\n', (1407, 1412), True, 'import numpy as np\n'), ((1437, 1500), 'numpy.array', 'np.array', (['[0.2141788, 0.01817699, 0.00341571]'], {'dtype': 'np.float32'}), '([0.2141788, 0.01817699, 0.00341571], dtype=np.float32)\n', (1445, 1500), True, 'import numpy as np\n'), ((1558, 1709), 'numpy.array', 'np.array', (['[[-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, -\n 0.81221408], [-0.56089297, 0.71832671, 0.41158938]]'], {'dtype': 'np.float32'}), '([[-0.58752847, -0.69563484, 0.41340352], [-0.5832747, 0.00994535, \n -0.81221408], [-0.56089297, 0.71832671, 0.41158938]], dtype=np.float32)\n', (1566, 1709), True, 'import numpy as np\n'), ((1920, 1946), 'pycocotools.coco.COCO', 'coco.COCO', (['self.annot_path'], {}), '(self.annot_path)\n', (1929, 1946), True, 'import pycocotools.coco as coco\n'), ((378, 421), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]', 'np.float32'], {}), '([0.485, 0.456, 0.406], np.float32)\n', (386, 421), True, 'import numpy as np\n'), ((449, 492), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]', 'np.float32'], {}), '([0.229, 0.224, 0.225], np.float32)\n', (457, 492), True, 'import numpy as np\n'), ((1052, 1126), 'os.path.join', 'os.path.join', (['self.data_dir', '"""annotations"""', '"""image_info_test-dev2017.json"""'], {}), "(self.data_dir, 'annotations', 'image_info_test-dev2017.json')\n", (1064, 1126), False, 'import os\n'), ((1218, 1286), 'os.path.join', 'os.path.join', 
(['self.data_dir', '"""annotations"""', '"""kitti_{}_nuscenes.json"""'], {}), "(self.data_dir, 'annotations', 'kitti_{}_nuscenes.json')\n", (1230, 1286), False, 'import os\n')] |
import numpy as np
from scipy.special import expit
from ..envs.configuration import Configuration
from . import (
AbstractFeatureProvider,
Model,
ModelBasedAgent,
ViewsFeaturesProvider
)
from .organic_count import to_categorical
# Default hyper-parameters for the Bayesian (variational) agent.
bayesian_poly_args = {
    'num_products': 10,
    # NOTE: the seed is drawn once at import time, so every Configuration
    # built from this dict within one process shares the same random seed.
    'random_seed': np.random.randint(2 ** 31 - 1),
    'poly_degree': 2,
    'max_iter': 5000,
    'aa': 1.,  # used to build the prior covariance Sigma in the model builder
    'bb': 1.,  # additive term in the same prior covariance construction
    'with_ps_all': False,
}
from scipy import rand
from numpy.linalg import inv
# Algorithm 6
# http://www.maths.usyd.edu.au/u/jormerod/JTOpapers/Ormerod10.pdf
def JJ(zeta):
    """Jaakkola-Jordan variational coefficient lambda(zeta) for the logistic
    lower bound: (sigmoid(zeta) - 1/2) / (2 * zeta). Works element-wise."""
    sigmoid = 1. / (1 + np.exp(-zeta))
    return (sigmoid - 0.5) / (2. * zeta)
# TODO replace explicit inv with linear solves
def bayesian_logistic(Psi, y, mu_beta, Sigma_beta, iter = 200):
    """Variational-Bayes logistic regression (Jaakkola-Jordan bound).

    Implements Algorithm 6 of Ormerod & Wand (2010): alternates closed-form
    updates of the Gaussian posterior q(beta) = N(q_mu, q_Sigma) and of the
    variational parameters zeta.

    Parameters
    ----------
    Psi : (N, P) ndarray, design matrix.
    y : (N, 1) ndarray, binary responses in {0, 1}.
    mu_beta : (P, 1) ndarray, prior mean.
    Sigma_beta : (P, P) ndarray, prior covariance.
    iter : int, number of fixed-point iterations.

    Returns
    -------
    (q_mu, q_Sigma) : posterior mean (P, 1) and covariance (P, P).
    """
    # FIX: `rand` was imported via `from scipy import rand`, which has been
    # removed from SciPy's top-level namespace; np.random.rand is the
    # equivalent initialiser for zeta.
    zeta = np.random.rand(Psi.shape[0])
    for _ in range(iter):
        # q_Sigma = (Sigma_beta^-1 + 2 * Psi^T diag(JJ(zeta)) Psi)^-1
        q_Sigma = inv(inv(Sigma_beta) + 2 * np.matmul(np.matmul(Psi.T, np.diag(JJ(zeta))), Psi))
        q_mu = np.matmul(q_Sigma, (np.matmul(Psi.T, y - 0.5) + np.matmul(inv(Sigma_beta), mu_beta)))
        # zeta_n = sqrt( psi_n^T (q_Sigma + q_mu q_mu^T) psi_n )
        zeta = np.sqrt(np.diag(np.matmul(np.matmul(Psi, q_Sigma + np.matmul(q_mu, q_mu.T)), Psi.T)))
    return q_mu, q_Sigma
from scipy.stats import multivariate_normal
class BayesianModelBuilderVB(AbstractFeatureProvider):
    """Builds a (features provider, model) pair whose click model is a
    Bayesian logistic regression over feature-action cross terms, trained
    with variational Bayes (see bayesian_logistic)."""
    def __init__(self, config):
        super(BayesianModelBuilderVB, self).__init__(config)
    def build(self):
        class BayesianFeaturesProviderVB(ViewsFeaturesProvider):
            """View-count features reshaped to a (1, num_products) row vector.
            """
            def __init__(self, config):
                super(BayesianFeaturesProviderVB, self).__init__(config)
            def features(self, observation):
                base_features = super().features(observation)
                return base_features.reshape(1, self.config.num_products)
        class BayesianRegressionModelVB(Model):
            """Acts greedily on the posterior-mean click probability; Lambda
            holds posterior samples of the regression coefficients.
            """
            def __init__(self, config, Lambda):
                super(BayesianRegressionModelVB, self).__init__(config)
                self.Lambda = Lambda
            def act(self, observation, features):
                X = features
                P = X.shape[1]
                A = np.eye(P)
                # cross features for every candidate action, scored against
                # each posterior sample; probabilities averaged over samples
                XA = np.kron(X, A)
                action_proba = expit(np.matmul(XA, self.Lambda.T)).mean(1)
                action = np.argmax(action_proba)
                if self.config.with_ps_all:
                    ps_all = np.zeros(self.config.num_products)
                    ps_all[action] = 1.0
                else:
                    ps_all = ()
                return {
                    **super().act(observation, features),
                    **{
                        'a': action,
                        'ps': 1.0,
                        'ps-a': ps_all,
                    },
                }
        features, actions, deltas, pss = self.train_data()
        X = features
        N = X.shape[0]
        P = X.shape[1]
        A = to_categorical(actions, P)
        # per-sample Kronecker cross of features with the one-hot action
        XA = np.array([np.kron(X[n, :], A[n, :]) for n in range(N)])
        y = deltas # clicks
        # prior covariance over the P**2 cross-term coefficients
        Sigma = np.kron(self.config.aa * np.eye(P) + self.config.bb,
                        self.config.aa * np.eye(P) + self.config.bb)
        q_mu, q_Sigma = bayesian_logistic(XA, y.reshape((N, 1)),
                                          mu_beta = -6 * np.ones((P ** 2, 1)), Sigma_beta = Sigma)
        # draw 1000 posterior samples of the coefficient vector
        Lambda = multivariate_normal.rvs(q_mu.reshape(P ** 2), q_Sigma, 1000)
        # stan version of the above (seems to agree well)
        # fit = pystan.stan('model.stan', data = {'N': features.shape[0], 'P': features.shape[1], 'XA': XA, 'y': y, 'Sigma': Sigma}, chains = 1)
        # s = fit.extract()
        # Lambda = s['lambda']
        ###
        return (
            BayesianFeaturesProviderVB(self.config), # Poly is a bad name ..
            BayesianRegressionModelVB(self.config, Lambda)
        )
class BayesianAgentVB(ModelBasedAgent):
    """
    Bayesian Agent.

    Note: the agent utilises VB (variational Bayes) to train a model.
    """
    def __init__(self, config = Configuration(bayesian_poly_args)):
        # FIX: removed stray debug print('ffq') left over from development.
        # NOTE(review): the default Configuration is created once at class
        # definition time and shared across instantiations -- confirm intended.
        super(BayesianAgentVB, self).__init__(
            config,
            BayesianModelBuilderVB(config)
        )
| [
"scipy.rand",
"numpy.argmax",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.linalg.inv",
"numpy.kron",
"numpy.matmul",
"numpy.exp",
"numpy.eye"
] | [((313, 343), 'numpy.random.randint', 'np.random.randint', (['(2 ** 31 - 1)'], {}), '(2 ** 31 - 1)\n', (330, 343), True, 'import numpy as np\n'), ((782, 800), 'scipy.rand', 'rand', (['Psi.shape[0]'], {}), '(Psi.shape[0])\n', (786, 800), False, 'from scipy import rand\n'), ((849, 864), 'numpy.linalg.inv', 'inv', (['Sigma_beta'], {}), '(Sigma_beta)\n', (852, 864), False, 'from numpy.linalg import inv\n'), ((959, 984), 'numpy.matmul', 'np.matmul', (['Psi.T', '(y - 0.5)'], {}), '(Psi.T, y - 0.5)\n', (968, 984), True, 'import numpy as np\n'), ((2132, 2141), 'numpy.eye', 'np.eye', (['P'], {}), '(P)\n', (2138, 2141), True, 'import numpy as np\n'), ((2163, 2176), 'numpy.kron', 'np.kron', (['X', 'A'], {}), '(X, A)\n', (2170, 2176), True, 'import numpy as np\n'), ((2278, 2301), 'numpy.argmax', 'np.argmax', (['action_proba'], {}), '(action_proba)\n', (2287, 2301), True, 'import numpy as np\n'), ((2955, 2980), 'numpy.kron', 'np.kron', (['X[n, :]', 'A[n, :]'], {}), '(X[n, :], A[n, :])\n', (2962, 2980), True, 'import numpy as np\n'), ((636, 649), 'numpy.exp', 'np.exp', (['(-zeta)'], {}), '(-zeta)\n', (642, 649), True, 'import numpy as np\n'), ((997, 1012), 'numpy.linalg.inv', 'inv', (['Sigma_beta'], {}), '(Sigma_beta)\n', (1000, 1012), False, 'from numpy.linalg import inv\n'), ((2375, 2409), 'numpy.zeros', 'np.zeros', (['self.config.num_products'], {}), '(self.config.num_products)\n', (2383, 2409), True, 'import numpy as np\n'), ((3072, 3081), 'numpy.eye', 'np.eye', (['P'], {}), '(P)\n', (3078, 3081), True, 'import numpy as np\n'), ((3141, 3150), 'numpy.eye', 'np.eye', (['P'], {}), '(P)\n', (3147, 3150), True, 'import numpy as np\n'), ((3292, 3312), 'numpy.ones', 'np.ones', (['(P ** 2, 1)'], {}), '((P ** 2, 1))\n', (3299, 3312), True, 'import numpy as np\n'), ((1091, 1114), 'numpy.matmul', 'np.matmul', (['q_mu', 'q_mu.T'], {}), '(q_mu, q_mu.T)\n', (1100, 1114), True, 'import numpy as np\n'), ((2215, 2243), 'numpy.matmul', 'np.matmul', (['XA', 'self.Lambda.T'], {}), '(XA, 
self.Lambda.T)\n', (2224, 2243), True, 'import numpy as np\n')] |
# by <NAME>, 2021. MIT license
##############################################################################
### Volumetric processing of fibers
##############################################################################
import time
import os
from tifffile import TiffFile
import pickle
from random import shuffle
from skimage import morphology
from scipy import ndimage
import numpy as np
from extractCenterPoints import getTiffProperties
from combineFunctions import findCollisions
from fibers import fiberObj
from combineFunctions import compactifySlice,makePropertySlice
from joblib import Parallel, delayed
import multiprocessing
# RGB colors (components scaled to [0,1] floats) used for mayavi rendering
color0 = (24 / 255., 120 / 255., 250 / 255.)
color1 = (25 / 255., 155 / 255., 50 / 255.)
color2 = (77 / 255., 217 / 255., 155 / 255.)
color3 = (255 / 255., 179 / 255., 25 / 255.)
colorCollisions = (207 / 255., 27 / 255., 25 / 255.)
def paddingOfVolume(V,radiusZ,radiusX,radiusY,keepTopAndBottom=True,paddingValue=0):
    """Return a copy of volume V padded by (radiusZ, radiusX, radiusY) voxels per side.

    The padding is filled with paddingValue. When keepTopAndBottom is True the
    z-padding slabs replicate the first/last z-slice of V, so fibers touching the
    top or bottom of the volume remain connected (otherwise a subsequent
    morphological operation would trim them).
    """
    paddedShape = (V.shape[0] + 2 * radiusZ,
                   V.shape[1] + 2 * radiusX,
                   V.shape[2] + 2 * radiusY)
    paddedV_perim = np.ones(paddedShape, V.dtype) * paddingValue
    if keepTopAndBottom:
        # replicate the first and last z-slices into each layer of the z-padding slabs
        for layer in range(radiusZ):
            paddedV_perim[layer,      radiusX:-radiusX, radiusY:-radiusY] = V[0,  :, :].copy()
            paddedV_perim[-layer - 1, radiusX:-radiusX, radiusY:-radiusY] = V[-1, :, :].copy()
    # interior region holds the original volume (slice start included, end excluded)
    paddedV_perim[radiusZ:-radiusZ, radiusX:-radiusX, radiusY:-radiusY] = V
    return paddedV_perim
def volumetricGapFilling(
    fiberID,
    operationTuple,
    V_thisMarkerOnly,
    SE_radius,
    useInclinedCylinderSE,
    makePlotsIndividual,
    V_hist=None,
    engine=None,
    articlePlots=False
    ):
    """Fill gaps in a single fiber's binary volume by morphological closing then opening.

    Parameters
    ----------
    fiberID : int
        Marker of the fiber being processed (used for logging/plot titles).
    operationTuple : tuple
        (lenChain, oriVec, angle): length of the longest interpolation chain,
        fiber orientation vector and its angle to the z axis in degrees.
    V_thisMarkerOnly : np.ndarray (uint8)
        Binary volume (255 where the fiber is present), convention [z,x,y].
    SE_radius : int
        Radius of the ball/disk structuring elements.
    useInclinedCylinderSE : bool
        If True, close with a cylinder inclined along the fiber orientation;
        otherwise use a vertical rod of stacked balls.
    makePlotsIndividual : bool
        Render intermediate volumes with mayavi.
    V_hist, engine, articlePlots
        Plotting-only inputs.

    Returns
    -------
    (zNew, xNew, yNew) : coordinate arrays of the fiber voxels after gap filling,
    expressed in the original (untransposed) [z,x,y] convention.
    """
    lenChain=max(1,operationTuple[0]) # can be 0 in case of backtracking, but should be at least 1
    oriVec=operationTuple[1]
    angle=operationTuple[2]
    # outer convention is [z,x,y]. avoiding transposition except for plotting, for performance purposes
    z,x,y=np.where(V_thisMarkerOnly==255)
    # if a fiber is strongly inclined, it is more efficient to process it after transposition,
    # such that the principal direction is closer to z direction
    transposeBool=None
    if angle>50.:
        if abs(oriVec[0])>abs(oriVec[1]): #oriVec is denoted (x,y,z)
            transposeBool="xz"
            temp=z
            z=x
            x=temp
            oriVec=oriVec[[2,1,0]] #transposed to (z,y,x)
            oldShape=V_thisMarkerOnly.shape
            newShape=[oldShape[1],oldShape[0],oldShape[2]] # was denoted (z,x,y), transposed to (x,z,y)
            V_thisMarkerOnly=np.zeros(newShape,np.uint8)
        else:
            transposeBool="yz"
            temp=z
            z=y
            y=temp
            oriVec=oriVec[[0,2,1]] #transposed to (x,z,y)
            oldShape=V_thisMarkerOnly.shape
            newShape=[oldShape[2],oldShape[1],oldShape[0]] # was denoted (z,x,y), transposed to (y,x,z)
            V_thisMarkerOnly=np.zeros(newShape,np.uint8)
        V_thisMarkerOnly[z,x,y]=255
    xMin=min(x)
    yMin=min(y)
    zMin=min(z)
    xMax=max(x)
    yMax=max(y)
    zMax=max(z)
    xMean=np.mean([xMin,xMax])
    yMean=np.mean([yMin,yMax])
    zMean=np.mean([zMin,zMax])
    SE_size=SE_radius*2+1
    SE_ball3D=morphology.ball(SE_radius, dtype=np.uint8)*255
    SE_ball3D_opening=morphology.ball(SE_radius-1, dtype=np.uint8)*255
    if useInclinedCylinderSE:
        SE_disk=morphology.disk(SE_radius, dtype=np.uint8)*255
        lengthTranslation=lenChain+SE_radius*2
        #extend orientation vector so it reaches across delta z = lengthTranslation
        # FIX: np.int was removed in NumPy 1.24 — use the builtin int instead
        vectorTranslation=np.array([round(val) for val in oriVec*lengthTranslation/oriVec[2]],int)
        offsetX=int(round(vectorTranslation[0]/2))
        offsetY=int(round(vectorTranslation[1]/2))
        sizeX=2*abs(offsetX)+2*SE_radius
        sizeY=2*abs(offsetY)+2*SE_radius
        sizeZ=abs(vectorTranslation[2])+2*SE_radius
        #odd size is required for structuring element
        if sizeX%2==0:
            sizeX+=1
        if sizeY%2==0:
            sizeY+=1
        if sizeZ%2==0:
            sizeZ+=1
        # FIX: np.int -> int (removed in NumPy 1.24)
        posX=np.array([round(val)for val in np.linspace(-offsetX,offsetX,sizeZ)],int)
        posY=np.array([round(val)for val in np.linspace(-offsetY,offsetY,sizeZ)],int)
        SE_rod3D=np.zeros((sizeZ,sizeX,sizeY),np.uint8)
        #middle position, counting from 0:sizeX (sizeX excluded)
        midX=int((sizeX-1)/2)
        midY=int((sizeY-1)/2)
        # stamp an inclined stack of disks to build a cylinder along the fiber orientation
        for i in range(SE_rod3D.shape[0]):
            SE_rod3D[
                i,
                midX+posX[i]-SE_radius:midX+posX[i]-SE_radius+SE_size,
                midY+posY[i]-SE_radius:midY+posY[i]-SE_radius+SE_size]=SE_disk
        paddingSizeX=sizeX
        paddingSizeY=sizeY
        paddingSizeZ=sizeZ
    else:
        #use vertical rod instead of inclined cylinder: bad results for inclined fibers
        SE_rod3D=np.zeros((lenChain+2*SE_radius,SE_ball3D.shape[0],SE_ball3D.shape[1]),np.uint8)
        for i in range(lenChain):
            SE_rod3D[i:SE_size+i,:,:][SE_ball3D==255]=255
        paddingSizeX=paddingSizeY=paddingSizeZ=SE_radius # to avoid artifacts on corners after opening
    if makePlotsIndividual:
        from mayavi import mlab
        from visualisationTool import addPlaneWidgets
        mlab.figure(figure="Structuring element, closing, fiberID={}".format(fiberID),size=(1200,1050),bgcolor=(1.,1.,1.))
        transposedSE_rod3D=np.transpose(SE_rod3D,(1,2,0))
        if not articlePlots:
            addPlaneWidgets(transposedSE_rod3D,engine,axOnly="z_axes") ## article
        srcRod = mlab.pipeline.scalar_field(transposedSE_rod3D)
        mlab.pipeline.iso_surface(srcRod, contours=[255], opacity=0.5, color=(0.,0.,0.))
        mlab.outline(color=(0,0,0))
        mlab.figure(figure="Structuring element, opening, fiberID={}".format(fiberID),size=(1200,1050),bgcolor=(1.,1.,1.))
        transposedSE_ball3D_opening=np.transpose(SE_ball3D_opening,(1,2,0))
        if not articlePlots:
            addPlaneWidgets(transposedSE_ball3D_opening,engine,axOnly="z_axes") ## article
        srcBall= mlab.pipeline.scalar_field(transposedSE_ball3D_opening)
        mlab.pipeline.iso_surface(srcBall, contours=[255], opacity=0.5, color=(0.,0.,0.))
        mlab.outline(color=(0,0,0))
    # padding on all sides is necessary or else ball SE cannot reach pixels close to boundary
    paddedV_thisMarkerOnly=paddingOfVolume(V_thisMarkerOnly,paddingSizeZ,paddingSizeX,paddingSizeY)
    V_sliceMarker=paddedV_thisMarkerOnly[zMin:zMax+2*paddingSizeZ,xMin:xMax+2*paddingSizeX,yMin:yMax+2*paddingSizeY]
    if makePlotsIndividual:
        mlab.figure(figure="V_sliceMarker, fiberID={}".format(fiberID),size=(1200,1050),bgcolor=(1.,1.,1.))
        srcFiber = mlab.pipeline.scalar_field(
            np.transpose(
                V_sliceMarker[
                    paddingSizeZ:-paddingSizeZ,
                    paddingSizeX:-paddingSizeX,
                    paddingSizeY:-paddingSizeY
                ],
                (1,2,0)
            )
        )
        mlab.pipeline.iso_surface(srcFiber, contours=[255], opacity=0.45, color=color0)
        if articlePlots:
            import tifffile
            tifffile.imwrite(
                "/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/Single/V_fiberID{}_before.tiff".format(fiberID),
                V_sliceMarker,
                compress=True
            )
        else:
            addPlaneWidgets( ###article
                np.transpose(V_hist,(1,2,0)),
                engine,
                widgetLUT="black-white",
                axOnly="z_axes"
            )
        mlab.outline(color=(0,0,0))
    tic = time.perf_counter()
    print("\tvolumetric closing started for fiberID={} on {}, centerOfMass x={: >4.0f}, y={: >4.0f}, z={: >4.0f}".\
        format(fiberID,multiprocessing.current_process().name,xMean,yMean,zMean))
    try:
        V_sliceMarker_closed=np.array(ndimage.binary_closing(V_sliceMarker,SE_rod3D),np.uint8)*255
    except MemoryError:
        # best-effort: on very large fibers the closing can exhaust memory, skip it
        print("Encountered: MemoryError, continuing without performing closing on marker={}".format(fiberID))
        V_sliceMarker_closed=V_sliceMarker
    paddedV_thisMarkerOnly[zMin:zMax+2*paddingSizeZ,xMin:xMax+2*paddingSizeX,yMin:yMax+2*paddingSizeY]=V_sliceMarker_closed
    toc = time.perf_counter()
    print("\t\tvolumetric closing completed in {: >4.4f}s for fiberID={} on {}".format(toc-tic,fiberID,multiprocessing.current_process().name))
    #############################################
    # # # opening: removes thin artifacts created by the closing
    print("\tvolumetric opening started for fiberID={} on {}, centerOfMass x={: >4.0f}, y={: >4.0f}, z={: >4.0f}".\
        format(fiberID,multiprocessing.current_process().name,xMean,yMean,zMean))
    try:
        V_sliceMarker_closed_opened=np.array(ndimage.binary_opening(V_sliceMarker_closed,SE_ball3D_opening),np.uint8)*255
    except MemoryError:
        print("Encountered: MemoryError, continuing without performing opening on marker={}".format(fiberID))
        V_sliceMarker_closed_opened=V_sliceMarker_closed
    toc = time.perf_counter()
    print("\t\tvolumetric opening completed in {: >4.4f}s for fiberID={} on {}".format(toc-tic,fiberID,multiprocessing.current_process().name))
    if makePlotsIndividual:
        mlab.figure(figure="V_sliceMarker_closed, fiberID={}".format(fiberID),size=(1200,1050),bgcolor=(1.,1.,1.))
        if not articlePlots:
            #overlay SE in plot ## article
            V_sliceMarker_closed[
                paddingSizeZ:SE_rod3D.shape[0]+paddingSizeZ,
                paddingSizeX:SE_rod3D.shape[1]+paddingSizeX,
                paddingSizeY:SE_rod3D.shape[2]+paddingSizeY]=SE_rod3D
        srcFiber = mlab.pipeline.scalar_field(
            np.transpose(
                V_sliceMarker_closed[
                    paddingSizeZ:-paddingSizeZ,
                    paddingSizeX:-paddingSizeX,
                    paddingSizeY:-paddingSizeY
                ],(1,2,0)
            )
        )
        if articlePlots:
            tifffile.imwrite(
                "/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/Single/V_fiberID{}_closed.tiff".format(fiberID),
                V_sliceMarker_closed,
                compress=True
            )
        mlab.pipeline.iso_surface(srcFiber, contours=[255], opacity=0.5, color=color1)
        transposedV_sliceHist=np.transpose(V_hist,(1,2,0))
        mlab.outline(color=(0,0,0))
        ### article
        if not articlePlots:
            addPlaneWidgets(transposedV_sliceHist,engine, widgetLUT="black-white",axOnly="z_axes")
        #################################
        ### opening
        mlab.figure(figure="V_sliceMarker_closed_opened, fiberID={}".format(fiberID),size=(1200,1050),bgcolor=(1.,1.,1.))
        if not articlePlots:
            # overlay SE in plot ### article
            V_sliceMarker_closed_opened[
                paddingSizeZ:SE_ball3D_opening.shape[0]+paddingSizeZ,
                paddingSizeX:SE_ball3D_opening.shape[1]+paddingSizeX,
                paddingSizeY:SE_ball3D_opening.shape[2]+paddingSizeY]=SE_ball3D_opening
        srcFiber = mlab.pipeline.scalar_field(
            np.transpose(
                V_sliceMarker_closed_opened[
                    paddingSizeZ:-paddingSizeZ,
                    paddingSizeX:-paddingSizeX,
                    paddingSizeY:-paddingSizeY
                ],(1,2,0)
            )
        )
        mlab.pipeline.iso_surface(srcFiber, contours=[255], opacity=0.5, color=color3)
        transposedV_sliceHist=np.transpose(V_hist,(1,2,0))
        mlab.outline(color=(0,0,0))
        ###article
        if not articlePlots:
            addPlaneWidgets(transposedV_sliceHist,engine, widgetLUT="black-white",axOnly="z_axes")
        if articlePlots:
            mlab.outline(color=(0,0,0))
            engine=mlab.get_engine()
            for iScene in range(5):#[2,3,4]:
                scene = engine.scenes[iScene]
                scene.scene.camera.position = [230.7885982150969, -55.77080802359471, 70.10653478028216]
                scene.scene.camera.focal_point = [13.5, 22.0, 56.0]
                scene.scene.camera.view_angle = 30.0
                scene.scene.camera.view_up = [-0.060090652925034176, 0.013151557327761474, 0.9981062819013302]
                scene.scene.camera.clipping_range = [185.46468568513774, 289.3690443494203]
                scene.scene.camera.compute_view_plane_normal()
                scene.scene.render()
            tifffile.imwrite(
                "/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/Single/V_fiberID{}_after.tiff".format(fiberID),
                V_sliceMarker_closed_opened,
                compress=True
            )
        mlab.show()
    # if fiber connects with first or last imSlice, it is copied in the z direction,
    # must be erased before transfering
    V_sliceMarker_closed_opened[:paddingSizeZ ,:,:]=0
    V_sliceMarker_closed_opened[-paddingSizeZ:,:,:]=0
    zNew,xNew,yNew=np.where(V_sliceMarker_closed_opened==255)
    xNew+=xMin-paddingSizeX
    yNew+=yMin-paddingSizeY
    zNew+=zMin-paddingSizeZ
    # undo the principal-direction transposition so output is in the caller's [z,x,y] convention
    if transposeBool is not None:
        if transposeBool=="xz":
            temp=zNew
            zNew=xNew
            xNew=temp
        elif transposeBool=="yz":
            temp=zNew
            zNew=yNew
            yNew=temp
    return zNew,xNew,yNew
def parallelGapFilling(
    fiberID,
    operationTuple,
    V_fiberMap,
    makePlotAll,
    makePlotsIndividual,
    V_hist=None,
    engine=None,
    checkCollision=True,
    SE_radius=4,
    useInclinedCylinderSE=True,
    articlePlots=False
    ):
    """Run volumetric gap filling for one fiber of V_fiberMap.

    Extracts the bounding box of fiberID, delegates the morphological
    closing/opening to volumetricGapFilling(), and optionally detects
    collisions of the newly created voxels against the global fiber map.

    Returns (oldVoxels, newVoxels, collisionsDict):
    oldVoxels is the original (z,x,y) voxel arrays (None unless makePlotAll);
    newVoxels maps fiberID to its new global voxel coordinates;
    collisionsDict is None when checkCollision is False, otherwise a dict
    (possibly empty) describing collisions with other fibers.
    """
    fiberID = int(round(fiberID))
    if articlePlots:
        V_fiberMap[0:100] = -1  ## article
    voxZ, voxX, voxY = np.where(V_fiberMap == fiberID)
    zLo, zHi = min(voxZ), max(voxZ)
    xLo, xHi = min(voxX), max(voxX)
    yLo, yHi = min(voxY), max(voxY)
    # smaller working volume around the fiber (padded inside volumetricGapFilling)
    V_thisMarkerOnly = np.zeros((zHi - zLo + 1, xHi - xLo + 1, yHi - yLo + 1), np.uint8)
    V_thisMarkerOnly[voxZ - zLo, voxX - xLo, voxY - yLo] = 255
    V_hist_thisMarkerOnly = V_hist[zLo:zHi, xLo:xHi, yLo:yHi] if makePlotsIndividual else None
    if 255 not in V_thisMarkerOnly:
        raise ValueError(f"fiberID:{fiberID} not found in V_fiberMap")
    oldVoxels = (voxZ, voxX, voxY) if makePlotAll else None
    zNew, xNew, yNew = volumetricGapFilling(
        fiberID,
        operationTuple,
        V_thisMarkerOnly,
        SE_radius=SE_radius,
        useInclinedCylinderSE=useInclinedCylinderSE,
        makePlotsIndividual=makePlotsIndividual,
        V_hist=V_hist_thisMarkerOnly,
        engine=engine,
        articlePlots=articlePlots
    )
    newVoxels = {fiberID: {
        "zNew": zNew + zLo,
        "xNew": xNew + xLo,
        "yNew": yNew + yLo
    }}
    if not checkCollision:
        return oldVoxels, newVoxels, None
    # reassign the voxels added by the morphological operations for collision detection
    V_NewVoxels = np.zeros(V_fiberMap.shape, np.uint32)
    V_NewVoxels[zNew + zLo, xNew + xLo, yNew + yLo] = fiberID
    # first returned map holds collisions from V_NewVoxels into the global
    # V_fiberMap; the reverse map would only contain self-references here
    maxAll_old = findCollisions(V_fiberMap, V_NewVoxels, makeV_collisions=False)[0]
    # drop the self-referenced, false collision
    collisions = {key: val for key, val in maxAll_old.items() if key != fiberID}
    collisionsDict = {}
    if maxAll_old:
        collisionsDict[fiberID] = {
            "collisions": collisions,
            "newVoxels": newVoxels[fiberID]
        }
    return oldVoxels, newVoxels, collisionsDict
def collisionDetectionWrapper(
    postProcessQueue,
    minCountsCombination,
    angleCombineDEG,
    oriVecAll,
    fiberStruct,
    V_fiberMap,
    fiberStruct_combined,
    V_hist=None,
    makePlotsIndividual=False,
    makePlotAll=False,
    parallelHandle=False
    ):
    """Gap-fill all queued fibers, then combine fibers that collide and are near-parallel.

    Runs parallelGapFilling() on every (fiberID, operationTuple) of
    postProcessQueue, merges colliding fibers whose orientation vectors differ
    by less than angleCombineDEG (and whose overlap exceeds
    minCountsCombination voxels), then runs a second gap-filling pass on the
    combined fibers and writes all new voxels into V_fiberMap.

    Returns (collisionsDict, V_fiberMap).
    """
    newVoxels={}
    collisionsDict={}
    if makePlotsIndividual or makePlotAll:
        from mayavi import mlab
        from visualisationTool import addPlaneWidgets
        engine=mlab.get_engine()
    else:
        engine=None
    if makePlotAll:
        V_before=np.zeros(V_fiberMap.shape,np.uint8)
        V_after =np.zeros(V_fiberMap.shape,np.uint8)
        V_collisions=np.zeros(V_fiberMap.shape,np.uint8)
    else:
        V_before=V_after=V_collisions=None
    if parallelHandle:
        num_cores=int(multiprocessing.cpu_count()/3) # may cause memory overload if too many processes are used simultaneously
        makePlotsIndividual=False
        V_hist_parallel=None # shouldn't be sent if not used for plotting
        engine_parallel=None # cant be sent in a parallel call, un-pickleable
    else:
        num_cores=1
        V_hist_parallel=V_hist
        engine_parallel=engine
    # first gap-filling pass over every queued fiber
    results= Parallel(n_jobs=num_cores)\
        (delayed(parallelGapFilling)\
            (fiberID,
            operationTuple,
            V_fiberMap,
            makePlotAll,
            makePlotsIndividual,
            V_hist_parallel,
            engine_parallel,
            )for fiberID,operationTuple in postProcessQueue)
    for resTuple in results:
        if makePlotAll:
            zBefore,xBefore,yBefore=resTuple[0]
            V_before[zBefore,xBefore,yBefore]=255
        newVoxels.update(resTuple[1])
        collisionsDict.update(resTuple[2])
    combineLUT={}
    combinedAfterGapFilling=set([])
    combinedPreviously=set([fiberID for fiberID in fiberStruct_combined.keys()])
    for fiberID,collisions in collisionsDict.items():
        for fiberID_other,collision in collisions["collisions"].items():
            if collision["counts"]>minCountsCombination:
                oriVecSelf =oriVecAll[fiberID]
                oriVecOther=oriVecAll[fiberID_other]
                angle=np.degrees(np.arccos(np.dot(oriVecSelf,oriVecOther)))
                if angle>90:
                    angle=180-angle
                if angle<angleCombineDEG:
                    # if this fiber already has been combined to another, combine to that other one
                    if fiberID in combineLUT.keys():
                        if combineLUT[fiberID]==fiberID_other:
                            #avoid cyclical combination
                            continue
                        fiberID=combineLUT[fiberID]
                    # if the other fiber already has been combined to another, combine to that other one
                    if fiberID_other in combineLUT.keys():
                        if fiberID in combineLUT.keys():
                            if combineLUT[fiberID]==fiberID_other:
                                #avoid cyclical combination
                                continue
                        fiberID_other=combineLUT[fiberID_other]
                    if fiberID!=fiberID_other: #otherwise, it means the combination has already been performed
                        combinedAfterGapFilling.add(fiberID)
                        fiberStruct_combined[fiberID]=fiberStruct[fiberID]
                        if fiberID_other in combinedAfterGapFilling:
                            combinedAfterGapFilling.remove(fiberID_other)
                        fiberID_otherSuffix=fiberID_other+fiberStruct[fiberID_other].suffix
                        if fiberID_otherSuffix in fiberObj.classAttributes["listFiberIDs_tracked"]:
                            # if fiber is combined more than once
                            fiberObj.classAttributes["listFiberIDs_tracked"].remove(fiberID_otherSuffix)
                        # add otherFib's voxels to this one:
                        V_fiberMap[V_fiberMap==fiberID_other]=fiberID
                        if makePlotAll:
                            # NOTE(review): this mask is computed after the reassignment
                            # above, so it is likely always empty — verify intent
                            V_collisions[V_fiberMap==fiberID_other]=255
                        #combine fiberObj
                        fiberStruct[fiberID].combine(fiberStruct[fiberID_other])
                        fiberStruct[fiberID].setColor("combined")
                        fiberStruct[fiberID_other].setColor("combined_other")
                        combineLUT[fiberID_other]=fiberID
                        fiberStruct[fiberID_other].tags.add("combined_postProcessing")
    # sanity check: every fiber labelled "combined" must appear in one of the combined sets.
    # FIX: was `int(key not in combinedPreviously)` — a misplaced parenthesis that
    # cast the membership boolean instead of the key (cf. the parallel test on the
    # previous line); the check now casts the key before the lookup as intended.
    for key,fiber in fiberStruct.items():
        if fiber.colorLabel=="combined":
            raiseError=False
            if int(key) not in combinedAfterGapFilling and \
                int(key) not in combinedPreviously:
                print("key:",key,"\tfiberID:",fiber.fiberID)
                raiseError=True
            if raiseError:
                raise RuntimeError("not labelling correctly")
    #last Pass of postProcessing for the fibers which were combined (there can remain gaps)
    postProcessQueue_index={}
    for index,dataTuple in enumerate(postProcessQueue):
        fiberID=dataTuple[0]
        postProcessQueue_index[int(fiberID)]=index
    postProcessQueueSecondPass=[
        (
            fiberID,
            postProcessQueue[postProcessQueue_index[fiberID] ][1] #(lenChain, oriVec, angle)
        ) for fiberID in combinedAfterGapFilling]
    #TODO if only a section of V_fiberMap is passed to each parallel process, much less memory requirement.
    # to that end, could use a function to truncate V_fiberMap to where fiberID is present, and keep track of the x,y,z offsets,
    # and reflect those in results
    results= Parallel(n_jobs=num_cores)\
        (delayed(parallelGapFilling)\
            (fiberID,
            operationTuple,
            V_fiberMap,
            makePlotAll,
            makePlotsIndividual,
            V_hist_parallel,
            engine_parallel
            )for fiberID,operationTuple in postProcessQueueSecondPass)
    for resTuple in results:
        if makePlotAll:
            zBefore,xBefore,yBefore=resTuple[0]
            V_before[zBefore,xBefore,yBefore]=255
        newVoxels.update(resTuple[1])
        collisionsDict.update(resTuple[2])
    # Assign all new voxels to global fiberMap
    for fiberID in newVoxels.keys():
        zNew=newVoxels[fiberID]["zNew"]
        xNew=newVoxels[fiberID]["xNew"]
        yNew=newVoxels[fiberID]["yNew"]
        V_fiberMap[zNew,xNew,yNew]=fiberID
        if makePlotAll:
            V_after[zNew,xNew,yNew]=255
    if makePlotAll:
        mlab.figure(figure="Before/After",size=(1200,1050),bgcolor=(0.1,0.1,0.1))
        srcBefore     = mlab.pipeline.scalar_field(np.transpose(V_before,    (1,2,0)) )
        srcAfter      = mlab.pipeline.scalar_field(np.transpose(V_after ,    (1,2,0)) )
        srcCollisions = mlab.pipeline.scalar_field(np.transpose(V_collisions,(1,2,0)) )
        mlab.pipeline.iso_surface(srcBefore, contours=[255], opacity=0.8, color=color1)
        mlab.pipeline.iso_surface(srcAfter , contours=[255], opacity=0.8, color=color2)
        mlab.pipeline.iso_surface(srcCollisions, contours=[255], opacity=0.8, color=colorCollisions)
        V_planeWidget=np.transpose(V_hist,(1,2,0))
        addPlaneWidgets(V_planeWidget,engine, widgetLUT="black-white",axOnly="z_axes")
        mlab.outline()
        mlab.show()
    return collisionsDict,V_fiberMap
def randomizeVoxels(V_fiberMap,listMarkers,parallelHandle=True):
    """Return a copy of V_fiberMap whose marker values are randomly permuted.

    Shuffles the markers of listMarkers and remaps each slice accordingly
    (slice remapping runs in parallel when parallelHandle is True). Useful so
    neighbouring fibers get visually distinct values when rendered.
    """
    shuffledMarkers = listMarkers.copy()
    # random permutation of the original markers
    shuffle(shuffledMarkers)
    markerLUT = dict(zip(listMarkers, shuffledMarkers))
    num_cores = multiprocessing.cpu_count() - 1 if parallelHandle else 1
    remappedSlices = Parallel(n_jobs=num_cores)(
        delayed(compactifySlice)(V_fiberMap[iSlice], markerLUT)
        for iSlice in range(V_fiberMap.shape[0])
    )
    V_fiberMap_randomized = V_fiberMap.copy()
    for iSlice, remapped in enumerate(remappedSlices):
        V_fiberMap_randomized[iSlice] = remapped
    return V_fiberMap_randomized
def makePropertyMap(V_fiberMap,marker_to_propertyLUT,parallelHandle=True):
    """Build a float32 volume where each voxel holds the property of its marker.

    Each slice of V_fiberMap is translated through marker_to_propertyLUT
    (in parallel when parallelHandle is True).
    """
    # too many simultaneous workers can exhaust memory on large sets
    num_cores = multiprocessing.cpu_count() - 1 if parallelHandle else 1
    propertySlices = Parallel(n_jobs=num_cores)(
        delayed(makePropertySlice)(V_fiberMap[iSlice], marker_to_propertyLUT)
        for iSlice in range(V_fiberMap.shape[0])
    )
    V_fiberMap_property = np.empty(V_fiberMap.shape, np.float32)
    for iSlice, propSlice in enumerate(propertySlices):
        V_fiberMap_property[iSlice] = propSlice
    return V_fiberMap_property
def postProcessingOfFibers(
    commonPath,
    permutationPath,
    SE_radius=4,
    useInclinedCylinderSE=True,
    makePlotsIndividual=False,
    makePlotAll=False,
    randomize=False,
    parallelHandle=False,
    postProcessAllFibers=False,
    articlePlots=False,
    article_savePlotALL=False,
    exclusiveFibers=None #list of fibers which will be postprocessed. useful for debugging
    ):
    """Load a permutation's fiber map from disk and fill volumetric gaps in its fibers.

    Reads V_fiberMap.tiff, V_pores.tiff, (optionally) V_perim.tiff and
    fiberStruct.pickle from commonPath/permutationPath, builds the queue of
    fibers with interpolated (interrupted) centers, gap-fills them via
    parallelGapFilling(), and masks out pores/perimeter and fibers already
    tracked by Permutation123.

    Returns (V_fiberMap, V_fiberMap_randomized, V_fibers_masked, xRes,
    unitTiff, descriptionStr, times_postProc).
    """
    if makePlotsIndividual or makePlotAll:
        from mayavi import mlab
        from visualisationTool import addPlaneWidgets
        engine = mlab.get_engine()
    else:
        engine=None
    print('\n\tpostProcessingOfFibers() called on dataset:\n {}\n\treading from disk'.format(commonPath))
    tic = time.perf_counter()
    with TiffFile(os.path.join(commonPath,permutationPath,"V_fiberMap.tiff")) as tif:
        print("\tloading: \n{}".format(os.path.join(commonPath,permutationPath,"V_fiberMap.tiff")))
        xRes,unitTiff,descriptionStr=getTiffProperties(tif,getDescription=True)
        V_fiberMap=tif.asarray()
    with TiffFile(os.path.join(commonPath,permutationPath,"V_pores.tiff")) as tif:
        print("\tloading: \n{}".format(os.path.join(commonPath,permutationPath,"V_pores.tiff")))
        V_pores=tif.asarray()
    try:
        with TiffFile(os.path.join(commonPath,permutationPath,"V_perim.tiff")) as tif:
            print("\tloading: \n{}".format(os.path.join(commonPath,permutationPath,"V_perim.tiff")))
            V_perim=tif.asarray()
    # FIX: was a bare `except:` which also swallowed KeyboardInterrupt/SystemExit;
    # V_perim is optional, so any loading failure falls back to None
    except Exception:
        V_perim=None
    if makePlotsIndividual or makePlotAll:
        with TiffFile(os.path.join(commonPath,permutationPath,"V_hist.tiff")) as tif:
            print("\tloading: \n{}".format(os.path.join(commonPath,permutationPath,"V_hist.tiff")))
            V_hist=tif.asarray()
    else:
        V_hist=None
    if makePlotAll:
        V_before=np.zeros(V_fiberMap.shape,np.uint8)
        V_after =np.zeros(V_fiberMap.shape,np.uint8)
    else:
        V_before=V_after=None
    if permutationPath!="Permutation123/":
        filesInDir123 = [f.path for f in os.scandir(os.path.join(commonPath,"Permutation123/")) if f.is_file()]
        indexFibers_mask=None
        for i,iPath in enumerate(filesInDir123):
            if "V_fibers_masked.tiff" in iPath:
                indexFibers_mask=i
        if indexFibers_mask is None:
            raise RuntimeError("Can't find V_fibers_masked.tiff in \n{}".\
                format(os.path.join(commonPath,"Permutation123/")))
        with TiffFile(filesInDir123[indexFibers_mask]) as tif:
            print("\tloading: \n"+filesInDir123[indexFibers_mask])
            if permutationPath=="Permutation132/":
                transposeTuple=(2,1,0) # [z,x,y]->[y,x,z]
            elif permutationPath=="Permutation321/":
                transposeTuple=(1,0,2) # [z,x,y]->[x,z,y]
            V_fibers_masked=np.array(np.transpose(tif.asarray()/255,transposeTuple),np.uint8)
        if V_fiberMap.shape!=V_fibers_masked.shape:
            raise ValueError("V_fiberMap.tiff and V_fibers_masked.tiff are of incompatible shapes")
    with open(os.path.join(commonPath,permutationPath,"fiberStruct.pickle") , "rb") as f:
        fiberStruct = pickle.load(f)
    exclusiveZone=fiberStruct["exclusiveZone"]
    if len(exclusiveZone)>0:
        if exclusiveZone is not None:
            # remap the exclusive-zone bounds into this permutation's axis order
            if permutationPath=="Permutation123/":
                zMin=exclusiveZone["zMin"]
                zMax=exclusiveZone["zMax"]
                xMin=exclusiveZone["xMin"]
                xMax=exclusiveZone["xMax"]
                yMin=exclusiveZone["yMin"]
                yMax=exclusiveZone["yMax"]
            elif permutationPath=="Permutation132/":
                zMin=exclusiveZone["yMin"]
                zMax=exclusiveZone["yMax"]
                xMin=exclusiveZone["xMin"]
                xMax=exclusiveZone["xMax"]
                yMin=exclusiveZone["zMin"]
                yMax=exclusiveZone["zMax"]
            elif permutationPath=="Permutation321/":
                zMin=exclusiveZone["xMin"]
                zMax=exclusiveZone["xMax"]
                xMin=exclusiveZone["zMin"]
                xMax=exclusiveZone["zMax"]
                yMin=exclusiveZone["yMin"]
                yMax=exclusiveZone["yMax"]
            V_pores=V_pores[zMin:zMax,xMin:xMax,yMin:yMax]
            if V_perim is not None:
                V_perim=V_perim[zMin:zMax,xMin:xMax,yMin:yMax]
    else:
        exclusiveZone=None
    toc = time.perf_counter()
    print(f"\treading from disk complete in {toc - tic:0.4f} seconds\n")
    times_postProc={}
    times_postProc["reading from disk only:"]=time.strftime("%Hh%Mm%Ss", time.gmtime(toc-tic))
    ticPost = time.perf_counter()
    postProcessQueue=[]
    if postProcessAllFibers:
        # force every non-stitched fiber into the queue by giving it a dummy interpolation chain
        doNotPostProcess={"initial_stitched_segment","stitched_blind(added)","stitched_smart(added)"}
        for fiberID,fibObj in fiberStruct["fiberStruct"].items():
            if fiberID not in fiberStruct["fiberObj_classAttributes"]["interpolatedCenters"].keys() and\
                not fibObj.tags.intersection(doNotPostProcess):
                fiberStruct["fiberObj_classAttributes"]["interpolatedCenters"][fiberID]=[5]
    # only fiberObj that have interuptions (centroids are added by filling), and
    # that are not rejected are postProcessed
    for fiberID,interpolationChains in fiberStruct["fiberObj_classAttributes"]["interpolatedCenters"].items():
        fibO=fiberStruct["fiberStruct"][fiberID]
        skip=True
        if exclusiveFibers is not None:
            if fiberID in exclusiveFibers:
                skip=False
        else:
            skip=False
        if not skip:
            fiberObj.initializeClassAttributes(savedAttributes=fiberStruct["fiberObj_classAttributes"])
            #fiberObj that were added to another at blindStitching() or smartStitching wont be processed (starting fiberObj will)
            #"initial_stitched_segment" tags is for duplicates, kept for plotting purposes. do not post-process
            doNotPostProcess={"initial_stitched_segment","stitched_blind(added)","stitched_smart(added)"}
            if not fibO.tags.intersection(doNotPostProcess):
                if not fibO.rejected:
                    oriVec=fibO.orientationVec
                    oriVec/=np.linalg.norm(oriVec)
                    #if there is more than one interpolation chain, keep the longest to create Structuring Element
                    if len(interpolationChains)>1:
                        # listLengths=[len(chain) for chain in interpolationChains]
                        # pos=listLengths.index(max(listLengths))
                        pos=interpolationChains.index(max(interpolationChains))
                    else:
                        pos=0
                    angle=np.degrees(np.arccos(np.dot(oriVec,[0.,0.,1.])))
                    postProcessQueue.append(
                        (
                            fiberID,
                            (interpolationChains[pos], oriVec,angle)
                        )
                    )
    ########################################
    ###
    ### in V_fiberMap:
    ###     markers>=0      are "tracked" fiberIDs
    ###     marker==-1      is background
    ###     markers<-1      are "rejected" fiberIDs
    ###     markers==-999999 are unmatched
    ###
    ########################################
    newVoxels={}
    if parallelHandle:
        num_cores=int(multiprocessing.cpu_count()*2/3) # may cause memory overload if too many processes are used simultaneously on large datasets
        makePlotsIndividual=False
        V_hist_parallel=None # shouldn't be sent if not used for plotting
        engine_parallel=None # cant be sent in a parallel call, un-pickleable
    else:
        num_cores=1
        V_hist_parallel=V_hist
        engine_parallel=engine
    results= Parallel(n_jobs=num_cores)\
        (delayed(parallelGapFilling)\
            (fiberID,
            operationTuple,
            V_fiberMap,
            makePlotAll,
            makePlotsIndividual,
            V_hist_parallel,
            engine_parallel,
            checkCollision=False, #it is unlikely to encounter large collisions at this point. will be checked at combinePermutations()
            SE_radius=SE_radius,
            useInclinedCylinderSE=useInclinedCylinderSE,
            articlePlots=articlePlots
            )for fiberID,operationTuple in postProcessQueue)
    for resTuple in results:
        if makePlotAll:
            zBefore,xBefore,yBefore=resTuple[0]
            V_before[zBefore,xBefore,yBefore]=255
        newVoxels.update(resTuple[1])
    # Assign all new voxels to global fiberMap
    for fiberID in newVoxels.keys():
        zNew=newVoxels[fiberID]["zNew"]
        xNew=newVoxels[fiberID]["xNew"]
        yNew=newVoxels[fiberID]["yNew"]
        V_fiberMap[zNew,xNew,yNew]=fiberID
        if makePlotAll:
            V_after[zNew,xNew,yNew]=255
    #prevent spillover to pores and perim
    V_fiberMap[V_pores==255]=-1
    if V_perim is not None:
        V_fiberMap[V_perim==255]=-1
    if permutationPath!="Permutation123/":
        # V_fibers_masked==1 where there is a fiber present from permutation123
        # collisions prevented here
        V_fiberMap[V_fibers_masked==1]=-1 # marker==-1 is background
        if makePlotAll:
            V_after_masked=V_after.copy()
            V_after_masked[V_fibers_masked==1]=0
    if makePlotAll:
        mlab.figure(figure="Before/After",size=(1200,1050),bgcolor=(0.1,0.1,0.1))
        srcBefore = mlab.pipeline.scalar_field(np.transpose(V_before,(1,2,0)) )
        srcAfter  = mlab.pipeline.scalar_field(np.transpose(V_after ,(1,2,0)) )
        mlab.pipeline.iso_surface(srcBefore, contours=[255], opacity=1.0, color=color3)
        mlab.pipeline.iso_surface(srcAfter , contours=[255], opacity=0.8, color=color0)
        if permutationPath!="Permutation123/":
            srcAfter_masked  = mlab.pipeline.scalar_field(np.transpose(V_after_masked ,(1,2,0)) )
            mlab.pipeline.iso_surface(srcAfter_masked , contours=[255], opacity=0.8, color=(0.9,0.20,0.1))
            V_planeWidget=np.transpose(V_fibers_masked,(1,2,0))
        else:
            V_planeWidget=np.transpose(V_hist,(1,2,0))
        if exclusiveZone:
            rangeOutline=[-exclusiveZone["xMin"],0,-exclusiveZone["yMin"],0,-exclusiveZone["zMin"],0.]
        else:
            rangeOutline=None
        addPlaneWidgets(V_planeWidget,engine, widgetLUT="black-white",axOnly="z_axes",rangeOutline=rangeOutline)
        mlab.outline()
    if article_savePlotALL:
        import tifffile
        tifffile.imwrite(
            "/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_before.tiff",
            V_before,
            resolution=(xRes,xRes,unitTiff),
            compress=True
        )
        tifffile.imwrite(
            "/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_after.tiff",
            V_after,
            resolution=(xRes,xRes,unitTiff),
            compress=True
        )
    tocPost=time.perf_counter()
    print(f"\tpostProcessingOfFibers call complete in {tocPost-ticPost:0.4f} seconds\n")
    times_postProc["postProcessingOfFibers only:"]=time.strftime("%Hh%Mm%Ss", time.gmtime(tocPost-ticPost))
    ###############################################################################################
    # voxelReassignment to make it easier to identify different fibers:
    ##############################################################################################
    if randomize:
        listMarkers=np.unique(V_fiberMap)
        listMarkers=[val for val in listMarkers if val>=0]# tracked fibers have markers starting at 0
        for fiberID in fiberStruct["fiberObj_classAttributes"]["listFiberIDs_tracked"]:
            if fiberID not in listMarkers:
                print("in listFiberIDs_tracked, not in listMarkers",fiberID)
        for fiberID in listMarkers:
            if fiberID not in fiberStruct["fiberObj_classAttributes"]["listFiberIDs_tracked"]:
                print("in listMarkers, not in listFiberIDs_tracked",fiberID)
        print("\trandom shuffling of markers for rendering purposes started")
        ticRandomize=time.perf_counter()
        V_fiberMap_randomized=randomizeVoxels(V_fiberMap,listMarkers)
        tocRandomize=time.perf_counter()
        times_postProc["random shuffling in: "]=time.strftime("%Hh%Mm%Ss", time.gmtime(tocRandomize-ticRandomize))
    else:
        V_fiberMap_randomized=None
    if permutationPath=="Permutation123/":
        ticMakeMask=time.perf_counter()
        V_fibers_masked=np.zeros(V_fiberMap.shape,np.uint8)
        # in V_fibers_masked, mark pixels that were not tracked as False.
        # "Tracked" pixels will then be removed from the V_fibers.tiff in
        # other permutations. This is so extractCenterPoints() only finds centroids in regions not
        # already containing a fiber from permutation 123, as in V_fibers[V_fibers_masked==1]=0. line 293
        V_fibers_masked[V_fiberMap>=0]=255 # markers>0 are "tracked", background(matrix) has marker==-1. markers>-1 are untracked(-999999) or rejected
        tocMakeMask=time.perf_counter()
        print(f"making fibermask in: {tocMakeMask-ticMakeMask:0.4f} seconds\n")
        times_postProc["making mask in: "]=time.strftime("%Hh%Mm%Ss", time.gmtime(tocMakeMask-ticMakeMask))
    else:
        V_fibers_masked=None
    return V_fiberMap,V_fiberMap_randomized,V_fibers_masked,xRes,unitTiff,descriptionStr,times_postProc
| [
"mayavi.mlab.figure",
"combineFunctions.findCollisions",
"random.shuffle",
"numpy.empty",
"mayavi.mlab.pipeline.scalar_field",
"tifffile.imwrite",
"numpy.ones",
"extractCenterPoints.getTiffProperties",
"numpy.mean",
"pickle.load",
"numpy.linalg.norm",
"os.path.join",
"numpy.unique",
"multi... | [((2221, 2254), 'numpy.where', 'np.where', (['(V_thisMarkerOnly == 255)'], {}), '(V_thisMarkerOnly == 255)\n', (2229, 2254), True, 'import numpy as np\n'), ((3390, 3411), 'numpy.mean', 'np.mean', (['[xMin, xMax]'], {}), '([xMin, xMax])\n', (3397, 3411), True, 'import numpy as np\n'), ((3421, 3442), 'numpy.mean', 'np.mean', (['[yMin, yMax]'], {}), '([yMin, yMax])\n', (3428, 3442), True, 'import numpy as np\n'), ((3452, 3473), 'numpy.mean', 'np.mean', (['[zMin, zMax]'], {}), '([zMin, zMax])\n', (3459, 3473), True, 'import numpy as np\n'), ((8031, 8050), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8048, 8050), False, 'import time\n'), ((8674, 8693), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (8691, 8693), False, 'import time\n'), ((9449, 9468), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (9466, 9468), False, 'import time\n'), ((13403, 13447), 'numpy.where', 'np.where', (['(V_sliceMarker_closed_opened == 255)'], {}), '(V_sliceMarker_closed_opened == 255)\n', (13411, 13447), True, 'import numpy as np\n'), ((14185, 14216), 'numpy.where', 'np.where', (['(V_fiberMap == fiberID)'], {}), '(V_fiberMap == fiberID)\n', (14193, 14216), True, 'import numpy as np\n'), ((14499, 14570), 'numpy.zeros', 'np.zeros', (['(zMax - zMin + 1, xMax - xMin + 1, yMax - yMin + 1)', 'np.uint8'], {}), '((zMax - zMin + 1, xMax - xMin + 1, yMax - yMin + 1), np.uint8)\n', (14507, 14570), True, 'import numpy as np\n'), ((24456, 24482), 'random.shuffle', 'shuffle', (['reassignedMarkers'], {}), '(reassignedMarkers)\n', (24463, 24482), False, 'from random import shuffle\n'), ((25117, 25155), 'numpy.empty', 'np.empty', (['V_fiberMap.shape', 'np.float32'], {}), '(V_fiberMap.shape, np.float32)\n', (25125, 25155), True, 'import numpy as np\n'), ((26430, 26449), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (26447, 26449), False, 'import time\n'), ((30172, 30191), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (30189, 
30191), False, 'import time\n'), ((30398, 30417), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (30415, 30417), False, 'import time\n'), ((36838, 36857), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (36855, 36857), False, 'import time\n'), ((1094, 1194), 'numpy.ones', 'np.ones', (['(V.shape[0] + 2 * radiusZ, V.shape[1] + 2 * radiusX, V.shape[2] + 2 * radiusY)', 'V.dtype'], {}), '((V.shape[0] + 2 * radiusZ, V.shape[1] + 2 * radiusX, V.shape[2] + 2 *\n radiusY), V.dtype)\n', (1101, 1194), True, 'import numpy as np\n'), ((3515, 3557), 'skimage.morphology.ball', 'morphology.ball', (['SE_radius'], {'dtype': 'np.uint8'}), '(SE_radius, dtype=np.uint8)\n', (3530, 3557), False, 'from skimage import morphology\n'), ((3585, 3631), 'skimage.morphology.ball', 'morphology.ball', (['(SE_radius - 1)'], {'dtype': 'np.uint8'}), '(SE_radius - 1, dtype=np.uint8)\n', (3600, 3631), False, 'from skimage import morphology\n'), ((4596, 4637), 'numpy.zeros', 'np.zeros', (['(sizeZ, sizeX, sizeY)', 'np.uint8'], {}), '((sizeZ, sizeX, sizeY), np.uint8)\n', (4604, 4637), True, 'import numpy as np\n'), ((5194, 5284), 'numpy.zeros', 'np.zeros', (['(lenChain + 2 * SE_radius, SE_ball3D.shape[0], SE_ball3D.shape[1])', 'np.uint8'], {}), '((lenChain + 2 * SE_radius, SE_ball3D.shape[0], SE_ball3D.shape[1]),\n np.uint8)\n', (5202, 5284), True, 'import numpy as np\n'), ((5752, 5785), 'numpy.transpose', 'np.transpose', (['SE_rod3D', '(1, 2, 0)'], {}), '(SE_rod3D, (1, 2, 0))\n', (5764, 5785), True, 'import numpy as np\n'), ((5913, 5959), 'mayavi.mlab.pipeline.scalar_field', 'mlab.pipeline.scalar_field', (['transposedSE_rod3D'], {}), '(transposedSE_rod3D)\n', (5939, 5959), False, 'from mayavi import mlab\n'), ((5968, 6058), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcRod'], {'contours': '[255]', 'opacity': '(0.5)', 'color': '(0.0, 0.0, 0.0)'}), '(srcRod, contours=[255], opacity=0.5, color=(0.0, \n 0.0, 0.0))\n', (5993, 6058), False, 'from mayavi import 
mlab\n'), ((6058, 6087), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (6070, 6087), False, 'from mayavi import mlab\n'), ((6247, 6289), 'numpy.transpose', 'np.transpose', (['SE_ball3D_opening', '(1, 2, 0)'], {}), '(SE_ball3D_opening, (1, 2, 0))\n', (6259, 6289), True, 'import numpy as np\n'), ((6426, 6481), 'mayavi.mlab.pipeline.scalar_field', 'mlab.pipeline.scalar_field', (['transposedSE_ball3D_opening'], {}), '(transposedSE_ball3D_opening)\n', (6452, 6481), False, 'from mayavi import mlab\n'), ((6490, 6580), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcBall'], {'contours': '[255]', 'opacity': '(0.5)', 'color': '(0.0, 0.0, 0.0)'}), '(srcBall, contours=[255], opacity=0.5, color=(0.0,\n 0.0, 0.0))\n', (6515, 6580), False, 'from mayavi import mlab\n'), ((6581, 6610), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (6593, 6610), False, 'from mayavi import mlab\n'), ((7392, 7471), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcFiber'], {'contours': '[255]', 'opacity': '(0.45)', 'color': 'color0'}), '(srcFiber, contours=[255], opacity=0.45, color=color0)\n', (7417, 7471), False, 'from mayavi import mlab\n'), ((7991, 8020), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (8003, 8020), False, 'from mayavi import mlab\n'), ((10643, 10721), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcFiber'], {'contours': '[255]', 'opacity': '(0.5)', 'color': 'color1'}), '(srcFiber, contours=[255], opacity=0.5, color=color1)\n', (10668, 10721), False, 'from mayavi import mlab\n'), ((10753, 10784), 'numpy.transpose', 'np.transpose', (['V_hist', '(1, 2, 0)'], {}), '(V_hist, (1, 2, 0))\n', (10765, 10784), True, 'import numpy as np\n'), ((10791, 10820), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (10803, 10820), False, 'from mayavi import 
mlab\n'), ((11823, 11901), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcFiber'], {'contours': '[255]', 'opacity': '(0.5)', 'color': 'color3'}), '(srcFiber, contours=[255], opacity=0.5, color=color3)\n', (11848, 11901), False, 'from mayavi import mlab\n'), ((11933, 11964), 'numpy.transpose', 'np.transpose', (['V_hist', '(1, 2, 0)'], {}), '(V_hist, (1, 2, 0))\n', (11945, 11964), True, 'import numpy as np\n'), ((11971, 12000), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (11983, 12000), False, 'from mayavi import mlab\n'), ((13133, 13144), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (13142, 13144), False, 'from mayavi import mlab\n'), ((15617, 15654), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint32'], {}), '(V_fiberMap.shape, np.uint32)\n', (15625, 15654), True, 'import numpy as np\n'), ((17046, 17063), 'mayavi.mlab.get_engine', 'mlab.get_engine', ([], {}), '()\n', (17061, 17063), False, 'from mayavi import mlab\n'), ((17140, 17176), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (17148, 17176), True, 'import numpy as np\n'), ((17193, 17229), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (17201, 17229), True, 'import numpy as np\n'), ((17250, 17286), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (17258, 17286), True, 'import numpy as np\n'), ((17786, 17812), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (17794, 17812), False, 'from joblib import Parallel, delayed\n'), ((22474, 22500), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (22482, 22500), False, 'from joblib import Parallel, delayed\n'), ((23390, 23468), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': '"""Before/After"""', 'size': '(1200, 1050)', 'bgcolor': '(0.1, 0.1, 
0.1)'}), "(figure='Before/After', size=(1200, 1050), bgcolor=(0.1, 0.1, 0.1))\n", (23401, 23468), False, 'from mayavi import mlab\n'), ((23747, 23826), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcBefore'], {'contours': '[255]', 'opacity': '(0.8)', 'color': 'color1'}), '(srcBefore, contours=[255], opacity=0.8, color=color1)\n', (23772, 23826), False, 'from mayavi import mlab\n'), ((23842, 23920), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcAfter'], {'contours': '[255]', 'opacity': '(0.8)', 'color': 'color2'}), '(srcAfter, contours=[255], opacity=0.8, color=color2)\n', (23867, 23920), False, 'from mayavi import mlab\n'), ((23937, 24034), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcCollisions'], {'contours': '[255]', 'opacity': '(0.8)', 'color': 'colorCollisions'}), '(srcCollisions, contours=[255], opacity=0.8, color\n =colorCollisions)\n', (23962, 24034), False, 'from mayavi import mlab\n'), ((24056, 24087), 'numpy.transpose', 'np.transpose', (['V_hist', '(1, 2, 0)'], {}), '(V_hist, (1, 2, 0))\n', (24068, 24087), True, 'import numpy as np\n'), ((24094, 24179), 'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['V_planeWidget', 'engine'], {'widgetLUT': '"""black-white"""', 'axOnly': '"""z_axes"""'}), "(V_planeWidget, engine, widgetLUT='black-white', axOnly='z_axes'\n )\n", (24109, 24179), False, 'from visualisationTool import addPlaneWidgets\n'), ((24182, 24196), 'mayavi.mlab.outline', 'mlab.outline', ([], {}), '()\n', (24194, 24196), False, 'from mayavi import mlab\n'), ((24206, 24217), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (24215, 24217), False, 'from mayavi import mlab\n'), ((24712, 24738), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (24720, 24738), False, 'from joblib import Parallel, delayed\n'), ((25343, 25369), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (25351, 25369), False, 
'from joblib import Parallel, delayed\n'), ((26264, 26281), 'mayavi.mlab.get_engine', 'mlab.get_engine', ([], {}), '()\n', (26279, 26281), False, 'from mayavi import mlab\n'), ((26674, 26717), 'extractCenterPoints.getTiffProperties', 'getTiffProperties', (['tif'], {'getDescription': '(True)'}), '(tif, getDescription=True)\n', (26691, 26717), False, 'from extractCenterPoints import getTiffProperties\n'), ((27573, 27609), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (27581, 27609), True, 'import numpy as np\n'), ((27626, 27662), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (27634, 27662), True, 'import numpy as np\n'), ((28915, 28929), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (28926, 28929), False, 'import pickle\n'), ((30361, 30383), 'time.gmtime', 'time.gmtime', (['(toc - tic)'], {}), '(toc - tic)\n', (30372, 30383), False, 'import time\n'), ((33588, 33614), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (33596, 33614), False, 'from joblib import Parallel, delayed\n'), ((35148, 35226), 'mayavi.mlab.figure', 'mlab.figure', ([], {'figure': '"""Before/After"""', 'size': '(1200, 1050)', 'bgcolor': '(0.1, 0.1, 0.1)'}), "(figure='Before/After', size=(1200, 1050), bgcolor=(0.1, 0.1, 0.1))\n", (35159, 35226), False, 'from mayavi import mlab\n'), ((35392, 35471), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcBefore'], {'contours': '[255]', 'opacity': '(1.0)', 'color': 'color3'}), '(srcBefore, contours=[255], opacity=1.0, color=color3)\n', (35417, 35471), False, 'from mayavi import mlab\n'), ((35480, 35558), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcAfter'], {'contours': '[255]', 'opacity': '(0.8)', 'color': 'color0'}), '(srcAfter, contours=[255], opacity=0.8, color=color0)\n', (35505, 35558), False, 'from mayavi import mlab\n'), ((36143, 36255), 
'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['V_planeWidget', 'engine'], {'widgetLUT': '"""black-white"""', 'axOnly': '"""z_axes"""', 'rangeOutline': 'rangeOutline'}), "(V_planeWidget, engine, widgetLUT='black-white', axOnly=\n 'z_axes', rangeOutline=rangeOutline)\n", (36158, 36255), False, 'from visualisationTool import addPlaneWidgets\n'), ((36257, 36271), 'mayavi.mlab.outline', 'mlab.outline', ([], {}), '()\n', (36269, 36271), False, 'from mayavi import mlab\n'), ((37026, 37056), 'time.gmtime', 'time.gmtime', (['(tocPost - ticPost)'], {}), '(tocPost - ticPost)\n', (37037, 37056), False, 'import time\n'), ((37370, 37391), 'numpy.unique', 'np.unique', (['V_fiberMap'], {}), '(V_fiberMap)\n', (37379, 37391), True, 'import numpy as np\n'), ((38015, 38034), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (38032, 38034), False, 'import time\n'), ((38136, 38155), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (38153, 38155), False, 'import time\n'), ((38384, 38403), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (38401, 38403), False, 'import time\n'), ((38429, 38465), 'numpy.zeros', 'np.zeros', (['V_fiberMap.shape', 'np.uint8'], {}), '(V_fiberMap.shape, np.uint8)\n', (38437, 38465), True, 'import numpy as np\n'), ((38995, 39014), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (39012, 39014), False, 'import time\n'), ((2853, 2881), 'numpy.zeros', 'np.zeros', (['newShape', 'np.uint8'], {}), '(newShape, np.uint8)\n', (2861, 2881), True, 'import numpy as np\n'), ((3216, 3244), 'numpy.zeros', 'np.zeros', (['newShape', 'np.uint8'], {}), '(newShape, np.uint8)\n', (3224, 3244), True, 'import numpy as np\n'), ((3682, 3724), 'skimage.morphology.disk', 'morphology.disk', (['SE_radius'], {'dtype': 'np.uint8'}), '(SE_radius, dtype=np.uint8)\n', (3697, 3724), False, 'from skimage import morphology\n'), ((5825, 5885), 'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['transposedSE_rod3D', 'engine'], {'axOnly': 
'"""z_axes"""'}), "(transposedSE_rod3D, engine, axOnly='z_axes')\n", (5840, 5885), False, 'from visualisationTool import addPlaneWidgets\n'), ((6329, 6398), 'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['transposedSE_ball3D_opening', 'engine'], {'axOnly': '"""z_axes"""'}), "(transposedSE_ball3D_opening, engine, axOnly='z_axes')\n", (6344, 6398), False, 'from visualisationTool import addPlaneWidgets\n'), ((7123, 7250), 'numpy.transpose', 'np.transpose', (['V_sliceMarker[paddingSizeZ:-paddingSizeZ, paddingSizeX:-paddingSizeX,\n paddingSizeY:-paddingSizeY]', '(1, 2, 0)'], {}), '(V_sliceMarker[paddingSizeZ:-paddingSizeZ, paddingSizeX:-\n paddingSizeX, paddingSizeY:-paddingSizeY], (1, 2, 0))\n', (7135, 7250), True, 'import numpy as np\n'), ((10125, 10259), 'numpy.transpose', 'np.transpose', (['V_sliceMarker_closed[paddingSizeZ:-paddingSizeZ, paddingSizeX:-paddingSizeX,\n paddingSizeY:-paddingSizeY]', '(1, 2, 0)'], {}), '(V_sliceMarker_closed[paddingSizeZ:-paddingSizeZ, paddingSizeX:\n -paddingSizeX, paddingSizeY:-paddingSizeY], (1, 2, 0))\n', (10137, 10259), True, 'import numpy as np\n'), ((10889, 10981), 'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['transposedV_sliceHist', 'engine'], {'widgetLUT': '"""black-white"""', 'axOnly': '"""z_axes"""'}), "(transposedV_sliceHist, engine, widgetLUT='black-white',\n axOnly='z_axes')\n", (10904, 10981), False, 'from visualisationTool import addPlaneWidgets\n'), ((11566, 11706), 'numpy.transpose', 'np.transpose', (['V_sliceMarker_closed_opened[paddingSizeZ:-paddingSizeZ, paddingSizeX:-\n paddingSizeX, paddingSizeY:-paddingSizeY]', '(1, 2, 0)'], {}), '(V_sliceMarker_closed_opened[paddingSizeZ:-paddingSizeZ,\n paddingSizeX:-paddingSizeX, paddingSizeY:-paddingSizeY], (1, 2, 0))\n', (11578, 11706), True, 'import numpy as np\n'), ((12068, 12160), 'visualisationTool.addPlaneWidgets', 'addPlaneWidgets', (['transposedV_sliceHist', 'engine'], {'widgetLUT': '"""black-white"""', 'axOnly': '"""z_axes"""'}), 
"(transposedV_sliceHist, engine, widgetLUT='black-white',\n axOnly='z_axes')\n", (12083, 12160), False, 'from visualisationTool import addPlaneWidgets\n'), ((12193, 12222), 'mayavi.mlab.outline', 'mlab.outline', ([], {'color': '(0, 0, 0)'}), '(color=(0, 0, 0))\n', (12205, 12222), False, 'from mayavi import mlab\n'), ((12240, 12257), 'mayavi.mlab.get_engine', 'mlab.get_engine', ([], {}), '()\n', (12255, 12257), False, 'from mayavi import mlab\n'), ((16039, 16102), 'combineFunctions.findCollisions', 'findCollisions', (['V_fiberMap', 'V_NewVoxels'], {'makeV_collisions': '(False)'}), '(V_fiberMap, V_NewVoxels, makeV_collisions=False)\n', (16053, 16102), False, 'from combineFunctions import findCollisions\n'), ((23516, 23549), 'numpy.transpose', 'np.transpose', (['V_before', '(1, 2, 0)'], {}), '(V_before, (1, 2, 0))\n', (23528, 23549), True, 'import numpy as np\n'), ((23607, 23639), 'numpy.transpose', 'np.transpose', (['V_after', '(1, 2, 0)'], {}), '(V_after, (1, 2, 0))\n', (23619, 23639), True, 'import numpy as np\n'), ((23698, 23735), 'numpy.transpose', 'np.transpose', (['V_collisions', '(1, 2, 0)'], {}), '(V_collisions, (1, 2, 0))\n', (23710, 23735), True, 'import numpy as np\n'), ((24636, 24663), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (24661, 24663), False, 'import multiprocessing\n'), ((25201, 25228), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (25226, 25228), False, 'import multiprocessing\n'), ((26469, 26529), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_fiberMap.tiff"""'], {}), "(commonPath, permutationPath, 'V_fiberMap.tiff')\n", (26481, 26529), False, 'import os\n'), ((26770, 26827), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_pores.tiff"""'], {}), "(commonPath, permutationPath, 'V_pores.tiff')\n", (26782, 26827), False, 'import os\n'), ((28216, 28257), 'tifffile.TiffFile', 'TiffFile', (['filesInDir123[indexFibers_mask]'], {}), 
'(filesInDir123[indexFibers_mask])\n', (28224, 28257), False, 'from tifffile import TiffFile\n'), ((28816, 28879), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""fiberStruct.pickle"""'], {}), "(commonPath, permutationPath, 'fiberStruct.pickle')\n", (28828, 28879), False, 'import os\n'), ((31399, 31495), 'fibers.fiberObj.initializeClassAttributes', 'fiberObj.initializeClassAttributes', ([], {'savedAttributes': "fiberStruct['fiberObj_classAttributes']"}), "(savedAttributes=fiberStruct[\n 'fiberObj_classAttributes'])\n", (31433, 31495), False, 'from fibers import fiberObj\n'), ((35270, 35303), 'numpy.transpose', 'np.transpose', (['V_before', '(1, 2, 0)'], {}), '(V_before, (1, 2, 0))\n', (35282, 35303), True, 'import numpy as np\n'), ((35350, 35382), 'numpy.transpose', 'np.transpose', (['V_after', '(1, 2, 0)'], {}), '(V_after, (1, 2, 0))\n', (35362, 35382), True, 'import numpy as np\n'), ((35722, 35820), 'mayavi.mlab.pipeline.iso_surface', 'mlab.pipeline.iso_surface', (['srcAfter_masked'], {'contours': '[255]', 'opacity': '(0.8)', 'color': '(0.9, 0.2, 0.1)'}), '(srcAfter_masked, contours=[255], opacity=0.8,\n color=(0.9, 0.2, 0.1))\n', (35747, 35820), False, 'from mayavi import mlab\n'), ((35844, 35884), 'numpy.transpose', 'np.transpose', (['V_fibers_masked', '(1, 2, 0)'], {}), '(V_fibers_masked, (1, 2, 0))\n', (35856, 35884), True, 'import numpy as np\n'), ((35923, 35954), 'numpy.transpose', 'np.transpose', (['V_hist', '(1, 2, 0)'], {}), '(V_hist, (1, 2, 0))\n', (35935, 35954), True, 'import numpy as np\n'), ((36345, 36508), 'tifffile.imwrite', 'tifffile.imwrite', (['"""/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_before.tiff"""', 'V_before'], {'resolution': '(xRes, xRes, unitTiff)', 'compress': '(True)'}), "(\n '/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_before.tiff'\n , V_before, resolution=(xRes, xRes, unitTiff), compress=True)\n", (36361, 36508), False, 'import tifffile\n'), ((36592, 36752), 
'tifffile.imwrite', 'tifffile.imwrite', (['"""/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_after.tiff"""', 'V_after'], {'resolution': '(xRes, xRes, unitTiff)', 'compress': '(True)'}), "(\n '/home/facu/Phd_Private/Redaction/OpenSeg/PostProcessing/All/V_after.tiff',\n V_after, resolution=(xRes, xRes, unitTiff), compress=True)\n", (36608, 36752), False, 'import tifffile\n'), ((38232, 38272), 'time.gmtime', 'time.gmtime', (['(tocRandomize - ticRandomize)'], {}), '(tocRandomize - ticRandomize)\n', (38243, 38272), False, 'import time\n'), ((39167, 39205), 'time.gmtime', 'time.gmtime', (['(tocMakeMask - ticMakeMask)'], {}), '(tocMakeMask - ticMakeMask)\n', (39178, 39205), False, 'import time\n'), ((7828, 7859), 'numpy.transpose', 'np.transpose', (['V_hist', '(1, 2, 0)'], {}), '(V_hist, (1, 2, 0))\n', (7840, 7859), True, 'import numpy as np\n'), ((8193, 8226), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (8224, 8226), False, 'import multiprocessing\n'), ((8300, 8347), 'scipy.ndimage.binary_closing', 'ndimage.binary_closing', (['V_sliceMarker', 'SE_rod3D'], {}), '(V_sliceMarker, SE_rod3D)\n', (8322, 8347), False, 'from scipy import ndimage\n'), ((8798, 8831), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (8829, 8831), False, 'import multiprocessing\n'), ((9056, 9089), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (9087, 9089), False, 'import multiprocessing\n'), ((9170, 9233), 'scipy.ndimage.binary_opening', 'ndimage.binary_opening', (['V_sliceMarker_closed', 'SE_ball3D_opening'], {}), '(V_sliceMarker_closed, SE_ball3D_opening)\n', (9192, 9233), False, 'from scipy import ndimage\n'), ((9573, 9606), 'multiprocessing.current_process', 'multiprocessing.current_process', ([], {}), '()\n', (9604, 9606), False, 'import multiprocessing\n'), ((17389, 17416), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (17414, 
17416), False, 'import multiprocessing\n'), ((17823, 17850), 'joblib.delayed', 'delayed', (['parallelGapFilling'], {}), '(parallelGapFilling)\n', (17830, 17850), False, 'from joblib import Parallel, delayed\n'), ((22511, 22538), 'joblib.delayed', 'delayed', (['parallelGapFilling'], {}), '(parallelGapFilling)\n', (22518, 22538), False, 'from joblib import Parallel, delayed\n'), ((24745, 24769), 'joblib.delayed', 'delayed', (['compactifySlice'], {}), '(compactifySlice)\n', (24752, 24769), False, 'from joblib import Parallel, delayed\n'), ((25376, 25402), 'joblib.delayed', 'delayed', (['makePropertySlice'], {}), '(makePropertySlice)\n', (25383, 25402), False, 'from joblib import Parallel, delayed\n'), ((26576, 26636), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_fiberMap.tiff"""'], {}), "(commonPath, permutationPath, 'V_fiberMap.tiff')\n", (26588, 26636), False, 'import os\n'), ((26874, 26931), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_pores.tiff"""'], {}), "(commonPath, permutationPath, 'V_pores.tiff')\n", (26886, 26931), False, 'import os\n'), ((26997, 27054), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_perim.tiff"""'], {}), "(commonPath, permutationPath, 'V_perim.tiff')\n", (27009, 27054), False, 'import os\n'), ((27299, 27355), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_hist.tiff"""'], {}), "(commonPath, permutationPath, 'V_hist.tiff')\n", (27311, 27355), False, 'import os\n'), ((33621, 33648), 'joblib.delayed', 'delayed', (['parallelGapFilling'], {}), '(parallelGapFilling)\n', (33628, 33648), False, 'from joblib import Parallel, delayed\n'), ((35670, 35709), 'numpy.transpose', 'np.transpose', (['V_after_masked', '(1, 2, 0)'], {}), '(V_after_masked, (1, 2, 0))\n', (35682, 35709), True, 'import numpy as np\n'), ((4444, 4481), 'numpy.linspace', 'np.linspace', (['(-offsetX)', 'offsetX', 'sizeZ'], {}), '(-offsetX, offsetX, sizeZ)\n', (4455, 4481), 
True, 'import numpy as np\n'), ((4533, 4570), 'numpy.linspace', 'np.linspace', (['(-offsetY)', 'offsetY', 'sizeZ'], {}), '(-offsetY, offsetY, sizeZ)\n', (4544, 4570), True, 'import numpy as np\n'), ((27105, 27162), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_perim.tiff"""'], {}), "(commonPath, permutationPath, 'V_perim.tiff')\n", (27117, 27162), False, 'import os\n'), ((27406, 27462), 'os.path.join', 'os.path.join', (['commonPath', 'permutationPath', '"""V_hist.tiff"""'], {}), "(commonPath, permutationPath, 'V_hist.tiff')\n", (27418, 27462), False, 'import os\n'), ((27799, 27842), 'os.path.join', 'os.path.join', (['commonPath', '"""Permutation123/"""'], {}), "(commonPath, 'Permutation123/')\n", (27811, 27842), False, 'import os\n'), ((28157, 28200), 'os.path.join', 'os.path.join', (['commonPath', '"""Permutation123/"""'], {}), "(commonPath, 'Permutation123/')\n", (28169, 28200), False, 'import os\n'), ((32014, 32036), 'numpy.linalg.norm', 'np.linalg.norm', (['oriVec'], {}), '(oriVec)\n', (32028, 32036), True, 'import numpy as np\n'), ((33171, 33198), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (33196, 33198), False, 'import multiprocessing\n'), ((18813, 18844), 'numpy.dot', 'np.dot', (['oriVecSelf', 'oriVecOther'], {}), '(oriVecSelf, oriVecOther)\n', (18819, 18844), True, 'import numpy as np\n'), ((32557, 32588), 'numpy.dot', 'np.dot', (['oriVec', '[0.0, 0.0, 1.0]'], {}), '(oriVec, [0.0, 0.0, 1.0])\n', (32563, 32588), True, 'import numpy as np\n')] |
import cellpylib as cpl
import numpy as np
from matplotlib.colors import ListedColormap
def wireworld_rule(n, c, t):
    """Wireworld transition rule for a single cell.

    Cell states: 0 = empty, 1 = electron head, 2 = electron tail,
    3 = conductor.  Empty cells stay empty, a head decays to a tail,
    a tail reverts to a conductor, and a conductor turns into a head
    when exactly one or two of its Moore neighbours are heads.
    """
    state = n[1][1]
    if state == 3:  # conductor: look at the neighbourhood
        heads = np.count_nonzero(n == 1)
        return 1 if heads in (1, 2) else 3
    # Fixed transitions: empty -> empty, head -> tail, tail -> conductor.
    return {0: 0, 1: 2, 2: 3}.get(state)
# Initial configuration: one time step of a 2D Wireworld grid
# (13 rows x 24 columns).  Cell states: 0 = empty, 1 = electron head,
# 2 = electron tail, 3 = conductor.  The two conductor loops on the
# left carry circulating electrons feeding the wiring on the right
# (presumably a logic-gate demo -- confirm against the cellpylib docs).
cellular_automata = np.array([[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 1, 2, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 3, 3, 3, 2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0],
[0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
]])
# Evolve the automaton for 25 time steps, applying the Wireworld rule
# over the Moore (8-cell) neighbourhood of every cell.
cellular_automata = cpl.evolve2d(cellular_automata, timesteps=25,
                              apply_rule=wireworld_rule, neighbourhood="Moore")
# Animate the evolution; one colour per cell state
# (black = empty, blue = head, red = tail, yellow = conductor).
cpl.plot2d_animate(cellular_automata, show_grid=True, show_margin=False, scale=0.3,
                   colormap=ListedColormap(["black", "blue", "red", "yellow"]))
| [
"numpy.count_nonzero",
"cellpylib.evolve2d",
"numpy.array",
"matplotlib.colors.ListedColormap"
] | [((534, 1573), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0\n ], [0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0], [0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, \n 0, 0], [0, 0, 0, 3, 1, 2, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 3, \n 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, \n 3, 3, 3, 3, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, \n 3, 3, 0, 0, 0, 0], [0, 0, 0, 3, 3, 2, 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, \n 3, 0, 0, 0, 0, 0, 0], [0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, \n 3, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 1, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]]'], {}), '([[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0], [0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0], [0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 3, 3, 3, 3, 0, \n 0, 0, 0, 0, 0, 0], [0, 0, 0, 3, 1, 2, 3, 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, \n 2, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 1, 1, 1, 3, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 3, 0, 0, 3, 3, 3, 3, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 3, 3, 3, 3, 0, 0, 0, 0], [0, 0, 0, 3, 3, 2, 1, 3, 3, 3, 3, 0, 0, \n 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0], [0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 2, \n 1, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 3, 3, 3, 3, 3, 3, 3, 1, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 
0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]])\n', (542, 1573), True, 'import numpy as np\n'), ((1584, 1683), 'cellpylib.evolve2d', 'cpl.evolve2d', (['cellular_automata'], {'timesteps': '(25)', 'apply_rule': 'wireworld_rule', 'neighbourhood': '"""Moore"""'}), "(cellular_automata, timesteps=25, apply_rule=wireworld_rule,\n neighbourhood='Moore')\n", (1596, 1683), True, 'import cellpylib as cpl\n'), ((407, 431), 'numpy.count_nonzero', 'np.count_nonzero', (['(n == 1)'], {}), '(n == 1)\n', (423, 431), True, 'import numpy as np\n'), ((1826, 1876), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['black', 'blue', 'red', 'yellow']"], {}), "(['black', 'blue', 'red', 'yellow'])\n", (1840, 1876), False, 'from matplotlib.colors import ListedColormap\n')] |
import unittest
import aspecd.model
import lmfit
import numpy as np
import fitpy.dataset
class TestCalculatedDataset(unittest.TestCase):
    """Tests for the fitpy CalculatedDataset class."""

    def setUp(self):
        self.dataset = fitpy.dataset.CalculatedDataset()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_data_has_residual_property(self):
        has_residual = hasattr(self.dataset.data, 'residual')
        self.assertTrue(has_residual)

    def test_data_calculated_is_true(self):
        calculated = self.dataset.data.calculated
        self.assertTrue(calculated)

    def test_origdata_has_residual_property(self):
        has_residual = hasattr(self.dataset._origdata, 'residual')
        self.assertTrue(has_residual)

    def test_origdata_calculated_is_true(self):
        calculated = self.dataset._origdata.calculated
        self.assertTrue(calculated)

    def test_metadata_has_result_property(self):
        has_result = hasattr(self.dataset.metadata, 'result')
        self.assertTrue(has_result)
class TestCalculatedDatasetLHS(unittest.TestCase):
    """Tests for the fitpy CalculatedDatasetLHS class."""

    def setUp(self):
        self.dataset = fitpy.dataset.CalculatedDatasetLHS()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_data_has_residual_property(self):
        has_residual = hasattr(self.dataset.data, 'residual')
        self.assertTrue(has_residual)

    def test_data_calculated_is_true(self):
        calculated = self.dataset.data.calculated
        self.assertTrue(calculated)

    def test_origdata_has_residual_property(self):
        has_residual = hasattr(self.dataset._origdata, 'residual')
        self.assertTrue(has_residual)

    def test_origdata_calculated_is_true(self):
        calculated = self.dataset._origdata.calculated
        self.assertTrue(calculated)

    def test_metadata_has_result_property(self):
        has_lhs = hasattr(self.dataset.metadata, 'lhs')
        self.assertTrue(has_lhs)
class TestData(unittest.TestCase):
    """Tests for the fitpy Data class."""

    def setUp(self):
        self.data = fitpy.dataset.Data()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_has_residual_property(self):
        has_residual = hasattr(self.data, 'residual')
        self.assertTrue(has_residual)

    def test_set_residual_with_shape_unequal_data_shape_raises(self):
        # Residual and data need to share a shape; a mismatch shall raise.
        with self.assertRaisesRegex(
                ValueError, 'Shapes of data and residual need to match'):
            self.data.residual = np.zeros(5)

    def test_set_residual(self):
        residual = np.zeros(5)
        self.data.data = np.zeros(5)
        self.data.residual = residual
        self.assertListEqual(list(residual), list(self.data.residual))

    def test_residual_in_dict(self):
        self.assertIn('residual', self.data.to_dict())
class TestCalculatedDatasetMetadata(unittest.TestCase):
    """Tests for the fitpy CalculatedDatasetMetadata class."""

    def setUp(self):
        self.metadata = fitpy.dataset.CalculatedDatasetMetadata()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_has_model_property(self):
        has_model = hasattr(self.metadata, 'model')
        self.assertTrue(has_model)

    def test_has_data_property(self):
        has_data = hasattr(self.metadata, 'data')
        self.assertTrue(has_data)

    def test_has_result_property(self):
        has_result = hasattr(self.metadata, 'result')
        self.assertTrue(has_result)
class TestModel(unittest.TestCase):
    """Tests for the fitpy Model metadata class."""

    def setUp(self):
        self.metadata = fitpy.dataset.Model()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_has_type_property(self):
        has_type = hasattr(self.metadata, 'type')
        self.assertTrue(has_type)

    def test_has_parameters_property(self):
        has_parameters = hasattr(self.metadata, 'parameters')
        self.assertTrue(has_parameters)

    def test_from_model_sets_type(self):
        gaussian = aspecd.model.Gaussian()
        self.metadata.from_model(gaussian)
        self.assertEqual('aspecd.model.Gaussian', self.metadata.type)

    def test_from_model_sets_parameters(self):
        gaussian = aspecd.model.Gaussian()
        self.metadata.from_model(gaussian)
        self.assertDictEqual(gaussian.parameters, self.metadata.parameters)
class TestDataMetadata(unittest.TestCase):
    """Tests for the fitpy DataMetadata class."""

    def setUp(self):
        self.metadata = fitpy.dataset.DataMetadata()

    def test_instantiate_class(self):
        # Instantiation itself is exercised in setUp().
        pass

    def test_has_id_property(self):
        has_id = hasattr(self.metadata, 'id')
        self.assertTrue(has_id)

    def test_has_label_property(self):
        has_label = hasattr(self.metadata, 'label')
        self.assertTrue(has_label)

    def test_from_dataset_sets_id(self):
        source = fitpy.dataset.CalculatedDataset()
        source.id = 'foo'
        self.metadata.from_dataset(source)
        self.assertEqual(source.id, self.metadata.id)

    def test_from_dataset_sets_label(self):
        source = fitpy.dataset.CalculatedDataset()
        source.label = 'bar'
        self.metadata.from_dataset(source)
        self.assertEqual(source.label, self.metadata.label)
class TestResult(unittest.TestCase):
    """Tests for the Result metadata wrapping an lmfit MinimizerResult."""
    def setUp(self):
        self.metadata = fitpy.dataset.Result()
        self.result = lmfit.minimizer.MinimizerResult()
    def perform_fit(self):
        # Helper, not a test: runs a small, seeded least-squares fit of a
        # decaying sine wave so tests have a realistic MinimizerResult.
        p_true = lmfit.Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.46)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.032)
        def residual(pars, x, data=None):
            """Model a decaying sine wave and subtract data."""
            vals = pars.valuesdict()
            # Keep the phase shift within (-pi/2, pi/2] to avoid ambiguity.
            if abs(vals['shift']) > np.pi / 2:
                vals['shift'] = \
                    vals['shift'] - np.sign(vals['shift']) * np.pi
            model = vals['amp'] * np.sin(vals['shift'] + x / vals['period']) \
                * np.exp(-x * x * vals['decay'] * vals['decay'])
            if data is None:
                return model
            return model - data
        # Fixed seed makes the synthetic noise -- and thus the fit -- reproducible.
        np.random.seed(0)
        x = np.linspace(0.0, 250., 1001)
        noise = np.random.normal(scale=0.7215, size=x.size)
        data_ = residual(p_true, x) + noise
        # Start the fit from deliberately wrong initial values.
        fit_params = lmfit.Parameters()
        fit_params.add('amp', value=13.0)
        fit_params.add('period', value=2)
        fit_params.add('shift', value=0.0)
        fit_params.add('decay', value=0.02)
        self.result = lmfit.minimize(
            residual, fit_params, args=(x,), kws={'data': data_})
    def test_instantiate_class(self):
        pass
    def test_from_lmfit_minimizer_result_sets_attributes(self):
        # Maps lmfit attribute names (keys) to Result attribute names (values).
        self.perform_fit()
        self.metadata.from_lmfit_minimizer_result(self.result)
        mappings = {
            'params': 'parameters',
            'success': 'success',
            'errorbars': 'error_bars',
            'nfev': 'n_function_evaluations',
            'nvarys': 'n_variables',
            'nfree': 'degrees_of_freedom',
            'chisqr': 'chi_square',
            'redchi': 'reduced_chi_square',
            'aic': 'akaike_information_criterion',
            'bic': 'bayesian_information_criterion',
            'var_names': 'variable_names',
            'covar': 'covariance_matrix',
            'init_vals': 'initial_values',
            'message': 'message',
        }
        # Lists and arrays need element-wise comparison; scalars compare directly.
        for key, value in mappings.items():
            if isinstance(getattr(self.result, key), list):
                self.assertListEqual(list(getattr(self.result, key)),
                                     list(getattr(self.metadata, value)))
            elif isinstance(getattr(self.result, key), np.ndarray):
                self.assertListEqual(
                    list(getattr(self.result, key).flatten()),
                    list(getattr(self.metadata, value).flatten()))
            else:
                self.assertEqual(getattr(self.result, key),
                                 getattr(self.metadata, value))
    def test_to_dict_adds_value_to_parameters(self):
        # Every serialised parameter must carry its fitted value.
        self.perform_fit()
        self.metadata.from_lmfit_minimizer_result(self.result)
        dict_ = self.metadata.to_dict()
        for key in dict_['parameters'].keys():
            self.assertIn('value', dict_['parameters'][key])
class TestLHS(unittest.TestCase):
    """Tests for the LHS (Latin hypercube sampling) metadata container."""
    def setUp(self):
        self.metadata = fitpy.dataset.LHS()
        self.result = lmfit.minimizer.MinimizerResult()
    def perform_fit(self):
        # Helper, not a test: runs a small, seeded least-squares fit of a
        # decaying sine wave so tests have a realistic MinimizerResult.
        p_true = lmfit.Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.46)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.032)
        def residual(pars, x, data=None):
            """Model a decaying sine wave and subtract data."""
            vals = pars.valuesdict()
            # Keep the phase shift within (-pi/2, pi/2] to avoid ambiguity.
            if abs(vals['shift']) > np.pi / 2:
                vals['shift'] = \
                    vals['shift'] - np.sign(vals['shift']) * np.pi
            model = vals['amp'] * np.sin(vals['shift'] + x / vals['period']) \
                * np.exp(-x * x * vals['decay'] * vals['decay'])
            if data is None:
                return model
            return model - data
        # Fixed seed makes the synthetic noise -- and thus the fit -- reproducible.
        np.random.seed(0)
        x = np.linspace(0.0, 250., 1001)
        noise = np.random.normal(scale=0.7215, size=x.size)
        data_ = residual(p_true, x) + noise
        # Start the fit from deliberately wrong initial values.
        fit_params = lmfit.Parameters()
        fit_params.add('amp', value=13.0)
        fit_params.add('period', value=2)
        fit_params.add('shift', value=0.0)
        fit_params.add('decay', value=0.02)
        self.result = lmfit.minimize(
            residual, fit_params, args=(x,), kws={'data': data_})
    def test_instantiate_class(self):
        pass
    def test_has_samples_property(self):
        self.assertTrue(hasattr(self.metadata, 'samples'))
    def test_has_discrepancy_property(self):
        self.assertTrue(hasattr(self.metadata, 'discrepancy'))
    def test_has_results_property(self):
        self.assertTrue(hasattr(self.metadata, 'results'))
    def test_from_lmfit_minimizer_results_sets_results(self):
        # from_lmfit_minimizer_results takes a *list* of minimizer results;
        # each entry must be converted into a Result-like metadata object.
        self.perform_fit()
        self.metadata.from_lmfit_minimizer_results([self.result])
        mappings = {
            'params': 'parameters',
            'success': 'success',
            'errorbars': 'error_bars',
            'nfev': 'n_function_evaluations',
            'nvarys': 'n_variables',
            'nfree': 'degrees_of_freedom',
            'chisqr': 'chi_square',
            'redchi': 'reduced_chi_square',
            'aic': 'akaike_information_criterion',
            'bic': 'bayesian_information_criterion',
            'var_names': 'variable_names',
            'covar': 'covariance_matrix',
            'init_vals': 'initial_values',
            'message': 'message',
        }
        metadata = self.metadata.results[0]
        # Lists and arrays need element-wise comparison; scalars compare directly.
        for key, value in mappings.items():
            if isinstance(getattr(self.result, key), list):
                self.assertListEqual(list(getattr(self.result, key)),
                                     list(getattr(metadata, value)))
            elif isinstance(getattr(self.result, key), np.ndarray):
                self.assertListEqual(
                    list(getattr(self.result, key).flatten()),
                    list(getattr(metadata, value).flatten()))
            else:
                self.assertEqual(getattr(self.result, key),
                                 getattr(metadata, value))
| [
"lmfit.minimizer.MinimizerResult",
"numpy.random.seed",
"numpy.zeros",
"numpy.sign",
"lmfit.minimize",
"numpy.sin",
"numpy.exp",
"numpy.linspace",
"numpy.random.normal",
"lmfit.Parameters"
] | [((2111, 2122), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2119, 2122), True, 'import numpy as np\n'), ((2152, 2163), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2160, 2163), True, 'import numpy as np\n'), ((4533, 4566), 'lmfit.minimizer.MinimizerResult', 'lmfit.minimizer.MinimizerResult', ([], {}), '()\n', (4564, 4566), False, 'import lmfit\n'), ((4612, 4630), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (4628, 4630), False, 'import lmfit\n'), ((5328, 5345), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5342, 5345), True, 'import numpy as np\n'), ((5358, 5387), 'numpy.linspace', 'np.linspace', (['(0.0)', '(250.0)', '(1001)'], {}), '(0.0, 250.0, 1001)\n', (5369, 5387), True, 'import numpy as np\n'), ((5403, 5446), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.7215)', 'size': 'x.size'}), '(scale=0.7215, size=x.size)\n', (5419, 5446), True, 'import numpy as np\n'), ((5513, 5531), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (5529, 5531), False, 'import lmfit\n'), ((5726, 5794), 'lmfit.minimize', 'lmfit.minimize', (['residual', 'fit_params'], {'args': '(x,)', 'kws': "{'data': data_}"}), "(residual, fit_params, args=(x,), kws={'data': data_})\n", (5740, 5794), False, 'import lmfit\n'), ((7668, 7701), 'lmfit.minimizer.MinimizerResult', 'lmfit.minimizer.MinimizerResult', ([], {}), '()\n', (7699, 7701), False, 'import lmfit\n'), ((7747, 7765), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (7763, 7765), False, 'import lmfit\n'), ((8463, 8480), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (8477, 8480), True, 'import numpy as np\n'), ((8493, 8522), 'numpy.linspace', 'np.linspace', (['(0.0)', '(250.0)', '(1001)'], {}), '(0.0, 250.0, 1001)\n', (8504, 8522), True, 'import numpy as np\n'), ((8538, 8581), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.7215)', 'size': 'x.size'}), '(scale=0.7215, size=x.size)\n', (8554, 8581), True, 'import numpy as np\n'), 
((8648, 8666), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (8664, 8666), False, 'import lmfit\n'), ((8861, 8929), 'lmfit.minimize', 'lmfit.minimize', (['residual', 'fit_params'], {'args': '(x,)', 'kws': "{'data': data_}"}), "(residual, fit_params, args=(x,), kws={'data': data_})\n", (8875, 8929), False, 'import lmfit\n'), ((2040, 2051), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2048, 2051), True, 'import numpy as np\n'), ((2198, 2209), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2206, 2209), True, 'import numpy as np\n'), ((5182, 5228), 'numpy.exp', 'np.exp', (["(-x * x * vals['decay'] * vals['decay'])"], {}), "(-x * x * vals['decay'] * vals['decay'])\n", (5188, 5228), True, 'import numpy as np\n'), ((8317, 8363), 'numpy.exp', 'np.exp', (["(-x * x * vals['decay'] * vals['decay'])"], {}), "(-x * x * vals['decay'] * vals['decay'])\n", (8323, 8363), True, 'import numpy as np\n'), ((5119, 5161), 'numpy.sin', 'np.sin', (["(vals['shift'] + x / vals['period'])"], {}), "(vals['shift'] + x / vals['period'])\n", (5125, 5161), True, 'import numpy as np\n'), ((8254, 8296), 'numpy.sin', 'np.sin', (["(vals['shift'] + x / vals['period'])"], {}), "(vals['shift'] + x / vals['period'])\n", (8260, 8296), True, 'import numpy as np\n'), ((5054, 5076), 'numpy.sign', 'np.sign', (["vals['shift']"], {}), "(vals['shift'])\n", (5061, 5076), True, 'import numpy as np\n'), ((8189, 8211), 'numpy.sign', 'np.sign', (["vals['shift']"], {}), "(vals['shift'])\n", (8196, 8211), True, 'import numpy as np\n')] |
from functools import lru_cache
import cv2
import numpy as np
def visualize_instances(
    imask, bg_color=255, boundaries_color=None, boundaries_width=1, boundaries_alpha=0.8
):
    """Render an instance-index mask as an RGB uint8 image.

    Parameters
    ----------
    imask : np.ndarray
        Integer map of instance ids; id 0 is treated as background.
    bg_color : int or sequence, optional
        Colour written into palette slot 0 (background); ``None`` keeps
        the palette's own background colour.
    boundaries_color : sequence, optional
        When given, instance boundaries are tinted with this colour.
    boundaries_width : int
        Erosion iterations used when computing the boundary band.
    boundaries_alpha : float
        Blend weight of the boundary colour over the plain rendering.

    Returns
    -------
    np.ndarray
        uint8 colour image with the same spatial shape as ``imask``.
    """
    num_objects = imask.max() + 1
    # BUG FIX: get_palette() is memoised with lru_cache, so the array it
    # returns is shared between all callers.  Writing bg_color into it
    # below poisoned the cached palette for everyone else; work on a copy.
    palette = get_palette(num_objects).copy()
    if bg_color is not None:
        palette[0] = bg_color
    result = palette[imask].astype(np.uint8)
    if boundaries_color is not None:
        boundaries_mask = get_boundaries(imask, boundaries_width=boundaries_width)
        tresult = result.astype('float32')
        tresult[boundaries_mask] = boundaries_color
        tresult = tresult * boundaries_alpha + (1 - boundaries_alpha) * result
        result = tresult.astype(np.uint8)
    return result
@lru_cache(maxsize=16)
def get_palette(num_cls):
    """Return a ``(num_cls, 3)`` colour palette (PASCAL-VOC bit scheme).

    BUG FIX: an unconditional ``return np.array([...])`` at the top made
    the palette computation below unreachable, ignored ``num_cls`` and
    capped the palette at four colours (indexing with more than four
    instance ids raised IndexError).  The debug stub has been removed.

    Note: results are memoised via lru_cache, so callers must not mutate
    the returned array -- copy it first.

    Parameters
    ----------
    num_cls : int
        Number of palette entries to generate.

    Returns
    -------
    np.ndarray
        int32 array of shape ``(num_cls, 3)`` with one RGB row per class.
    """
    palette = np.zeros(3 * num_cls, dtype='int32')
    for j in range(0, num_cls):
        # Spread the bits of the label index over the three channels,
        # three bits at a time, from the most significant colour bit down.
        lab = j
        i = 0
        while lab > 0:
            palette[j * 3 + 0] |= ((lab >> 0) & 1) << (7 - i)
            palette[j * 3 + 1] |= ((lab >> 1) & 1) << (7 - i)
            palette[j * 3 + 2] |= ((lab >> 2) & 1) << (7 - i)
            i = i + 1
            lab >>= 3
    return palette.reshape((-1, 3))
def visualize_mask(mask, num_cls):
    """Map class labels to colours; label -1 is folded into background.

    Note: ``mask`` is modified in place (-1 entries become 0).
    """
    mask[mask == -1] = 0
    colours = get_palette(num_cls)
    return colours[mask].astype(np.uint8)
def visualize_proposals(proposals_info, point_color=(255, 0, 0), point_radius=1):
    """Draw candidate click positions on top of a colourised probability map."""
    prob_map, colors, candidates = proposals_info
    canvas = draw_probmap(prob_map)
    for row, col in candidates:
        # Candidates are (row, col); cv2 wants (x, y) = (col, row).
        canvas = cv2.circle(canvas, (col, row), point_radius, point_color, -1)
    return canvas
def draw_probmap(x):
    """Colourise a probability map in [0, 1] with OpenCV's HOT colormap."""
    scaled = (x * 255).astype(np.uint8)
    return cv2.applyColorMap(scaled, cv2.COLORMAP_HOT)
def draw_points(image, points, color, radius=3):
    """Return a copy of ``image`` with a filled circle at every (row, col) point."""
    canvas = image.copy()
    for point in points:
        center = (int(point[1]), int(point[0]))  # (row, col) -> (x, y)
        canvas = cv2.circle(canvas, center, radius, color, -1)
    return canvas
def draw_instance_map(x, palette=None):
    """Colourise an instance map, generating a palette when none is given."""
    if palette is None:
        palette = get_palette(x.max() + 1)
    return palette[x].astype(np.uint8)
def blend_mask(image, mask, alpha=0.6):
    """Alpha-blend a colourised instance ``mask`` over ``image``."""
    if mask.min() == -1:
        # Shift labels so -1 ("ignore") maps onto background colour 0.
        mask = mask.copy() + 1
    overlay = draw_instance_map(mask)
    blended = image * (1 - alpha) + alpha * overlay
    return blended.astype(np.uint8)
def get_boundaries(instances_masks, boundaries_width=1):
    """Boolean mask marking the rim pixels of every object instance.

    The rim of an object is the set of its pixels removed by
    ``boundaries_width`` erosions with a 3x3 elliptical kernel.
    """
    rows, cols = instances_masks.shape[0], instances_masks.shape[1]
    boundaries = np.zeros((rows, cols), dtype='bool')
    # The kernel is identical for every object, so build it once.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    for obj_id in np.unique(instances_masks.flatten()):
        if obj_id == 0:
            continue  # 0 is background
        obj_mask = instances_masks == obj_id
        eroded = cv2.erode(
            obj_mask.astype(np.uint8), kernel, iterations=boundaries_width
        ).astype('bool')
        rim = np.logical_xor(obj_mask, np.logical_and(eroded, obj_mask))
        boundaries = np.logical_or(boundaries, rim)
    return boundaries
def draw_with_blend_and_clicks(
    img,
    mask=None,
    alpha=0.6,
    clicks_list=None,
    pos_color=(0, 255, 0),
    neg_color=(255, 0, 0),
    radius=4,
    palette=None,
):
    """Overlay a segmentation mask and click markers on an image.

    Parameters
    ----------
    img : np.ndarray
        (H, W, 3) uint8 image.
    mask : np.ndarray, optional
        Integer label map; label 0 is treated as background.
    alpha : float
        Blend weight of the mask colours inside labelled regions.
    clicks_list : list, optional
        Click objects exposing ``coords`` and ``is_positive`` attributes.
    pos_color, neg_color : tuple
        Colours used for positive / negative click markers.
    radius : int
        Click marker radius in pixels.
    palette : array_like, optional
        One colour per label; generated from the mask when omitted.

    Returns
    -------
    np.ndarray
        uint8 image with the overlays applied (input is not modified).
    """
    result = img.copy()
    if mask is not None:
        # BUG FIX: the original `if not palette:` raises "truth value of an
        # array is ambiguous" whenever a numpy palette is passed in; test
        # explicitly for the None default instead.
        if palette is None:
            palette = get_palette(np.max(mask) + 1)
        palette = np.array(palette)
        rgb_mask = palette[mask.astype(np.uint8)]
        mask_region = (mask > 0).astype(np.uint8)
        # Keep original pixels outside the mask; blend colours inside it.
        result = (
            result * (1 - mask_region[:, :, np.newaxis])
            + (1 - alpha) * mask_region[:, :, np.newaxis] * result
            + alpha * rgb_mask
        )
        result = result.astype(np.uint8)
    if clicks_list is not None and len(clicks_list) > 0:
        pos_points = [click.coords for click in clicks_list if click.is_positive]
        neg_points = [click.coords for click in clicks_list if not click.is_positive]
        result = draw_points(result, pos_points, pos_color, radius=radius)
        result = draw_points(result, neg_points, neg_color, radius=radius)
    return result
| [
"cv2.circle",
"numpy.logical_and",
"cv2.getStructuringElement",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.logical_or",
"functools.lru_cache"
] | [((718, 739), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(16)'}), '(maxsize=16)\n', (727, 739), False, 'from functools import lru_cache\n'), ((777, 837), 'numpy.array', 'np.array', (['[[0, 0, 0], [128, 0, 0], [0, 128, 0], [0, 0, 128]]'], {}), '([[0, 0, 0], [128, 0, 0], [0, 128, 0], [0, 0, 128]])\n', (785, 837), True, 'import numpy as np\n'), ((852, 888), 'numpy.zeros', 'np.zeros', (['(3 * num_cls)'], {'dtype': '"""int32"""'}), "(3 * num_cls, dtype='int32')\n", (860, 888), True, 'import numpy as np\n'), ((2470, 2546), 'numpy.zeros', 'np.zeros', (['(instances_masks.shape[0], instances_masks.shape[1])'], {'dtype': '"""bool"""'}), "((instances_masks.shape[0], instances_masks.shape[1]), dtype='bool')\n", (2478, 2546), True, 'import numpy as np\n'), ((1619, 1682), 'cv2.circle', 'cv2.circle', (['proposal_map', '(y, x)', 'point_radius', 'point_color', '(-1)'], {}), '(proposal_map, (y, x), point_radius, point_color, -1)\n', (1629, 1682), False, 'import cv2\n'), ((2726, 2778), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv2.MORPH_ELLIPSE, (3, 3))\n', (2751, 2778), False, 'import cv2\n'), ((3019, 3058), 'numpy.logical_or', 'np.logical_or', (['boundaries', 'obj_boundary'], {}), '(boundaries, obj_boundary)\n', (3032, 3058), True, 'import numpy as np\n'), ((3409, 3426), 'numpy.array', 'np.array', (['palette'], {}), '(palette)\n', (3417, 3426), True, 'import numpy as np\n'), ((2960, 2996), 'numpy.logical_and', 'np.logical_and', (['inner_mask', 'obj_mask'], {}), '(inner_mask, obj_mask)\n', (2974, 2996), True, 'import numpy as np\n'), ((3373, 3385), 'numpy.max', 'np.max', (['mask'], {}), '(mask)\n', (3379, 3385), True, 'import numpy as np\n')] |
import argparse
import copy
import random
from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey
import rom_generator.background as background
import numpy
from rom_generator.mazes.mazeGen import run, getList
import rom_generator.spriteSheetManager as manager
index = 0
class pair():
    """Lightweight record of a grid position plus a BFS step counter."""
    def __init__(self, aa, bb, cc):
        # aa/bb are the grid coordinates, cc the step counter.
        self.x, self.y, self.counter = aa, bb, cc
def dfs(x, y, counter, mazeArray):
    """Flood-fill ``mazeArray`` with step counts from cell (0, 0).

    Despite the name this is a breadth-first search: cells are expanded in
    FIFO order, so each reachable 0-cell receives its shortest distance
    from the origin (the origin itself gets 1).  Cells holding -1 (walls)
    are never entered.

    The ``x``, ``y`` and ``counter`` arguments are kept for interface
    compatibility but are ignored, exactly as before: the walk always
    starts at (0, 0) with counter 1.

    Parameters
    ----------
    mazeArray : list[list[int]]
        Rectangular grid, mutated in place.

    Fixes over the original implementation:
    * ``collections.deque`` replaces ``list.pop(0)``, which was O(n) per pop.
    * A cell could be enqueued twice before being marked; the old code then
      overwrote its distance with the later (larger) counter.  Already-marked
      cells are now skipped, so each keeps its first (shortest) distance.
    """
    from collections import deque  # local import keeps this change self-contained
    dx = [0, 0, 1, -1]
    dy = [1, -1, 0, 0]
    queue = deque()
    queue.append((0, 0, 1))
    while queue:
        x, y, counter = queue.popleft()
        if mazeArray[x][y] != 0:
            # Already assigned a (shorter) distance, or a wall: skip.
            continue
        mazeArray[x][y] = counter
        for i in range(4):
            xx = x + dx[i]
            yy = y + dy[i]
            if 0 <= xx < len(mazeArray) and 0 <= yy < len(mazeArray[0]) \
                    and mazeArray[xx][yy] == 0:
                queue.append((xx, yy, counter + 1))
def generateKruskalMaze(size_x, size_y, num_lock, keySprite, lockSprite):
    """Build a maze scene with key/lock pairs for a GB Studio project.

    Runs the Kruskal maze generator, rasterises the maze into background
    tiles and collision data, registers the background with the sprite
    sheet manager and scatters ``num_lock`` key/lock actor pairs on open
    cells.  Increments the module-global ``index`` used to give each
    generated scene a unique name.

    Returns
    -------
    list
        ``[nameBackground, a_scene]`` -- the registered background name and
        the scene dictionary.
    """
    # Add a background image
    #default_bkg = makeBackground("placeholder.png", "placeholder")
    #project.backgrounds.append(default_bkg)
    #a_scene = makeScene("Scene", default_bkg)
    #project.scenes.append(a_scene)
    #size_x, size_y = (8, 8)
    maze_tile_names = ["GreenBlock8.png","MazeBlock8.png"]
    maze_tile_list = background.getTileList(maze_tile_names)
    # Each maze cell occupies a 3x3 tile block on the background.
    maze_tile_array = [[0 for n in range(size_y * 3)] for m in range(size_x * 3)]
    maze_collisions = [[0 for n in range(size_y * 3)] for m in range(size_x * 3)]
    def addBackgroundTile(tile_num, x, y):
        # Place a wall tile and make it solid in the collision map.
        #print(x,y)
        maze_tile_array[x][y] = tile_num
        maze_collisions[x][y] = 1
        return maze_tile_array
    #print(maze_tile_list)
    maze_wall_tile = maze_tile_names.index("MazeBlock8.png")
    run(size_x)
    array = getList()
    #print("lengt " + str(len(array)))
    mazeArray = [[0 for a in range(size_y * 3)] for b in range(size_x * 3)]
    sizey = size_y * 2
    sizex = size_x * 3
    #print("SETTING THE TILES")
    counter = 0
    # Translate the generator's cell walls (temp.ar flags) into -1 markers
    # in mazeArray and wall tiles on the background.
    for i in range(len(array)):
        for j in range(len(array)):
            #print("Index " + str(i) + " " + str(j))
            temp = array[i][j]
            if temp.ar[1] == False:
                mazeArray[2*i+1][3*j+1] = -1
                mazeArray[2*i+1][3*j] = -1
                addBackgroundTile(maze_wall_tile, 2 * i + 1, 3 * j + 1)
                addBackgroundTile(maze_wall_tile, 2 * i + 1, 3 * j)
            if temp.ar[0] == False:
                mazeArray[2*i][3*j+2] = -1
                addBackgroundTile(maze_wall_tile, 2 * i, 3 * j + 2)
                mazeArray[2*i+1][3*j+2] = -1
                addBackgroundTile(maze_wall_tile, 2 * i + 1, 3 * j + 2)
            #print(counter)
            counter += 1
    # Fill mazeArray with BFS distances from the entrance; -1 cells stay walls.
    dfs(0, 0, 1, mazeArray) #this is bfs now
    #print("Maze is")
    #print(mazeArray)
    flat = numpy.array(mazeArray).flatten(order='C').tolist()
    # for y in range(numpy.array(mazeArray).shape[1]):
    #     print()
    #     for x in range(numpy.array(mazeArray).shape[0]):
    #         print(flat[x + (y * numpy.array(mazeArray).shape[0])], end='')
    # print()
    global index
    maze_background_image_path = background.generateBackgroundImageFromTiles(maze_tile_array, maze_tile_list)
    maze_background = makeBackground(maze_background_image_path, "maze_background")
    nameBackground = "maze_background" + str(index)
    manager.addBackgroundSpriteSheet(nameBackground, maze_background)
    manager.addInConnections(nameBackground, [[0, 0]])
    manager.addOutConnections(nameBackground, [[sizex - 2, sizey - 2]])
    index+=1
    a_scene = makeScene(f"Scene" + str(index), maze_background)
    #flat_maze_collisions = numpy.rot90(numpy.array(maze_collisions), 2, axes=(0,1)).flatten(order='C').tolist()
    flat_maze_collisions = numpy.array(maze_collisions).flatten(order='C').tolist()
    # for y in range(numpy.array(maze_collisions).shape[1]):
    #     print()
    #     for x in range(numpy.array(maze_collisions).shape[0]):
    #         print(flat_maze_collisions[x + (y * numpy.array(maze_collisions).shape[0])], end='')
    # print()
    makeCol(flat_maze_collisions, a_scene)
    #print(maze_background)
    #print(a_scene)
    sizex -= 1
    sizey -=1
    # Drop num_lock key/lock pairs on random open cells; the pair is swapped
    # so the key always lies closer to the entrance (smaller BFS distance)
    # than its lock.
    for i in range(num_lock):
        intx = random.randint(0, sizex)
        inty = random.randint(0, sizey)
        while(mazeArray[inty][intx] == -1 or (intx + 1 > sizex and mazeArray[inty][intx+1] == -1)):
            intx = random.randint(0, sizex)
            inty = random.randint(0, sizey)
        intx2 = random.randint(0, sizex)
        inty2 = random.randint(0, sizey)
        while(mazeArray[inty2][intx2] == -1 or (intx2+1>sizex and mazeArray[inty2][intx2+1]==-1)):
            intx2 = random.randint(0, sizex)
            inty2 = random.randint(0, sizey)
        if(mazeArray[inty][intx] > mazeArray[inty2][intx2]):
            inttemp = intx
            intx = intx2
            intx2 = inttemp
            inttemp = inty
            inty = inty2
            inty2 = inttemp
        # print("coords")
        # print(intx)
        # print(inty)
        # print(intx2)
        # print(inty2)
        # print("MAZE DEPTH COMP")
        # print(mazeArray[inty][intx])
        # print(mazeArray[inty2][intx2])
        a_scene["actors"].append(makeKey(keySprite, intx, inty))
        a_scene["actors"].append(makeLock(lockSprite, intx2, inty2))
    return [nameBackground, a_scene]
def generatemaze():
    """Build a complete project containing one hand-authored 19x19 maze.

    Unlike :func:`generateKruskalMaze`, the wall layout here is a fixed,
    hard-coded list of tile coordinates.  Returns the assembled project
    with background, collisions, player sprite sheet, music and start
    scene configured.
    """
    project = makeBasicProject()
    # Add a background image
    #default_bkg = makeBackground("placeholder.png", "placeholder")
    #project.backgrounds.append(default_bkg)
    #a_scene = makeScene("Scene", default_bkg)
    #project.scenes.append(a_scene)
    size_x, size_y = (19, 19)
    maze_tile_names = ["GreenBlock8.png","MazeBlock8.png"]
    maze_tile_list = background.getTileList(maze_tile_names)
    maze_tile_array = [[0 for n in range(size_y)] for m in range(size_x) ]
    maze_collisions = [[0 for n in range(size_y)] for m in range(size_x)]
    def addBackgroundTile(tile_num, x, y):
        # Place a wall tile and make it solid in the collision map.
        #print(x,y)
        maze_tile_array[x][y] = tile_num
        maze_collisions[x][y] = 1
        return maze_tile_array
    #print(maze_tile_list)
    maze_wall_tile = maze_tile_names.index("MazeBlock8.png")
    # Fixed (x, y) positions of every wall tile in the 19x19 layout.
    tile_placement_list = [(1, 2), (1, 9), (1, 16),(2, 2),(2, 9),(2, 10),(2, 11),(2, 12),(2, 13),(2, 14),(2, 16),(3, 6),(3, 9),(3, 16),(4, 6),(4, 16),(5, 1),(5, 2),(5, 3),(5, 4),(5, 5),(5, 6),(5, 16),(6, 2),(6, 6),(6, 10),(6, 11),(6, 12),(6, 13), (6, 14), (6, 15), (6, 16), (7, 2), (7, 6), (7, 16), (8, 2), (8, 12), (8, 13), (8, 14), (8, 16), (9, 8), (9, 10), (9, 16), (10, 4), (10, 5), (10, 6), (10, 7), (10, 8), (10, 10), (10, 16), (11, 4), (11, 8), (11, 10), (11, 13), (11, 14), (11, 15), (11, 16), (12, 4), (12, 7), (12, 8), (12, 16), (13, 4), (13, 12), (13, 16), (14, 4), (14, 12), (14, 16), (15, 1), (15, 2), (15, 3), (15, 4), (15, 12), (15, 16), (16, 12), (16, 13), (16, 14), (16, 15), (16, 16)]
    for x,y in tile_placement_list:
        addBackgroundTile(maze_wall_tile, x, y)
    maze_background_image_path = background.generateBackgroundImageFromTiles(maze_tile_array, maze_tile_list)
    maze_background = makeBackground(maze_background_image_path, "maze_background")
    project.backgrounds.append(maze_background)
    a_scene = makeScene(f"Scene", maze_background)
    #flat_maze_collisions = numpy.rot90(numpy.array(maze_collisions), 2, axes=(0,1)).flatten(order='C').tolist()
    flat_maze_collisions = numpy.array(maze_collisions).flatten(order='C').tolist()
    # Debug dump of the collision map to stdout.
    for y in range(numpy.array(maze_collisions).shape[1]):
        print()
        for x in range(numpy.array(maze_collisions).shape[0]):
            print(flat_maze_collisions[x + (y * numpy.array(maze_collisions).shape[0])], end='')
    print()
    makeCol(flat_maze_collisions, a_scene)
    print(maze_background)
    project.scenes.append(a_scene)
    print(a_scene)
    player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
    project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
    # Add some music
    project.music.append(makeMusic("template", "template.mod"))
    # Set the starting scene
    project.settings["startSceneId"] = project.scenes[0]["id"]
    return project
# Utilities
class bcolors:
    """ANSI escape sequences for coloured terminal output."""
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
### Run the generator
if __name__ == '__main__':
    # Command-line entry point: build a ROM project containing one
    # Kruskal-generated maze and write it to the destination folder.
    parser = argparse.ArgumentParser(description="Generate a Game Boy ROM via a GB Studio project file.")
    parser.add_argument('--destination', '-d', type=str, help="destination folder name", default="../gbprojects/projects_maze2/")
    parser.add_argument('--assets', '-a', type=str, help="asset folder name", default="assets/")
    args = parser.parse_args()
    initializeGenerator()
    project = makeBasicProject()
    # Sprites used for the key and lock actors inside the maze.
    a_rock_sprite = addSpriteSheet(project, "rock.png", "rock", "static")
    doorway_sprite = addSpriteSheet(project, "tower.png", "tower", "static")
    # 8x8 maze with two key/lock pairs; returns [background name, scene].
    l = generateKruskalMaze(8, 8, 2, a_rock_sprite, doorway_sprite)
    project.scenes.append(l[1])
    manager.submitBackgroundSpriteSheets(project)
    player_sprite_sheet = addSpriteSheet(project, "actor_animated.png", "actor_animated", "actor_animated")
    project.settings["playerSpriteSheetId"] = player_sprite_sheet["id"]
    # Add some music
    project.music.append(makeMusic("template", "template.mod"))
    # Set the starting scene
    project.settings["startSceneId"] = project.scenes[0]["id"]
    writeProjectToDisk(project, output_path = args.destination)
    if args.destination == "../gbprojects/projects/":
        print(f"{bcolors.WARNING}NOTE: Used default output directory, change with the -d flag{bcolors.ENDC}")
        print(f"{bcolors.OKBLUE}See generate.py --help for more options{bcolors.ENDC}")
| [
"rom_generator.generator.makeBackground",
"rom_generator.generator.makeMusic",
"argparse.ArgumentParser",
"rom_generator.generator.writeProjectToDisk",
"random.randint",
"rom_generator.background.getTileList",
"rom_generator.mazes.mazeGen.run",
"rom_generator.generator.makeKey",
"rom_generator.backg... | [((1560, 1599), 'rom_generator.background.getTileList', 'background.getTileList', (['maze_tile_names'], {}), '(maze_tile_names)\n', (1582, 1599), True, 'import rom_generator.background as background\n'), ((2027, 2038), 'rom_generator.mazes.mazeGen.run', 'run', (['size_x'], {}), '(size_x)\n', (2030, 2038), False, 'from rom_generator.mazes.mazeGen import run, getList\n'), ((2052, 2061), 'rom_generator.mazes.mazeGen.getList', 'getList', ([], {}), '()\n', (2059, 2061), False, 'from rom_generator.mazes.mazeGen import run, getList\n'), ((3432, 3508), 'rom_generator.background.generateBackgroundImageFromTiles', 'background.generateBackgroundImageFromTiles', (['maze_tile_array', 'maze_tile_list'], {}), '(maze_tile_array, maze_tile_list)\n', (3475, 3508), True, 'import rom_generator.background as background\n'), ((3531, 3592), 'rom_generator.generator.makeBackground', 'makeBackground', (['maze_background_image_path', '"""maze_background"""'], {}), "(maze_background_image_path, 'maze_background')\n", (3545, 3592), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((3649, 3714), 'rom_generator.spriteSheetManager.addBackgroundSpriteSheet', 'manager.addBackgroundSpriteSheet', (['nameBackground', 'maze_background'], {}), '(nameBackground, maze_background)\n', (3681, 3714), True, 'import rom_generator.spriteSheetManager as manager\n'), ((3719, 3769), 'rom_generator.spriteSheetManager.addInConnections', 'manager.addInConnections', (['nameBackground', '[[0, 0]]'], {}), '(nameBackground, [[0, 0]])\n', (3743, 3769), True, 'import rom_generator.spriteSheetManager as manager\n'), ((3774, 3841), 'rom_generator.spriteSheetManager.addOutConnections', 'manager.addOutConnections', (['nameBackground', '[[sizex - 2, sizey - 2]]'], {}), 
'(nameBackground, [[sizex - 2, sizey - 2]])\n', (3799, 3841), True, 'import rom_generator.spriteSheetManager as manager\n'), ((4379, 4417), 'rom_generator.generator.makeCol', 'makeCol', (['flat_maze_collisions', 'a_scene'], {}), '(flat_maze_collisions, a_scene)\n', (4386, 4417), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((5747, 5765), 'rom_generator.generator.makeBasicProject', 'makeBasicProject', ([], {}), '()\n', (5763, 5765), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((6105, 6144), 'rom_generator.background.getTileList', 'background.getTileList', (['maze_tile_names'], {}), '(maze_tile_names)\n', (6127, 6144), True, 'import rom_generator.background as background\n'), ((7373, 7449), 'rom_generator.background.generateBackgroundImageFromTiles', 'background.generateBackgroundImageFromTiles', (['maze_tile_array', 'maze_tile_list'], {}), '(maze_tile_array, maze_tile_list)\n', (7416, 7449), True, 'import rom_generator.background as background\n'), ((7472, 7533), 'rom_generator.generator.makeBackground', 'makeBackground', (['maze_background_image_path', '"""maze_background"""'], {}), "(maze_background_image_path, 'maze_background')\n", (7486, 7533), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((7596, 7632), 'rom_generator.generator.makeScene', 'makeScene', (['f"""Scene"""', 
'maze_background'], {}), "(f'Scene', maze_background)\n", (7605, 7632), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((8083, 8121), 'rom_generator.generator.makeCol', 'makeCol', (['flat_maze_collisions', 'a_scene'], {}), '(flat_maze_collisions, a_scene)\n', (8090, 8121), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((8230, 8315), 'rom_generator.generator.addSpriteSheet', 'addSpriteSheet', (['project', '"""actor_animated.png"""', '"""actor_animated"""', '"""actor_animated"""'], {}), "(project, 'actor_animated.png', 'actor_animated',\n 'actor_animated')\n", (8244, 8315), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((8861, 8958), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate a Game Boy ROM via a GB Studio project file."""'}), "(description=\n 'Generate a Game Boy ROM via a GB Studio project file.')\n", (8884, 8958), False, 'import argparse\n'), ((9216, 9237), 'rom_generator.generator.initializeGenerator', 'initializeGenerator', ([], {}), '()\n', (9235, 9237), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9252, 9270), 
'rom_generator.generator.makeBasicProject', 'makeBasicProject', ([], {}), '()\n', (9268, 9270), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9291, 9344), 'rom_generator.generator.addSpriteSheet', 'addSpriteSheet', (['project', '"""rock.png"""', '"""rock"""', '"""static"""'], {}), "(project, 'rock.png', 'rock', 'static')\n", (9305, 9344), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9366, 9421), 'rom_generator.generator.addSpriteSheet', 'addSpriteSheet', (['project', '"""tower.png"""', '"""tower"""', '"""static"""'], {}), "(project, 'tower.png', 'tower', 'static')\n", (9380, 9421), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9526, 9571), 'rom_generator.spriteSheetManager.submitBackgroundSpriteSheets', 'manager.submitBackgroundSpriteSheets', (['project'], {}), '(project)\n', (9562, 9571), True, 'import rom_generator.spriteSheetManager as manager\n'), ((9598, 9683), 'rom_generator.generator.addSpriteSheet', 'addSpriteSheet', (['project', '"""actor_animated.png"""', '"""actor_animated"""', '"""actor_animated"""'], {}), "(project, 'actor_animated.png', 'actor_animated',\n 'actor_animated')\n", (9612, 9683), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, 
reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9936, 9993), 'rom_generator.generator.writeProjectToDisk', 'writeProjectToDisk', (['project'], {'output_path': 'args.destination'}), '(project, output_path=args.destination)\n', (9954, 9993), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((4541, 4565), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (4555, 4565), False, 'import random\n'), ((4581, 4605), 'random.randint', 'random.randint', (['(0)', 'sizey'], {}), '(0, sizey)\n', (4595, 4605), False, 'import random\n'), ((4811, 4835), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (4825, 4835), False, 'import random\n'), ((4852, 4876), 'random.randint', 'random.randint', (['(0)', 'sizey'], {}), '(0, sizey)\n', (4866, 4876), False, 'import random\n'), ((8431, 8468), 'rom_generator.generator.makeMusic', 'makeMusic', (['"""template"""', '"""template.mod"""'], {}), "('template', 'template.mod')\n", (8440, 8468), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((9799, 9836), 'rom_generator.generator.makeMusic', 'makeMusic', (['"""template"""', '"""template.mod"""'], {}), "('template', 'template.mod')\n", (9808, 9836), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((4725, 4749), 
'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (4739, 4749), False, 'import random\n'), ((4769, 4793), 'random.randint', 'random.randint', (['(0)', 'sizey'], {}), '(0, sizey)\n', (4783, 4793), False, 'import random\n'), ((4996, 5020), 'random.randint', 'random.randint', (['(0)', 'sizex'], {}), '(0, sizex)\n', (5010, 5020), False, 'import random\n'), ((5041, 5065), 'random.randint', 'random.randint', (['(0)', 'sizey'], {}), '(0, sizey)\n', (5055, 5065), False, 'import random\n'), ((5553, 5583), 'rom_generator.generator.makeKey', 'makeKey', (['keySprite', 'intx', 'inty'], {}), '(keySprite, intx, inty)\n', (5560, 5583), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((5618, 5652), 'rom_generator.generator.makeLock', 'makeLock', (['lockSprite', 'intx2', 'inty2'], {}), '(lockSprite, intx2, inty2)\n', (5626, 5652), False, 'from rom_generator.generator import makeSpriteSheet, makeBasicProject, addSpriteSheet, makeBackground, makeScene, makeActor, addSymmetricSceneConnections, makeMusic, reverse_direction, initializeGenerator, writeProjectToDisk, addActor, makeCol, makeLock, makeKey\n'), ((7850, 7878), 'numpy.array', 'numpy.array', (['maze_collisions'], {}), '(maze_collisions)\n', (7861, 7878), False, 'import numpy\n'), ((3107, 3129), 'numpy.array', 'numpy.array', (['mazeArray'], {}), '(mazeArray)\n', (3118, 3129), False, 'import numpy\n'), ((4060, 4088), 'numpy.array', 'numpy.array', (['maze_collisions'], {}), '(maze_collisions)\n', (4071, 4088), False, 'import numpy\n'), ((7774, 7802), 'numpy.array', 'numpy.array', (['maze_collisions'], {}), '(maze_collisions)\n', (7785, 7802), False, 'import numpy\n'), ((7929, 7957), 'numpy.array', 'numpy.array', (['maze_collisions'], {}), '(maze_collisions)\n', (7940, 7957), False, 
'import numpy\n'), ((8017, 8045), 'numpy.array', 'numpy.array', (['maze_collisions'], {}), '(maze_collisions)\n', (8028, 8045), False, 'import numpy\n')] |
from numpy import asarray, block, clip, inf, kron, sqrt
from numpy.linalg import pinv
from glimix_core._util import rsolve, unvec, vec
from .._util import cached_property, log2pi, safe_log
class KronFastScanner:
    """
    Approximated fast inference over several covariates.
    Specifically, it maximizes the log of the marginal likelihood ::
    log(p(Y)ⱼ) = log𝓝(vec(Y) | (A ⊗ X)vec(𝚩ⱼ) + (Aⱼ ⊗ Xⱼ)vec(𝚨ⱼ), sⱼK),
    where K = C₀ ⊗ GGᵀ + C₁ ⊗ I and ⱼ index the candidates set. For performance purpose,
    we optimise only the fixed-effect sizes and scale parameters. Therefore, K is fixed
    throughout the process.
    """
    def __init__(self, Y, A, X, G, terms):
        """
        Constructor.
        Parameters
        ----------
        Y : (n, p) array_like
            Outcome matrix.
        A : (n, n) array_like
            Trait-by-trait design matrix.
        X : (n, c) array_like
            Covariates design matrix.
        G : (n, r) array_like
            Matrix G from the GGᵀ term.
        terms : dict
            Pre-computed terms.
        """
        self._Y = asarray(Y, float)
        self._A = asarray(A, float)
        self._X = asarray(X, float)
        self._G = asarray(G, float)
        # K is fixed for the whole scan, so every K-dependent quantity below is
        # computed once by the caller and passed in via ``terms``.
        self._H = terms["H"]
        self._logdetK = terms["logdetK"]
        self._W = terms["W"]
        self._yKiy = terms["yKiy"]
        self._WA = terms["WA"]
        self._WL0 = terms["WL0"]
        self._Lz = terms["Lz"]
        self._XRiM = terms["XRiM"]
        self._ZiXRiy = terms["ZiXRiy"]
        self._ZiXRiM = terms["ZiXRiM"]
        self._MRiM = terms["MRiM"]
        self._MRiXZiXRiM = terms["MRiXZiXRiM"]
        self._MRiy = terms["MRiy"]
        self._MRiXZiXRiy = terms["MRiXZiXRiy"]
    def null_lml(self):
        """Return the (cached) log marginal likelihood of the null model."""
        return self._null_lml
    @cached_property
    def _null_lml(self):
        """
        Log of the marginal likelihood for the null hypothesis.
        It is implemented as ::
        2·log(p(Y)) = -n·p·log(2𝜋s) - log|K| - n·p,
        for which s and 𝚩 are optimal.
        Returns
        -------
        lml : float
            Log of the marginal likelihood.
        """
        # NOTE: ``np`` here is the scalar n·p (total observations), not numpy.
        np = self._nsamples * self._ntraits
        scale = self.null_scale
        return self._static_lml / 2 - np * safe_log(scale) / 2 - np / 2
    @cached_property
    def null_beta(self):
        """
        Optimal 𝛃 according to the marginal likelihood.
        It is compute by solving the equation ::
        MᵀK⁻¹M𝛃 = MᵀK⁻¹𝐲,
        for 𝐲 = vec(Y) and M = (A ⊗ X)vec(𝚩).
        Returns
        -------
        effsizes : ndarray
            Optimal 𝛃.
        """
        return rsolve(self._MKiM, self._MKiy)
    @cached_property
    def null_beta_covariance(self):
        """
        Covariance of the optimal 𝛃 according to the marginal likelihood.
        Returns
        -------
        effsizes-covariance : ndarray
            s(MᵀK⁻¹M)⁻¹.
        """
        # pinv is used because H = MᵀK⁻¹M may be singular.
        return self.null_scale * pinv(self._H)
    @cached_property
    def null_beta_se(self):
        """
        Standard errors of the optimal 𝛃.
        Returns
        -------
        beta_se : ndarray
            Square root of the diagonal of the beta covariance.
        """
        return sqrt(self.null_beta_covariance.diagonal())
    @cached_property
    def null_scale(self):
        """
        Optimal s according to the marginal likelihood.
        The optimal s is given by
        s = (n·p)⁻¹𝐲ᵀK⁻¹(𝐲 - 𝐦),
        where 𝐦 = (A ⊗ X)vec(𝚩) and 𝚩 is optimal.
        Returns
        -------
        scale : float
            Optimal scale.
        """
        # ``np`` is the scalar n·p here, not the numpy module.
        np = self._nsamples * self._ntraits
        b = vec(self.null_beta)
        mKiy = b.T @ self._MKiy
        sqrtdot = self._yKiy - mKiy
        scale = sqrtdot / np
        return scale
    def scan(self, A1, X1):
        """
        LML, fixed-effect sizes, and scale of the candidate set.
        Parameters
        ----------
        A1 : (p, e) array_like
            Trait-by-environments design matrix.
        X1 : (n, m) array_like
            Variants set matrix.
        Returns
        -------
        lml : float
            Log of the marginal likelihood for the set.
        effsizes0 : (c, p) ndarray
            Fixed-effect sizes for the covariates.
        effsizes0_se : (c, p) ndarray
            Fixed-effect size standard errors for the covariates.
        effsizes1 : (m, e) ndarray
            Fixed-effect sizes for the candidates.
        effsizes1_se : (m, e) ndarray
            Fixed-effect size standard errors for the candidates.
        scale : float
            Optimal scale.
        """
        from numpy import empty
        from numpy.linalg import multi_dot
        from numpy_sugar import epsilon, is_all_finite
        from scipy.linalg import cho_solve
        A1 = asarray(A1, float)
        X1 = asarray(X1, float)
        if not is_all_finite(A1):
            raise ValueError("A1 parameter has non-finite elements.")
        if not is_all_finite(X1):
            raise ValueError("X1 parameter has non-finite elements.")
        # Empty candidate set: the model reduces to the null model.
        if A1.shape[1] == 0:
            beta_se = sqrt(self.null_beta_covariance.diagonal())
            return {
                "lml": self._null_lml,
                "effsizes0": unvec(self.null_beta, (self._ncovariates, -1)),
                "effsizes0_se": unvec(beta_se, (self._ncovariates, -1)),
                "effsizes1": empty((0,)),
                "effsizes1_se": empty((0,)),
                "scale": self.null_scale,
            }
        # Cross terms between the fixed covariates and the candidate set.
        X1X1 = X1.T @ X1
        XX1 = self._X.T @ X1
        AWA1 = self._WA.T @ A1
        A1W = A1.T @ self._W
        GX1 = self._G.T @ X1
        MRiM1 = kron(AWA1, XX1)
        M1RiM1 = kron(A1W @ A1, X1X1)
        M1Riy = vec(multi_dot([X1.T, self._Y, A1W.T]))
        XRiM1 = kron(self._WL0.T @ A1, GX1)
        # Solve against the pre-computed Cholesky factor of Z.
        ZiXRiM1 = cho_solve(self._Lz, XRiM1)
        MRiXZiXRiM1 = self._XRiM.T @ ZiXRiM1
        M1RiXZiXRiM1 = XRiM1.T @ ZiXRiM1
        M1RiXZiXRiy = XRiM1.T @ self._ZiXRiy
        # Assemble the augmented normal equations [M M1]ᵀK⁻¹[M M1]𝛃 = [M M1]ᵀK⁻¹𝐲
        # as 2x2 block matrices; T1/T3 are the low-rank-correction parts.
        T0 = [[self._MRiM, MRiM1], [MRiM1.T, M1RiM1]]
        T1 = [[self._MRiXZiXRiM, MRiXZiXRiM1], [MRiXZiXRiM1.T, M1RiXZiXRiM1]]
        T2 = [self._MRiy, M1Riy]
        T3 = [self._MRiXZiXRiy, M1RiXZiXRiy]
        MKiM = block(T0) - block(T1)
        MKiy = block(T2) - block(T3)
        beta = rsolve(MKiM, MKiy)
        mKiy = beta.T @ MKiy
        # The first c·p entries of beta belong to the covariates; the rest to
        # the candidates.
        cp = self._ntraits * self._ncovariates
        effsizes0 = unvec(beta[:cp], (self._ncovariates, self._ntraits))
        effsizes1 = unvec(beta[cp:], (X1.shape[1], A1.shape[1]))
        # ``np`` is the scalar n·p here, not the numpy module.
        np = self._nsamples * self._ntraits
        sqrtdot = self._yKiy - mKiy
        # Clip to avoid log(0) and division by zero downstream.
        scale = clip(sqrtdot / np, epsilon.tiny, inf)
        lml = self._static_lml / 2 - np * safe_log(scale) / 2 - np / 2
        effsizes_se = sqrt(clip(scale * pinv(MKiM).diagonal(), epsilon.tiny, inf))
        effsizes0_se = unvec(effsizes_se[:cp], (self._ncovariates, self._ntraits))
        effsizes1_se = unvec(effsizes_se[cp:], (X1.shape[1], A1.shape[1]))
        return {
            "lml": lml,
            "effsizes0": effsizes0,
            "effsizes1": effsizes1,
            "scale": scale,
            "effsizes0_se": effsizes0_se,
            "effsizes1_se": effsizes1_se,
        }
    @cached_property
    def _static_lml(self):
        """Scale-independent part of 2·lml: -n·p·log(2𝜋) - log|K|."""
        np = self._nsamples * self._ntraits
        static_lml = -np * log2pi - self._logdetK
        return static_lml
    @property
    def _nsamples(self):
        """Number of samples n (rows of Y)."""
        return self._Y.shape[0]
    @property
    def _ntraits(self):
        """Number of traits p (columns of Y)."""
        return self._Y.shape[1]
    @property
    def _ncovariates(self):
        """Number of covariates c (columns of X)."""
        return self._X.shape[1]
    @cached_property
    def _MKiM(self):
        """MᵀK⁻¹M, via the pre-computed low-rank correction terms."""
        return self._MRiM - self._XRiM.T @ self._ZiXRiM
    @cached_property
    def _MKiy(self):
        """MᵀK⁻¹𝐲, via the pre-computed low-rank correction terms."""
        return self._MRiy - self._XRiM.T @ self._ZiXRiy
| [
"glimix_core._util.vec",
"glimix_core._util.unvec",
"numpy.block",
"numpy_sugar.is_all_finite",
"numpy.asarray",
"scipy.linalg.cho_solve",
"numpy.empty",
"glimix_core._util.rsolve",
"numpy.clip",
"numpy.kron",
"numpy.linalg.pinv",
"numpy.linalg.multi_dot"
] | [((1110, 1127), 'numpy.asarray', 'asarray', (['Y', 'float'], {}), '(Y, float)\n', (1117, 1127), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((1146, 1163), 'numpy.asarray', 'asarray', (['A', 'float'], {}), '(A, float)\n', (1153, 1163), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((1182, 1199), 'numpy.asarray', 'asarray', (['X', 'float'], {}), '(X, float)\n', (1189, 1199), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((1218, 1235), 'numpy.asarray', 'asarray', (['G', 'float'], {}), '(G, float)\n', (1225, 1235), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((2660, 2690), 'glimix_core._util.rsolve', 'rsolve', (['self._MKiM', 'self._MKiy'], {}), '(self._MKiM, self._MKiy)\n', (2666, 2690), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((3677, 3696), 'glimix_core._util.vec', 'vec', (['self.null_beta'], {}), '(self.null_beta)\n', (3680, 3696), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((4841, 4859), 'numpy.asarray', 'asarray', (['A1', 'float'], {}), '(A1, float)\n', (4848, 4859), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((4873, 4891), 'numpy.asarray', 'asarray', (['X1', 'float'], {}), '(X1, float)\n', (4880, 4891), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((5711, 5726), 'numpy.kron', 'kron', (['AWA1', 'XX1'], {}), '(AWA1, XX1)\n', (5715, 5726), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((5744, 5764), 'numpy.kron', 'kron', (['(A1W @ A1)', 'X1X1'], {}), '(A1W @ A1, X1X1)\n', (5748, 5764), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((5837, 5864), 'numpy.kron', 'kron', (['(self._WL0.T @ A1)', 'GX1'], {}), '(self._WL0.T @ A1, GX1)\n', (5841, 5864), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((5883, 5909), 'scipy.linalg.cho_solve', 'cho_solve', (['self._Lz', 'XRiM1'], {}), '(self._Lz, XRiM1)\n', (5892, 5909), 
False, 'from scipy.linalg import cho_solve\n'), ((6343, 6361), 'glimix_core._util.rsolve', 'rsolve', (['MKiM', 'MKiy'], {}), '(MKiM, MKiy)\n', (6349, 6361), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((6459, 6511), 'glimix_core._util.unvec', 'unvec', (['beta[:cp]', '(self._ncovariates, self._ntraits)'], {}), '(beta[:cp], (self._ncovariates, self._ntraits))\n', (6464, 6511), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((6532, 6576), 'glimix_core._util.unvec', 'unvec', (['beta[cp:]', '(X1.shape[1], A1.shape[1])'], {}), '(beta[cp:], (X1.shape[1], A1.shape[1]))\n', (6537, 6576), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((6674, 6711), 'numpy.clip', 'clip', (['(sqrtdot / np)', 'epsilon.tiny', 'inf'], {}), '(sqrtdot / np, epsilon.tiny, inf)\n', (6678, 6711), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((6890, 6949), 'glimix_core._util.unvec', 'unvec', (['effsizes_se[:cp]', '(self._ncovariates, self._ntraits)'], {}), '(effsizes_se[:cp], (self._ncovariates, self._ntraits))\n', (6895, 6949), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((6973, 7024), 'glimix_core._util.unvec', 'unvec', (['effsizes_se[cp:]', '(X1.shape[1], A1.shape[1])'], {}), '(effsizes_se[cp:], (X1.shape[1], A1.shape[1]))\n', (6978, 7024), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((2976, 2989), 'numpy.linalg.pinv', 'pinv', (['self._H'], {}), '(self._H)\n', (2980, 2989), False, 'from numpy.linalg import pinv\n'), ((4908, 4925), 'numpy_sugar.is_all_finite', 'is_all_finite', (['A1'], {}), '(A1)\n', (4921, 4925), False, 'from numpy_sugar import epsilon, is_all_finite\n'), ((5013, 5030), 'numpy_sugar.is_all_finite', 'is_all_finite', (['X1'], {}), '(X1)\n', (5026, 5030), False, 'from numpy_sugar import epsilon, is_all_finite\n'), ((5786, 5819), 'numpy.linalg.multi_dot', 'multi_dot', (['[X1.T, self._Y, A1W.T]'], {}), '([X1.T, self._Y, A1W.T])\n', (5795, 5819), False, 'from numpy.linalg import 
multi_dot\n'), ((6269, 6278), 'numpy.block', 'block', (['T0'], {}), '(T0)\n', (6274, 6278), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((6281, 6290), 'numpy.block', 'block', (['T1'], {}), '(T1)\n', (6286, 6290), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((6306, 6315), 'numpy.block', 'block', (['T2'], {}), '(T2)\n', (6311, 6315), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((6318, 6327), 'numpy.block', 'block', (['T3'], {}), '(T3)\n', (6323, 6327), False, 'from numpy import asarray, block, clip, inf, kron, sqrt\n'), ((5286, 5332), 'glimix_core._util.unvec', 'unvec', (['self.null_beta', '(self._ncovariates, -1)'], {}), '(self.null_beta, (self._ncovariates, -1))\n', (5291, 5332), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((5366, 5405), 'glimix_core._util.unvec', 'unvec', (['beta_se', '(self._ncovariates, -1)'], {}), '(beta_se, (self._ncovariates, -1))\n', (5371, 5405), False, 'from glimix_core._util import rsolve, unvec, vec\n'), ((5436, 5447), 'numpy.empty', 'empty', (['(0,)'], {}), '((0,))\n', (5441, 5447), False, 'from numpy import empty\n'), ((5481, 5492), 'numpy.empty', 'empty', (['(0,)'], {}), '((0,))\n', (5486, 5492), False, 'from numpy import empty\n'), ((6824, 6834), 'numpy.linalg.pinv', 'pinv', (['MKiM'], {}), '(MKiM)\n', (6828, 6834), False, 'from numpy.linalg import pinv\n')] |
import os
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from utils import config_reference as cfg
sns.set()

LOGIT_DIFFERENCE_PATH = "/Users/blakeedwards/Desktop/Repos/research/neural-distiller/post-experiment/ESKD-Analysis/experiment3_logit_diffs.pkl"

# Load the pickled (train, test) teacher-student logit difference matrices.
with open(LOGIT_DIFFERENCE_PATH, 'rb') as file:
    logit_differences = pickle.load(file)
train_difference = logit_differences[0][0]
test_difference = logit_differences[0][1]


def _render_heatmap(matrix, vlimit, title, filename, invert_y=True):
    """Draw a symmetric seismic heatmap, label it, save it, and show it."""
    axes = sns.heatmap(matrix, vmin=-vlimit, vmax=vlimit, cmap="seismic")
    if invert_y:
        axes.invert_yaxis()
    axes.set(xlabel="Data Sample", ylabel="Class")
    plt.title(title)
    axes.get_figure().savefig(os.path.join(cfg.figures_path, filename))
    plt.show()


# Class-major orientation: rows become classes, so axis=1 averages over samples.
train_difference = np.transpose(train_difference)
test_difference = np.transpose(test_difference)
train_mean = np.mean(train_difference, axis=1)
test_mean = np.mean(test_difference, axis=1)

_render_heatmap(
    [train_mean],
    0.1,
    "Teacher-Student Average Logit Difference (Train Data)",
    "average-train-logit-difference.png",
    invert_y=False,  # the average-train plot keeps the default y orientation
)
_render_heatmap(
    [test_mean],
    0.1,
    "Teacher-Student Average Logit Difference (Test Data)",
    "average-test-logit-difference.png",
)

# Back to sample-major orientation for the per-sample slice maps.
train_difference = np.transpose(train_difference)
test_difference = np.transpose(test_difference)

train_start = 0
train_slice = 1000
_render_heatmap(
    train_difference[train_start:train_slice + train_start, :],
    0.5,
    "Teacher-Student Logit Difference Map (Train Data)",
    "train-logit-difference.png",
)

test_start = 0
test_slice = 1000
_render_heatmap(
    test_difference[test_start:test_slice + test_start, :],
    0.5,
    "Teacher-Student Logit Difference Map (Test Data)",
    "test-logit-difference.png",
)
#
# indices = np.arange(1, len(train_difference)+1, 1)
# cols = ["col"+str(i) for i in range(1, len(train_difference[0])+1)]
# df_train = pd.DataFrame(data=train_difference, index=indices, columns=cols)
#
# indices = np.arange(1, len(test_difference)+1, 1)
# cols = ["col"+str(i) for i in range(1, len(test_difference[0])+1)]
# df_test = pd.DataFrame(data=test_difference, index=indices, columns=cols)
#
| [
"matplotlib.pyplot.title",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.transpose",
"numpy.mean",
"pickle.load",
"seaborn.set"
] | [((159, 168), 'seaborn.set', 'sns.set', ([], {}), '()\n', (166, 168), True, 'import seaborn as sns\n'), ((509, 539), 'numpy.transpose', 'np.transpose', (['train_difference'], {}), '(train_difference)\n', (521, 539), True, 'import numpy as np\n'), ((558, 587), 'numpy.transpose', 'np.transpose', (['test_difference'], {}), '(test_difference)\n', (570, 587), True, 'import numpy as np\n'), ((704, 737), 'numpy.mean', 'np.mean', (['train_difference'], {'axis': '(1)'}), '(train_difference, axis=1)\n', (711, 737), True, 'import numpy as np\n'), ((750, 782), 'numpy.mean', 'np.mean', (['test_difference'], {'axis': '(1)'}), '(test_difference, axis=1)\n', (757, 782), True, 'import numpy as np\n'), ((797, 863), 'seaborn.heatmap', 'sns.heatmap', (['[train_mean]'], {'vmin': 'min_heat', 'vmax': 'max_heat', 'cmap': 'cmap'}), '([train_mean], vmin=min_heat, vmax=max_heat, cmap=cmap)\n', (808, 863), True, 'import seaborn as sns\n'), ((917, 983), 'matplotlib.pyplot.title', 'plt.title', (['"""Teacher-Student Average Logit Difference (Train Data)"""'], {}), "('Teacher-Student Average Logit Difference (Train Data)')\n", (926, 983), True, 'import matplotlib.pyplot as plt\n'), ((1096, 1106), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1104, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1187), 'seaborn.heatmap', 'sns.heatmap', (['[test_mean]'], {'vmin': 'min_heat', 'vmax': 'max_heat', 'cmap': 'cmap'}), '([test_mean], vmin=min_heat, vmax=max_heat, cmap=cmap)\n', (1133, 1187), True, 'import seaborn as sns\n'), ((1267, 1332), 'matplotlib.pyplot.title', 'plt.title', (['"""Teacher-Student Average Logit Difference (Test Data)"""'], {}), "('Teacher-Student Average Logit Difference (Test Data)')\n", (1276, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1444, 1454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1452, 1454), True, 'import matplotlib.pyplot as plt\n'), ((1475, 1505), 'numpy.transpose', 'np.transpose', (['train_difference'], {}), 
'(train_difference)\n', (1487, 1505), True, 'import numpy as np\n'), ((1524, 1553), 'numpy.transpose', 'np.transpose', (['test_difference'], {}), '(test_difference)\n', (1536, 1553), True, 'import numpy as np\n'), ((1652, 1768), 'seaborn.heatmap', 'sns.heatmap', (['train_difference[train_start:train_slice + train_start, :]'], {'vmin': 'min_heat', 'vmax': 'max_heat', 'cmap': 'cmap'}), '(train_difference[train_start:train_slice + train_start, :],\n vmin=min_heat, vmax=max_heat, cmap=cmap)\n', (1663, 1768), True, 'import seaborn as sns\n'), ((1844, 1906), 'matplotlib.pyplot.title', 'plt.title', (['"""Teacher-Student Logit Difference Map (Train Data)"""'], {}), "('Teacher-Student Logit Difference Map (Train Data)')\n", (1853, 1906), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2019, 2021), True, 'import matplotlib.pyplot as plt\n'), ((2068, 2181), 'seaborn.heatmap', 'sns.heatmap', (['test_difference[test_start:test_slice + test_start, :]'], {'vmin': 'min_heat', 'vmax': 'max_heat', 'cmap': 'cmap'}), '(test_difference[test_start:test_slice + test_start, :], vmin=\n min_heat, vmax=max_heat, cmap=cmap)\n', (2079, 2181), True, 'import seaborn as sns\n'), ((2254, 2315), 'matplotlib.pyplot.title', 'plt.title', (['"""Teacher-Student Logit Difference Map (Test Data)"""'], {}), "('Teacher-Student Logit Difference Map (Test Data)')\n", (2263, 2315), True, 'import matplotlib.pyplot as plt\n'), ((2418, 2428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2426, 2428), True, 'import matplotlib.pyplot as plt\n'), ((386, 403), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (397, 403), False, 'import pickle\n'), ((1026, 1094), 'os.path.join', 'os.path.join', (['cfg.figures_path', '"""average-train-logit-difference.png"""'], {}), "(cfg.figures_path, 'average-train-logit-difference.png')\n", (1038, 1094), False, 'import os\n'), ((1375, 1442), 'os.path.join', 'os.path.join', (['cfg.figures_path', 
'"""average-test-logit-difference.png"""'], {}), "(cfg.figures_path, 'average-test-logit-difference.png')\n", (1387, 1442), False, 'import os\n'), ((1949, 2009), 'os.path.join', 'os.path.join', (['cfg.figures_path', '"""train-logit-difference.png"""'], {}), "(cfg.figures_path, 'train-logit-difference.png')\n", (1961, 2009), False, 'import os\n'), ((2357, 2416), 'os.path.join', 'os.path.join', (['cfg.figures_path', '"""test-logit-difference.png"""'], {}), "(cfg.figures_path, 'test-logit-difference.png')\n", (2369, 2416), False, 'import os\n')] |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from random import shuffle
from unittest import TestCase
from traceml.processors import df_processors
from traceml.processors.units_processors import to_percentage
@pytest.mark.processors_mark
class DataFrameSummaryTest(TestCase):
    """Unit tests for the df_processors column-summary helpers.

    setUp builds one synthetic DataFrame with one column per supported
    column type (bool, unique, categorical, numeric, constant, date, and a
    numeric column with missing values); each test checks one summary view.
    """
    def setUp(self):
        """Build the fixture DataFrame and pre-compute its column stats."""
        self.size = 1000
        # 10% NaN, the remaining 90% an even shuffle of the digits 0-9.
        missing = [np.nan] * (self.size // 10) + list(range(10)) * (
            (self.size - self.size // 10) // 10
        )
        shuffle(missing)
        self.types = [
            df_processors.DF_TYPE_NUMERIC,
            df_processors.DF_TYPE_BOOL,
            df_processors.DF_TYPE_CATEGORICAL,
            df_processors.DF_TYPE_CONSTANT,
            df_processors.DF_TYPE_UNIQUE,
            df_processors.DF_TYPE_DATE,
        ]
        self.columns = [
            "dbool1",
            "dbool2",
            "duniques",
            "dcategoricals",
            "dnumerics1",
            "dnumerics2",
            "dnumerics3",
            "dmissing",
            "dconstant",
            "ddates",
        ]
        self.df = pd.DataFrame(
            dict(
                dbool1=np.random.choice([0, 1], size=self.size),
                dbool2=np.random.choice(["a", "b"], size=self.size),
                duniques=["x{}".format(i) for i in range(self.size)],
                # "a" for even i, "b" for odd multiples of 3, else "c";
                # the .format(i) calls are no-ops on these literals.
                dcategoricals=[
                    "a".format(i)
                    if i % 2 == 0
                    else "b".format(i)
                    if i % 3 == 0
                    else "c".format(i)
                    for i in range(self.size)
                ],
                dnumerics1=range(self.size),
                dnumerics2=range(self.size, 2 * self.size),
                dnumerics3=list(range(self.size - self.size // 10))
                + list(range(-self.size // 10, 0)),
                dmissing=missing,
                dconstant=["a"] * self.size,
                ddates=pd.date_range("2010-01-01", periods=self.size, freq="1M"),
            )
        )
        self.column_stats = df_processors.get_df_column_stats(self.df)
        self.columns_types = df_processors.get_df_columns_types(self.column_stats)
    def test_get_columns_works_as_expected(self):
        """get_df_columns honours the ALL / INCLUDE / EXCLUDE selectors."""
        assert len(df_processors.get_df_columns(self.df, df_processors.ALL)) == 10
        assert (
            len(
                df_processors.get_df_columns(
                    self.df,
                    df_processors.INCLUDE,
                    ["dnumerics1", "dnumerics2", "dnumerics3"],
                )
            )
            == 3
        )
        assert (
            len(
                df_processors.get_df_columns(
                    self.df,
                    df_processors.EXCLUDE,
                    ["dnumerics1", "dnumerics2", "dnumerics3"],
                )
            )
            == 7
        )
    def test_column_types_works_as_expected(self):
        """Type histogram: 4 numeric, 2 bool, 1 each of the other types."""
        expected = pd.Series(index=self.types, data=[4, 2, 1, 1, 1, 1], name="types")
        assert_series_equal(self.columns_types[self.types], expected[self.types])
    def test_column_stats_works_as_expected(self):
        """Each row of the (5, 10) stats frame matches the fixture design."""
        self.assertTupleEqual(self.column_stats.shape, (5, 10))
        # counts
        expected = pd.Series(
            index=self.columns, data=self.size, name="counts", dtype="object"
        )
        expected["dmissing"] -= 100
        assert_series_equal(
            self.column_stats[self.columns].loc["counts"], expected[self.columns]
        )
        # uniques
        expected = pd.Series(
            index=self.columns, data=self.size, name="uniques", dtype="object"
        )
        expected[["dbool1", "dbool2"]] = 2
        expected[["dcategoricals"]] = 3
        expected[["dconstant"]] = 1
        expected[["dmissing"]] = 10
        assert_series_equal(
            self.column_stats[self.columns].loc["uniques"].sort_index(),
            expected[self.columns].sort_index(),
            check_dtype=False,
        )
        # missing
        expected = pd.Series(index=self.columns, data=0, name="missing", dtype="object")
        expected[["dmissing"]] = 100
        assert_series_equal(
            self.column_stats[self.columns].loc["missing"],
            expected[self.columns],
            check_dtype=False,
        )
        # missing_perc
        expected = pd.Series(
            index=self.columns, data=["0%"] * 10, name="missing_perc", dtype="object"
        )
        expected[["dmissing"]] = "10%"
        assert_series_equal(
            self.column_stats[self.columns].loc["missing_perc"], expected[self.columns]
        )
        # types
        expected = pd.Series(
            index=self.columns, data=[np.nan] * 10, name="types", dtype="object"
        )
        expected[["dbool1", "dbool2"]] = df_processors.DF_TYPE_BOOL
        expected[["dcategoricals"]] = df_processors.DF_TYPE_CATEGORICAL
        expected[["dconstant"]] = df_processors.DF_TYPE_CONSTANT
        expected[["ddates"]] = df_processors.DF_TYPE_DATE
        expected[["duniques"]] = df_processors.DF_TYPE_UNIQUE
        expected[
            ["dnumerics1", "dnumerics2", "dnumerics3", "dmissing"]
        ] = df_processors.DF_TYPE_NUMERIC
        assert_series_equal(
            self.column_stats[self.columns].loc["types"], expected[self.columns]
        )
    def test_uniques_summary(self):
        """Summary of an all-unique column."""
        expected = pd.Series(
            index=["counts", "uniques", "missing", "missing_perc", "types"],
            data=[self.size, self.size, 0, "0%", df_processors.DF_TYPE_UNIQUE],
            name="duniques",
            dtype=object,
        )
        assert_series_equal(
            df_processors.get_df_column_summary(self.df, "duniques"), expected
        )
    def test_constant_summary(self):
        """A constant column is summarised as a plain message string."""
        self.assertEqual(
            df_processors.get_df_column_summary(self.df, "dconstant"),
            "This is a constant value: a",
        )
    def test_bool1_summary(self):
        """Summary of a 0/1 boolean-like column: per-value counts and percentages."""
        count_values = self.df["dbool1"].value_counts()
        total_count = self.df["dbool1"].count()
        count0 = count_values[0]
        count1 = count_values[1]
        perc0 = to_percentage(count0 / total_count)
        perc1 = to_percentage(count1 / total_count)
        expected = pd.Series(
            index=[
                '"0" count',
                '"0" perc',
                '"1" count',
                '"1" perc',
                "counts",
                "uniques",
                "missing",
                "missing_perc",
                "types",
            ],
            data=[
                str(count0),
                perc0,
                str(count1),
                perc1,
                self.size,
                2,
                0,
                "0%",
                df_processors.DF_TYPE_BOOL,
            ],
            name="dbool1",
            dtype=object,
        )
        assert_series_equal(
            df_processors.get_df_column_summary(self.df, "dbool1"), expected
        )
    def test_bool2_summary(self):
        """Summary of an "a"/"b" boolean-like column: same shape as dbool1."""
        count_values = self.df["dbool2"].value_counts()
        total_count = self.df["dbool2"].count()
        count0 = count_values["a"]
        count1 = count_values["b"]
        perc0 = to_percentage(count0 / total_count)
        perc1 = to_percentage(count1 / total_count)
        expected = pd.Series(
            index=[
                '"a" count',
                '"a" perc',
                '"b" count',
                '"b" perc',
                "counts",
                "uniques",
                "missing",
                "missing_perc",
                "types",
            ],
            data=[
                str(count0),
                perc0,
                str(count1),
                perc1,
                self.size,
                2,
                0,
                "0%",
                df_processors.DF_TYPE_BOOL,
            ],
            name="dbool2",
            dtype=object,
        )
        assert_series_equal(
            df_processors.get_df_column_summary(self.df, "dbool2"), expected
        )
    def test_categorical_summary(self):
        """Summary of the categorical column, including its top value ("a: 500")."""
        expected = pd.Series(
            index=["top", "counts", "uniques", "missing", "missing_perc", "types"],
            data=["a: 500", self.size, 3, 0, "0%", df_processors.DF_TYPE_CATEGORICAL],
            name="dcategoricals",
            dtype=object,
        )
        assert_series_equal(
            df_processors.get_df_column_summary(self.df, "dcategoricals"), expected
        )
    def test_dates_summary(self):
        """Summary of the date column: min/max/range plus the common stats."""
        dmin = self.df["ddates"].min()
        dmax = self.df["ddates"].max()
        # sort_index on both sides so the comparison is order-insensitive.
        expected = pd.Series(
            index=[
                "max",
                "min",
                "range",
                "counts",
                "uniques",
                "missing",
                "missing_perc",
                "types",
            ],
            data=[
                dmax,
                dmin,
                dmax - dmin,
                self.size,
                self.size,
                0,
                "0%",
                df_processors.DF_TYPE_DATE,
            ],
            name="ddates",
            dtype=object,
        ).sort_index()
        tmp = df_processors.get_df_column_summary(self.df, "ddates").sort_index()
        assert_series_equal(tmp, expected)
    def test_numerics_summary(self):
        """Summary of a numeric column: the full descriptive-statistics block."""
        num1 = self.df["dnumerics1"]
        dm, dmp = df_processors.get_deviation_of_mean(num1)
        dam, damp = df_processors.get_median_absolute_deviation(num1)
        expected = pd.Series(
            index=[
                "mean",
                "std",
                "variance",
                "min",
                "max",
                "mode",
                "5%",
                "25%",
                "50%",
                "75%",
                "95%",
                "iqr",
                "kurtosis",
                "skewness",
                "sum",
                "mad",
                "cv",
                "zeros_num",
                "zeros_perc",
                "deviating_of_mean",
                "deviating_of_mean_perc",
                "deviating_of_median",
                "deviating_of_median_perc",
                "top_correlations",
                "counts",
                "uniques",
                "missing",
                "missing_perc",
                "types",
            ],
            data=[
                num1.mean(),
                num1.std(),
                num1.var(),
                num1.min(),
                num1.max(),
                num1.mode()[0],
                num1.quantile(0.05),
                num1.quantile(0.25),
                num1.quantile(0.5),
                num1.quantile(0.75),
                num1.quantile(0.95),
                num1.quantile(0.75) - num1.quantile(0.25),
                num1.kurt(),
                num1.skew(),
                num1.sum(),
                num1.mad(),
                num1.std() / num1.mean() if num1.mean() else np.nan,
                self.size - np.count_nonzero(num1),
                to_percentage((self.size - np.count_nonzero(num1)) / self.size),
                dm,
                dmp,
                dam,
                damp,
                "dnumerics2: 100%",
                self.size,
                self.size,
                0,
                "0%",
                df_processors.DF_TYPE_NUMERIC,
            ],
            name="dnumerics1",
            dtype=object,
        )
        assert_series_equal(
            df_processors.get_df_column_summary(self.df, "dnumerics1"), expected
        )
| [
"traceml.processors.df_processors.get_df_columns",
"pandas.date_range",
"traceml.processors.df_processors.get_median_absolute_deviation",
"numpy.count_nonzero",
"traceml.processors.df_processors.get_deviation_of_mean",
"random.shuffle",
"traceml.processors.df_processors.get_df_column_summary",
"pandas... | [((1121, 1137), 'random.shuffle', 'shuffle', (['missing'], {}), '(missing)\n', (1128, 1137), False, 'from random import shuffle\n'), ((2680, 2722), 'traceml.processors.df_processors.get_df_column_stats', 'df_processors.get_df_column_stats', (['self.df'], {}), '(self.df)\n', (2713, 2722), False, 'from traceml.processors import df_processors\n'), ((2752, 2805), 'traceml.processors.df_processors.get_df_columns_types', 'df_processors.get_df_columns_types', (['self.column_stats'], {}), '(self.column_stats)\n', (2786, 2805), False, 'from traceml.processors import df_processors\n'), ((3563, 3629), 'pandas.Series', 'pd.Series', ([], {'index': 'self.types', 'data': '[4, 2, 1, 1, 1, 1]', 'name': '"""types"""'}), "(index=self.types, data=[4, 2, 1, 1, 1, 1], name='types')\n", (3572, 3629), True, 'import pandas as pd\n'), ((3638, 3711), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['self.columns_types[self.types]', 'expected[self.types]'], {}), '(self.columns_types[self.types], expected[self.types])\n', (3657, 3711), False, 'from pandas.testing import assert_series_equal\n'), ((3865, 3941), 'pandas.Series', 'pd.Series', ([], {'index': 'self.columns', 'data': 'self.size', 'name': '"""counts"""', 'dtype': '"""object"""'}), "(index=self.columns, data=self.size, name='counts', dtype='object')\n", (3874, 3941), True, 'import pandas as pd\n'), ((4008, 4103), 'pandas.testing.assert_series_equal', 'assert_series_equal', (["self.column_stats[self.columns].loc['counts']", 'expected[self.columns]'], {}), "(self.column_stats[self.columns].loc['counts'], expected\n [self.columns])\n", (4027, 4103), False, 'from pandas.testing import assert_series_equal\n'), ((4159, 4236), 'pandas.Series', 'pd.Series', ([], {'index': 'self.columns', 'data': 'self.size', 'name': '"""uniques"""', 'dtype': '"""object"""'}), "(index=self.columns, data=self.size, name='uniques', dtype='object')\n", (4168, 4236), True, 'import pandas as pd\n'), ((4644, 4713), 'pandas.Series', 
'pd.Series', ([], {'index': 'self.columns', 'data': '(0)', 'name': '"""missing"""', 'dtype': '"""object"""'}), "(index=self.columns, data=0, name='missing', dtype='object')\n", (4653, 4713), True, 'import pandas as pd\n'), ((4759, 4873), 'pandas.testing.assert_series_equal', 'assert_series_equal', (["self.column_stats[self.columns].loc['missing']", 'expected[self.columns]'], {'check_dtype': '(False)'}), "(self.column_stats[self.columns].loc['missing'],\n expected[self.columns], check_dtype=False)\n", (4778, 4873), False, 'from pandas.testing import assert_series_equal\n'), ((4960, 5049), 'pandas.Series', 'pd.Series', ([], {'index': 'self.columns', 'data': "(['0%'] * 10)", 'name': '"""missing_perc"""', 'dtype': '"""object"""'}), "(index=self.columns, data=['0%'] * 10, name='missing_perc', dtype=\n 'object')\n", (4969, 5049), True, 'import pandas as pd\n'), ((5115, 5215), 'pandas.testing.assert_series_equal', 'assert_series_equal', (["self.column_stats[self.columns].loc['missing_perc']", 'expected[self.columns]'], {}), "(self.column_stats[self.columns].loc['missing_perc'],\n expected[self.columns])\n", (5134, 5215), False, 'from pandas.testing import assert_series_equal\n'), ((5270, 5349), 'pandas.Series', 'pd.Series', ([], {'index': 'self.columns', 'data': '([np.nan] * 10)', 'name': '"""types"""', 'dtype': '"""object"""'}), "(index=self.columns, data=[np.nan] * 10, name='types', dtype='object')\n", (5279, 5349), True, 'import pandas as pd\n'), ((5833, 5927), 'pandas.testing.assert_series_equal', 'assert_series_equal', (["self.column_stats[self.columns].loc['types']", 'expected[self.columns]'], {}), "(self.column_stats[self.columns].loc['types'], expected[\n self.columns])\n", (5852, 5927), False, 'from pandas.testing import assert_series_equal\n'), ((6001, 6182), 'pandas.Series', 'pd.Series', ([], {'index': "['counts', 'uniques', 'missing', 'missing_perc', 'types']", 'data': "[self.size, self.size, 0, '0%', df_processors.DF_TYPE_UNIQUE]", 'name': '"""duniques"""', 
'dtype': 'object'}), "(index=['counts', 'uniques', 'missing', 'missing_perc', 'types'],\n data=[self.size, self.size, 0, '0%', df_processors.DF_TYPE_UNIQUE],\n name='duniques', dtype=object)\n", (6010, 6182), True, 'import pandas as pd\n'), ((6761, 6796), 'traceml.processors.units_processors.to_percentage', 'to_percentage', (['(count0 / total_count)'], {}), '(count0 / total_count)\n', (6774, 6796), False, 'from traceml.processors.units_processors import to_percentage\n'), ((6813, 6848), 'traceml.processors.units_processors.to_percentage', 'to_percentage', (['(count1 / total_count)'], {}), '(count1 / total_count)\n', (6826, 6848), False, 'from traceml.processors.units_processors import to_percentage\n'), ((7839, 7874), 'traceml.processors.units_processors.to_percentage', 'to_percentage', (['(count0 / total_count)'], {}), '(count0 / total_count)\n', (7852, 7874), False, 'from traceml.processors.units_processors import to_percentage\n'), ((7891, 7926), 'traceml.processors.units_processors.to_percentage', 'to_percentage', (['(count1 / total_count)'], {}), '(count1 / total_count)\n', (7904, 7926), False, 'from traceml.processors.units_processors import to_percentage\n'), ((8752, 8953), 'pandas.Series', 'pd.Series', ([], {'index': "['top', 'counts', 'uniques', 'missing', 'missing_perc', 'types']", 'data': "['a: 500', self.size, 3, 0, '0%', df_processors.DF_TYPE_CATEGORICAL]", 'name': '"""dcategoricals"""', 'dtype': 'object'}), "(index=['top', 'counts', 'uniques', 'missing', 'missing_perc',\n 'types'], data=['a: 500', self.size, 3, 0, '0%', df_processors.\n DF_TYPE_CATEGORICAL], name='dcategoricals', dtype=object)\n", (8761, 8953), True, 'import pandas as pd\n'), ((9927, 9961), 'pandas.testing.assert_series_equal', 'assert_series_equal', (['tmp', 'expected'], {}), '(tmp, expected)\n', (9946, 9961), False, 'from pandas.testing import assert_series_equal\n'), ((10055, 10096), 'traceml.processors.df_processors.get_deviation_of_mean', 'df_processors.get_deviation_of_mean', 
(['num1'], {}), '(num1)\n', (10090, 10096), False, 'from traceml.processors import df_processors\n'), ((10117, 10166), 'traceml.processors.df_processors.get_median_absolute_deviation', 'df_processors.get_median_absolute_deviation', (['num1'], {}), '(num1)\n', (10160, 10166), False, 'from traceml.processors import df_processors\n'), ((6275, 6331), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""duniques"""'], {}), "(self.df, 'duniques')\n", (6310, 6331), False, 'from traceml.processors import df_processors\n'), ((6428, 6485), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""dconstant"""'], {}), "(self.df, 'dconstant')\n", (6463, 6485), False, 'from traceml.processors import df_processors\n'), ((7539, 7593), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""dbool1"""'], {}), "(self.df, 'dbool1')\n", (7574, 7593), False, 'from traceml.processors import df_processors\n'), ((8617, 8671), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""dbool2"""'], {}), "(self.df, 'dbool2')\n", (8652, 8671), False, 'from traceml.processors import df_processors\n'), ((9046, 9107), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""dcategoricals"""'], {}), "(self.df, 'dcategoricals')\n", (9081, 9107), False, 'from traceml.processors import df_processors\n'), ((12169, 12227), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""dnumerics1"""'], {}), "(self.df, 'dnumerics1')\n", (12204, 12227), False, 'from traceml.processors import df_processors\n'), ((2876, 2932), 'traceml.processors.df_processors.get_df_columns', 'df_processors.get_df_columns', (['self.df', 'df_processors.ALL'], {}), '(self.df, 
df_processors.ALL)\n', (2904, 2932), False, 'from traceml.processors import df_processors\n'), ((2991, 3099), 'traceml.processors.df_processors.get_df_columns', 'df_processors.get_df_columns', (['self.df', 'df_processors.INCLUDE', "['dnumerics1', 'dnumerics2', 'dnumerics3']"], {}), "(self.df, df_processors.INCLUDE, ['dnumerics1',\n 'dnumerics2', 'dnumerics3'])\n", (3019, 3099), False, 'from traceml.processors import df_processors\n'), ((3267, 3375), 'traceml.processors.df_processors.get_df_columns', 'df_processors.get_df_columns', (['self.df', 'df_processors.EXCLUDE', "['dnumerics1', 'dnumerics2', 'dnumerics3']"], {}), "(self.df, df_processors.EXCLUDE, ['dnumerics1',\n 'dnumerics2', 'dnumerics3'])\n", (3295, 3375), False, 'from traceml.processors import df_processors\n'), ((9260, 9490), 'pandas.Series', 'pd.Series', ([], {'index': "['max', 'min', 'range', 'counts', 'uniques', 'missing', 'missing_perc', 'types'\n ]", 'data': "[dmax, dmin, dmax - dmin, self.size, self.size, 0, '0%', df_processors.\n DF_TYPE_DATE]", 'name': '"""ddates"""', 'dtype': 'object'}), "(index=['max', 'min', 'range', 'counts', 'uniques', 'missing',\n 'missing_perc', 'types'], data=[dmax, dmin, dmax - dmin, self.size,\n self.size, 0, '0%', df_processors.DF_TYPE_DATE], name='ddates', dtype=\n object)\n", (9269, 9490), True, 'import pandas as pd\n'), ((9851, 9905), 'traceml.processors.df_processors.get_df_column_summary', 'df_processors.get_df_column_summary', (['self.df', '"""ddates"""'], {}), "(self.df, 'ddates')\n", (9886, 9905), False, 'from traceml.processors import df_processors\n'), ((1784, 1824), 'numpy.random.choice', 'np.random.choice', (['[0, 1]'], {'size': 'self.size'}), '([0, 1], size=self.size)\n', (1800, 1824), True, 'import numpy as np\n'), ((1849, 1893), 'numpy.random.choice', 'np.random.choice', (["['a', 'b']"], {'size': 'self.size'}), "(['a', 'b'], size=self.size)\n", (1865, 1893), True, 'import numpy as np\n'), ((2569, 2626), 'pandas.date_range', 'pd.date_range', 
(['"""2010-01-01"""'], {'periods': 'self.size', 'freq': '"""1M"""'}), "('2010-01-01', periods=self.size, freq='1M')\n", (2582, 2626), True, 'import pandas as pd\n'), ((11678, 11700), 'numpy.count_nonzero', 'np.count_nonzero', (['num1'], {}), '(num1)\n', (11694, 11700), True, 'import numpy as np\n'), ((11745, 11767), 'numpy.count_nonzero', 'np.count_nonzero', (['num1'], {}), '(num1)\n', (11761, 11767), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements an interface to enumlib, <NAME>'s excellent Fortran
code for enumerating derivative structures.
This module depends on a compiled enumlib with the executables enum.x and
makestr.x available in the path. Please download the library at
https://github.com/msg-byu/enumlib and follow the instructions in the README to
compile these two executables accordingly.
If you use this module, please cite the following:
<NAME> and <NAME>, "Algorithm for generating derivative
structures," Phys. Rev. B 77 224115 (26 June 2008)
<NAME> and <NAME>, "Generating derivative structures from
multilattices: Application to hcp alloys," Phys. Rev. B 80 014120 (July 2009)
<NAME>, <NAME>, and <NAME>, "Generating
derivative structures at a fixed concentration," Comp. Mat. Sci. 59
101-107 (March 2012)
<NAME>, <NAME>, <NAME>, "Generating derivative
superstructures for systems with high configurational freedom," Comp. Mat.
Sci. 136 144-149 (May 2017)
"""
import fractions
import glob
import itertools
import logging
import math
import re
import subprocess
from threading import Timer
import numpy as np
from monty.dev import requires
from monty.fractions import lcm
from monty.os.path import which
from monty.tempfile import ScratchDir
from pymatgen.core.periodic_table import DummySpecies
from pymatgen.core.sites import PeriodicSite
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Poscar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
logger = logging.getLogger(__name__)
# Favor the use of the newer "enum.x" by <NAME> instead of the older
# "multienum.x"
# NOTE: which() returns None when the executable is not on PATH; the
# @requires decorator on EnumlibAdaptor below turns that into a clear error.
enum_cmd = which("enum.x") or which("multienum.x")
# prefer makestr.x at present
makestr_cmd = which("makestr.x") or which("makeStr.x") or which("makeStr.py")
@requires(
    enum_cmd and makestr_cmd,
    "EnumlibAdaptor requires the executables 'enum.x' or 'multienum.x' "
    "and 'makestr.x' or 'makeStr.py' to be in the path. Please download the "
    "library at https://github.com/msg-byu/enumlib and follow the instructions "
    "in the README to compile these two executables accordingly.",
)
class EnumlibAdaptor:
    """
    An adaptor for enumlib.
    .. attribute:: structures
        List of all enumerated structures. Only set after run() succeeds.
    """
    # Tolerance below which total site occupancies are considered to sum to 1.
    amount_tol = 1e-5
    def __init__(
        self,
        structure,
        min_cell_size=1,
        max_cell_size=1,
        symm_prec=0.1,
        enum_precision_parameter=0.001,
        refine_structure=False,
        check_ordered_symmetry=True,
        timeout=None,
    ):
        """
        Initializes the adapter with a structure and some parameters.
        Args:
            structure: An input structure.
            min_cell_size (int): The minimum cell size wanted. Defaults to 1.
            max_cell_size (int): The maximum cell size wanted. Defaults to 1.
            symm_prec (float): Symmetry precision. Defaults to 0.1.
            enum_precision_parameter (float): Finite precision parameter for
                enumlib. Default of 0.001 is usually ok, but you might need to
                tweak it for certain cells.
            refine_structure (bool): If you are starting from a structure that
                has been relaxed via some electronic structure code,
                it is usually much better to start with symmetry determination
                and then obtain a refined structure. The refined structure have
                cell parameters and atomic positions shifted to the expected
                symmetry positions, which makes it much less sensitive precision
                issues in enumlib. If you are already starting from an
                experimental cif, refinement should have already been done and
                it is not necessary. Defaults to False.
            check_ordered_symmetry (bool): Whether to check the symmetry of
                the ordered sites. If the symmetry of the ordered sites is
                lower, the lowest symmetry ordered sites is included in the
                enumeration. This is important if the ordered sites break
                symmetry in a way that is important getting possible
                structures. But sometimes including ordered sites
                slows down enumeration to the point that it cannot be
                completed. Switch to False in those cases. Defaults to True.
            timeout (float): If specified, will kill enumlib after specified
                time in minutes. This can be useful for gracefully handling
                enumerations in a high-throughput context, for some enumerations
                which will not terminate in a realistic length of time.
        """
        if refine_structure:
            # Snap the cell to ideal symmetry positions before enumeration.
            finder = SpacegroupAnalyzer(structure, symm_prec)
            self.structure = finder.get_refined_structure()
        else:
            self.structure = structure
        self.min_cell_size = min_cell_size
        self.max_cell_size = max_cell_size
        self.symm_prec = symm_prec
        self.enum_precision_parameter = enum_precision_parameter
        self.check_ordered_symmetry = check_ordered_symmetry
        self.timeout = timeout
    def run(self):
        """
        Run the enumeration.
        Populates self.structures on success; raises EnumError if enumlib
        produced no structures.
        """
        # Create a temporary directory for working.
        with ScratchDir(".") as d:
            logger.debug("Temp dir : {}".format(d))
            # Generate input files
            self._gen_input_file()
            # Perform the actual enumeration
            num_structs = self._run_multienum()
            # Read in the enumeration output as structures.
            if num_structs > 0:
                self.structures = self._get_structures(num_structs)
            else:
                raise EnumError("Unable to enumerate structure.")
    def _gen_input_file(self):
        """
        Generate the necessary struct_enum.in file for enumlib. See enumlib
        documentation for details.
        """
        coord_format = "{:.6f} {:.6f} {:.6f}"
        # Using symmetry finder, get the symmetrically distinct sites.
        fitter = SpacegroupAnalyzer(self.structure, self.symm_prec)
        symmetrized_structure = fitter.get_symmetrized_structure()
        logger.debug(
            "Spacegroup {} ({}) with {} distinct sites".format(
                fitter.get_space_group_symbol(),
                fitter.get_space_group_number(),
                len(symmetrized_structure.equivalent_sites),
            )
        )
        """
        Enumlib doesn"t work when the number of species get too large. To
        simplify matters, we generate the input file only with disordered sites
        and exclude the ordered sites from the enumeration. The fact that
        different disordered sites with the exact same species may belong to
        different equivalent sites is dealt with by having determined the
        spacegroup earlier and labelling the species differently.
        """
        # index_species and index_amounts store mappings between the indices
        # used in the enum input file, and the actual species and amounts.
        index_species = []
        index_amounts = []
        # Stores the ordered sites, which are not enumerated.
        ordered_sites = []
        disordered_sites = []
        coord_str = []
        for sites in symmetrized_structure.equivalent_sites:
            if sites[0].is_ordered:
                ordered_sites.append(sites)
            else:
                sp_label = []
                species = dict(sites[0].species.items())
                if sum(species.values()) < 1 - EnumlibAdaptor.amount_tol:
                    # First add a dummy element "X" for every
                    # site whose total occupancies don't sum to 1, so the
                    # vacancy fraction is enumerated explicitly.
                    species[DummySpecies("X")] = 1 - sum(species.values())
                for sp, amt in species.items():
                    if sp not in index_species:
                        index_species.append(sp)
                        sp_label.append(len(index_species) - 1)
                        index_amounts.append(amt * len(sites))
                    else:
                        ind = index_species.index(sp)
                        sp_label.append(ind)
                        index_amounts[ind] += amt * len(sites)
                # enumlib expects labels like "0/1" for mixed-occupancy sites.
                sp_label = "/".join(["{}".format(i) for i in sorted(sp_label)])
                for site in sites:
                    coord_str.append("{} {}".format(coord_format.format(*site.coords), sp_label))
                disordered_sites.append(sites)
        def get_sg_info(ss):
            # Spacegroup number of the structure formed by the given sites.
            finder = SpacegroupAnalyzer(Structure.from_sites(ss), self.symm_prec)
            return finder.get_space_group_number()
        target_sgnum = get_sg_info(symmetrized_structure.sites)
        curr_sites = list(itertools.chain.from_iterable(disordered_sites))
        sgnum = get_sg_info(curr_sites)
        ordered_sites = sorted(ordered_sites, key=lambda sites: len(sites))
        logger.debug("Disordered sites has sg # %d" % (sgnum))
        self.ordered_sites = []
        # progressively add ordered sites to our disordered sites
        # until we match the symmetry of our input structure
        if self.check_ordered_symmetry:
            while sgnum != target_sgnum and len(ordered_sites) > 0:
                sites = ordered_sites.pop(0)
                temp_sites = list(curr_sites) + sites
                new_sgnum = get_sg_info(temp_sites)
                if sgnum != new_sgnum:
                    # Adding this ordered group changes the symmetry, so it
                    # must be included in the enumeration itself.
                    logger.debug("Adding %s in enum. New sg # %d" % (sites[0].specie, new_sgnum))
                    index_species.append(sites[0].specie)
                    index_amounts.append(len(sites))
                    sp_label = len(index_species) - 1
                    for site in sites:
                        coord_str.append("{} {}".format(coord_format.format(*site.coords), sp_label))
                    disordered_sites.append(sites)
                    curr_sites = temp_sites
                    sgnum = new_sgnum
                else:
                    self.ordered_sites.extend(sites)
        # Any remaining ordered sites are excluded from enumeration and
        # re-attached to the output structures in _get_structures().
        for sites in ordered_sites:
            self.ordered_sites.extend(sites)
        self.index_species = index_species
        lattice = self.structure.lattice
        # Assemble struct_enum.in line by line (enumlib's fixed format).
        output = [self.structure.formula, "bulk"]
        for vec in lattice.matrix:
            output.append(coord_format.format(*vec))
        output.append("%d" % len(index_species))
        output.append("%d" % len(coord_str))
        output.extend(coord_str)
        output.append("{} {}".format(self.min_cell_size, self.max_cell_size))
        output.append(str(self.enum_precision_parameter))
        output.append("full")
        # Base is the smallest integer denominator that expresses every
        # fractional occupancy exactly over all allowed cell sizes.
        ndisordered = sum([len(s) for s in disordered_sites])
        base = int(
            ndisordered
            * lcm(
                *[
                    f.limit_denominator(ndisordered * self.max_cell_size).denominator
                    for f in map(fractions.Fraction, index_amounts)
                ]
            )
        )
        # This multiplicative factor of 10 is to prevent having too small bases
        # which can lead to rounding issues in the next step.
        # An old bug was that a base was set to 8, with a conc of 0.4:0.6. That
        # resulted in a range that overlaps and a conc of 0.5 satisfying this
        # enumeration. See Cu7Te5.cif test file.
        base *= 10
        # base = ndisordered #10 ** int(math.ceil(math.log10(ndisordered)))
        # To get a reasonable number of structures, we fix concentrations to the
        # range expected in the original structure.
        total_amounts = sum(index_amounts)
        for amt in index_amounts:
            conc = amt / total_amounts
            if abs(conc * base - round(conc * base)) < 1e-5:
                output.append("{} {} {}".format(int(round(conc * base)), int(round(conc * base)), base))
            else:
                # Non-representable concentration: allow a +/-1 window around
                # the nearest integer numerator.
                min_conc = int(math.floor(conc * base))
                output.append("{} {} {}".format(min_conc - 1, min_conc + 1, base))
        output.append("")
        logger.debug("Generated input file:\n{}".format("\n".join(output)))
        with open("struct_enum.in", "w") as f:
            f.write("\n".join(output))
    def _run_multienum(self) -> int:
        """
        Run the enum.x/multienum.x executable in the current directory and
        return the number of enumerated structures reported in its output.
        Raises:
            TimeoutError: if self.timeout (minutes) was set and exceeded.
        """
        with subprocess.Popen([enum_cmd], stdout=subprocess.PIPE, stdin=subprocess.PIPE, close_fds=True) as p:
            if self.timeout:
                timed_out = False
                # Kill the subprocess if it exceeds the timeout (in minutes).
                timer = Timer(self.timeout * 60, lambda p: p.kill(), [p])
                try:
                    timer.start()
                    output = p.communicate()[0].decode("utf-8")
                finally:
                    # If the timer already fired (is no longer alive), the
                    # process was killed and we report a timeout.
                    if not timer.is_alive():
                        timed_out = True
                    timer.cancel()
                if timed_out:
                    raise TimeoutError("Enumeration took too long.")
            else:
                output = p.communicate()[0].decode("utf-8")
        count = 0
        start_count = False
        # The structure count is the last column of the last tabular row
        # following the "RunTot" header line in enumlib's output.
        for line in output.strip().split("\n"):
            if line.strip().endswith("RunTot"):
                start_count = True
            elif start_count and re.match(r"\d+\s+.*", line.strip()):
                count = int(line.split()[-1])
        logger.debug("Enumeration resulted in {} structures".format(count))
        return count
    def _get_structures(self, num_structs):
        """
        Convert enumlib's output into pymatgen Structures.
        Runs makestr to write "vasp.*" POSCAR-like files, parses each one,
        re-attaches the ordered sites that were excluded from enumeration,
        and drops dummy "X" vacancy sites.
        Args:
            num_structs (int): Number of structures reported by enumlib.
        Returns:
            list[Structure]: The enumerated structures.
        """
        structs = []
        # makeStr.py and makestr.x use different CLI conventions/indexing.
        if ".py" in makestr_cmd:
            options = ["-input", "struct_enum.out", str(1), str(num_structs)]
        else:
            options = ["struct_enum.out", str(0), str(num_structs - 1)]
        with subprocess.Popen(
            [makestr_cmd] + options,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            close_fds=True,
        ) as rs:
            stdout, stderr = rs.communicate()
        if stderr:
            logger.warning(stderr.decode())
        # sites retrieved from enumlib will lack site properties
        # to ensure consistency, we keep track of what site properties
        # are missing and set them to None
        # TODO: improve this by mapping ordered structure to original
        # disorded structure, and retrieving correct site properties
        disordered_site_properties = {}
        if len(self.ordered_sites) > 0:
            original_latt = self.ordered_sites[0].lattice
            # Need to strip sites of site_properties, which would otherwise
            # result in an index error. Hence Structure is reconstructed in
            # the next step.
            site_properties = {}
            for site in self.ordered_sites:
                for k, v in site.properties.items():
                    disordered_site_properties[k] = None
                    if k in site_properties:
                        site_properties[k].append(v)
                    else:
                        site_properties[k] = [v]
            ordered_structure = Structure(
                original_latt,
                [site.species for site in self.ordered_sites],
                [site.frac_coords for site in self.ordered_sites],
                site_properties=site_properties,
            )
            inv_org_latt = np.linalg.inv(original_latt.matrix)
        for file in glob.glob("vasp.*"):
            with open(file) as f:
                data = f.read()
            # Normalize makestr output so Poscar can parse it.
            data = re.sub(r"scale factor", "1", data)
            data = re.sub(r"(\d+)-(\d+)", r"\1 -\2", data)
            poscar = Poscar.from_string(data, self.index_species)
            sub_structure = poscar.structure
            # Enumeration may have resulted in a super lattice. We need to
            # find the mapping from the new lattice to the old lattice, and
            # perform supercell construction if necessary.
            new_latt = sub_structure.lattice
            sites = []
            if len(self.ordered_sites) > 0:
                transformation = np.dot(new_latt.matrix, inv_org_latt)
                transformation = [[int(round(cell)) for cell in row] for row in transformation]
                logger.debug("Supercell matrix: {}".format(transformation))
                s = ordered_structure * transformation
                sites.extend([site.to_unit_cell() for site in s])
                super_latt = sites[-1].lattice
            else:
                super_latt = new_latt
            for site in sub_structure:
                if site.specie.symbol != "X":  # We exclude vacancies.
                    sites.append(
                        PeriodicSite(
                            site.species,
                            site.frac_coords,
                            super_latt,
                            to_unit_cell=True,
                            properties=disordered_site_properties,
                        )
                    )
                else:
                    logger.debug("Skipping sites that include species X.")
            structs.append(Structure.from_sites(sorted(sites)))
        logger.debug("Read in a total of {} structures.".format(num_structs))
        return structs
class EnumError(BaseException):
    """Error raised when enumlib fails to enumerate any structure."""
| [
"subprocess.Popen",
"monty.dev.requires",
"pymatgen.core.periodic_table.DummySpecies",
"pymatgen.io.vasp.inputs.Poscar.from_string",
"monty.tempfile.ScratchDir",
"pymatgen.core.sites.PeriodicSite",
"math.floor",
"pymatgen.core.structure.Structure",
"re.sub",
"monty.os.path.which",
"numpy.linalg.... | [((1617, 1644), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1634, 1644), False, 'import logging\n'), ((1893, 2210), 'monty.dev.requires', 'requires', (['(enum_cmd and makestr_cmd)', '"""EnumlibAdaptor requires the executables \'enum.x\' or \'multienum.x\' and \'makestr.x\' or \'makeStr.py\' to be in the path. Please download the library at https://github.com/msg-byu/enumlib and follow the instructions in the README to compile these two executables accordingly."""'], {}), '(enum_cmd and makestr_cmd,\n "EnumlibAdaptor requires the executables \'enum.x\' or \'multienum.x\' and \'makestr.x\' or \'makeStr.py\' to be in the path. Please download the library at https://github.com/msg-byu/enumlib and follow the instructions in the README to compile these two executables accordingly."\n )\n', (1901, 2210), False, 'from monty.dev import requires\n'), ((1742, 1757), 'monty.os.path.which', 'which', (['"""enum.x"""'], {}), "('enum.x')\n", (1747, 1757), False, 'from monty.os.path import which\n'), ((1761, 1781), 'monty.os.path.which', 'which', (['"""multienum.x"""'], {}), "('multienum.x')\n", (1766, 1781), False, 'from monty.os.path import which\n'), ((1826, 1844), 'monty.os.path.which', 'which', (['"""makestr.x"""'], {}), "('makestr.x')\n", (1831, 1844), False, 'from monty.os.path import which\n'), ((1848, 1866), 'monty.os.path.which', 'which', (['"""makeStr.x"""'], {}), "('makeStr.x')\n", (1853, 1866), False, 'from monty.os.path import which\n'), ((1870, 1889), 'monty.os.path.which', 'which', (['"""makeStr.py"""'], {}), "('makeStr.py')\n", (1875, 1889), False, 'from monty.os.path import which\n'), ((6199, 6249), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['self.structure', 'self.symm_prec'], {}), '(self.structure, self.symm_prec)\n', (6217, 6249), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((15392, 15411), 'glob.glob', 'glob.glob', (['"""vasp.*"""'], {}), 
"('vasp.*')\n", (15401, 15411), False, 'import glob\n'), ((4847, 4887), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure', 'symm_prec'], {}), '(structure, symm_prec)\n', (4865, 4887), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((5417, 5432), 'monty.tempfile.ScratchDir', 'ScratchDir', (['"""."""'], {}), "('.')\n", (5427, 5432), False, 'from monty.tempfile import ScratchDir\n'), ((8919, 8966), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['disordered_sites'], {}), '(disordered_sites)\n', (8948, 8966), False, 'import itertools\n'), ((12411, 12506), 'subprocess.Popen', 'subprocess.Popen', (['[enum_cmd]'], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '([enum_cmd], stdout=subprocess.PIPE, stdin=subprocess.PIPE,\n close_fds=True)\n', (12427, 12506), False, 'import subprocess\n'), ((13762, 13871), 'subprocess.Popen', 'subprocess.Popen', (['([makestr_cmd] + options)'], {'stdout': 'subprocess.PIPE', 'stdin': 'subprocess.PIPE', 'close_fds': '(True)'}), '([makestr_cmd] + options, stdout=subprocess.PIPE, stdin=\n subprocess.PIPE, close_fds=True)\n', (13778, 13871), False, 'import subprocess\n'), ((15073, 15238), 'pymatgen.core.structure.Structure', 'Structure', (['original_latt', '[site.species for site in self.ordered_sites]', '[site.frac_coords for site in self.ordered_sites]'], {'site_properties': 'site_properties'}), '(original_latt, [site.species for site in self.ordered_sites], [\n site.frac_coords for site in self.ordered_sites], site_properties=\n site_properties)\n', (15082, 15238), False, 'from pymatgen.core.structure import Structure\n'), ((15335, 15370), 'numpy.linalg.inv', 'np.linalg.inv', (['original_latt.matrix'], {}), '(original_latt.matrix)\n', (15348, 15370), True, 'import numpy as np\n'), ((8735, 8759), 'pymatgen.core.structure.Structure.from_sites', 'Structure.from_sites', (['ss'], {}), '(ss)\n', (8755, 8759), False, 'from 
pymatgen.core.structure import Structure\n'), ((15502, 15535), 're.sub', 're.sub', (['"""scale factor"""', '"""1"""', 'data'], {}), "('scale factor', '1', data)\n", (15508, 15535), False, 'import re\n'), ((15560, 15601), 're.sub', 're.sub', (['"""(\\\\d+)-(\\\\d+)"""', '"""\\\\1 -\\\\2"""', 'data'], {}), "('(\\\\d+)-(\\\\d+)', '\\\\1 -\\\\2', data)\n", (15566, 15601), False, 'import re\n'), ((15625, 15669), 'pymatgen.io.vasp.inputs.Poscar.from_string', 'Poscar.from_string', (['data', 'self.index_species'], {}), '(data, self.index_species)\n', (15643, 15669), False, 'from pymatgen.io.vasp.inputs import Poscar\n'), ((12070, 12093), 'math.floor', 'math.floor', (['(conc * base)'], {}), '(conc * base)\n', (12080, 12093), False, 'import math\n'), ((16104, 16141), 'numpy.dot', 'np.dot', (['new_latt.matrix', 'inv_org_latt'], {}), '(new_latt.matrix, inv_org_latt)\n', (16110, 16141), True, 'import numpy as np\n'), ((7898, 7915), 'pymatgen.core.periodic_table.DummySpecies', 'DummySpecies', (['"""X"""'], {}), "('X')\n", (7910, 7915), False, 'from pymatgen.core.periodic_table import DummySpecies\n'), ((16751, 16869), 'pymatgen.core.sites.PeriodicSite', 'PeriodicSite', (['site.species', 'site.frac_coords', 'super_latt'], {'to_unit_cell': '(True)', 'properties': 'disordered_site_properties'}), '(site.species, site.frac_coords, super_latt, to_unit_cell=True,\n properties=disordered_site_properties)\n', (16763, 16869), False, 'from pymatgen.core.sites import PeriodicSite\n')] |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from pymor.algorithms.basic import almost_equal
from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth
from pymortests.fixtures.operator import operator_with_arrays_and_products
from pymortests.fixtures.vectorarray import vector_array, vector_array_without_reserve
def test_gram_schmidt(vector_array):
    """gram_schmidt yields an orthonormal basis spanning the input array."""
    U = vector_array
    backup = U.copy()
    # copy=True must leave U untouched
    basis = gram_schmidt(U, copy=True)
    assert np.all(almost_equal(U, backup))
    # orthonormality of the result
    assert np.allclose(basis.dot(basis), np.eye(len(basis)))
    # U lies in the span of the basis
    assert np.all(almost_equal(U, basis.lincomb(basis.dot(U).T), rtol=1e-13))
    # copy=False orthonormalizes U in place, yielding the same basis
    in_place = gram_schmidt(U, copy=False)
    assert np.all(almost_equal(basis, in_place))
    assert np.all(almost_equal(basis, U))
def test_gram_schmidt_with_R(vector_array):
    """gram_schmidt with return_R=True also returns the triangular factor R."""
    U = vector_array
    backup = U.copy()
    basis, R = gram_schmidt(U, return_R=True, copy=True)
    # copy=True must leave U untouched
    assert np.all(almost_equal(U, backup))
    # orthonormality and span of the basis
    assert np.allclose(basis.dot(basis), np.eye(len(basis)))
    assert np.all(almost_equal(U, basis.lincomb(basis.dot(U).T), rtol=1e-13))
    # R reconstructs the original vectors from the basis
    assert np.all(almost_equal(backup, basis.lincomb(R.T)))
    # copy=False gives identical basis and R while modifying U in place
    basis2, R2 = gram_schmidt(U, return_R=True, copy=False)
    assert np.all(almost_equal(basis, basis2))
    assert np.all(R == R2)
    assert np.all(almost_equal(basis, U))
def test_gram_schmidt_with_product(operator_with_arrays_and_products):
    """gram_schmidt with an inner-product operator yields a p-orthonormal basis."""
    _, _, U, _, p, _ = operator_with_arrays_and_products
    V = U.copy()
    onb = gram_schmidt(U, product=p, copy=True)
    # copy=True must not modify U
    assert np.all(almost_equal(U, V))
    # orthonormality w.r.t. the product p
    assert np.allclose(p.apply2(onb, onb), np.eye(len(onb)))
    assert np.all(almost_equal(U, onb.lincomb(p.apply2(onb, U).T), rtol=1e-13))
    # copy=False orthonormalizes U in place with the same result
    onb2 = gram_schmidt(U, product=p, copy=False)
    assert np.all(almost_equal(onb, onb2))
    assert np.all(almost_equal(onb, U))
def test_gram_schmidt_with_product_and_R(operator_with_arrays_and_products):
    """gram_schmidt with a product and return_R=True returns basis and factor R."""
    _, _, U, _, p, _ = operator_with_arrays_and_products
    V = U.copy()
    onb, R = gram_schmidt(U, product=p, return_R=True, copy=True)
    # copy=True must not modify U
    assert np.all(almost_equal(U, V))
    # orthonormality w.r.t. the product p
    assert np.allclose(p.apply2(onb, onb), np.eye(len(onb)))
    assert np.all(almost_equal(U, onb.lincomb(p.apply2(onb, U).T), rtol=1e-13))
    # R reconstructs the original vectors from the basis
    assert np.all(almost_equal(U, onb.lincomb(R.T)))
    # copy=False gives identical basis and R while modifying U in place
    onb2, R2 = gram_schmidt(U, product=p, return_R=True, copy=False)
    assert np.all(almost_equal(onb, onb2))
    assert np.all(R == R2)
    assert np.all(almost_equal(onb, U))
def test_gram_schmidt_biorth(vector_array):
    """gram_schmidt_biorth produces biorthonormal bases A1, A2 with A2^T A1 = I."""
    U = vector_array
    if U.dim < 2:
        return
    # split off two equally sized halves, bounded by the space dimension
    half = min(len(U) // 2, U.dim - 1)
    if half < 1:
        return
    U1 = U[:half].copy()
    U2 = U[half:2 * half].copy()
    V1 = U1.copy()
    V2 = U2.copy()
    A1, A2 = gram_schmidt_biorth(U1, U2, copy=True)
    # copy=True must leave the inputs untouched
    assert np.all(almost_equal(U1, V1))
    assert np.all(almost_equal(U2, V2))
    # biorthonormality
    assert np.allclose(A2.dot(A1), np.eye(len(A1)))
    # reconstruction tolerance scales with the conditioning of the bases
    cond = np.linalg.cond(A1.to_numpy()) * np.linalg.cond(A2.to_numpy())
    assert np.all(almost_equal(U1, A1.lincomb(A2.dot(U1).T), rtol=cond * 1e-14))
    assert np.all(almost_equal(U2, A2.lincomb(A1.dot(U2).T), rtol=cond * 1e-14))
    # copy=False modifies the inputs in place with identical results
    B1, B2 = gram_schmidt_biorth(U1, U2, copy=False)
    assert np.all(almost_equal(A1, B1))
    assert np.all(almost_equal(A2, B2))
    assert np.all(almost_equal(A1, U1))
    assert np.all(almost_equal(A2, U2))
def test_gram_schmidt_biorth_with_product(operator_with_arrays_and_products):
    """gram_schmidt_biorth with a product yields bases with p.apply2(A2, A1) = I."""
    _, _, U, _, p, _ = operator_with_arrays_and_products
    if U.dim < 2:
        return
    # split off two equally sized halves, bounded by the space dimension
    l = len(U) // 2
    l = min((l, U.dim - 1))
    if l < 1:
        return
    U1 = U[:l].copy()
    U2 = U[l:2 * l].copy()
    V1 = U1.copy()
    V2 = U2.copy()
    A1, A2 = gram_schmidt_biorth(U1, U2, product=p, copy=True)
    # copy=True must leave the inputs untouched
    assert np.all(almost_equal(U1, V1))
    assert np.all(almost_equal(U2, V2))
    # biorthonormality w.r.t. the product p
    assert np.allclose(p.apply2(A2, A1), np.eye(len(A1)))
    # reconstruction tolerance scales with the conditioning of the bases
    c = np.linalg.cond(A1.to_numpy()) * np.linalg.cond(p.apply(A2).to_numpy())
    assert np.all(almost_equal(U1, A1.lincomb(p.apply2(A2, U1).T), rtol=c * 1e-14))
    assert np.all(almost_equal(U2, A2.lincomb(p.apply2(A1, U2).T), rtol=c * 1e-14))
    # copy=False modifies the inputs in place with identical results
    B1, B2 = gram_schmidt_biorth(U1, U2, product=p, copy=False)
    assert np.all(almost_equal(A1, B1))
    assert np.all(almost_equal(A2, B2))
    assert np.all(almost_equal(A1, U1))
    assert np.all(almost_equal(A2, U2))
| [
"pymor.algorithms.gram_schmidt.gram_schmidt_biorth",
"pymor.algorithms.basic.almost_equal",
"pymor.algorithms.gram_schmidt.gram_schmidt",
"numpy.all"
] | [((616, 642), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'copy': '(True)'}), '(U, copy=True)\n', (628, 642), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((822, 849), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'copy': '(False)'}), '(U, copy=False)\n', (834, 849), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((1031, 1072), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'return_R': '(True)', 'copy': '(True)'}), '(U, return_R=True, copy=True)\n', (1043, 1072), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((1309, 1351), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'return_R': '(True)', 'copy': '(False)'}), '(U, return_R=True, copy=False)\n', (1321, 1351), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((1406, 1421), 'numpy.all', 'np.all', (['(R == R2)'], {}), '(R == R2)\n', (1412, 1421), True, 'import numpy as np\n'), ((1620, 1657), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'product': 'p', 'copy': '(True)'}), '(U, product=p, copy=True)\n', (1632, 1657), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((1849, 1887), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'product': 'p', 'copy': '(False)'}), '(U, product=p, copy=False)\n', (1861, 1887), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((2138, 2190), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', (['U'], {'product': 'p', 'return_R': '(True)', 'copy': '(True)'}), '(U, product=p, return_R=True, copy=True)\n', (2150, 2190), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((2439, 2492), 'pymor.algorithms.gram_schmidt.gram_schmidt', 'gram_schmidt', 
(['U'], {'product': 'p', 'return_R': '(True)', 'copy': '(False)'}), '(U, product=p, return_R=True, copy=False)\n', (2451, 2492), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((2547, 2562), 'numpy.all', 'np.all', (['(R == R2)'], {}), '(R == R2)\n', (2553, 2562), True, 'import numpy as np\n'), ((2881, 2919), 'pymor.algorithms.gram_schmidt.gram_schmidt_biorth', 'gram_schmidt_biorth', (['U1', 'U2'], {'copy': '(True)'}), '(U1, U2, copy=True)\n', (2900, 2919), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((3292, 3331), 'pymor.algorithms.gram_schmidt.gram_schmidt_biorth', 'gram_schmidt_biorth', (['U1', 'U2'], {'copy': '(False)'}), '(U1, U2, copy=False)\n', (3311, 3331), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((3840, 3889), 'pymor.algorithms.gram_schmidt.gram_schmidt_biorth', 'gram_schmidt_biorth', (['U1', 'U2'], {'product': 'p', 'copy': '(True)'}), '(U1, U2, product=p, copy=True)\n', (3859, 3889), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((4289, 4339), 'pymor.algorithms.gram_schmidt.gram_schmidt_biorth', 'gram_schmidt_biorth', (['U1', 'U2'], {'product': 'p', 'copy': '(False)'}), '(U1, U2, product=p, copy=False)\n', (4308, 4339), False, 'from pymor.algorithms.gram_schmidt import gram_schmidt, gram_schmidt_biorth\n'), ((661, 679), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U', 'V'], {}), '(U, V)\n', (673, 679), False, 'from pymor.algorithms.basic import almost_equal\n'), ((868, 891), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'onb2'], {}), '(onb, onb2)\n', (880, 891), False, 'from pymor.algorithms.basic import almost_equal\n'), ((911, 931), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'U'], {}), '(onb, U)\n', (923, 931), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1091, 1109), 'pymor.algorithms.basic.almost_equal', 
'almost_equal', (['U', 'V'], {}), '(U, V)\n', (1103, 1109), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1370, 1393), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'onb2'], {}), '(onb, onb2)\n', (1382, 1393), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1440, 1460), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'U'], {}), '(onb, U)\n', (1452, 1460), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1676, 1694), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U', 'V'], {}), '(U, V)\n', (1688, 1694), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1906, 1929), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'onb2'], {}), '(onb, onb2)\n', (1918, 1929), False, 'from pymor.algorithms.basic import almost_equal\n'), ((1949, 1969), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'U'], {}), '(onb, U)\n', (1961, 1969), False, 'from pymor.algorithms.basic import almost_equal\n'), ((2209, 2227), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U', 'V'], {}), '(U, V)\n', (2221, 2227), False, 'from pymor.algorithms.basic import almost_equal\n'), ((2511, 2534), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'onb2'], {}), '(onb, onb2)\n', (2523, 2534), False, 'from pymor.algorithms.basic import almost_equal\n'), ((2581, 2601), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['onb', 'U'], {}), '(onb, U)\n', (2593, 2601), False, 'from pymor.algorithms.basic import almost_equal\n'), ((2938, 2958), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U1', 'V1'], {}), '(U1, V1)\n', (2950, 2958), False, 'from pymor.algorithms.basic import almost_equal\n'), ((2978, 2998), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U2', 'V2'], {}), '(U2, V2)\n', (2990, 2998), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3350, 3370), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A1', 
'B1'], {}), '(A1, B1)\n', (3362, 3370), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3390, 3410), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A2', 'B2'], {}), '(A2, B2)\n', (3402, 3410), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3430, 3450), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A1', 'U1'], {}), '(A1, U1)\n', (3442, 3450), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3470, 3490), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A2', 'U2'], {}), '(A2, U2)\n', (3482, 3490), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3908, 3928), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U1', 'V1'], {}), '(U1, V1)\n', (3920, 3928), False, 'from pymor.algorithms.basic import almost_equal\n'), ((3948, 3968), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['U2', 'V2'], {}), '(U2, V2)\n', (3960, 3968), False, 'from pymor.algorithms.basic import almost_equal\n'), ((4358, 4378), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A1', 'B1'], {}), '(A1, B1)\n', (4370, 4378), False, 'from pymor.algorithms.basic import almost_equal\n'), ((4398, 4418), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A2', 'B2'], {}), '(A2, B2)\n', (4410, 4418), False, 'from pymor.algorithms.basic import almost_equal\n'), ((4438, 4458), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A1', 'U1'], {}), '(A1, U1)\n', (4450, 4458), False, 'from pymor.algorithms.basic import almost_equal\n'), ((4478, 4498), 'pymor.algorithms.basic.almost_equal', 'almost_equal', (['A2', 'U2'], {}), '(A2, U2)\n', (4490, 4498), False, 'from pymor.algorithms.basic import almost_equal\n')] |
# Render 1000 uniformly random points from the unit cube as a 3D scatter plot.
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 - registers the '3d' projection
import matplotlib.pyplot as plt
import numpy as np
num_points = 1000
figure = plt.figure()
axes = figure.add_subplot(111, projection='3d')
# One independent uniform[0, 1) sample per axis.
xs = np.random.rand(num_points)
ys = np.random.rand(num_points)
zs = np.random.rand(num_points)
axes.scatter(xs, ys, zs, color="black", marker="*")
axes.set_xlabel('X Label')
axes.set_ylabel('Y Label')
axes.set_zlabel('Z Label')
plt.show()
| [
"numpy.random.rand",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((99, 111), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (109, 111), True, 'import matplotlib.pyplot as plt\n'), ((170, 187), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (184, 187), True, 'import numpy as np\n'), ((192, 209), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (206, 209), True, 'import numpy as np\n'), ((214, 231), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (228, 231), True, 'import numpy as np\n'), ((356, 366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (364, 366), True, 'import matplotlib.pyplot as plt\n')] |
'''
This module will attempt to predict model parameters by using a trained model.
'''
import tensorflow as tf
import os
import h5py
import numpy as np
import pickle
# Root directory of the compiled fluoro datasets (voxels, images, labels, calibration).
base_dir = os.path.expanduser('~/fluoro/data/compilation')
# Base name shared by the trained model file and its pickled training-history file.
file_base_name = 'vox_fluoro_min_max_1'
# Directory holding the training artifacts for this model.
hist_path = os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), file_base_name)
hist_file_name = file_base_name + '_hist_objects_1.pkl'
load_model_name = file_base_name + '_1.h5'
# Where the pickled prediction results are written.
save_dir = os.path.abspath(os.path.expanduser('~/fluoro/code/jupyt/update_2019-Sep-17/predictions'))
os.makedirs(save_dir, exist_ok=True)
save_file_name = 'model_prediction_' + file_base_name + '.pkl'
# Number of samples drawn (without replacement) from each of the test and train index sets.
predict_numb = 100
# -----------------------------------------------------------------
def inv_min_max(data_set, data_min, data_max, axis=0, feature_range=None):
    """Map min-max-normalized data back to its original scale.

    Parameters
    ----------
    data_set : np.ndarray
        Normalized data (e.g. model output in a bounded activation range).
    data_min, data_max : scalar or np.ndarray
        Original per-feature minima/maxima recorded before normalization.
    axis : int, default 0
        Axis along which extremes are computed when ``feature_range`` is None.
    feature_range : tuple of (low, high), optional
        The range ``min_max_norm`` scaled into. When given, the inversion is
        exact. When None (default, original behavior), the data's own
        per-axis min/max are used instead, which stretches the output to span
        the full ``[data_min, data_max]`` interval even if the normalized
        values did not reach the ends of the range.

    Returns
    -------
    np.ndarray
        Rescaled data, clamped into ``[data_min, data_max]``.
    """
    if feature_range is None:
        # Original behavior: rescale relative to the data's observed extremes.
        # Hoisted so each extreme is computed once, not twice.
        range_low = np.min(data_set, axis=axis)
        range_high = np.max(data_set, axis=axis)
    else:
        range_low, range_high = feature_range
    data_0_1 = (data_set - range_low) / (range_high - range_low)
    inv_data = data_0_1 * (data_max - data_min) + data_min
    # Clamp to the original bounds (equivalent to the previous pair of np.where calls).
    return np.clip(inv_data, data_min, data_max)
def min_max_norm(data_set, feature_range=(-1, 1), axis=0, data_min=None, data_max=None):
if data_min is None:
data_min = np.min(data_set, axis=axis)
else:
data_set = np.where(data_set < data_min, data_min, data_set)
if data_max is None:
data_max = np.max(data_set, axis=axis)
else:
data_set = np.where(data_set > data_max, data_max, data_set)
data_in_std_range = (data_set - data_min) / (data_max - data_min)
data_scaled = data_in_std_range * (feature_range[1] - feature_range[0]) + feature_range[0]
return data_scaled
# -----------------------------------------------------------------
# Load the pickled training history; it carries the train/test index splits
# and the normalization bounds recorded at training time.
hist_file = open(os.path.join(hist_path, hist_file_name), 'rb')
hist_data = pickle.load(hist_file)
hist_file.close()
# Sample `predict_numb` distinct rows from each split to run predictions on.
random_test_values = np.random.choice(hist_data['test_indxs'], size=predict_numb, replace=False)
random_train_values = np.random.choice(hist_data['train_indxs'], size=predict_numb, replace=False)
# Voxel volumes. h5py fancy indexing requires the index list to be sorted.
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py'), 'r')
vox_init = vox_file['vox_dset']
vox_test_mat = vox_init[sorted(random_test_values)]
vox_train_mat = vox_init[sorted(random_train_values)]
vox_file.close()
# Paired fluoroscopy images, already min-max normalized per image on disk.
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images_norm_std.h5py'), 'r')
image_grp_1 = image_file['image_1']
image_grp_2 = image_file['image_2']
image_init_1 = image_grp_1['min_max_dset_per_image']
image_init_2 = image_grp_2['min_max_dset_per_image']
image_test_1 = image_init_1[sorted(random_test_values)]
image_test_2 = image_init_2[sorted(random_test_values)]
image_train_1 = image_init_1[sorted(random_train_values)]
image_train_2 = image_init_2[sorted(random_train_values)]
image_file.close()
# Ground-truth labels (the parameters the model predicts), kept unnormalized.
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
label_test_mat = label_init[sorted(random_test_values)]
label_train_mat = label_init[sorted(random_train_values)]
label_file.close()
# Calibration inputs, plus min-max-scaled copies using the training-set bounds.
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
cali_test_mat = cali_init[sorted(random_test_values)]
cali_test_min_max = min_max_norm(cali_test_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_train_mat = cali_init[sorted(random_train_values)]
cali_train_min_max = min_max_norm(cali_train_mat, data_min=hist_data['cali_train_min'], data_max=hist_data['cali_train_max'])
cali_file.close()
# -----------------------------------------------------------------
model = tf.keras.models.load_model(os.path.join(hist_path, load_model_name))
# NOTE(review): the model is fed the RAW calibration matrices here, while the
# normalized cali_*_min_max variables computed above are never used. Confirm
# whether the model was trained on raw or normalized calibration inputs.
predict_test = model.predict([np.expand_dims(vox_test_mat, axis=-1), image_test_1, image_test_2, cali_test_mat], batch_size=6, verbose=2)
predict_train = model.predict([np.expand_dims(vox_train_mat, axis=-1), image_train_1, image_train_2, cali_train_mat], batch_size=6, verbose=2)
# Persist raw network outputs, outputs mapped back to label scale, and truth.
# NOTE(review): the 'raw_ouput' keys carry a typo ('ouput'); they are runtime
# dictionary keys, so they are preserved as-is for downstream compatibility.
save_file = open(os.path.join(save_dir, save_file_name), 'wb')
output_dict = {}
output_dict['test_raw_ouput'] = predict_test
output_dict['test_predictions'] = inv_min_max(predict_test, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['test_actual'] = label_test_mat
output_dict['train_raw_ouput'] = predict_train
output_dict['train_predictions'] = inv_min_max(predict_train, hist_data['label_train_min'], hist_data['label_train_max'])
output_dict['train_actual'] = label_train_mat
pickle.dump(output_dict, save_file)
save_file.close()
| [
"pickle.dump",
"os.makedirs",
"os.path.join",
"numpy.expand_dims",
"numpy.min",
"pickle.load",
"numpy.where",
"numpy.max",
"numpy.random.choice",
"os.path.expanduser"
] | [((179, 226), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation"""'], {}), "('~/fluoro/data/compilation')\n", (197, 226), False, 'import os\n'), ((565, 601), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (576, 601), False, 'import os\n'), ((1878, 1900), 'pickle.load', 'pickle.load', (['hist_file'], {}), '(hist_file)\n', (1889, 1900), False, 'import pickle\n'), ((1942, 2017), 'numpy.random.choice', 'np.random.choice', (["hist_data['test_indxs']"], {'size': 'predict_numb', 'replace': '(False)'}), "(hist_data['test_indxs'], size=predict_numb, replace=False)\n", (1958, 2017), True, 'import numpy as np\n'), ((2040, 2116), 'numpy.random.choice', 'np.random.choice', (["hist_data['train_indxs']"], {'size': 'predict_numb', 'replace': '(False)'}), "(hist_data['train_indxs'], size=predict_numb, replace=False)\n", (2056, 2116), True, 'import numpy as np\n'), ((4619, 4654), 'pickle.dump', 'pickle.dump', (['output_dict', 'save_file'], {}), '(output_dict, save_file)\n', (4630, 4654), False, 'import pickle\n'), ((294, 346), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/code/jupyt/vox_fluoro"""'], {}), "('~/fluoro/code/jupyt/vox_fluoro')\n", (312, 346), False, 'import os\n'), ((491, 563), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/code/jupyt/update_2019-Sep-17/predictions"""'], {}), "('~/fluoro/code/jupyt/update_2019-Sep-17/predictions')\n", (509, 563), False, 'import os\n'), ((1006, 1055), 'numpy.where', 'np.where', (['(inv_data < data_min)', 'data_min', 'inv_data'], {}), '(inv_data < data_min, data_min, inv_data)\n', (1014, 1055), True, 'import numpy as np\n'), ((1071, 1120), 'numpy.where', 'np.where', (['(inv_data > data_max)', 'data_max', 'inv_data'], {}), '(inv_data > data_max, data_max, inv_data)\n', (1079, 1120), True, 'import numpy as np\n'), ((1819, 1858), 'os.path.join', 'os.path.join', (['hist_path', 'hist_file_name'], {}), '(hist_path, hist_file_name)\n', 
(1831, 1858), False, 'import os\n'), ((2139, 2215), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/voxels_mark_origin_comp.h5py"""'], {}), "('~/fluoro/data/compilation/voxels_mark_origin_comp.h5py')\n", (2157, 2215), False, 'import os\n'), ((2402, 2470), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/images_norm_std.h5py"""'], {}), "('~/fluoro/data/compilation/images_norm_std.h5py')\n", (2420, 2470), False, 'import os\n'), ((2930, 2989), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/labels.h5py"""'], {}), "('~/fluoro/data/compilation/labels.h5py')\n", (2948, 2989), False, 'import os\n'), ((3192, 3256), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/calibration.h5py"""'], {}), "('~/fluoro/data/compilation/calibration.h5py')\n", (3210, 3256), False, 'import os\n'), ((3785, 3825), 'os.path.join', 'os.path.join', (['hist_path', 'load_model_name'], {}), '(hist_path, load_model_name)\n', (3797, 3825), False, 'import os\n'), ((4127, 4165), 'os.path.join', 'os.path.join', (['save_dir', 'save_file_name'], {}), '(save_dir, save_file_name)\n', (4139, 4165), False, 'import os\n'), ((1277, 1304), 'numpy.min', 'np.min', (['data_set'], {'axis': 'axis'}), '(data_set, axis=axis)\n', (1283, 1304), True, 'import numpy as np\n'), ((1334, 1383), 'numpy.where', 'np.where', (['(data_set < data_min)', 'data_min', 'data_set'], {}), '(data_set < data_min, data_min, data_set)\n', (1342, 1383), True, 'import numpy as np\n'), ((1429, 1456), 'numpy.max', 'np.max', (['data_set'], {'axis': 'axis'}), '(data_set, axis=axis)\n', (1435, 1456), True, 'import numpy as np\n'), ((1486, 1535), 'numpy.where', 'np.where', (['(data_set > data_max)', 'data_max', 'data_set'], {}), '(data_set > data_max, data_max, data_set)\n', (1494, 1535), True, 'import numpy as np\n'), ((3858, 3895), 'numpy.expand_dims', 'np.expand_dims', (['vox_test_mat'], {'axis': '(-1)'}), '(vox_test_mat, axis=-1)\n', 
(3872, 3895), True, 'import numpy as np\n'), ((3997, 4035), 'numpy.expand_dims', 'np.expand_dims', (['vox_train_mat'], {'axis': '(-1)'}), '(vox_train_mat, axis=-1)\n', (4011, 4035), True, 'import numpy as np\n'), ((840, 867), 'numpy.min', 'np.min', (['data_set'], {'axis': 'axis'}), '(data_set, axis=axis)\n', (846, 867), True, 'import numpy as np\n'), ((872, 899), 'numpy.max', 'np.max', (['data_set'], {'axis': 'axis'}), '(data_set, axis=axis)\n', (878, 899), True, 'import numpy as np\n'), ((902, 929), 'numpy.min', 'np.min', (['data_set'], {'axis': 'axis'}), '(data_set, axis=axis)\n', (908, 929), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.