code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy.testing as npt
from cvdm.score import hkdr_chd, HkdrCHD
from cvdm.score import hkdr_hf, HkdrHF
def test_hkdr_chd():
    """The positional HKDR CHD risk for the reference patient is ~0.082."""
    risk = hkdr_chd(59, True, False, 5, 105, 2.3, 3.87)
    npt.assert_almost_equal(risk, 0.082, decimal=3)
def test_hkdr_chd_json():
    """The dict-based HkdrCHD scorer must agree with the positional API."""
    patient = {
        "index_age": 59,
        "female": True,
        "cur_smoke": False,
        "diab_dur": 5,
        "egfr": 105,
        "albumin_creat_mgmmol": 2.3,
        "nonhdl_mmol": 3.87,
    }
    risk = HkdrCHD().score(patient)
    npt.assert_almost_equal(risk, 0.082, decimal=3)
def test_hkdr_hf():
    """Spot-check hkdr_hf against three reference patients."""
    cases = [
        ((False, 59, 32, 8, 2.5, 13.8, True), 0.038),
        ((True, 59, 32, 8, 2.5, 13.8, True), 0.064),
        ((False, 59, 24.3, 8, 2.5, 13.8, True), 0.024),
    ]
    for args, expected in cases:
        npt.assert_almost_equal(hkdr_hf(*args), expected, decimal=3)
def test_hkdr_hf_json():
    """The dict-based HkdrHF scorer must agree with the positional API."""
    patient = {
        "index_age": 59,
        "female": False,
        "albumin_creat_mgmmol": 2.5,
        "bmi": 24.3,
        "hba1c": 8,
        "hb": 13.8,
        "chd": True,
    }
    risk = HkdrHF().score(patient)
    npt.assert_almost_equal(risk, 0.024, decimal=3)
| [
"cvdm.score.hkdr_hf",
"cvdm.score.hkdr_chd",
"cvdm.score.HkdrHF",
"numpy.testing.assert_almost_equal",
"cvdm.score.HkdrCHD"
] | [((142, 186), 'cvdm.score.hkdr_chd', 'hkdr_chd', (['(59)', '(True)', '(False)', '(5)', '(105)', '(2.3)', '(3.87)'], {}), '(59, True, False, 5, 105, 2.3, 3.87)\n', (150, 186), False, 'from cvdm.score import hkdr_chd, HkdrCHD\n'), ((191, 237), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.082)'], {'decimal': '(3)'}), '(tmp, 0.082, decimal=3)\n', (214, 237), True, 'import numpy.testing as npt\n'), ((276, 285), 'cvdm.score.HkdrCHD', 'HkdrCHD', ([], {}), '()\n', (283, 285), False, 'from cvdm.score import hkdr_chd, HkdrCHD\n'), ((569, 615), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.082)'], {'decimal': '(3)'}), '(tmp, 0.082, decimal=3)\n', (592, 615), True, 'import numpy.testing as npt\n'), ((648, 690), 'cvdm.score.hkdr_hf', 'hkdr_hf', (['(False)', '(59)', '(32)', '(8)', '(2.5)', '(13.8)', '(True)'], {}), '(False, 59, 32, 8, 2.5, 13.8, True)\n', (655, 690), False, 'from cvdm.score import hkdr_hf, HkdrHF\n'), ((695, 741), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.038)'], {'decimal': '(3)'}), '(tmp, 0.038, decimal=3)\n', (718, 741), True, 'import numpy.testing as npt\n'), ((752, 793), 'cvdm.score.hkdr_hf', 'hkdr_hf', (['(True)', '(59)', '(32)', '(8)', '(2.5)', '(13.8)', '(True)'], {}), '(True, 59, 32, 8, 2.5, 13.8, True)\n', (759, 793), False, 'from cvdm.score import hkdr_hf, HkdrHF\n'), ((798, 844), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.064)'], {'decimal': '(3)'}), '(tmp, 0.064, decimal=3)\n', (821, 844), True, 'import numpy.testing as npt\n'), ((855, 899), 'cvdm.score.hkdr_hf', 'hkdr_hf', (['(False)', '(59)', '(24.3)', '(8)', '(2.5)', '(13.8)', '(True)'], {}), '(False, 59, 24.3, 8, 2.5, 13.8, True)\n', (862, 899), False, 'from cvdm.score import hkdr_hf, HkdrHF\n'), ((904, 950), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.024)'], {'decimal': '(3)'}), '(tmp, 0.024, decimal=3)\n', (927, 950), True, 
'import numpy.testing as npt\n'), ((987, 995), 'cvdm.score.HkdrHF', 'HkdrHF', ([], {}), '()\n', (993, 995), False, 'from cvdm.score import hkdr_hf, HkdrHF\n'), ((1253, 1299), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['tmp', '(0.024)'], {'decimal': '(3)'}), '(tmp, 0.024, decimal=3)\n', (1276, 1299), True, 'import numpy.testing as npt\n')] |
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from numpy.linalg import norm
import matplotlib.pyplot as plt
from sklearn import preprocessing
import seaborn as sns; sns.set_theme()
seed = 0
np.random.seed(seed)
"""We test without normalization"""
def normalize(data, shift='z-score'):
    """Center and rescale *data* (rows = samples, columns = features).

    Args:
        data (np.ndarray): 2-D array of samples.
        shift (str): one of
            'min'     -- divide by the global std (no shift applied to values;
                         the column minima are returned as the offset),
            'mean'    -- subtract column means, then divide by the global std,
            'pca'     -- subtract column means, then divide by the median
                         distance of the centered points from the origin
                         (so ~50% of points fall inside the unit ball),
            'z-score' -- per-column standardization (default).

    Returns:
        tuple: (normalized data, (offset, scale)).

    Raises:
        ValueError: if *shift* is not one of the recognized options.
    """
    if shift not in ['mean', 'min', 'z-score', 'pca']:
        raise ValueError("please enter a correct shift parameter.")
    if shift == 'min':
        _mu = data.min(axis=0)
        _scl = data.std()
        cdata = data / _scl
    elif shift == 'mean':
        _mu = data.mean(axis=0)
        cdata = data - _mu
        _scl = cdata.std()
        cdata = cdata / _scl
    elif shift == 'pca':
        _mu = data.mean(axis=0)
        cdata = data - _mu  # mean center
        # BUG FIX: the radii must be measured on the already-centered data;
        # the original computed norm(cdata - _mu), subtracting the mean twice
        # and producing a wrong scaling radius whenever _mu != 0.
        rds = norm(cdata, axis=1)  # distance of each centered point from 0
        _scl = np.median(rds)  # 50% of data points are within that radius
        cdata = cdata / _scl
    else:  # shift == 'z-score':
        _mu = data.mean(axis=0)
        _scl = data.std(axis=0)
        cdata = (data - _mu) / _scl
    return cdata, (_mu, _scl)
def sorting(data, sorting='pca'):
    """Order the rows of *data* by a scalar "size" key.

    Args:
        data (np.ndarray): 2-D array of samples.
        sorting (str): 'norm-mean'    -- row norms after mean normalization,
                       'norm-orthant' -- row norms after 'min' normalization,
                       'pca'          -- 1-D PCA projection of the raw data.

    Returns:
        tuple: (rows of data reordered by ascending key, the unsorted key values).

    Raises:
        ValueError: if *sorting* is not one of the recognized options.
    """
    # BUG FIX: the original used independent `if` statements with no final
    # else, so an unrecognized `sorting` value crashed with a NameError on
    # `ind` instead of reporting the bad argument.
    if sorting == 'norm-mean':
        data, parameters = normalize(data, shift='mean')
        size = np.linalg.norm(data, ord=2, axis=1)
        ind = np.argsort(size)
    elif sorting == 'norm-orthant':
        data, parameters = normalize(data, shift='min')
        size = np.linalg.norm(data, ord=2, axis=1)
        ind = np.argsort(size)
    elif sorting == 'pca':
        # data, parameters = normalize(data, shift='pca')
        pca = PCA(n_components=1)
        size = pca.fit_transform(data).reshape(-1)
        ind = np.argsort(size)
    else:
        raise ValueError("please enter a correct sorting parameter.")
    return data[ind], size
def rn_wine_dataset():
    """Plot distance-matrix heatmaps of the Wine dataset under three row orderings.

    Loads data/Real_data/Wine.csv, drops the class-label column, then saves to
    results/ : the full Euclidean distance matrix in the original row order and
    after sorting rows by 1-D PCA projection, by 'norm-orthant', and by
    'norm-mean', plus a pair plot comparing the three sorting keys.
    """
    plt.style.use('ggplot')
    data = pd.read_csv("data/Real_data/Wine.csv")
    # column '14' appears to be the class label; keep only feature columns
    X = data.drop(['14'],axis=1).values
    font_scale = 3
    # pairwise Euclidean distance matrix of the unsorted data (symmetric)
    dist_corr = np.zeros((len(X), len(X)))
    for i in range(len(X)):
        for j in range(i, len(X)):
            dist_corr[j,i] = dist_corr[i,j] = np.linalg.norm(X[i]-X[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.savefig('results/original_wine.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by their 1-D PCA projection
    ndata, size_pca = sorting(X, sorting='pca')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.savefig('results/pca_wine.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by vector norm after 'min' normalization
    ndata, size_no = sorting(X, sorting='norm-orthant')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.savefig('results/norm-orthant_wine.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by vector norm after mean-centering
    ndata, size_nm = sorting(X, sorting='norm-mean')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.yticks([0, 25, 50, 75, 100, 125, 150, 175])
    plt.savefig('results/norm-mean_wine.pdf', bbox_inches='tight')
    # plt.show()
    # pair plot comparing the three sorting keys against one another
    sorting_df = pd.DataFrame()
    sorting_df['PCA'] = size_pca
    sorting_df['Norm-mean'] = size_nm
    sorting_df['Norm-orthant'] = size_no
    sns.set(style='ticks', color_codes=True, font_scale=3)
    g = sns.pairplot(sorting_df, corner=True, height=4.2, aspect=1)
    plt.savefig('results/sort_pair_plot_wine.pdf', bbox_inches='tight')
    # plt.show()
def rn_iris_dataset():
    """Plot distance-matrix heatmaps of the Iris dataset under three row orderings.

    Loads data/Real_data/Iris.csv, label-encodes the species column, and saves
    to results/ : the Euclidean distance matrix in the original row order and
    after sorting rows by 1-D PCA projection, by 'norm-orthant', and by
    'norm-mean', plus a pair plot comparing the three sorting keys.
    """
    plt.style.use('ggplot')
    data = pd.read_csv("data/Real_data/Iris.csv")
    le = preprocessing.LabelEncoder()
    data['Species'] = le.fit_transform(data['Species'])
    X = data.drop(['Species','Id'],axis=1).values
    # y is extracted but not used below -- kept for parity with the original
    y = data['Species'].values
    font_scale = 3
    # pairwise Euclidean distance matrix of the unsorted data (symmetric)
    dist_corr = np.zeros((len(X), len(X)))
    for i in range(len(X)):
        for j in range(i, len(X)):
            dist_corr[j,i] = dist_corr[i,j] = np.linalg.norm(X[i]-X[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.savefig('results/original_iris.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by their 1-D PCA projection
    ndata, size_pca = sorting(X, sorting='pca')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.savefig('results/pca_iris.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by vector norm after 'min' normalization
    ndata, size_no = sorting(X, sorting='norm-orthant')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.savefig('results/norm-orthant_iris.pdf', bbox_inches='tight')
    # plt.show()
    # rows reordered by vector norm after mean-centering
    ndata, size_nm = sorting(X, sorting='norm-mean')
    dist_corr_sort = np.zeros((len(ndata), len(ndata)))
    for i in range(len(ndata)):
        for j in range(i, len(ndata)):
            dist_corr_sort[j,i] = dist_corr_sort[i,j] = np.linalg.norm(ndata[i]-ndata[j], ord=2, axis=0)
    sns.set(rc={'figure.figsize':(12,10)}, font_scale=font_scale)
    fig, ax = plt.subplots()
    im = ax.imshow(dist_corr_sort, cmap='YlGnBu', aspect='auto')
    fig.colorbar(im, ax=ax)
    plt.xticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.yticks([0, 20, 40, 60, 80, 100, 120, 140])
    plt.savefig('results/norm-mean_iris.pdf', bbox_inches='tight')
    # plt.show()
    # pair plot comparing the three sorting keys against one another
    sorting_df = pd.DataFrame()
    sorting_df['PCA'] = size_pca
    sorting_df['Norm-mean'] = size_nm
    sorting_df['Norm-orthant'] = size_no
    sns.set(style='ticks', color_codes=True, font_scale=3)
    g = sns.pairplot(sorting_df, corner=True, height=4.2, aspect=1)
    plt.savefig('results/sort_pair_plot_iris.pdf', bbox_inches='tight')
    # plt.show()
# plt.show() | [
"seaborn.set",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.savefig",
"numpy.median",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"seaborn.set_theme",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.style.use",
"numpy.argsort",
"matplotlib.pyplot.yticks",
"numpy.random.seed",
... | [((196, 211), 'seaborn.set_theme', 'sns.set_theme', ([], {}), '()\n', (209, 211), True, 'import seaborn as sns\n'), ((221, 241), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (235, 241), True, 'import numpy as np\n'), ((1770, 1793), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (1783, 1793), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1843), 'pandas.read_csv', 'pd.read_csv', (['"""data/Real_data/Wine.csv"""'], {}), "('data/Real_data/Wine.csv')\n", (1816, 1843), True, 'import pandas as pd\n'), ((2101, 2164), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (2108, 2164), True, 'import seaborn as sns\n'), ((2177, 2191), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2189, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2331), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (2294, 2331), True, 'import matplotlib.pyplot as plt\n'), ((2339, 2386), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (2349, 2386), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2455), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/original_wine.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/original_wine.pdf', bbox_inches='tight')\n", (2405, 2455), True, 'import matplotlib.pyplot as plt\n'), ((2760, 2823), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (2767, 2823), True, 'import seaborn as sns\n'), ((2836, 2850), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2848, 2850), True, 'import matplotlib.pyplot as plt\n'), ((2948, 2995), 'matplotlib.pyplot.xticks', 
'plt.xticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (2958, 2995), True, 'import matplotlib.pyplot as plt\n'), ((3003, 3050), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (3013, 3050), True, 'import matplotlib.pyplot as plt\n'), ((3058, 3114), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/pca_wine.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/pca_wine.pdf', bbox_inches='tight')\n", (3069, 3114), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3489), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (3433, 3489), True, 'import seaborn as sns\n'), ((3502, 3516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3614, 3661), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (3624, 3661), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3716), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (3679, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3789), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/norm-orthant_wine.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/norm-orthant_wine.pdf', bbox_inches='tight')\n", (3735, 3789), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4162), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (4106, 4162), True, 'import seaborn as sns\n'), ((4175, 4189), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4187, 4189), True, 'import matplotlib.pyplot as plt\n'), ((4287, 
4334), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (4297, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4389), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 25, 50, 75, 100, 125, 150, 175]'], {}), '([0, 25, 50, 75, 100, 125, 150, 175])\n', (4352, 4389), True, 'import matplotlib.pyplot as plt\n'), ((4397, 4459), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/norm-mean_wine.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/norm-mean_wine.pdf', bbox_inches='tight')\n", (4408, 4459), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4509), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4507, 4509), True, 'import pandas as pd\n'), ((4627, 4681), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'color_codes': '(True)', 'font_scale': '(3)'}), "(style='ticks', color_codes=True, font_scale=3)\n", (4634, 4681), True, 'import seaborn as sns\n'), ((4690, 4749), 'seaborn.pairplot', 'sns.pairplot', (['sorting_df'], {'corner': '(True)', 'height': '(4.2)', 'aspect': '(1)'}), '(sorting_df, corner=True, height=4.2, aspect=1)\n', (4702, 4749), True, 'import seaborn as sns\n'), ((4755, 4822), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/sort_pair_plot_wine.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/sort_pair_plot_wine.pdf', bbox_inches='tight')\n", (4766, 4822), True, 'import matplotlib.pyplot as plt\n'), ((4882, 4905), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (4895, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4955), 'pandas.read_csv', 'pd.read_csv', (['"""data/Real_data/Iris.csv"""'], {}), "('data/Real_data/Iris.csv')\n", (4928, 4955), True, 'import pandas as pd\n'), ((4965, 4993), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (4991, 4993), False, 'from sklearn import preprocessing\n'), ((5353, 5416), 'seaborn.set', 'sns.set', 
([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (5360, 5416), True, 'import seaborn as sns\n'), ((5429, 5443), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5441, 5443), True, 'import matplotlib.pyplot as plt\n'), ((5536, 5582), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (5546, 5582), True, 'import matplotlib.pyplot as plt\n'), ((5591, 5637), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (5601, 5637), True, 'import matplotlib.pyplot as plt\n'), ((5646, 5707), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/original_iris.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/original_iris.pdf', bbox_inches='tight')\n", (5657, 5707), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6078), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (6022, 6078), True, 'import seaborn as sns\n'), ((6091, 6105), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6103, 6105), True, 'import matplotlib.pyplot as plt\n'), ((6203, 6249), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (6213, 6249), True, 'import matplotlib.pyplot as plt\n'), ((6258, 6304), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (6268, 6304), True, 'import matplotlib.pyplot as plt\n'), ((6313, 6369), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/pca_iris.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/pca_iris.pdf', bbox_inches='tight')\n", (6324, 6369), True, 'import matplotlib.pyplot as plt\n'), ((6685, 6748), 
'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (6692, 6748), True, 'import seaborn as sns\n'), ((6761, 6775), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6773, 6775), True, 'import matplotlib.pyplot as plt\n'), ((6873, 6919), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (6883, 6919), True, 'import matplotlib.pyplot as plt\n'), ((6928, 6974), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (6938, 6974), True, 'import matplotlib.pyplot as plt\n'), ((6983, 7048), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/norm-orthant_iris.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/norm-orthant_iris.pdf', bbox_inches='tight')\n", (6994, 7048), True, 'import matplotlib.pyplot as plt\n'), ((7362, 7425), 'seaborn.set', 'sns.set', ([], {'rc': "{'figure.figsize': (12, 10)}", 'font_scale': 'font_scale'}), "(rc={'figure.figsize': (12, 10)}, font_scale=font_scale)\n", (7369, 7425), True, 'import seaborn as sns\n'), ((7438, 7452), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7450, 7452), True, 'import matplotlib.pyplot as plt\n'), ((7550, 7596), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (7560, 7596), True, 'import matplotlib.pyplot as plt\n'), ((7605, 7651), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, 20, 40, 60, 80, 100, 120, 140]'], {}), '([0, 20, 40, 60, 80, 100, 120, 140])\n', (7615, 7651), True, 'import matplotlib.pyplot as plt\n'), ((7660, 7722), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/norm-mean_iris.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/norm-mean_iris.pdf', bbox_inches='tight')\n", (7671, 7722), True, 'import 
matplotlib.pyplot as plt\n'), ((7767, 7781), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7779, 7781), True, 'import pandas as pd\n'), ((7899, 7953), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'color_codes': '(True)', 'font_scale': '(3)'}), "(style='ticks', color_codes=True, font_scale=3)\n", (7906, 7953), True, 'import seaborn as sns\n'), ((7962, 8021), 'seaborn.pairplot', 'sns.pairplot', (['sorting_df'], {'corner': '(True)', 'height': '(4.2)', 'aspect': '(1)'}), '(sorting_df, corner=True, height=4.2, aspect=1)\n', (7974, 8021), True, 'import seaborn as sns\n'), ((8026, 8093), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results/sort_pair_plot_iris.pdf"""'], {'bbox_inches': '"""tight"""'}), "('results/sort_pair_plot_iris.pdf', bbox_inches='tight')\n", (8037, 8093), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1313), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': '(2)', 'axis': '(1)'}), '(data, ord=2, axis=1)\n', (1292, 1313), True, 'import numpy as np\n'), ((1328, 1344), 'numpy.argsort', 'np.argsort', (['size'], {}), '(size)\n', (1338, 1344), True, 'import numpy as np\n'), ((1448, 1483), 'numpy.linalg.norm', 'np.linalg.norm', (['data'], {'ord': '(2)', 'axis': '(1)'}), '(data, ord=2, axis=1)\n', (1462, 1483), True, 'import numpy as np\n'), ((1498, 1514), 'numpy.argsort', 'np.argsort', (['size'], {}), '(size)\n', (1508, 1514), True, 'import numpy as np\n'), ((1610, 1629), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(1)'}), '(n_components=1)\n', (1613, 1629), False, 'from sklearn.decomposition import PCA\n'), ((1695, 1711), 'numpy.argsort', 'np.argsort', (['size'], {}), '(size)\n', (1705, 1711), True, 'import numpy as np\n'), ((2055, 2097), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[i] - X[j])'], {'ord': '(2)', 'axis': '(0)'}), '(X[i] - X[j], ord=2, axis=0)\n', (2069, 2097), True, 'import numpy as np\n'), ((2706, 2756), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 
'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (2720, 2756), True, 'import numpy as np\n'), ((3372, 3422), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (3386, 3422), True, 'import numpy as np\n'), ((4045, 4095), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (4059, 4095), True, 'import numpy as np\n'), ((5307, 5349), 'numpy.linalg.norm', 'np.linalg.norm', (['(X[i] - X[j])'], {'ord': '(2)', 'axis': '(0)'}), '(X[i] - X[j], ord=2, axis=0)\n', (5321, 5349), True, 'import numpy as np\n'), ((5961, 6011), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (5975, 6011), True, 'import numpy as np\n'), ((6631, 6681), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (6645, 6681), True, 'import numpy as np\n'), ((7308, 7358), 'numpy.linalg.norm', 'np.linalg.norm', (['(ndata[i] - ndata[j])'], {'ord': '(2)', 'axis': '(0)'}), '(ndata[i] - ndata[j], ord=2, axis=0)\n', (7322, 7358), True, 'import numpy as np\n'), ((809, 834), 'numpy.linalg.norm', 'norm', (['(cdata - _mu)'], {'axis': '(1)'}), '(cdata - _mu, axis=1)\n', (813, 834), False, 'from numpy.linalg import norm\n'), ((887, 901), 'numpy.median', 'np.median', (['rds'], {}), '(rds)\n', (896, 901), True, 'import numpy as np\n')] |
from meta_policy_search.utils import logger
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from meta_policy_search.optimizers.base import Optimizer
class FiniteDifferenceHvp(Optimizer):
    """Approximates Hessian-vector products of a constraint objective by
    finite differences of its gradient, avoiding an explicit Hessian.
    """

    def __init__(self, base_eps=1e-5, symmetric=True, grad_clip=None):
        # base_eps: finite-difference step size
        # symmetric: use the central (two-sided) difference, which is more
        #            accurate than the one-sided forward difference
        # grad_clip: stored but never read in this class -- NOTE(review):
        #            possibly consumed elsewhere; confirm before removing
        self.base_eps = np.cast['float32'](base_eps)
        self.symmetric = symmetric
        self.grad_clip = grad_clip
        self._target = None
        self.reg_coeff = None
        self._constraint_gradient = None
        self._input_ph_dict = None

    def build_graph(self, constraint_obj, target, input_val_dict, reg_coeff):
        """
        Sets the objective function and target weights for the optimize function

        Args:
            constraint_obj (tf_op) : constraint objective
            target (Policy) : Policy whose values we are optimizing over
            input_val_dict (dict) : dict of tf.placeholders for input data which may be subsampled. The first dimension corresponds to the number of data points
            reg_coeff (float): regularization coefficient
        """
        self._target = target
        self.reg_coeff = reg_coeff
        self._input_ph_dict = input_val_dict

        params = list(target.get_params().values())
        constraint_grads = tf.gradients(constraint_obj, xs=params)
        # parameters that do not influence the constraint come back as None;
        # substitute zeros so the flattened vector keeps the full parameter layout
        for idx, (grad, param) in enumerate(zip(constraint_grads, params)):
            if grad is None:
                constraint_grads[idx] = tf.zeros_like(param)
        constraint_gradient = tf.concat([tf.reshape(grad, [-1]) for grad in constraint_grads], axis=0)
        self._constraint_gradient = constraint_gradient

    def constraint_gradient(self, input_val_dict):
        """
        Computes the gradient of the constraint objective

        Args:
            input_val_dict (dict): inputs needed to compute the gradient

        Returns:
            (np.ndarray): flattened gradient
        """
        sess = tf.get_default_session()
        feed_dict = self.create_feed_dict(input_val_dict)
        constraint_gradient = sess.run(self._constraint_gradient, feed_dict)
        return constraint_gradient

    def Hx(self, input_val_dict, x):
        """
        Compute the second derivative of the constraint val in the direction of the vector x

        Args:
            input_val_dict (dict): inputs needed to compute the gradient of the constraint objective
            x (np.ndarray): vector indicating the direction on which the Hessian has to be computed

        Returns:
            (np.ndarray): second derivative in the direction of x
        """
        assert isinstance(x, np.ndarray)

        param_vals = self._target.get_param_values().copy()
        flat_param_vals = _flatten_params(param_vals)
        eps = self.base_eps

        # gradient of the constraint at params + eps * x
        params_plus_eps_vals = _unflatten_params(flat_param_vals + eps * x, params_example=param_vals)
        self._target.set_params(params_plus_eps_vals)
        constraint_grad_plus_eps = self.constraint_gradient(input_val_dict)
        self._target.set_params(param_vals)  # restore the original parameters

        if self.symmetric:
            # central difference: (g(p + eps*x) - g(p - eps*x)) / (2*eps)
            params_minus_eps_vals = _unflatten_params(flat_param_vals - eps * x, params_example=param_vals)
            self._target.set_params(params_minus_eps_vals)
            constraint_grad_minus_eps = self.constraint_gradient(input_val_dict)
            self._target.set_params(param_vals)
            hx = (constraint_grad_plus_eps - constraint_grad_minus_eps)/(2 * eps)
        else:
            # forward difference: (g(p + eps*x) - g(p)) / eps
            constraint_grad = self.constraint_gradient(input_val_dict)
            hx = (constraint_grad_plus_eps - constraint_grad)/eps
        return hx

    def build_eval(self, inputs):
        """
        Build the Hessian evaluation function. It lets you evaluate the hessian of the constraint objective
        in any direction.

        Args:
            inputs (list): inputs needed to compute the gradient of the constraint objective

        Returns:
            (function): evaluates the regularized Hessian-vector product Hx + reg_coeff * x
        """
        def evaluate_hessian(x):
            return self.Hx(inputs, x) + self.reg_coeff * x

        return evaluate_hessian
class ConjugateGradientOptimizer(Optimizer):
"""
Performs constrained optimization via line search. The search direction is computed using a conjugate gradient
algorithm, which gives x = A^{-1}g, where A is a second order approximation of the constraint and g is the gradient
of the loss function.
Args:
cg_iters (int) : The number of conjugate gradients iterations used to calculate A^-1 g
reg_coeff (float) : A small value so that A -> A + reg*I
subsample_factor (float) : Subsampling factor to reduce samples when using "conjugate gradient. Since the computation time for the descent direction dominates, this can greatly reduce the overall computation time.
backtrack_ratio (float) : ratio for decreasing the step size for the line search
max_backtracks (int) : maximum number of backtracking iterations for the line search
debug_nan (bool) : if set to True, NanGuard will be added to the compilation, and ipdb will be invoked when nan is detected
accept_violation (bool) : whether to accept the descent step if it violates the line search condition after exhausting all backtracking budgets
hvp_approach (obj) : Hessian vector product approach
"""
def __init__(
self,
cg_iters=10,
reg_coeff=0,
subsample_factor=1.,
backtrack_ratio=0.8,
max_backtracks=15,
debug_nan=False,
accept_violation=False,
hvp_approach=FiniteDifferenceHvp(),
):
self._cg_iters = cg_iters
self._reg_coeff = reg_coeff
self._subsample_factor = subsample_factor
self._backtrack_ratio = backtrack_ratio
self._max_backtracks = max_backtracks
self._target = None
self._max_constraint_val = None
self._constraint_name = "kl-div"
self._debug_nan = debug_nan
self._accept_violation = accept_violation
self._hvp_approach = hvp_approach
self._loss = None
self._gradient = None
self._constraint_objective = None
self._input_ph_dict = None
    def build_graph(self, loss, target, input_ph_dict, leq_constraint):
        """
        Sets the objective function and target weights for the optimize function

        Args:
            loss (tf_op) : minimization objective
            target (Policy) : Policy whose values we are optimizing over
            input_ph_dict (dict) : dict of tf.placeholders for input data which may be subsampled. The first dimension corresponds to the number of data points
            leq_constraint (tuple) : A constraint provided as a tuple (f, epsilon), of the form f(*inputs) <= epsilon.
        """
        assert isinstance(loss, tf.Tensor)
        assert hasattr(target, 'get_params')
        assert isinstance(input_ph_dict, dict)

        constraint_objective, constraint_value = leq_constraint
        self._target = target
        self._constraint_objective = constraint_objective
        self._max_constraint_val = constraint_value
        self._input_ph_dict = input_ph_dict
        self._loss = loss

        # build the graph of the hessian vector product (hvp)
        self._hvp_approach.build_graph(constraint_objective, target, self._input_ph_dict, self._reg_coeff)

        # build the graph of the gradients
        params = list(target.get_params().values())
        grads = tf.gradients(loss, xs=params)
        # parameters that do not influence the loss come back as None;
        # substitute zeros so the flattened vector keeps the full parameter layout
        for idx, (grad, param) in enumerate(zip(grads, params)):
            if grad is None:
                grads[idx] = tf.zeros_like(param)
        gradient = tf.concat([tf.reshape(grad, [-1]) for grad in grads], axis=0)
        self._gradient = gradient
def loss(self, input_val_dict):
"""
Computes the value of the loss for given inputs
Args:
inputs (list): inputs needed to compute the loss function
extra_inputs (list): additional inputs needed to compute the loss function
Returns:
(float): value of the loss
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
loss = sess.run(self._loss, feed_dict=feed_dict)
return loss
def constraint_val(self, input_val_dict):
"""
Computes the value of the KL-divergence between pre-update policies for given inputs
Args:
inputs (list): inputs needed to compute the inner KL
extra_inputs (list): additional inputs needed to compute the inner KL
Returns:
(float): value of the loss
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
constrain_val = sess.run(self._constraint_objective, feed_dict)
return constrain_val
def gradient(self, input_val_dict):
"""
Computes the gradient of the loss function
Args:
inputs (list): inputs needed to compute the gradient
extra_inputs (list): additional inputs needed to compute the loss function
Returns:
(np.ndarray): flattened gradient
"""
sess = tf.get_default_session()
feed_dict = self.create_feed_dict(input_val_dict)
gradient = sess.run(self._gradient, feed_dict)
return gradient
    def optimize(self, input_val_dict):
        """
        Carries out one constrained optimization step: computes the descent
        direction with conjugate gradients against the constraint's
        Hessian-vector product, then applies a backtracking line search that
        requires the loss to improve while the constraint stays within
        ``self._max_constraint_val``.

        Args:
            input_val_dict (dict): maps input placeholder names to their values
        """
        logger.log("Start CG optimization")
        logger.log("computing loss before")
        loss_before = self.loss(input_val_dict)
        logger.log("performing update")
        logger.log("computing gradient")
        gradient = self.gradient(input_val_dict)
        logger.log("gradient computed")
        logger.log("computing descent direction")
        # Hx(v) evaluates the (approximate) Hessian-vector product of the
        # constraint objective; CG solves H x = g without forming H explicitly.
        Hx = self._hvp_approach.build_eval(input_val_dict)
        descent_direction = conjugate_gradients(Hx, gradient, cg_iters=self._cg_iters)
        # Largest step that keeps the quadratic model of the constraint below
        # the bound; 1e-8 guards against division by zero.
        initial_step_size = np.sqrt(2.0 * self._max_constraint_val *
                                    (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8)))
        if np.isnan(initial_step_size):
            logger.log("Initial step size is NaN! Rejecting the step!")
            return
        initial_descent_step = initial_step_size * descent_direction
        logger.log("descent direction computed")
        prev_params = self._target.get_param_values()
        prev_params_values = _flatten_params(prev_params)
        loss, constraint_val, n_iter, violated = 0, 0, 0, False
        # Backtracking line search: geometrically shrink the step until both
        # the loss improves and the constraint is satisfied.
        for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):
            cur_step = ratio * initial_descent_step
            cur_params_values = prev_params_values - cur_step
            cur_params = _unflatten_params(cur_params_values, params_example=prev_params)
            self._target.set_params(cur_params)
            loss, constraint_val = self.loss(input_val_dict), self.constraint_val(input_val_dict)
            if loss < loss_before and constraint_val <= self._max_constraint_val:
                break

        """ ------------------- Logging Stuff -------------------------- """
        if np.isnan(loss):
            violated = True
            logger.log("Line search violated because loss is NaN")
        if np.isnan(constraint_val):
            violated = True
            logger.log("Line search violated because constraint %s is NaN" % self._constraint_name)
        if loss >= loss_before:
            violated = True
            logger.log("Line search violated because loss not improving")
        if constraint_val >= self._max_constraint_val:
            violated = True
            logger.log("Line search violated because constraint %s is violated" % self._constraint_name)
        # Roll back to the previous parameters when the step is rejected.
        if violated and not self._accept_violation:
            logger.log("Line search condition violated. Rejecting the step!")
            self._target.set_params(prev_params)
        logger.log("backtrack iters: %d" % n_iter)
        # NOTE(review): "computing loss after" is logged, but the loss is not
        # actually re-evaluated here.
        logger.log("computing loss after")
        logger.log("optimization finished")
def _unflatten_params(flat_params, params_example):
unflat_params = []
idx = 0
for key, param in params_example.items():
size_param = np.prod(param.shape)
reshaped_param = np.reshape(flat_params[idx:idx+size_param], newshape=param.shape)
unflat_params.append((key, reshaped_param))
idx += size_param
return OrderedDict(unflat_params)
def _flatten_params(params):
return np.concatenate([param.reshape(-1) for param in params.values()])
def conjugate_gradients(f_Ax, b, cg_iters=10, verbose=False, residual_tol=1e-10):
    """
    Demmel p 312

    Solves A x = b for symmetric positive-definite A, given only the
    matrix-vector product ``f_Ax``. Iterates at most *cg_iters* times or
    until the squared residual drops below *residual_tol*.
    """
    direction = b.copy()
    residual = b.copy()
    x = np.zeros_like(b, dtype=np.float32)
    rdotr = residual.dot(residual)

    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    if verbose:
        print(titlestr % ("iter", "residual norm", "soln norm"))

    for i in range(cg_iters):
        if verbose:
            print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        Ap = f_Ax(direction)
        step = rdotr / direction.dot(Ap)
        x += step * direction
        residual -= step * Ap
        next_rdotr = residual.dot(residual)
        direction = residual + (next_rdotr / rdotr) * direction
        rdotr = next_rdotr
        if rdotr < residual_tol:
            break

    if verbose:
        print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))
    return x
| [
"meta_policy_search.utils.logger.log",
"numpy.prod",
"collections.OrderedDict",
"numpy.reshape",
"tensorflow.get_default_session",
"numpy.linalg.norm",
"tensorflow.gradients",
"numpy.isnan",
"tensorflow.reshape",
"tensorflow.zeros_like",
"numpy.zeros_like",
"numpy.arange"
] | [((12987, 13013), 'collections.OrderedDict', 'OrderedDict', (['unflat_params'], {}), '(unflat_params)\n', (12998, 13013), False, 'from collections import OrderedDict\n'), ((13280, 13314), 'numpy.zeros_like', 'np.zeros_like', (['b'], {'dtype': 'np.float32'}), '(b, dtype=np.float32)\n', (13293, 13314), True, 'import numpy as np\n'), ((1280, 1319), 'tensorflow.gradients', 'tf.gradients', (['constraint_obj'], {'xs': 'params'}), '(constraint_obj, xs=params)\n', (1292, 1319), True, 'import tensorflow as tf\n'), ((1941, 1965), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (1963, 1965), True, 'import tensorflow as tf\n'), ((7708, 7737), 'tensorflow.gradients', 'tf.gradients', (['loss'], {'xs': 'params'}), '(loss, xs=params)\n', (7720, 7737), True, 'import tensorflow as tf\n'), ((8360, 8384), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8382, 8384), True, 'import tensorflow as tf\n'), ((8919, 8943), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (8941, 8943), True, 'import tensorflow as tf\n'), ((9464, 9488), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (9486, 9488), True, 'import tensorflow as tf\n'), ((9977, 10012), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""Start CG optimization"""'], {}), "('Start CG optimization')\n", (9987, 10012), False, 'from meta_policy_search.utils import logger\n'), ((10022, 10057), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""computing loss before"""'], {}), "('computing loss before')\n", (10032, 10057), False, 'from meta_policy_search.utils import logger\n'), ((10115, 10146), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""performing update"""'], {}), "('performing update')\n", (10125, 10146), False, 'from meta_policy_search.utils import logger\n'), ((10156, 10188), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""computing gradient"""'], {}), "('computing 
gradient')\n", (10166, 10188), False, 'from meta_policy_search.utils import logger\n'), ((10246, 10277), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""gradient computed"""'], {}), "('gradient computed')\n", (10256, 10277), False, 'from meta_policy_search.utils import logger\n'), ((10287, 10328), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""computing descent direction"""'], {}), "('computing descent direction')\n", (10297, 10328), False, 'from meta_policy_search.utils import logger\n'), ((10654, 10681), 'numpy.isnan', 'np.isnan', (['initial_step_size'], {}), '(initial_step_size)\n', (10662, 10681), True, 'import numpy as np\n'), ((10852, 10892), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""descent direction computed"""'], {}), "('descent direction computed')\n", (10862, 10892), False, 'from meta_policy_search.utils import logger\n'), ((11713, 11727), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (11721, 11727), True, 'import numpy as np\n'), ((11835, 11859), 'numpy.isnan', 'np.isnan', (['constraint_val'], {}), '(constraint_val)\n', (11843, 11859), True, 'import numpy as np\n'), ((12500, 12542), 'meta_policy_search.utils.logger.log', 'logger.log', (["('backtrack iters: %d' % n_iter)"], {}), "('backtrack iters: %d' % n_iter)\n", (12510, 12542), False, 'from meta_policy_search.utils import logger\n'), ((12551, 12585), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""computing loss after"""'], {}), "('computing loss after')\n", (12561, 12585), False, 'from meta_policy_search.utils import logger\n'), ((12594, 12629), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""optimization finished"""'], {}), "('optimization finished')\n", (12604, 12629), False, 'from meta_policy_search.utils import logger\n'), ((12786, 12806), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (12793, 12806), True, 'import numpy as np\n'), ((12832, 12899), 'numpy.reshape', 'np.reshape', (['flat_params[idx:idx + 
size_param]'], {'newshape': 'param.shape'}), '(flat_params[idx:idx + size_param], newshape=param.shape)\n', (12842, 12899), True, 'import numpy as np\n'), ((10695, 10754), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""Initial step size is NaN! Rejecting the step!"""'], {}), "('Initial step size is NaN! Rejecting the step!')\n", (10705, 10754), False, 'from meta_policy_search.utils import logger\n'), ((11769, 11823), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""Line search violated because loss is NaN"""'], {}), "('Line search violated because loss is NaN')\n", (11779, 11823), False, 'from meta_policy_search.utils import logger\n'), ((11901, 11993), 'meta_policy_search.utils.logger.log', 'logger.log', (["('Line search violated because constraint %s is NaN' % self._constraint_name)"], {}), "('Line search violated because constraint %s is NaN' % self.\n _constraint_name)\n", (11911, 11993), False, 'from meta_policy_search.utils import logger\n'), ((12061, 12122), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""Line search violated because loss not improving"""'], {}), "('Line search violated because loss not improving')\n", (12071, 12122), False, 'from meta_policy_search.utils import logger\n'), ((12218, 12315), 'meta_policy_search.utils.logger.log', 'logger.log', (["('Line search violated because constraint %s is violated' % self.\n _constraint_name)"], {}), "('Line search violated because constraint %s is violated' % self.\n _constraint_name)\n", (12228, 12315), False, 'from meta_policy_search.utils import logger\n'), ((12376, 12441), 'meta_policy_search.utils.logger.log', 'logger.log', (['"""Line search condition violated. Rejecting the step!"""'], {}), "('Line search condition violated. 
Rejecting the step!')\n", (12386, 12441), False, 'from meta_policy_search.utils import logger\n'), ((1466, 1486), 'tensorflow.zeros_like', 'tf.zeros_like', (['param'], {}), '(param)\n', (1479, 1486), True, 'import tensorflow as tf\n'), ((1529, 1551), 'tensorflow.reshape', 'tf.reshape', (['grad', '[-1]'], {}), '(grad, [-1])\n', (1539, 1551), True, 'import tensorflow as tf\n'), ((7861, 7881), 'tensorflow.zeros_like', 'tf.zeros_like', (['param'], {}), '(param)\n', (7874, 7881), True, 'import tensorflow as tf\n'), ((7912, 7934), 'tensorflow.reshape', 'tf.reshape', (['grad', '[-1]'], {}), '(grad, [-1])\n', (7922, 7934), True, 'import tensorflow as tf\n'), ((11135, 11166), 'numpy.arange', 'np.arange', (['self._max_backtracks'], {}), '(self._max_backtracks)\n', (11144, 11166), True, 'import numpy as np\n'), ((13553, 13570), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (13567, 13570), True, 'import numpy as np\n'), ((13869, 13886), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {}), '(x)\n', (13883, 13886), True, 'import numpy as np\n')] |
"""
Plotting convenience functions.
"""
from math import ceil
import ipywidgets as widgets
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
from model_base import get_ext_input
# define basics: reuse matplotlib's default colour cycle and apply a global style
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
plt.style.use("seaborn-muted")
# external input is switched on from this integration step onward
INPUT_START = 1000  # dt, i.e. 100ms
LABEL_SIZE = 16  # font size used for axis labels throughout
def setup_sliders_layout(model_specific_sliders):
    """
    Set up interactive part of the plot, i.e. sliders and grid layout.

    model_specific_sliders: dict mapping model parameter names to ipywidgets
        sliders; laid out two per row at the top of the grid

    Returns:
        (grid, sliders): the GridspecLayout widget and a dict of all sliders
        (model-specific ones plus the shared external-current controls)
    """
    assert isinstance(model_specific_sliders, dict)
    num_model_sliders = len(model_specific_sliders)
    # define general sliders
    I_m_slider = widgets.FloatSlider(
        min=-5, max=20, step=0.5, value=10.0, description="I max"
    )
    T_slider = widgets.IntSlider(
        min=500, max=2000, step=5, value=750, description="time"
    )
    I_types = widgets.ToggleButtons(
        options=["constant", "sq. pulse", "sine", "ramp", "Ornstein-Uhlenbeck"],
        value="constant",
        description="Current type",
        disabled=False,
        layout=widgets.Layout(height="auto", width="auto"),
    )
    I_period = widgets.FloatSlider(
        min=10, max=1000, step=5, value=200, description="I period"
    )
    # define grid: one header row + two model sliders per row + shared rows
    grid = widgets.GridspecLayout(ceil(5 + num_model_sliders / 2), 2)
    grid[0, :] = widgets.Button(
        description="Model parameters",
        layout=widgets.Layout(height="auto", width="auto"),
    )
    # assign model sliders, two per row
    for idx, (_, slider) in enumerate(model_specific_sliders.items()):
        grid[idx // 2 + 1, idx % 2] = slider
    # NOTE(review): `idx` from the loop above is reused below, so an empty
    # `model_specific_sliders` dict would raise NameError here — confirm
    # callers always pass at least one slider.
    grid[idx // 2 + 2, :] = widgets.Button(
        description="External current parameters",
        layout=widgets.Layout(height="auto", width="auto"),
    )
    grid[idx // 2 + 3, 0] = I_period
    grid[idx // 2 + 4, 0] = I_m_slider
    grid[idx // 2 + 4, 1] = T_slider
    grid[idx // 2 + 5, :] = I_types
    sliders = {
        **model_specific_sliders,
        "I_max": I_m_slider,
        "I_period": I_period,
        "T": T_slider,
        "current_type": I_types,
    }
    for _, slider in sliders.items():
        # lower number of "waiting" updates in the pipe
        slider.msg_throttle = 1
    return grid, sliders
def integrate_and_plot(model_cls, **kwargs):
    """
    Integrate the model given its parameters and plot.

    model_cls: model class to instantiate; the keys "T", "I_max", "I_period"
        and "current_type" are popped from kwargs and used to build the
        external current, the remainder is passed to the model as parameters.
    """
    T = kwargs.pop("T")
    I_max = kwargs.pop("I_max")
    I_period = kwargs.pop("I_period")
    current_type = kwargs.pop("current_type")
    model = model_cls(parameters=kwargs, T=T)
    # external current: zero until INPUT_START, then the selected waveform
    ext_current = np.zeros((model.n_points + 1))
    input_length = ext_current.shape[0] - INPUT_START
    ext_current[INPUT_START:] = get_ext_input(
        I_max, I_period, current_type, model.T_total, input_length
    )
    model.set_input(ext_current)
    t, y = model.integrate()
    # set up figure: 3x3 grid — voltage trace (top-left), input current
    # (bottom-left), recovery-vs-voltage scatter (right column)
    fig = plt.figure(constrained_layout=True, figsize=(15, 8))
    spec = gridspec.GridSpec(ncols=3, nrows=3, figure=fig)
    # set up axis for timeseries of input current
    ax2 = fig.add_subplot(spec[2, :2])
    ax2.set_ylim([-20, 30])
    ax2.set_ylabel("INPUT CURRENT [AU]", size=LABEL_SIZE)
    ax2.set_xlabel("TIME [ms]", size=LABEL_SIZE)
    # dashed marker at 100 ms — the input onset (INPUT_START steps)
    ax2.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
    ax2.spines["right"].set_visible(False)
    ax2.spines["top"].set_visible(False)
    ax2.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
    # set up axis for timeseries of state vector
    ax1 = fig.add_subplot(spec[:2, :2], sharex=ax2)
    ax1.set_ylim([-90, 30])
    ax1.set_ylabel("MEMBRANE POTENTIAL [mV]", size=LABEL_SIZE)
    ax1.spines["right"].set_visible(False)
    ax1.spines["top"].set_visible(False)
    ax1.spines["bottom"].set_visible(False)
    ax1.axvline(100.0, 0, 1, linestyle="--", color="grey", linewidth=0.7)
    ax1.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
    # twin axis for the remaining state variables (e.g. recovery), own scale
    ax12 = ax1.twinx()
    ax12.set_ylim([-20, 10])
    ax12.set_yticklabels([])
    ax12.set_yticks([])
    ax12.spines["right"].set_visible(False)
    ax12.spines["top"].set_visible(False)
    ax12.spines["bottom"].set_visible(False)
    ax12.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
    # set up axis for scatter u vs v
    ax3 = fig.add_subplot(spec[:2, 2], sharey=ax1)
    ax3.spines["right"].set_visible(False)
    ax3.spines["top"].set_visible(False)
    ax3.set_xlabel("MEMBRANE RECOVERY", size=LABEL_SIZE)
    scatter_colors = colors[3]
    ax3.set_ylim([-90, 30])
    ax3.set_xlim([-20, 10])
    ax3.tick_params(axis="both", which="major", labelsize=LABEL_SIZE - 2)
    # plot: row 0 of y is assumed to be the membrane voltage, remaining rows
    # are auxiliary state variables — TODO confirm against the model class
    ax1.plot(t, y[0, :], color=colors[0], linewidth=2.5)
    ax12.plot(t, y[1:, :].T, color=colors[1])
    ax2.plot(t, model.ext_current[1:], color=colors[2])
    ax3.scatter(y[1, :], y[0, :], s=7, c=scatter_colors)
    plt.suptitle(f"Number of spikes: {model.num_spikes}", size=LABEL_SIZE + 3)
| [
"model_base.get_ext_input",
"ipywidgets.IntSlider",
"math.ceil",
"matplotlib.pyplot.style.use",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.gridspec.GridSpec",
"ipywidgets.Layout",
"ipywidgets.FloatSlider",
"matplotlib.pyplot.suptitle"
] | [((321, 351), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-muted"""'], {}), "('seaborn-muted')\n", (334, 351), True, 'import matplotlib.pyplot as plt\n'), ((745, 823), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(-5)', 'max': '(20)', 'step': '(0.5)', 'value': '(10.0)', 'description': '"""I max"""'}), "(min=-5, max=20, step=0.5, value=10.0, description='I max')\n", (764, 823), True, 'import ipywidgets as widgets\n'), ((853, 928), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'min': '(500)', 'max': '(2000)', 'step': '(5)', 'value': '(750)', 'description': '"""time"""'}), "(min=500, max=2000, step=5, value=750, description='time')\n", (870, 928), True, 'import ipywidgets as widgets\n'), ((1228, 1313), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(10)', 'max': '(1000)', 'step': '(5)', 'value': '(200)', 'description': '"""I period"""'}), "(min=10, max=1000, step=5, value=200, description='I period'\n )\n", (1247, 1313), True, 'import ipywidgets as widgets\n'), ((2649, 2677), 'numpy.zeros', 'np.zeros', (['(model.n_points + 1)'], {}), '(model.n_points + 1)\n', (2657, 2677), True, 'import numpy as np\n'), ((2766, 2839), 'model_base.get_ext_input', 'get_ext_input', (['I_max', 'I_period', 'current_type', 'model.T_total', 'input_length'], {}), '(I_max, I_period, current_type, model.T_total, input_length)\n', (2779, 2839), False, 'from model_base import get_ext_input\n'), ((2947, 2999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)', 'figsize': '(15, 8)'}), '(constrained_layout=True, figsize=(15, 8))\n', (2957, 2999), True, 'import matplotlib.pyplot as plt\n'), ((3011, 3058), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', ([], {'ncols': '(3)', 'nrows': '(3)', 'figure': 'fig'}), '(ncols=3, nrows=3, figure=fig)\n', (3028, 3058), True, 'import matplotlib.gridspec as gridspec\n'), ((4918, 4992), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['f"""Number of spikes: 
{model.num_spikes}"""'], {'size': '(LABEL_SIZE + 3)'}), "(f'Number of spikes: {model.num_spikes}', size=LABEL_SIZE + 3)\n", (4930, 4992), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1407), 'math.ceil', 'ceil', (['(5 + num_model_sliders / 2)'], {}), '(5 + num_model_sliders / 2)\n', (1380, 1407), False, 'from math import ceil\n'), ((1162, 1205), 'ipywidgets.Layout', 'widgets.Layout', ([], {'height': '"""auto"""', 'width': '"""auto"""'}), "(height='auto', width='auto')\n", (1176, 1205), True, 'import ipywidgets as widgets\n'), ((1500, 1543), 'ipywidgets.Layout', 'widgets.Layout', ([], {'height': '"""auto"""', 'width': '"""auto"""'}), "(height='auto', width='auto')\n", (1514, 1543), True, 'import ipywidgets as widgets\n'), ((1805, 1848), 'ipywidgets.Layout', 'widgets.Layout', ([], {'height': '"""auto"""', 'width': '"""auto"""'}), "(height='auto', width='auto')\n", (1819, 1848), True, 'import ipywidgets as widgets\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : <NAME>
# E-mail : <EMAIL>
# Description:
# Date : 21/10/2020 14:20
# File Name : generate-dataset-Dexterous_vacuum.py
import numpy as np
import sys
import pickle
import datetime
from vstsim.grasping.quality import PointGraspMetrics3D
from vstsim.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler, \
VacuumGraspSampler, DexterousVacuumGrasp
from vstsim.grasping import RobotGripper, GraspableObject3D, GraspQualityConfigFactory, PointGraspSampler
# from vstsim.visualization.visualizer3d import DexNetVisualizer3D as Vis
import vstsim
from autolab_core import YamlConfig
from meshpy.obj_file import ObjFile
from meshpy.sdf_file import SdfFile
import os
import multiprocessing
import matplotlib.pyplot as plt
from mayavi import mlab
import logging
try:
import pcl
except ImportError:
logging.warning('Failed to import pcl!')
# home_dir = os.environ['HOME']
# file_dir = home_dir + "/dataset/ycb_meshes_google/backup/003_typical"
# os_path = os.environ
# print(os_path)
# NOTE(review): stripping the last 13 characters assumes the script is run
# from a working directory with a fixed-length suffix (e.g. "/vst_sim/apps");
# verify before reusing this script elsewhere.
work_dir = os.environ['PWD'][:-13]
file_dir = work_dir + "/3D_meshes"  # root folder holding one subfolder per mesh object
def get_file_name(file_dir_):
    """Return the immediate subdirectories of *file_dir_*, sorted by path.

    Depth is detected by counting '/' separators, so only directories
    exactly one level below *file_dir_* are kept; deeper nesting is ignored.
    """
    target_depth = file_dir_.count('/') + 1
    subdirs = [root for root, dirs, files in os.walk(file_dir_)
               if root.count('/') == target_depth]
    subdirs.sort()
    return subdirs
'''
def display_object(obj_):
"""display object only using mayavi"""
Vis.figure(bgcolor=(1, 1, 1), size=(1000, 1000))
Vis.mesh(obj_.mesh.trimesh, color=(0.5, 0.5, 0.5), style='surface')
Vis.show()
'''
def worker(curr_obj, obj_name, sample_nums, grasp_generations, flg_vis=False, flg_test=False):
    """
    Generate dexterous-vacuum grasps for a single mesh object and pickle them.

    Args:
        curr_obj (int): index of the object in the job list (used for logging)
        obj_name (str): object directory name under `file_dir`
        sample_nums (int): target number of grasps to sample
        grasp_generations (int): forwarded to the sampler as `grasp_gen_mult`
        flg_vis (bool): if True, load the gripper mesh and visualize sampling
        flg_test (bool): forwarded to the sampler's `flg_test` flag
    """
    object_name = obj_name
    print('a worker of task {} start'.format(object_name))
    # yaml_config = YamlConfig(home_dir + "/Dexterous_grasp_01/vst_sim/test/config.yaml")
    yaml_config = YamlConfig(work_dir + "/vst_sim/test/config.yaml")
    gripper_name = "dexterous_vacuum"
    # gripper = RobotGripper.load_dex_vacuum(gripper_name, home_dir + "/Dexterous_grasp_01/vst_sim/data/grippers")
    gripper = RobotGripper.load_dex_vacuum(gripper_name, work_dir + "/vst_sim/data/grippers")
    # sampler selection is hard-coded to the dexterous-vacuum branch below
    grasp_sample_method = "dexterous_vacuum"
    if grasp_sample_method == "uniform":
        ags = UniformGraspSampler(gripper, yaml_config)
    elif grasp_sample_method == "vacuum_point":
        ags = VacuumGraspSampler(gripper, yaml_config)
    elif grasp_sample_method == "dexterous_vacuum":
        ags = DexterousVacuumGrasp(gripper, yaml_config)
    else:
        raise NameError("Can't support this sampler")
    print("Log: do job", curr_obj)
    # file_gripper = home_dir + "/Dexterous_grasp_01/vst_sim/data/grippers/dexterous_vacuum/gripper.obj"
    file_gripper = work_dir + "/vst_sim/data/grippers/dexterous_vacuum/gripper.obj"
    mesh_gripper = None
    # the gripper mesh is only needed for visualization
    if flg_vis:
        of = ObjFile(file_gripper)
        mesh_gripper = of.read()
    # load the object's mesh, signed distance field, and precomputed normals
    if os.path.exists(file_dir + "/" + obj_name + "/google_512k/nontextured.obj"):
        of = ObjFile(file_dir + "/" + obj_name + "/google_512k/nontextured.obj")
        sf = SdfFile(file_dir + "/" + obj_name + "/google_512k/nontextured.sdf")
        nf = np.load(file_dir + "/" + obj_name + "/google_512k/surface_normals_pcl.npy")
    else:
        print("can't find any obj or sdf file!")
        raise NameError("can't find any obj or sdf file!")
    mesh = of.read()
    sdf = sf.read()
    mesh.set_normals(nf)
    obj = GraspableObject3D(sdf, mesh, model_name=str(obj_name))
    print("dim_grasp_matrix: ", int(ags.dim_grasp_matrix))
    grasps = \
        ags.generate_grasps_dex_vacuum(obj,
                                       target_num_grasps=sample_nums,
                                       grasp_gen_mult=grasp_generations,
                                       multi_approach_angle=True,
                                       vis_surface=flg_vis, mesh_gripper=mesh_gripper,
                                       flg_test=flg_test,
                                       flg_desample_g=True, dim_g_matrix=int(ags.dim_grasp_matrix),
                                       num_repeat_QP=3)
    # timestamp (first 12 chars: YYYYMMDDHHMM) is embedded in the output name
    str_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
    '''
    grasps_file_name = home_dir + "/Dexterous_grasp_01/vst_sim/apps/generated_grasps/{}_ttt{}_d{}_a{}_s{}_n{}".format(
        object_name,
        str(str_time[0:12]),
        str(int(ags.dim_grasp_matrix)),
        str(int(ags.angle_range_max)),
        str(int(ags.num_angle_steps)),
        str(np.shape(grasps)[0]))
    '''
    grasps_file_name = work_dir + "/vst_sim/apps/generated_grasps/{}_ttt{}_d{}_a{}_s{}_n{}".format(
        object_name,
        str(str_time[0:12]),
        str(int(ags.dim_grasp_matrix)),
        str(int(ags.angle_range_max)),
        str(int(ags.num_angle_steps)),
        str(np.shape(grasps)[0]))
    # np.save(grasps_file_name + '.npy', np.array(grasps))
    with open(grasps_file_name + '.pickle', 'wb') as f:
        pickle.dump(grasps, f)
def main():
    """
    Distribute grasp generation over all mesh objects with a fixed-size pool
    of worker processes: whenever one worker finishes, the next object from
    the job list is dispatched, until every object has been processed.
    """
    target_num_grasps = 8
    file_list_all = get_file_name(file_dir)
    object_numbers = file_list_all.__len__()
    job_list = np.arange(object_numbers)
    job_list = list(job_list)
    cores = multiprocessing.cpu_count()
    # number of jobs done at the same time
    pool_size = np.max([1, int(cores // 3)])
    # NOTE(review): the core-based pool size above is immediately overridden
    # with a hard-coded value.
    pool_size = 4
    # Initialize pool
    pool = []
    curr_obj = 0
    '''Test
    object_name = file_list_all[curr_obj][len(home_dir) + 35:]
    worker(curr_obj, object_name, 1, 1, flg_vis=True, flg_test=True)
    '''
    #########################
    assert (pool_size <= len(job_list))
    # launch the first `pool_size` workers
    for _ in range(pool_size):
        job_i = job_list.pop(0)
        # object_name = file_list_all[curr_obj][len(home_dir) + 35:]
        object_name = file_list_all[curr_obj][len(file_dir):]
        pool.append(multiprocessing.Process(target=worker, args=(curr_obj,
                                                      object_name,
                                                      target_num_grasps, 5)))
        curr_obj += 1
    [p.start() for p in pool]
    # busy-wait: replace each finished process with a new job until the
    # job list is exhausted
    while len(job_list) > 0:
        for ind, p in enumerate(pool):
            if not p.is_alive():
                pool.pop(ind)
                job_i = job_list.pop(0)
                # object_name = file_list_all[curr_obj][len(home_dir) + 35:]
                object_name = file_list_all[curr_obj][len(file_dir):]
                p = multiprocessing.Process(target=worker, args=(curr_obj, object_name, target_num_grasps,
                                                              5))
                curr_obj += 1
                p.start()
                pool.append(p)
                break
    print('All job done.')
# script entry point: only dispatch the multiprocessing pipeline when run
# directly, never on import (required for multiprocessing on some platforms)
if __name__ == '__main__':
    main()
| [
"os.path.exists",
"pickle.dump",
"vstsim.grasping.DexterousVacuumGrasp",
"meshpy.obj_file.ObjFile",
"multiprocessing.Process",
"os.walk",
"logging.warning",
"multiprocessing.cpu_count",
"vstsim.grasping.RobotGripper.load_dex_vacuum",
"vstsim.grasping.VacuumGraspSampler",
"datetime.datetime.now",... | [((1255, 1273), 'os.walk', 'os.walk', (['file_dir_'], {}), '(file_dir_)\n', (1262, 1273), False, 'import os\n'), ((1918, 1968), 'autolab_core.YamlConfig', 'YamlConfig', (["(work_dir + '/vst_sim/test/config.yaml')"], {}), "(work_dir + '/vst_sim/test/config.yaml')\n", (1928, 1968), False, 'from autolab_core import YamlConfig\n'), ((2137, 2216), 'vstsim.grasping.RobotGripper.load_dex_vacuum', 'RobotGripper.load_dex_vacuum', (['gripper_name', "(work_dir + '/vst_sim/data/grippers')"], {}), "(gripper_name, work_dir + '/vst_sim/data/grippers')\n", (2165, 2216), False, 'from vstsim.grasping import RobotGripper, GraspableObject3D, GraspQualityConfigFactory, PointGraspSampler\n'), ((2979, 3053), 'os.path.exists', 'os.path.exists', (["(file_dir + '/' + obj_name + '/google_512k/nontextured.obj')"], {}), "(file_dir + '/' + obj_name + '/google_512k/nontextured.obj')\n", (2993, 3053), False, 'import os\n'), ((5183, 5208), 'numpy.arange', 'np.arange', (['object_numbers'], {}), '(object_numbers)\n', (5192, 5208), True, 'import numpy as np\n'), ((5252, 5279), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (5277, 5279), False, 'import multiprocessing\n'), ((917, 957), 'logging.warning', 'logging.warning', (['"""Failed to import pcl!"""'], {}), "('Failed to import pcl!')\n", (932, 957), False, 'import logging\n'), ((2318, 2359), 'vstsim.grasping.UniformGraspSampler', 'UniformGraspSampler', (['gripper', 'yaml_config'], {}), '(gripper, yaml_config)\n', (2337, 2359), False, 'from vstsim.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler, VacuumGraspSampler, DexterousVacuumGrasp\n'), ((2916, 2937), 'meshpy.obj_file.ObjFile', 'ObjFile', (['file_gripper'], {}), '(file_gripper)\n', (2923, 2937), False, 'from meshpy.obj_file import ObjFile\n'), ((3068, 3135), 'meshpy.obj_file.ObjFile', 'ObjFile', (["(file_dir + '/' + obj_name + '/google_512k/nontextured.obj')"], {}), "(file_dir + '/' + 
obj_name + '/google_512k/nontextured.obj')\n", (3075, 3135), False, 'from meshpy.obj_file import ObjFile\n'), ((3149, 3216), 'meshpy.sdf_file.SdfFile', 'SdfFile', (["(file_dir + '/' + obj_name + '/google_512k/nontextured.sdf')"], {}), "(file_dir + '/' + obj_name + '/google_512k/nontextured.sdf')\n", (3156, 3216), False, 'from meshpy.sdf_file import SdfFile\n'), ((3230, 3305), 'numpy.load', 'np.load', (["(file_dir + '/' + obj_name + '/google_512k/surface_normals_pcl.npy')"], {}), "(file_dir + '/' + obj_name + '/google_512k/surface_normals_pcl.npy')\n", (3237, 3305), True, 'import numpy as np\n'), ((5013, 5035), 'pickle.dump', 'pickle.dump', (['grasps', 'f'], {}), '(grasps, f)\n', (5024, 5035), False, 'import pickle\n'), ((2422, 2462), 'vstsim.grasping.VacuumGraspSampler', 'VacuumGraspSampler', (['gripper', 'yaml_config'], {}), '(gripper, yaml_config)\n', (2440, 2462), False, 'from vstsim.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler, VacuumGraspSampler, DexterousVacuumGrasp\n'), ((4199, 4222), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4220, 4222), False, 'import datetime\n'), ((5882, 5976), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(curr_obj, object_name, target_num_grasps, 5)'}), '(target=worker, args=(curr_obj, object_name,\n target_num_grasps, 5))\n', (5905, 5976), False, 'import multiprocessing\n'), ((2529, 2571), 'vstsim.grasping.DexterousVacuumGrasp', 'DexterousVacuumGrasp', (['gripper', 'yaml_config'], {}), '(gripper, yaml_config)\n', (2549, 2571), False, 'from vstsim.grasping import GaussianGraspSampler, AntipodalGraspSampler, UniformGraspSampler, GpgGraspSampler, VacuumGraspSampler, DexterousVacuumGrasp\n'), ((6496, 6590), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'worker', 'args': '(curr_obj, object_name, target_num_grasps, 5)'}), '(target=worker, args=(curr_obj, object_name,\n target_num_grasps, 5))\n', 
(6519, 6590), False, 'import multiprocessing\n'), ((4867, 4883), 'numpy.shape', 'np.shape', (['grasps'], {}), '(grasps)\n', (4875, 4883), True, 'import numpy as np\n')] |
# %% [markdown]
# #
import matplotlib as mpl
from matplotlib.cm import ScalarMappable
import networkx as nx
import numpy as np
from src.hierarchy import signal_flow
from graspy.models import SBMEstimator
# NOTE(review): this notebook cell relies on `adj`, `bin_adj`, `k`,
# `pred_labels`, `gmm`, `pd`, and `plt` being defined/imported in earlier
# cells — verify before running standalone.
# per-node signal flow, averaged within each predicted cluster
node_signal_flow = signal_flow(adj)
mean_sf = np.zeros(k)
for i in np.unique(pred_labels):
    inds = np.where(pred_labels == i)[0]
    mean_sf[i] = np.mean(node_signal_flow[inds])
cluster_mean_latent = gmm.model_.means_[:, 0]
# block connection probabilities of an SBM fit using the predicted labels
block_probs = SBMEstimator().fit(bin_adj, y=pred_labels).block_p_
block_prob_df = pd.DataFrame(data=block_probs, index=range(k), columns=range(k))
block_g = nx.from_pandas_adjacency(block_prob_df, create_using=nx.DiGraph)
plt.figure(figsize=(10, 10))
# don't ever let em tell you you're too pythonic
# node positions: x = cluster mean latent position, y = mean signal flow
pos = dict(zip(range(k), zip(cluster_mean_latent, mean_sf)))
# nx.draw_networkx_nodes(block_g, pos=pos)
labels = nx.get_edge_attributes(block_g, "weight")
# nx.draw_networkx_edge_labels(block_g, pos, edge_labels=labels)
# log-scaled red colormap for edge weights (probabilities in ~[0.01, 0.1])
norm = mpl.colors.LogNorm(vmin=0.01, vmax=0.1)
sm = ScalarMappable(cmap="Reds", norm=norm)
cmap = sm.to_rgba(np.array(list(labels.values())) + 0.01)
nx.draw_networkx(
    block_g,
    pos,
    edge_cmap="Reds",
    edge_color=cmap,
    connectionstyle="arc3,rad=0.2",
    width=1.5,
)
| [
"numpy.mean",
"numpy.unique",
"numpy.where",
"networkx.get_edge_attributes",
"networkx.draw_networkx",
"numpy.zeros",
"matplotlib.cm.ScalarMappable",
"graspy.models.SBMEstimator",
"matplotlib.colors.LogNorm",
"src.hierarchy.signal_flow",
"networkx.from_pandas_adjacency"
] | [((224, 240), 'src.hierarchy.signal_flow', 'signal_flow', (['adj'], {}), '(adj)\n', (235, 240), False, 'from src.hierarchy import signal_flow\n'), ((251, 262), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (259, 262), True, 'import numpy as np\n'), ((272, 294), 'numpy.unique', 'np.unique', (['pred_labels'], {}), '(pred_labels)\n', (281, 294), True, 'import numpy as np\n'), ((590, 654), 'networkx.from_pandas_adjacency', 'nx.from_pandas_adjacency', (['block_prob_df'], {'create_using': 'nx.DiGraph'}), '(block_prob_df, create_using=nx.DiGraph)\n', (614, 654), True, 'import networkx as nx\n'), ((846, 887), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['block_g', '"""weight"""'], {}), "(block_g, 'weight')\n", (868, 887), True, 'import networkx as nx\n'), ((962, 1001), 'matplotlib.colors.LogNorm', 'mpl.colors.LogNorm', ([], {'vmin': '(0.01)', 'vmax': '(0.1)'}), '(vmin=0.01, vmax=0.1)\n', (980, 1001), True, 'import matplotlib as mpl\n'), ((1008, 1046), 'matplotlib.cm.ScalarMappable', 'ScalarMappable', ([], {'cmap': '"""Reds"""', 'norm': 'norm'}), "(cmap='Reds', norm=norm)\n", (1022, 1046), False, 'from matplotlib.cm import ScalarMappable\n'), ((1105, 1217), 'networkx.draw_networkx', 'nx.draw_networkx', (['block_g', 'pos'], {'edge_cmap': '"""Reds"""', 'edge_color': 'cmap', 'connectionstyle': '"""arc3,rad=0.2"""', 'width': '(1.5)'}), "(block_g, pos, edge_cmap='Reds', edge_color=cmap,\n connectionstyle='arc3,rad=0.2', width=1.5)\n", (1121, 1217), True, 'import networkx as nx\n'), ((354, 385), 'numpy.mean', 'np.mean', (['node_signal_flow[inds]'], {}), '(node_signal_flow[inds])\n', (361, 385), True, 'import numpy as np\n'), ((307, 333), 'numpy.where', 'np.where', (['(pred_labels == i)'], {}), '(pred_labels == i)\n', (315, 333), True, 'import numpy as np\n'), ((447, 461), 'graspy.models.SBMEstimator', 'SBMEstimator', ([], {}), '()\n', (459, 461), False, 'from graspy.models import SBMEstimator\n')] |
from __future__ import division
import numpy as np
from scipy.interpolate import Akima1DInterpolator
def cubic_spline_3pts(x, y, T):
"""
Apperently scipy.interpolate.interp1d does not support
cubic spline for less than 4 points.
"""
x0, x1, x2 = x
y0, y1, y2 = y
x1x0, x2x1 = x1-x0, x2-x1
y1y0, y2y1 = y1-y0, y2-y1
_x1x0, _x2x1 = 1./x1x0, 1./x2x1
m11, m12, m13= 2*_x1x0, _x1x0, 0
m21, m22, m23 = _x1x0, 2.*(_x1x0+_x2x1), _x2x1
m31, m32, m33 = 0, _x2x1, 2.*_x2x1
v1 = 3*y1y0*_x1x0*_x1x0
v3 = 3*y2y1*_x2x1*_x2x1
v2 = v1+v3
M = np.matrix([[m11,m12,m13],[m21,m22,m23],[m31,m32,m33]])
v = np.matrix([v1,v2,v3]).T
k = np.array(np.linalg.inv(M)*v)
a1 = k[0]*x1x0 - y1y0
b1 =-k[1]*x1x0 + y1y0
a2 = k[1]*x2x1 - y2y1
b2 =-k[2]*x2x1 + y2y1
t = T[np.r_[T>=x0] & np.r_[T<=x2]]
t1 = (T[np.r_[T>=x0]&np.r_[T< x1]] - x0)/x1x0
t2 = (T[np.r_[T>=x1]&np.r_[T<=x2]] - x1)/x2x1
t11, t22 = 1.-t1, 1.-t2
q1 = t11*y0 + t1*y1 + t1*t11*(a1*t11 + b1*t1)
q2 = t22*y1 + t2*y2 + t2*t22*(a2*t22 + b2*t2)
q = np.append(q1,q2)
return t, q
def akima(X, Y, x):
spl = Akima1DInterpolator(X,Y)
return spl(x)
| [
"numpy.append",
"numpy.matrix",
"scipy.interpolate.Akima1DInterpolator",
"numpy.linalg.inv"
] | [((620, 682), 'numpy.matrix', 'np.matrix', (['[[m11, m12, m13], [m21, m22, m23], [m31, m32, m33]]'], {}), '([[m11, m12, m13], [m21, m22, m23], [m31, m32, m33]])\n', (629, 682), True, 'import numpy as np\n'), ((1142, 1159), 'numpy.append', 'np.append', (['q1', 'q2'], {}), '(q1, q2)\n', (1151, 1159), True, 'import numpy as np\n'), ((1214, 1239), 'scipy.interpolate.Akima1DInterpolator', 'Akima1DInterpolator', (['X', 'Y'], {}), '(X, Y)\n', (1233, 1239), False, 'from scipy.interpolate import Akima1DInterpolator\n'), ((684, 707), 'numpy.matrix', 'np.matrix', (['[v1, v2, v3]'], {}), '([v1, v2, v3])\n', (693, 707), True, 'import numpy as np\n'), ((726, 742), 'numpy.linalg.inv', 'np.linalg.inv', (['M'], {}), '(M)\n', (739, 742), True, 'import numpy as np\n')] |
import os
import sys
import json
from pathlib import Path
import warnings
import pandas as pd
import numpy as np
from skimage.io import imsave
from skimage import img_as_ubyte
import torch
from xarray.core.dataset import Dataset
from xarray.core.dataarray import DataArray
import xarray
from typing import Tuple, List
import matplotlib.pyplot as plt
from GIS_utils import bbox_from_point
from config import CubeConfig
from model import Model
def preprocess(cube: Dataset, max_cloud_proba: float = 0.1, nans_how: str = 'any', verbose: int = 1,
plot_NDWI: bool = True) -> Tuple[Dataset, DataArray]:
""" Preprocess cube for boat detect.
Args:
cube: xCube object with time dimension and (B03, B08, CLP) bands or (B03, B08, B04).
max_cloud_proba: float in [0,1]. Default 0.005 will keep imagery 99.5% cloudless.
nans_how: str, 'any' or 'all'.
verbose: int or bool.
plot_NDWI: bool. Default True will plot the NDWI layer.
Return:
cube: xCube object, preprocessed.
"""
n_snaps = len(cube.time)
cube = cube.dropna(dim='time',how=nans_how) # drop images w/ any nan
if verbose:
print('Keeping {}/{} images without nans'.format(len(cube.time), n_snaps))
if hasattr(cube, 'CLP'):
cube = cube.where(cube.CLP.mean(dim=('lat','lon'))<255*max_cloud_proba, drop=True) # sentinelhub cloud mask in [0,255.]
cube['CLP'] = cube.CLP/255.
elif hasattr(cube, 'B03') and hasattr(cube, 'B04'):
cloud_mask = ( (cube.B03>0.175)*(cube.B03>cube.B04) + (cube.B03>0.39) )>0 # cloud detector, reference (Braaten-Cohen-Yang, 2015)
cube = cube.where(cloud_mask.mean(dim=('lat','lon'))<max_cloud_proba, drop=True)
if verbose:
print('Keeping {}/{} images {}% cloudless'.format(len(cube.time), n_snaps, 100*(1-max_cloud_proba))) # keep cloudless imagery
ndwi = (cube.B03-cube.B08)/(cube.B03+cube.B08) # NDWI, reference (McFeeters, 1996)
ndwi.attrs['long_name']='-NDWI'
ndwi.attrs['units']='unitless'
cube['NDWI'] = -ndwi # add negative NDWI (high value for non water)
if plot_NDWI:
cube.NDWI.plot.imshow(col='time', col_wrap=4, cmap='coolwarm') ##### plot False Color instead!!!
cube['NDWI'] = (cube.NDWI+1.0)/2.0 # from [-1,1] to [0,1]
cube = cube*(cube<=1.0) + 1.*(cube>1.0) # clip other bands to [0,1]
background_ndwi = cube.NDWI.min(dim='time')
return cube, background_ndwi
def cube2tensor(cube, max_cloud_proba=0.1, nans_how='any', verbose=1, plot_NDWI=True):
""" Convert xcube to tensor and metadata"""
cube, background_ndwi = preprocess(cube, max_cloud_proba=max_cloud_proba, nans_how=nans_how, verbose=verbose, plot_NDWI=plot_NDWI)
timestamps = [str(t)[:10] for t in cube.time.values] # format yyyy-mm-dd
#x = np.stack([np.stack([cube.B08.values[t], background_ndwi.values, cube.CLP.values[t]], 0) for t in range(len(timestamps))], 0) # (T,3,H,W)
x = np.stack([np.stack([cube.B08.values[t], background_ndwi.values], 0) for t in range(len(timestamps))], 0) # (T,3,H,W)
x = torch.from_numpy(x)
return x, timestamps
def plot_cube_and_background(cube, background_ndwi, t=0, figsize=(25,5)):
""" Plot a cube and background.
Args:
cube: xCube object with time dimension and (B08,CLP) bands.
background: xCube object with NDWI bands.
t: int, time index
"""
device = 'cuda:0' if torch.cuda.is_available() else 'cpu' # gpu support
model = Model(input_dim=2, hidden_dim=16, kernel_size=3, device=device, version='0.1.1')
checkpoint_file = os.path.join("../factory", model.folder, 'model.pth')
model.load_checkpoint(checkpoint_file=checkpoint_file)
model = model.eval()
#x = np.stack([np.stack([cube.B08.values[t], background_ndwi.values, cube.CLP.values[t]], 0) for t in range(len(cube.time))], 0) # (T,3,H,W)
x = np.stack([np.stack([cube.B08.values[t], background_ndwi.values], 0) for t in range(len(cube.time))], 0) # (T,3,H,W)
x = torch.from_numpy(x)
heatmaps, counts = model.chip_and_count(x, filter_peaks=True, downsample=True, plot_heatmap=False, plot_indicator=False)
import matplotlib.pyplot as plt
plt.figure(figsize=figsize)
plt.subplot(1,4,1)
background_ndwi.plot(cmap='coolwarm')
plt.subplot(1,4,2)
cube.B08.isel(time=t).plot(cmap='coolwarm')
plt.xticks([]); plt.yticks([])
plt.subplot(1,4,3)
plt.xticks([]); plt.yticks([])
plt.imshow(heatmaps[t], cmap='Reds')
plt.title('y_hat = {:.1f}'.format(counts[t]))
plt.subplot(1,4,4)
cube.CLP.isel(time=t).plot(cmap='gray')
plt.xticks([]); plt.yticks([])
plt.show()
def save_labels(cube, background_ndwi, label, lat_lon, data_dir='data/chips', label_filename='data/labels.csv'):
""" Save preprocessed imagery and labels to disk.
Args:
cube: xCube object with time dimension and (B02,B03,B04,B08,NDWI) bands.
background: xCube object with (B02,B03,B04,B08,NDWI) bands.
label: list of int, boat counts for each time stamps.
lat_lon: tuple of floats, latitude and longitude in degrees.
data_dir: str, path to chips folder.
label_filename: str, path to filename of labels dict (for recovery).
"""
if len(label) != len(cube.time):
print('Error: Cube and Label have different length')
return 0
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.makedirs(data_dir, exist_ok=True)
subdir = 'lat_{}_lon_{}'.format(str(lat_lon[0]).replace('.','_'), str(lat_lon[1]).replace('.','_'))
os.makedirs(os.path.join(data_dir,subdir), exist_ok=True)
# save background + imagery and labels
imsave(os.path.join(data_dir,subdir,'bg_ndwi.png'), img_as_ubyte(background_ndwi.values))
df_labels = pd.read_csv(label_filename, usecols=["lat_lon", "timestamp", "count"])
labeled_files = []
for t, y in enumerate(label):
snap_date = str(cube.isel(time=t).time.values)[:10]
imsave(os.path.join(data_dir,subdir,'img_03_t_{}.png'.format(snap_date)), img_as_ubyte(cube.isel(time=t).B03.values))
imsave(os.path.join(data_dir,subdir,'img_08_t_{}.png'.format(snap_date)), img_as_ubyte(cube.isel(time=t).B08.values))
imsave(os.path.join(data_dir,subdir,'img_clp_t_{}.png'.format(snap_date)), img_as_ubyte(cube.isel(time=t).CLP.values))
labeled_files.append((subdir, snap_date, y))
df1 = pd.DataFrame(labeled_files).rename(columns={0: "lat_lon", 1: "timestamp", 2: "count"})
df_labels = df_labels.append(df1, ignore_index=True)
df_labels.to_csv(label_filename, index=False)
print('Saved {} labels for {}'.format(len(label), subdir))
def save_cubes(cube, background_ndwi, lat_lon, data_dir='data/chips', verbose = True):
""" Save preprocessed imagery to disk.
Args:
cube: xCube object with time dimension and (B02,B03,B04,B08,NDWI) bands.
background: xCube object with (B02,B03,B04,B08,NDWI) bands.
lat_lon: tuple of floats, latitude and longitude in degrees.
data_dir: str, path to chips folder.
"""
if not sys.warnoptions:
warnings.simplefilter("ignore")
os.makedirs(data_dir, exist_ok=True)
subdir = 'lat_{}_lon_{}'.format(str(lat_lon[0]).replace('.', '_'), str(lat_lon[1]).replace('.', '_'))
os.makedirs(os.path.join(data_dir, subdir), exist_ok=True)
# save background + imagery and labels
imsave(os.path.join(data_dir, subdir, 'bg_ndwi.png'), img_as_ubyte(background_ndwi.values))
for t in cube.time: # y is the account of ships in the image
snap_date = str(t.values)[:10]
imsave(os.path.join(data_dir, subdir, 'img_03_t_{}.png'.format(snap_date)),
img_as_ubyte(cube.sel(time=t).B03.values))
imsave(os.path.join(data_dir, subdir, 'img_08_t_{}.png'.format(snap_date)),
img_as_ubyte(cube.sel(time=t).B08.values))
imsave(os.path.join(data_dir, subdir, 'img_clp_t_{}.png'.format(snap_date)),
img_as_ubyte(cube.sel(time=t).CLP.values))
imsave(os.path.join(data_dir, subdir, 'img_ndwi_t_{}.png'.format(snap_date)),
img_as_ubyte(cube.sel(time=t).NDWI.values))
if verbose:
print('Saved cubes with timestamp {} under {}'.format(snap_date, subdir))
def request_save_cubes(start_date: str, end_date: str, lat: float, lon: float, data_chips_dir: str,
RADIUS: int = 500, dataset_name: str = 'S2L1C', band_names: List = ['B03', 'B08', 'CLP'],
max_cloud_proba: float = 0.1, time_period: str = '1D'):
"""
:param start_date: '2019-01-01'
:param end_date: '2019-06-30'
:param lat: latitude
:param lon: longitude
:param data_chips_dir: download image will be saved to this dir like 'data/chips'
:param RADIUS: radius in meter
:param dataset_name: either S2L1C or S2L2A
:param band_names: a list of bands to be saved
:param max_cloud_proba: maximum probability of cloud
:param time_period: 1D
:return:
"""
from xcube_sh.cube import open_cube
from xcube_sh.observers import Observers
bbox = bbox_from_point(lat=lat, lon=lon, r=RADIUS)
cube_config = CubeConfig(dataset_name=dataset_name,
band_names=band_names, # GREEN + NIR + Clouds
tile_size=[2 * RADIUS // 10, 2 * RADIUS // 10],
geometry=bbox,
time_range=[start_date, end_date],
time_period=time_period,
)
request_collector = Observers.request_collector()
cube = open_cube(cube_config, observer=request_collector)
cube, background_ndwi = preprocess(cube, max_cloud_proba=max_cloud_proba,
nans_how='any', verbose=1, plot_NDWI=False)
save_cubes(cube, background_ndwi, lat_lon=(lat, lon), data_dir=Path(data_chips_dir), verbose=False)
def remove_s1_empty_nans(cube: Dataset, nans_how='any'):
"""
remove any bands that doesn't exist in cube and time stamps that has no
:param cube:
:param nans_how:
:return:
"""
all_bands = ["HH", "HV", "VH", "VV", "HH+HV", "VV+VH"]
for band in all_bands:
try:
cube[band].values
cube[band] = cube[band].dropna(dim='time', how=nans_how)
cube = cube.where(cube[band].mean(dim=('lat', 'lon')) > 0.0, drop=True)
except:
cube = cube.drop_vars(band)
return cube
def generate_bg_from_s1(cube: Dataset, bg_band="VV", fused_by="min"):
"""
:param cube: xarray dataset
:param bg_band: from which band the background should be generated
:param fused_by: either min, max, median or mean
:return: the backgound xarray
"""
if fused_by == "min":
background = cube[bg_band].min(dim="time")
elif fused_by == "mean":
background = cube[bg_band].mean(dim="time")
elif fused_by == "median":
background = cube[bg_band].median(dim="time")
else:
background = cube[bg_band].max(dim="time")
return background
def generate_landwater_mask(cube:Dataset, threshold_quantile=0.625, plot=True):
"""
generate a binary land water mask of given cube
:param cube: xarray Dataset
:param plot: if to plot
:param threshold_quantile:
:return: a binary land water mask
"""
bg_VV = generate_bg_from_s1(cube, bg_band="VV", fused_by="min")
bg_VH = generate_bg_from_s1(cube, bg_band="VH", fused_by="min")
threshold_VV = bg_VV.quantile(q=threshold_quantile)
threshold_VH = bg_VH.quantile(q=threshold_quantile)
binary_bg_VH = bg_VH.where(bg_VH > threshold_VH).fillna(1).where(bg_VH <= threshold_VH).fillna(0)
binary_bg_VV = bg_VV.where(bg_VV > threshold_VV).fillna(1).where(bg_VV <= threshold_VV).fillna(0)
binary_bg = binary_bg_VH * binary_bg_VV
if plot:
plt.figure(figsize=(20, 5))
plt.subplot(1, 3, 1)
binary_bg_VH.plot.imshow()
plt.title("binary_bg_VH")
plt.subplot(1, 3, 2)
binary_bg_VV.plot.imshow()
plt.title("binary_bg_VV")
plt.subplot(1, 3, 3)
binary_bg.plot.imshow()
plt.title("binary_bg")
stacked_binary_bg_VH = xarray.concat([binary_bg_VH.expand_dims('time') for i in range(len(cube.time))], dim='time')
stacked_binary_bg_VV = xarray.concat([binary_bg_VV.expand_dims('time') for i in range(len(cube.time))], dim='time')
stacked_binary_bg = xarray.concat([binary_bg.expand_dims('time') for i in range(len(cube.time))], dim='time')
return stacked_binary_bg_VH, stacked_binary_bg_VV, stacked_binary_bg | [
"GIS_utils.bbox_from_point",
"model.Model",
"pandas.read_csv",
"torch.from_numpy",
"torch.cuda.is_available",
"xcube_sh.cube.open_cube",
"matplotlib.pyplot.imshow",
"pathlib.Path",
"numpy.stack",
"matplotlib.pyplot.yticks",
"warnings.simplefilter",
"pandas.DataFrame",
"skimage.img_as_ubyte",... | [((3085, 3104), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (3101, 3104), False, 'import torch\n'), ((3503, 3588), 'model.Model', 'Model', ([], {'input_dim': '(2)', 'hidden_dim': '(16)', 'kernel_size': '(3)', 'device': 'device', 'version': '"""0.1.1"""'}), "(input_dim=2, hidden_dim=16, kernel_size=3, device=device, version='0.1.1'\n )\n", (3508, 3588), False, 'from model import Model\n'), ((3606, 3659), 'os.path.join', 'os.path.join', (['"""../factory"""', 'model.folder', '"""model.pth"""'], {}), "('../factory', model.folder, 'model.pth')\n", (3618, 3659), False, 'import os\n'), ((4021, 4040), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (4037, 4040), False, 'import torch\n'), ((4211, 4238), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (4221, 4238), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4263), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (4254, 4263), True, 'import matplotlib.pyplot as plt\n'), ((4308, 4328), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(2)'], {}), '(1, 4, 2)\n', (4319, 4328), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4393), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4389, 4393), True, 'import matplotlib.pyplot as plt\n'), ((4395, 4409), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4405, 4409), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4434), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(3)'], {}), '(1, 4, 3)\n', (4425, 4434), True, 'import matplotlib.pyplot as plt\n'), ((4437, 4451), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4447, 4451), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4467), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4463, 4467), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4508), 'matplotlib.pyplot.imshow', 
'plt.imshow', (['heatmaps[t]'], {'cmap': '"""Reds"""'}), "(heatmaps[t], cmap='Reds')\n", (4482, 4508), True, 'import matplotlib.pyplot as plt\n'), ((4563, 4583), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (4574, 4583), True, 'import matplotlib.pyplot as plt\n'), ((4630, 4644), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4640, 4644), True, 'import matplotlib.pyplot as plt\n'), ((4646, 4660), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4656, 4660), True, 'import matplotlib.pyplot as plt\n'), ((4665, 4675), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4673, 4675), True, 'import matplotlib.pyplot as plt\n'), ((5475, 5511), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (5486, 5511), False, 'import os\n'), ((5832, 5902), 'pandas.read_csv', 'pd.read_csv', (['label_filename'], {'usecols': "['lat_lon', 'timestamp', 'count']"}), "(label_filename, usecols=['lat_lon', 'timestamp', 'count'])\n", (5843, 5902), True, 'import pandas as pd\n'), ((7211, 7247), 'os.makedirs', 'os.makedirs', (['data_dir'], {'exist_ok': '(True)'}), '(data_dir, exist_ok=True)\n', (7222, 7247), False, 'import os\n'), ((9183, 9226), 'GIS_utils.bbox_from_point', 'bbox_from_point', ([], {'lat': 'lat', 'lon': 'lon', 'r': 'RADIUS'}), '(lat=lat, lon=lon, r=RADIUS)\n', (9198, 9226), False, 'from GIS_utils import bbox_from_point\n'), ((9245, 9436), 'config.CubeConfig', 'CubeConfig', ([], {'dataset_name': 'dataset_name', 'band_names': 'band_names', 'tile_size': '[2 * RADIUS // 10, 2 * RADIUS // 10]', 'geometry': 'bbox', 'time_range': '[start_date, end_date]', 'time_period': 'time_period'}), '(dataset_name=dataset_name, band_names=band_names, tile_size=[2 *\n RADIUS // 10, 2 * RADIUS // 10], geometry=bbox, time_range=[start_date,\n end_date], time_period=time_period)\n', (9255, 9436), False, 'from config import CubeConfig\n'), ((9653, 9682), 
'xcube_sh.observers.Observers.request_collector', 'Observers.request_collector', ([], {}), '()\n', (9680, 9682), False, 'from xcube_sh.observers import Observers\n'), ((9694, 9744), 'xcube_sh.cube.open_cube', 'open_cube', (['cube_config'], {'observer': 'request_collector'}), '(cube_config, observer=request_collector)\n', (9703, 9744), False, 'from xcube_sh.cube import open_cube\n'), ((3440, 3465), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3463, 3465), False, 'import torch\n'), ((5430, 5461), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (5451, 5461), False, 'import warnings\n'), ((5632, 5662), 'os.path.join', 'os.path.join', (['data_dir', 'subdir'], {}), '(data_dir, subdir)\n', (5644, 5662), False, 'import os\n'), ((5733, 5778), 'os.path.join', 'os.path.join', (['data_dir', 'subdir', '"""bg_ndwi.png"""'], {}), "(data_dir, subdir, 'bg_ndwi.png')\n", (5745, 5778), False, 'import os\n'), ((5778, 5814), 'skimage.img_as_ubyte', 'img_as_ubyte', (['background_ndwi.values'], {}), '(background_ndwi.values)\n', (5790, 5814), False, 'from skimage import img_as_ubyte\n'), ((7174, 7205), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (7195, 7205), False, 'import warnings\n'), ((7370, 7400), 'os.path.join', 'os.path.join', (['data_dir', 'subdir'], {}), '(data_dir, subdir)\n', (7382, 7400), False, 'import os\n'), ((7471, 7516), 'os.path.join', 'os.path.join', (['data_dir', 'subdir', '"""bg_ndwi.png"""'], {}), "(data_dir, subdir, 'bg_ndwi.png')\n", (7483, 7516), False, 'import os\n'), ((7518, 7554), 'skimage.img_as_ubyte', 'img_as_ubyte', (['background_ndwi.values'], {}), '(background_ndwi.values)\n', (7530, 7554), False, 'from skimage import img_as_ubyte\n'), ((11970, 11997), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 5)'}), '(figsize=(20, 5))\n', (11980, 11997), True, 'import matplotlib.pyplot as plt\n'), ((12006, 12026), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (12017, 12026), True, 'import matplotlib.pyplot as plt\n'), ((12070, 12095), 'matplotlib.pyplot.title', 'plt.title', (['"""binary_bg_VH"""'], {}), "('binary_bg_VH')\n", (12079, 12095), True, 'import matplotlib.pyplot as plt\n'), ((12104, 12124), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (12115, 12124), True, 'import matplotlib.pyplot as plt\n'), ((12168, 12193), 'matplotlib.pyplot.title', 'plt.title', (['"""binary_bg_VV"""'], {}), "('binary_bg_VV')\n", (12177, 12193), True, 'import matplotlib.pyplot as plt\n'), ((12202, 12222), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (12213, 12222), True, 'import matplotlib.pyplot as plt\n'), ((12263, 12285), 'matplotlib.pyplot.title', 'plt.title', (['"""binary_bg"""'], {}), "('binary_bg')\n", (12272, 12285), True, 'import matplotlib.pyplot as plt\n'), ((2970, 3027), 'numpy.stack', 'np.stack', (['[cube.B08.values[t], background_ndwi.values]', '(0)'], {}), '([cube.B08.values[t], background_ndwi.values], 0)\n', (2978, 3027), True, 'import numpy as np\n'), ((3907, 3964), 'numpy.stack', 'np.stack', (['[cube.B08.values[t], background_ndwi.values]', '(0)'], {}), '([cube.B08.values[t], background_ndwi.values], 0)\n', (3915, 3964), True, 'import numpy as np\n'), ((6462, 6489), 'pandas.DataFrame', 'pd.DataFrame', (['labeled_files'], {}), '(labeled_files)\n', (6474, 6489), True, 'import pandas as pd\n'), ((9974, 9994), 'pathlib.Path', 'Path', (['data_chips_dir'], {}), '(data_chips_dir)\n', (9978, 9994), False, 'from pathlib import Path\n')] |
import os
import sys
import random
import itertools
import colorsys
import numpy as np
from skimage.measure import find_contours
import matplotlib.pyplot as plt
from matplotlib import patches, lines
from matplotlib.patches import Polygon
# import IPython.display
def random_colors(N, bright=True):
'''
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
'''
brightness = 1.0 if bright else 0.7
hsv = [(i / N, 1, brightness) for i in range(N)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
random.shuffle(colors)
return colors
def display_instances(image, boxes, class_ids, class_names,
scores=None, title="",
figsize=(16, 16), ax=None):
'''
boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.
class_ids: [num_instances]
class_names: list of class names of the dataset
scores: (optional) confidence scores for each box
figsize: (optional) the size of the image.
'''
# Number of instances
N = boxes.shape[0]
if not N:
print("\n*** No instances to display *** \n")
# else:
# assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
if not ax:
_, ax = plt.subplots(1, figsize=figsize)
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
height, width = image.shape[:2]
ax.set_ylim(height + 10, -10)
ax.set_xlim(-10, width + 10)
ax.axis('off')
ax.set_title(title)
masked_image = image.astype(np.uint32).copy()
for i in range(N):
color = colors[i]
# Bounding box
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in image cropping.
continue
y1, x1, y2, x2 = boxes[i]
y1, x1, y2, x2 = int(y1), int(x1), int(y2), int(x2)
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=0.7, linestyle="dashed",
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Label
class_id = class_ids[i]
score = scores[i] if scores is not None else None
label = class_names[class_id]
x = random.randint(x1, (x1 + x2) // 2)
caption = "{} {:.3f}".format(label, score) if score else label
ax.text(x1, y1 + 8, caption,
color='w', size=11, backgroundcolor="none")
plt.imshow(image.astype(np.uint8))
def draw_boxes(image, boxes=None, refined_boxes=None,
captions=None, visibilities=None,
title="", ax=None):
'''Draw bounding boxes and segmentation masks with differnt
customizations.
boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.
refined_boxes: Like boxes, but draw with solid lines to show
that they're the result of refining 'boxes'.
captions: List of N titles to display on each box
visibilities: (optional) List of values of 0, 1, or 2. Determine how
prominant each bounding box should be.
title: An optional title to show over the image
ax: (optional) Matplotlib axis to draw on.
'''
# Number of boxes
N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]
# Matplotlib Axis
if not ax:
_, ax = plt.subplots(1, figsize=(16, 16))
# Generate random colors
colors = random_colors(N)
# Show area outside image boundaries.
margin = image.shape[0] // 10
ax.set_ylim(image.shape[0] + margin, -margin)
ax.set_xlim(-margin, image.shape[1] + margin)
ax.axis('off')
ax.set_title(title)
for i in range(N):
# Box visibility
visibility = visibilities[i] if visibilities is not None else 1
if visibility == 0:
color = "gray"
style = "dotted"
alpha = 0.5
elif visibility == 1:
color = colors[i]
style = "dotted"
alpha = 1
elif visibility == 2:
color = colors[i]
style = "solid"
alpha = 1
# Boxes
if boxes is not None:
if not np.any(boxes[i]):
# Skip this instance. Has no bbox. Likely lost in cropping.
continue
y1, x1, y2, x2 = boxes[i]
p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
alpha=alpha, linestyle=style,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Refined boxes
if refined_boxes is not None and visibility > 0:
ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)
p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2,
edgecolor=color, facecolor='none')
ax.add_patch(p)
# Connect the top-left corners of the anchor and proposal
if boxes is not None:
ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))
# Captions
if captions is not None:
caption = captions[i]
# If there are refined boxes, display captions on them
if refined_boxes is not None:
y1, x1, y2, x2 = ry1, rx1, ry2, rx2
x = random.randint(x1, (x1 + x2) // 2)
ax.text(x1, y1, caption, size=11, verticalalignment='top',
color='w', backgroundcolor="none",
bbox={'facecolor': color, 'alpha': 0.5,
'pad': 2, 'edgecolor': 'none'})
ax.imshow(image.astype(np.uint8)) | [
"matplotlib.patches.Rectangle",
"random.randint",
"random.shuffle",
"numpy.any",
"colorsys.hsv_to_rgb",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots"
] | [((594, 616), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (608, 616), False, 'import random\n'), ((1301, 1333), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': 'figsize'}), '(1, figsize=figsize)\n', (1313, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1945, 2073), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'linewidth': '(2)', 'alpha': '(0.7)', 'linestyle': '"""dashed"""', 'edgecolor': 'color', 'facecolor': '"""none"""'}), "((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7,\n linestyle='dashed', edgecolor=color, facecolor='none')\n", (1962, 2073), False, 'from matplotlib import patches, lines\n'), ((2311, 2345), 'random.randint', 'random.randint', (['x1', '((x1 + x2) // 2)'], {}), '(x1, (x1 + x2) // 2)\n', (2325, 2345), False, 'import random\n'), ((3394, 3427), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(16, 16)'}), '(1, figsize=(16, 16))\n', (3406, 3427), True, 'import matplotlib.pyplot as plt\n'), ((1722, 1738), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (1728, 1738), True, 'import numpy as np\n'), ((4398, 4525), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'linewidth': '(2)', 'alpha': 'alpha', 'linestyle': 'style', 'edgecolor': 'color', 'facecolor': '"""none"""'}), "((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha,\n linestyle=style, edgecolor=color, facecolor='none')\n", (4415, 4525), False, 'from matplotlib import patches, lines\n'), ((4783, 4887), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(rx1, ry1)', '(rx2 - rx1)', '(ry2 - ry1)'], {'linewidth': '(2)', 'edgecolor': 'color', 'facecolor': '"""none"""'}), "((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=\n color, facecolor='none')\n", (4800, 4887), False, 'from matplotlib import patches, lines\n'), ((5390, 5424), 'random.randint', 'random.randint', (['x1', '((x1 + x2) // 2)'], {}), '(x1, 
(x1 + x2) // 2)\n', (5404, 5424), False, 'import random\n'), ((559, 582), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*c'], {}), '(*c)\n', (578, 582), False, 'import colorsys\n'), ((4225, 4241), 'numpy.any', 'np.any', (['boxes[i]'], {}), '(boxes[i])\n', (4231, 4241), True, 'import numpy as np\n'), ((5077, 5124), 'matplotlib.lines.Line2D', 'lines.Line2D', (['[x1, rx1]', '[y1, ry1]'], {'color': 'color'}), '([x1, rx1], [y1, ry1], color=color)\n', (5089, 5124), False, 'from matplotlib import patches, lines\n')] |
import numpy as np
import matplotlib.pyplot as plt
# numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None)
#
# 在指定的间隔内返回均匀间隔的数字。
#
# 返回num均匀分布的样本,在[start, stop]。
#
# 这个区间的端点可以任意的被排除在外。
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
print(X)
C, S = np.cos(X), np.sin(X)
plt.plot(X, C)
plt.plot(X, S)
plt.show()
| [
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.show"
] | [((215, 261), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(256)'], {'endpoint': '(True)'}), '(-np.pi, np.pi, 256, endpoint=True)\n', (226, 261), True, 'import numpy as np\n'), ((300, 314), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'C'], {}), '(X, C)\n', (308, 314), True, 'import matplotlib.pyplot as plt\n'), ((315, 329), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'S'], {}), '(X, S)\n', (323, 329), True, 'import matplotlib.pyplot as plt\n'), ((331, 341), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (339, 341), True, 'import matplotlib.pyplot as plt\n'), ((278, 287), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (284, 287), True, 'import numpy as np\n'), ((289, 298), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (295, 298), True, 'import numpy as np\n')] |
from utils.eom import *
from sympy import symbols, factor
from sympy import simplify
from sympy.physics.mechanics import *
from sympy import sin, cos, symbols, Matrix, solve
from sympy.physics.vector import init_vprinting
import pylab as pl
import control
import numpy as np
from scipy.integrate import odeint
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
import pickle
with open("src/dual_gazebo/src/save_parameter/sys0.txt",'rb') as inf:
sys0 = pickle.load(inf)
import time
t1 = time.time()
# Define Symbolic Variables
x,x_l,theta = dynamicsymbols('x,x_l,theta')
phi = dynamicsymbols('phi')
F,F_l = dynamicsymbols('F,F_l')
r,h_c,h_t = symbols('r,h_c,h_t')
I_w,I_c,I_t = symbols('I_w,I_c,I_t')
m_w, m_c, m_t, g, t = symbols('m_w, m_c, m_t, g, t')
# Newtonian Reference Frames
N = ReferenceFrame('N')
No = Point('No')
No.set_vel(N, 0)
# Wheel Center Point
Wo = No.locatenew('Wo', x*N.x + r*N.z)
Wo.set_vel(N, Wo.pos_from(No).diff(t, N))
# Pendulum
P = N.orientnew('P', 'Axis', [theta, N.y])
Po = Wo.locatenew('Po', h_c*P.z)
Po.set_vel(P, 0)
J_pend = inertia(P, 0, I_c, 0)
Pend = RigidBody('Pend', Po, P, m_c, (J_pend, Po))
# Torso
T = P.orientnew('T', 'Axis', [0, P.y])
To = Wo.locatenew('To', x_l*P.x + h_t*P.z)
To.set_vel(T, 0)
J_torso = inertia(T, 0, I_t, 0)
Torso = RigidBody('Torso', To, T, m_t, (J_torso, To))
# Wheel
W = P.orientnew('W', 'Axis', [phi, P.y])
Wo.set_vel(W, 0)
J_wheel = inertia(W, 0, I_w, 0)
Wheel = RigidBody('Wheel', Wo, W, m_w, (J_wheel, Wo))
Wn = Wo.locatenew('Wn', -r*N.z)
Wn.v2pt_theory(Wo, N, W)
constraints = Wn.vel(N).express(N).args[0][0]
con = solve(constraints, [phi.diff()])
con_rhs = Matrix(list(con.values()))
con_lhs = Matrix(list(con.keys()))
# Generalized coordinates
q = Matrix([[x], [x_l], [theta]])
qd = q.diff()
qdd = qd.diff()
flist = [(Wo, -m_w*g*N.z),
(Po, -m_c*g*N.z),
(To, -m_t*g*N.z),
(Wo, F*N.x),
(To, F_l*T.x),
(P, -F_l*h_t*P.y)]
Lag = Lagrangian(N, Pend, Torso, Wheel)
nonslip_condition = {con_lhs[0]:con_rhs[0]}
Lag_constrainted = msubs(Lag, nonslip_condition)
Le = LagrangesMethod(Lag_constrainted, q, forcelist=flist, frame=N)
eoms = Le.form_lagranges_equations()
eoms_simple = simplify(eoms) ## save point
inv_dyn = get_Simplified_EoM(eoms_simple, q)
linearlize_eq = {sin(theta):theta, cos(theta):1, theta.diff():0, x_l:0}
inv_dyn_linear = msubs(inv_dyn, linearlize_eq)
# Control Input Variable
u = Matrix([[F], [F_l]])
# Inverse Dynamics Equation
# M(q)*qdd + C(q,qd) + G(q) = W*u
M, C, G, W = get_EoM_from_T(inv_dyn,qdd,g,u)
Ml, Cl, Gl, Wl = get_EoM_from_T(inv_dyn_linear,qdd,g,u) ## save point
# Physical Parameters (SI units)
#param = {r:0.25, h_c:0.25, h_t:0.25, m_w:30, m_c:370, m_t:300, g:9.8}
param = {r:0.25, h_c:0.25, h_t:0.5, m_w:60, m_c:340, m_t:300, g:9.8}
# cart body footprint used only for the inertia estimate below
param['c_width'] = 0.7 #0.5
param['c_height'] = 0.2 #0.25
# torso size
param['t_width'] = 2.5
param['t_height'] = 0.5
# Moment of Inertia
# Wheel uses the solid-cylinder formula m*r^2/2; cart and torso use the
# rectangular-plate formula m*(w^2 + h^2)/12 about the pitch axis.
param[I_w] = 1/2*param[m_w]*param[r]**2
param[I_c] = 1/12*param[m_c]*(param['c_width']**2 + param['c_height']**2)
param[I_t] = 1/12*param[m_t]*(param['t_width']**2 + param['t_height']**2)
# Numeric *linearised* dynamics: qdd = qdd_rhs_A + qdd_rhs_B (with u).
Mlp = msubs(Ml, param)
Clp = msubs(Cl, param)
Glp = msubs(Gl, param)
Wlp = msubs(Wl, param)
Mlp_inv = simplify(Mlp.inv())
qdd_rhs_A = simplify(Mlp_inv*(-Clp -Glp))
qdd_rhs_B = simplify(Mlp_inv*Wlp*u)
# Numeric *nonlinear* dynamics, consumed by simulate_model_closed below.
Mp = msubs(M, param)
Cp = msubs(C, param)
Gp = msubs(G, param)
Wp = msubs(W, param)
Mp_inv = (Mp.inv())
qdd_rhs_A_nonL = (Mp_inv*(-Cp -Gp))
qdd_rhs_B_nonL = (Mp_inv*Wp*u)
# Select one channel of the state-space model for a transfer function.
# NOTE(review): `sys0` is built earlier in the file (not visible here);
# presumably sys0[3,0] picks output 3 (xdot?) from input 0 -- confirm.
sys0_output = sys0[3,0]
tf_20 = tf_clean(control.minreal(control.ss2tf(sys0_output)))
#Q = pl.eye(sys0.A.shape[0])
#R = pl.eye(sys0.B.shape[1])*0.00001
# state : [x, x_l, theta, xdot, x_ldot, thetadot]
# LQR weights: extra penalty (5) on the torso slide x_l; very cheap
# actuation (tiny R) so the controller uses large inputs freely.
Q = Matrix([ [1,0,0,0,0,0],
            [0,5,0,0,0,0],
            [0,0,1,0,0,0],
            [0,0,0,1,0,0],
            [0,0,0,0,1,0],
            [0,0,0,0,0,1] ])
R = Matrix([ [0.0000001,0],
            [0,0.00001] ])
K, S, E = control.lqr(sys0.A, sys0.B, Q, R)
sysc = sys0.feedback(K)
# Closed-loop sanity check: release from a 0.1 rad initial tilt.
x0 = [0, 0, 0.1, 0, 0, 0]
# NOTE(review): `u = 0` and `t, y = ...` clobber the sympy input matrix
# `u` and the time symbol `t` defined above -- confirm intended.
u = 0
dt = 0.01
tf = 10
t, y = control.forced_response(sysc, X0=x0, T=pl.linspace(0,tf), transpose=True)
# --- Reference trajectory: trapezoidal speed profile --------------------
vmax_ = 22/3.6  # presumably 22 km/h expressed in m/s
t_ = 20# sec
target_pos = vmax_*t_
# Normalised profile parameters (profile is later scaled by target_pos).
v = vmax_/target_pos
a = v/4
t_s, traj_s = Trapezoidal_Traj_Gen_Given_Amax_and_T(a,t_,0.01)
# Columns of traj_s: [position, velocity, acceleration] (normalised).
x_des = traj_s[:,0]*target_pos
xdot_des = traj_s[:,1]*target_pos
xl_des = traj_s[:,2]*target_pos/4 # using acceleration as xl_des
zeros = np.zeros(len(traj_s))
# Stack the 6-row desired-state trajectory
# [x, x_l, theta, xdot, x_ldot, thetadot]; rows without a profile are 0.
Xdes = x_des
Xdes = np.vstack((Xdes, xl_des))
#Xdes = np.vstack((Xdes, zeros))
Xdes = np.vstack((Xdes, zeros))
Xdes = np.vstack((Xdes, xdot_des))
Xdes = np.vstack((Xdes, zeros))
Xdes = np.vstack((Xdes, zeros))
ss = sys0
rad2deg = 180/np.pi
def simulate_model_closed(X0, Xdes, K_gain, time_array, dt):
    """Simulate the closed-loop nonlinear model under partial state feedback.

    Parameters
    ----------
    X0 : array-like, shape (6,)
        Initial state [x, x_l, theta, x_dot, x_l_dot, theta_dot].
    Xdes : ndarray, shape (6, M)
        Desired-state trajectory, one column per sample.
    K_gain : sequence of two gain rows
        Feedback gains; row 0 drives F, row 1 drives F_l.  Only the last
        five entries of each row are used (no feedback on absolute x).
    time_array : array-like
        Simulation time stamps; integration uses the fixed step `dt`.
    dt : float
        Explicit-Euler integration step.

    Returns
    -------
    t_save, x_save, xref_save, u_save : ndarrays
        Logged time, state, reference and control histories (row 0 of
        each log is a zero seed row, as in the original tuning scripts).

    Notes
    -----
    Relies on module-level names: the sympy symbols x, x_l, theta, F,
    F_l and the precomputed nonlinear dynamics qdd_rhs_A_nonL /
    qdd_rhs_B_nonL (evaluated with msubs).

    Fixes vs. previous revision: removed dead statements (`Xd_prev`,
    an `i += 1` inside the `for` loop, a trailing `t = t + dt`, unused
    Aop/Bop) and copy the final trajectory column before zeroing its
    x_l entry, so the caller's Xdes array is no longer mutated in place.
    """
    j = 0
    X = Xref = X0
    t_save = [0]
    x_save = xref_save = np.array([0, 0, 0, 0, 0, 0])
    u_save = np.array([0, 0])
    xl_limit = 0.5  # saturation limit on the torso-slide travel x_l
    for t in time_array:
        # Reference scheduling: hold X0 for the first 2 s, then play the
        # trajectory column by column, then hold its final point with
        # the desired torso slide forced to zero.
        if t < 2:
            Xref = X0
        elif j < Xdes.shape[1]:
            Xref = Xdes[:, j]
            j += 1
        else:
            Xdes_final = Xdes[:, -1].copy()  # copy: don't mutate caller's Xdes
            Xdes_final[1] = 0  # force to set xl_des as 0
            Xref = Xdes_final
        # Partial state feedback (absolute position x is not fed back).
        u1 = K_gain[0][1:] @ (Xref[1:] - X[1:])
        u2 = K_gain[1][1:] @ (Xref[1:] - X[1:])
        # Forward dynamics of the nonlinear model.
        q_qd = {x: X[0], x_l: X[1], theta: X[2],
                x.diff(): X[3], x_l.diff(): X[4], theta.diff(): X[5]}
        q_qd[F] = u1
        q_qd[F_l] = u2
        qdd = msubs(qdd_rhs_A_nonL, q_qd) + msubs(qdd_rhs_B_nonL, q_qd)
        Xd = np.array([X[3], X[4], X[5],
                       float(qdd[0]), float(qdd[1]), float(qdd[2])])
        t_save = np.vstack((t_save, t))
        x_save = np.vstack((x_save, X))
        xref_save = np.vstack((xref_save, Xref))
        u_save = np.vstack((u_save, np.array([u1, u2])))
        # Explicit Euler step, then clip x_l to its physical travel limit.
        X = X + Xd * dt
        if X[1] >= xl_limit:
            X[1] = xl_limit
        elif X[1] <= -xl_limit:
            X[1] = -xl_limit
    return t_save, x_save, xref_save, u_save
# initial condition
# [x, x_l, theta, x_dot,x_l_dot, theta_dot]
X0 = np.array([0,0,0,0,0,0])
# total simulated time (s): trajectory window plus settling time
tf = 20 + 7
dt = 0.01
# NOTE(review): `N` and `t` below clobber the sympy reference frame N
# and the time symbol t defined above -- confirm nothing later needs them.
N = int(tf/dt)
# time points
t = np.linspace(0,tf,N)
# simulation
t_sim, x_sim, xref_sim, u_sim = simulate_model_closed(X0, Xdes, K, t, dt)
print(time.time() - t1) | [
"sympy.sin",
"sympy.cos",
"control.ss2tf",
"control.lqr",
"sympy.simplify",
"sympy.Matrix",
"pickle.load",
"sympy.symbols",
"numpy.array",
"numpy.linspace",
"numpy.vstack",
"pylab.linspace",
"time.time"
] | [((600, 611), 'time.time', 'time.time', ([], {}), '()\n', (609, 611), False, 'import time\n'), ((757, 777), 'sympy.symbols', 'symbols', (['"""r,h_c,h_t"""'], {}), "('r,h_c,h_t')\n", (764, 777), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((792, 814), 'sympy.symbols', 'symbols', (['"""I_w,I_c,I_t"""'], {}), "('I_w,I_c,I_t')\n", (799, 814), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((837, 867), 'sympy.symbols', 'symbols', (['"""m_w, m_c, m_t, g, t"""'], {}), "('m_w, m_c, m_t, g, t')\n", (844, 867), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((1845, 1874), 'sympy.Matrix', 'Matrix', (['[[x], [x_l], [theta]]'], {}), '([[x], [x_l], [theta]])\n', (1851, 1874), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((2320, 2334), 'sympy.simplify', 'simplify', (['eoms'], {}), '(eoms)\n', (2328, 2334), False, 'from sympy import simplify\n'), ((2546, 2566), 'sympy.Matrix', 'Matrix', (['[[F], [F_l]]'], {}), '([[F], [F_l]])\n', (2552, 2566), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((3375, 3407), 'sympy.simplify', 'simplify', (['(Mlp_inv * (-Clp - Glp))'], {}), '(Mlp_inv * (-Clp - Glp))\n', (3383, 3407), False, 'from sympy import simplify\n'), ((3417, 3444), 'sympy.simplify', 'simplify', (['(Mlp_inv * Wlp * u)'], {}), '(Mlp_inv * Wlp * u)\n', (3425, 3444), False, 'from sympy import simplify\n'), ((3825, 3958), 'sympy.Matrix', 'Matrix', (['[[1, 0, 0, 0, 0, 0], [0, 5, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 0,\n 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0, 0], [0, 5, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, \n 0, 1, 0, 0], [0, 0, 0, 0, 1, 0], [0, 0, 0, 0, 0, 1]])\n', (3831, 3958), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((3995, 4027), 'sympy.Matrix', 'Matrix', (['[[1e-07, 0], [0, 1e-05]]'], {}), '([[1e-07, 0], [0, 1e-05]])\n', (4001, 4027), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((4059, 4092), 
'control.lqr', 'control.lqr', (['sys0.A', 'sys0.B', 'Q', 'R'], {}), '(sys0.A, sys0.B, Q, R)\n', (4070, 4092), False, 'import control\n'), ((4577, 4602), 'numpy.vstack', 'np.vstack', (['(Xdes, xl_des)'], {}), '((Xdes, xl_des))\n', (4586, 4602), True, 'import numpy as np\n'), ((4643, 4667), 'numpy.vstack', 'np.vstack', (['(Xdes, zeros)'], {}), '((Xdes, zeros))\n', (4652, 4667), True, 'import numpy as np\n'), ((4676, 4703), 'numpy.vstack', 'np.vstack', (['(Xdes, xdot_des)'], {}), '((Xdes, xdot_des))\n', (4685, 4703), True, 'import numpy as np\n'), ((4711, 4735), 'numpy.vstack', 'np.vstack', (['(Xdes, zeros)'], {}), '((Xdes, zeros))\n', (4720, 4735), True, 'import numpy as np\n'), ((4743, 4767), 'numpy.vstack', 'np.vstack', (['(Xdes, zeros)'], {}), '((Xdes, zeros))\n', (4752, 4767), True, 'import numpy as np\n'), ((6657, 6685), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (6665, 6685), True, 'import numpy as np\n'), ((6738, 6759), 'numpy.linspace', 'np.linspace', (['(0)', 'tf', 'N'], {}), '(0, tf, N)\n', (6749, 6759), True, 'import numpy as np\n'), ((562, 578), 'pickle.load', 'pickle.load', (['inf'], {}), '(inf)\n', (573, 578), False, 'import pickle\n'), ((2413, 2423), 'sympy.sin', 'sin', (['theta'], {}), '(theta)\n', (2416, 2423), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((2431, 2441), 'sympy.cos', 'cos', (['theta'], {}), '(theta)\n', (2434, 2441), False, 'from sympy import sin, cos, symbols, Matrix, solve\n'), ((5002, 5030), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (5010, 5030), True, 'import numpy as np\n'), ((5039, 5055), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5047, 5055), True, 'import numpy as np\n'), ((3673, 3699), 'control.ss2tf', 'control.ss2tf', (['sys0_output'], {}), '(sys0_output)\n', (3686, 3699), False, 'import control\n'), ((4216, 4234), 'pylab.linspace', 'pl.linspace', (['(0)', 'tf'], {}), '(0, tf)\n', (4227, 4234), True, 'import 
pylab as pl\n'), ((6120, 6142), 'numpy.vstack', 'np.vstack', (['(t_save, t)'], {}), '((t_save, t))\n', (6129, 6142), True, 'import numpy as np\n'), ((6160, 6182), 'numpy.vstack', 'np.vstack', (['(x_save, X)'], {}), '((x_save, X))\n', (6169, 6182), True, 'import numpy as np\n'), ((6203, 6231), 'numpy.vstack', 'np.vstack', (['(xref_save, Xref)'], {}), '((xref_save, Xref))\n', (6212, 6231), True, 'import numpy as np\n'), ((6853, 6864), 'time.time', 'time.time', ([], {}), '()\n', (6862, 6864), False, 'import time\n'), ((6268, 6286), 'numpy.array', 'np.array', (['[u1, u2]'], {}), '([u1, u2])\n', (6276, 6286), True, 'import numpy as np\n')] |
from numpy import interp
from os import listdir
from PIL import Image, ImageStat
# Directory for block textures extracted from version jar
textures = 'assets/minecraft/textures/block'
# Special case: animated blocks like crimson_stem are
# taller than 64px: crop when compositing later?

# List of blocks to allow loading
# > Change this file for different lists
with open('blocks_full.txt') as reader:
    allow_blocks = reader.read().splitlines()

# Find png filenames in the textures directory and strip the .png
# extension, yielding every candidate block id.
block_ids = [name[:-4] for name in listdir(textures) if name.endswith('.png')]

# Keep only the ids present in the allow list.  Membership is tested
# against a set (O(1) per lookup) instead of scanning the list, and the
# loop variable no longer shadows the builtin `id`.
_allowed = set(allow_blocks)
block_ids = [block_id for block_id in block_ids if block_id in _allowed]
def hsv_string(h, s, v):
    """Format HSV components as an 'hsv(360, 100%, 100%)'-style string.

    Each component is rounded to the nearest integer; hue is expected in
    [0, 360] and saturation/value in [0, 100].
    """
    return f'hsv({round(h)}, {round(s)}%, {round(v)}%)'
def avg_hsv(block_id):
    """Compute the average HSV colour of one block texture.

    Returns a dict with the block id, the mean hue/saturation/value
    rescaled from the 8-bit channel range to hsv(360, 100%, 100%), and a
    ready-made colour string.
    """
    # Load the texture and convert it to HSV colour space.
    hsv_image = Image.open(f'{textures}/{block_id}.png').convert('HSV')
    # Per-channel mean in the raw 8-bit range [0, 255].
    means = [ImageStat.Stat(hsv_image.getchannel(band)).mean
             for band in ('H', 'S', 'V')]
    # Rescale: hue to [0, 360]; saturation and value to [0, 100].
    # interp returns a float array, so take the scalar element.
    h = interp(means[0], [0, 255], [0, 360])[0]
    s = interp(means[1], [0, 255], [0, 100])[0]
    v = interp(means[2], [0, 255], [0, 100])[0]
    return {'block_id': block_id, 'hue': h, 'sat': s, 'val': v,
            'hsv_string': hsv_string(h, s, v)}
# Average colour of every allowed block, ordered by hue, then
# saturation, then value.
blocks = sorted((avg_hsv(block_id) for block_id in block_ids),
                key=lambda b: (b['hue'], b['sat'], b['val']))
# Print blocks and their color
for block in blocks:
print(f"{block['block_id']} : {block['hsv_string']}") | [
"PIL.ImageStat.Stat",
"os.listdir",
"PIL.Image.open",
"numpy.interp"
] | [((1346, 1386), 'PIL.Image.open', 'Image.open', (['f"""{textures}/{block_id}.png"""'], {}), "(f'{textures}/{block_id}.png')\n", (1356, 1386), False, 'from PIL import Image, ImageStat\n'), ((746, 763), 'os.listdir', 'listdir', (['textures'], {}), '(textures)\n', (753, 763), False, 'from os import listdir\n'), ((1674, 1701), 'PIL.ImageStat.Stat', 'ImageStat.Stat', (['hue_channel'], {}), '(hue_channel)\n', (1688, 1701), False, 'from PIL import Image, ImageStat\n'), ((1715, 1742), 'PIL.ImageStat.Stat', 'ImageStat.Stat', (['sat_channel'], {}), '(sat_channel)\n', (1729, 1742), False, 'from PIL import Image, ImageStat\n'), ((1756, 1783), 'PIL.ImageStat.Stat', 'ImageStat.Stat', (['val_channel'], {}), '(val_channel)\n', (1770, 1783), False, 'from PIL import Image, ImageStat\n'), ((1918, 1947), 'numpy.interp', 'interp', (['h', '[0, 255]', '[0, 360]'], {}), '(h, [0, 255], [0, 360])\n', (1924, 1947), False, 'from numpy import interp\n'), ((1959, 1988), 'numpy.interp', 'interp', (['s', '[0, 255]', '[0, 100]'], {}), '(s, [0, 255], [0, 100])\n', (1965, 1988), False, 'from numpy import interp\n'), ((2000, 2029), 'numpy.interp', 'interp', (['v', '[0, 255]', '[0, 100]'], {}), '(v, [0, 255], [0, 100])\n', (2006, 2029), False, 'from numpy import interp\n')] |
import argparse
import numpy as np
from tqdm import tqdm
from os.path import join, isfile
from data import Labels
from joblib import Parallel, delayed
labels = Labels()

def job(text_path, numpy_path):
    """Screen one transcript/feature pair for the manifest.

    Returns the manifest row 'numpy_path,frames,text' when the transcript
    passes the label filter and the saved feature array provides enough
    frames for it, otherwise None.
    """
    with open(text_path, 'r', encoding='utf8') as transcript_file:
        text = transcript_file.read()
    # Drop transcripts the label set rejects (see Labels.is_accepted).
    if not labels.is_accepted(text):
        return None
    required_frames = labels.required_frames(text)
    actual_frames = len(np.load(numpy_path))
    # The features must cover at least the frames the transcript needs.
    if required_frames > actual_frames:
        return None
    return '%s,%d,%s' % (numpy_path, actual_frames, text)
# CLI: --manifest is a CSV whose first column is a .wav path; --jobs
# sets the joblib worker count.
parser = argparse.ArgumentParser(description='Collect utterances')
parser.add_argument('--manifest', type=str)
parser.add_argument('--jobs', type=int, default=8)
args = parser.parse_args()
# Output prefix: the manifest path without its .csv extension; also the
# directory under which the per-utterance files live.
prefix = args.manifest.replace('.csv', '')
print(prefix)
files = dict()
with open(args.manifest) as f:
    progress = tqdm(f.readlines())
    for line in progress:
        path = line.split(',')[0]
        # Keep only entries whose transcript (.txt) and feature (.npy)
        # files both exist.
        text_path = join(prefix, path.replace('.wav', '.txt'))
        if not isfile(text_path):
            continue
        numpy_path = join(prefix, path.replace('.wav', '.npy'))
        if not isfile(numpy_path):
            continue
        files[text_path] = numpy_path
# Validate every pair in parallel; job() returns None for rejects.
tasks = []
for text_path, numpy_path in files.items():
    tasks.append(delayed(job)(text_path, numpy_path))
print('Tasks:', len(tasks))
results = Parallel(n_jobs=args.jobs, backend='multiprocessing', verbose=1)(tasks)
utterances = sorted([r for r in results if r is not None])
print('Success:', len(utterances))
# NOTE(review): writelines appends no newlines -- rows presumably end
# with the transcript's own trailing newline; confirm upstream.
with open(prefix + '.txt', 'w', encoding='utf8') as file:
    file.write('path,frames,text\n')
    file.writelines(utterances)
| [
"argparse.ArgumentParser",
"data.Labels",
"joblib.Parallel",
"os.path.isfile",
"joblib.delayed",
"numpy.load"
] | [((161, 169), 'data.Labels', 'Labels', ([], {}), '()\n', (167, 169), False, 'from data import Labels\n'), ((574, 631), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collect utterances"""'}), "(description='Collect utterances')\n", (597, 631), False, 'import argparse\n'), ((1388, 1452), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.jobs', 'backend': '"""multiprocessing"""', 'verbose': '(1)'}), "(n_jobs=args.jobs, backend='multiprocessing', verbose=1)\n", (1396, 1452), False, 'from joblib import Parallel, delayed\n'), ((422, 441), 'numpy.load', 'np.load', (['numpy_path'], {}), '(numpy_path)\n', (429, 441), True, 'import numpy as np\n'), ((1038, 1055), 'os.path.isfile', 'isfile', (['text_path'], {}), '(text_path)\n', (1044, 1055), False, 'from os.path import join, isfile\n'), ((1158, 1176), 'os.path.isfile', 'isfile', (['numpy_path'], {}), '(numpy_path)\n', (1164, 1176), False, 'from os.path import join, isfile\n'), ((1311, 1323), 'joblib.delayed', 'delayed', (['job'], {}), '(job)\n', (1318, 1323), False, 'from joblib import Parallel, delayed\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 11:11:55 2022
@author: Hatlab-RRK
Purpose: create a neat, almost-executable file that can quickly plot a 3-state pulse file, and have the option to do just histograms,
or additionally try to fit using majority vote and give classification accuracy
"""
from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath
import os
import matplotlib.pyplot as plt
plt.style.use('hatlab')
import numpy as np

# --- Data selection ------------------------------------------------------
# Previous revisions reassigned `datapath` dozens of times (mostly
# commented out) while trying different .ddh5 datasets; only the final
# assignment ever took effect, so the dead reassignments were removed.
datapath = r'Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_20dB_att/2022-05-05_0001_pwr_sweep_0dB_att_pump_pwr_9.76_dBm_LO_freq_5914000000.0_Hz_Sig_Volt_0.05_V_.ddh5'

# Number of records in the acquisition (also passed as rec_stop below).
nr = 7686

#%%
# Demodulation window (sample indices): used both for the optional
# boxcar window and for the record slice handed to the extractor.
tstart = 50
tstop = 150
fid = extract_3pulse_histogram_from_filepath(datapath, figscale = 1.2, plot = True, hist_scale = 0.003, numRecords = nr, rec_start = 1, rec_stop = 7686, rec_skip = 1, IQ_offset = (0,0), fit = 0, lpf = 0, lpf_wc = 15e6, boxcar = False, bc_window = [tstart, tstop], record_track = False, tuneup_plots = False, debug = False, tstart_index = tstart , tstop_index = tstop, phase_correction_rate = 0.0000, guess = 1)
print(fid)
#%%custom stats
# Per-trace power statistics at a fixed time slice (traces 0..2 are the
# three pulse states).
for trace in [0,1,2]:
    Pvar, Pvar_fit, Pavg, Ivar, Qvar = plot_custom_stats_from_filepath(datapath, debug = True, trace = trace, fit = 0, timeslice = 58)
    print("Variance (numpy):", Pvar, "\nVariance (fit): ", Pvar_fit, "\nAverage: ", Pavg)
#%%old stats
# Legacy variance plot over the whole record for comparison.
plot_stats_from_filepath(datapath, plt_avg = 0, plt_var = 1, vscale = 100, plot = 1)
#%%
# Matched amp-off / amp-on power sweeps.  In both directories file
# number n encodes the signal amplitude (n - 0.5)/10 V, files 0001-0014
# covering 0.05-1.35 V; the 1.35 V point (file 0014) is listed first.
# The 28 near-identical literals are generated from one template so a
# path change needs to be made in exactly one place.
def _sweep_paths(directory, att_dB):
    """Build the .ddh5 paths for one sweep: file 0014 first, then 0001-0013."""
    template = (directory +
                '/2022-05-05_{n:04d}_pwr_sweep_{att}dB_att_pump_pwr_9.76_dBm'
                '_LO_freq_5914000000.0_Hz_Sig_Volt_{volt:.2f}_V_.ddh5')
    return [template.format(n=n, att=att_dB, volt=(n - 0.5) / 10)
            for n in [14] + list(range(1, 14))]

file_arr_amp_off = _sweep_paths(
    'Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_higher_pwr', 20)
file_arr_amp_on = _sweep_paths(
    'Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_20dB_att', 0)
# Gather P/I/Q statistics for every amp-off dataset (trace 0 only).
Pvar_arr = []
Pvar_fit_arr = []
Pavg_arr = []
Ivar_arr = []
Qvar_arr = []
for filepath in file_arr_amp_off:
    Pvar, Pvar_fit, Pavg, Ivar, Qvar = plot_custom_stats_from_filepath(
        filepath, debug=True, trace=0)
    for dest, value in zip(
            (Pvar_arr, Pvar_fit_arr, Pavg_arr, Ivar_arr, Qvar_arr),
            (Pvar, Pvar_fit, Pavg, Ivar, Qvar)):
        dest.append(value)
#%%
# Scatter the I- and Q-quadrature variances against the average power.
# NOTE(review): the plots carry no labels, so plt.legend() renders an
# empty legend -- kept to preserve the original behaviour.
for variance_arr in (Ivar_arr, Qvar_arr):
    plt.plot(Pavg_arr, variance_arr, '.')
plt.legend()
#%%
# 0 dB-attenuation sweep at pump_pwr 9.76 dBm / LO 5.914 GHz.  File
# number n encodes the signal amplitude (n - 8.5)/10 V, files 0009-0022
# covering 0.05-1.35 V; the 1.35 V point (file 0022) is listed first.
# Generated from one template instead of 14 near-identical literals.
# NOTE(review): this list is immediately reassigned below, so the value
# here is effectively dead; kept for reference.
pwr_file_arr = [
    'Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att'
    '/2022-05-05_{n:04d}_pwr_sweep_0dB_att_pump_pwr_9.76_dBm'
    '_LO_freq_5914000000.0_Hz_Sig_Volt_{volt:.2f}_V_.ddh5'.format(
        n=n, volt=(n - 8.5) / 10)
    for n in [22] + list(range(9, 22))
]
pwr_file_arr = [
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0051_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0052_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0053_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.25_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0054_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.35_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0055_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.45_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0056_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.55_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0057_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.65_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0058_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.75_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0059_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.85_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0060_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.95_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0061_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0062_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0063_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.25_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_saturation_0.4mA_sweep_0dB_att/2022-05-05_0064_pwr_sweep_0dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.35_V_.ddh5"
]
pwr_file_arr = [
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0028_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.35_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0015_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0016_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0017_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.25_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0018_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.35_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0019_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.45_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0020_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.55_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0021_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.65_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0022_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.75_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0023_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.85_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0024_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.95_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0025_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0026_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0027_pwr_sweep_10dB_att_pump_pwr_9.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.25_V_.ddh5"
]
pwr_file_arr = [
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0056_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.35_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0043_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0044_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0045_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.25_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0046_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.35_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0047_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.45_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0048_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.55_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0049_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.65_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0050_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.75_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0051_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.85_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0052_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_0.95_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0053_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.05_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0054_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.15_V_.ddh5",
"Z:/Data/N25_L3_SQ/BP1/amp_was_actually_off/amp_saturation_0.4mA_sweep_even_higher_pwr/2022-05-05_0055_pwr_sweep_10dB_att_pump_pwr_10.76_dBm_LO_freq_5917000000.0_Hz_Sig_Volt_1.25_V_.ddh5"
]
Gvar_arr, Evar_arr, Fvar_arr = [],[],[]
Gavg_arr, Eavg_arr, Favg_arr = [],[],[]
for i, fp in enumerate(pwr_file_arr):
print(i, fp)
[Gvar,Gavg], [Evar,Eavg], [Fvar,Favg] = plot_stats_from_filepath(fp, vscale = 1, plot = 0)
Gvar_arr.append(Gvar)
Evar_arr.append(Evar)
Fvar_arr.append(Fvar)
Gavg_arr.append(Gavg)
Eavg_arr.append(Eavg)
Favg_arr.append(Favg)
#%%
plt.plot(np.array(Gavg_arr),np.array(Gvar_arr), '.') | [
"data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_custom_stats_from_filepath",
"data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_stats_from_filepath",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"numpy.array",
"data_processing.AWG_and_Alazar.Pulse_Processing_utils.extract_... | [((547, 570), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""hatlab"""'], {}), "('hatlab')\n", (560, 570), True, 'import matplotlib.pyplot as plt\n'), ((9008, 9395), 'data_processing.AWG_and_Alazar.Pulse_Processing_utils.extract_3pulse_histogram_from_filepath', 'extract_3pulse_histogram_from_filepath', (['datapath'], {'figscale': '(1.2)', 'plot': '(True)', 'hist_scale': '(0.003)', 'numRecords': 'nr', 'rec_start': '(1)', 'rec_stop': '(7686)', 'rec_skip': '(1)', 'IQ_offset': '(0, 0)', 'fit': '(0)', 'lpf': '(0)', 'lpf_wc': '(15000000.0)', 'boxcar': '(False)', 'bc_window': '[tstart, tstop]', 'record_track': '(False)', 'tuneup_plots': '(False)', 'debug': '(False)', 'tstart_index': 'tstart', 'tstop_index': 'tstop', 'phase_correction_rate': '(0.0)', 'guess': '(1)'}), '(datapath, figscale=1.2, plot=True,\n hist_scale=0.003, numRecords=nr, rec_start=1, rec_stop=7686, rec_skip=1,\n IQ_offset=(0, 0), fit=0, lpf=0, lpf_wc=15000000.0, boxcar=False,\n bc_window=[tstart, tstop], record_track=False, tuneup_plots=False,\n debug=False, tstart_index=tstart, tstop_index=tstop,\n phase_correction_rate=0.0, guess=1)\n', (9046, 9395), False, 'from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath\n'), ((9703, 9779), 'data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_stats_from_filepath', 'plot_stats_from_filepath', (['datapath'], {'plt_avg': '(0)', 'plt_var': '(1)', 'vscale': '(100)', 'plot': '(1)'}), '(datapath, plt_avg=0, plt_var=1, vscale=100, plot=1)\n', (9727, 9779), False, 'from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath\n'), ((15084, 15117), 'matplotlib.pyplot.plot', 'plt.plot', (['Pavg_arr', 'Ivar_arr', 
'"""."""'], {}), "(Pavg_arr, Ivar_arr, '.')\n", (15092, 15117), True, 'import matplotlib.pyplot as plt\n'), ((15118, 15151), 'matplotlib.pyplot.plot', 'plt.plot', (['Pavg_arr', 'Qvar_arr', '"""."""'], {}), "(Pavg_arr, Qvar_arr, '.')\n", (15126, 15151), True, 'import matplotlib.pyplot as plt\n'), ((15320, 15332), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15330, 15332), True, 'import matplotlib.pyplot as plt\n'), ((9503, 9594), 'data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_custom_stats_from_filepath', 'plot_custom_stats_from_filepath', (['datapath'], {'debug': '(True)', 'trace': 'trace', 'fit': '(0)', 'timeslice': '(58)'}), '(datapath, debug=True, trace=trace, fit=0,\n timeslice=58)\n', (9534, 9594), False, 'from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath\n'), ((14786, 14842), 'data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_custom_stats_from_filepath', 'plot_custom_stats_from_filepath', (['fp'], {'debug': '(True)', 'trace': '(0)'}), '(fp, debug=True, trace=0)\n', (14817, 14842), False, 'from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath\n'), ((25340, 25386), 'data_processing.AWG_and_Alazar.Pulse_Processing_utils.plot_stats_from_filepath', 'plot_stats_from_filepath', (['fp'], {'vscale': '(1)', 'plot': '(0)'}), '(fp, vscale=1, plot=0)\n', (25364, 25386), False, 'from data_processing.AWG_and_Alazar.Pulse_Processing_utils import extract_3pulse_histogram_from_filepath, extract_3pulse_noise_from_filepath, plot_stats_from_filepath, plot_custom_stats_from_filepath\n'), ((25570, 25588), 'numpy.array', 'np.array', (['Gavg_arr'], {}), '(Gavg_arr)\n', (25578, 25588), True, 'import numpy as np\n'), ((25589, 25607), 'numpy.array', 
'np.array', (['Gvar_arr'], {}), '(Gvar_arr)\n', (25597, 25607), True, 'import numpy as np\n')] |
## test_attack.py -- sample code to test attack procedure
##
## Copyright (C) 2016, <NAME> <<EMAIL>>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
import tensorflow as tf
import numpy as np
import time
import random
import argparse
from cv2 import imwrite
from setup_cifar import CIFAR, CIFARModel
from setup_mnist import MNIST, MNISTModel
from setup_mnets import MNETSModel, ImagenetTF, NodeLookup
from l2_attack import CarliniL2
from l0_attack import CarliniL0
from li_attack import CarliniLi
slim = tf.contrib.slim
def show(img_input, name=None):
"""
Show MNSIT digits in the console.
"""
remap = " .*#"+"#"*100
img = (img_input.flatten()+.5)*3
if len(img) != 784 and name is not None:
scaled = (0.5+img_input)*255
imwrite(name, scaled)
return
print("START")
for i in range(28):
print("".join([remap[int(round(x))] for x in img[i*28:i*28+28]]))
def generate_data(data, samples, num_targets=1, targeted=True, target_classes=None, start=0, imagenet=True):
"""
Generate the input data to the attack algorithm.
data: the images to attack
samples: number of samples to use
targeted: if true, construct targeted attacks, otherwise untargeted attacks
start: offset into data to use
inception: if targeted and inception, randomly sample 100 targets intead of 1000
"""
targets = []
inputs_scaled = []
print(data.test_data.shape)
print(data.test_labels.shape)
if not imagenet:
seq = range(data.test_labels.shape[1])
else:
seq = target_classes if target_classes else random.sample(range(1,1001), num_targets)
print("Target classes", seq)
for i in range(samples):
if targeted:
for j in seq:
if (j == np.argmax(data.test_labels[start+i])) and (imagenet == False):
continue
inputs_scaled.append(data.test_data[start+i]/2.0)
targets.append(np.eye(data.test_labels.shape[1])[j])
else:
inputs_scaled.append(data.test_data[start+i]/2.0)
targets.append(data.test_labels[start+i])
inputs_scaled = np.array(inputs_scaled)
#i = 1
#for img in inputs_scaled:
# show(img, 'orig' + str(i) + '.png')
# i += 1
targets = np.array(targets)
return inputs_scaled, targets
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Checkpoint file')
parser.add_argument('--ckpt', dest='ckpt', type=str,
help='Checkpoint file for model')
parser.add_argument('--model', dest='model_name', type=str,
help='model_name')
parser.add_argument('--norm', dest='norm', type=str,
help='distance metric')
parser.add_argument('--conf', dest='conf', type=int,
help='confidence')
#parser.add_argument('--targets', dest='num_targets', type=int,
# help='number of targets')
args = parser.parse_args()
batch_size = 5
with tf.Session() as sess:
print("Running model {}".format(args.model_name))
model = MNETSModel(args.ckpt, args.model_name, batch_size, sess)
data = ImagenetTF(args.model_name, model.image_size)
tf.train.start_queue_runners(sess)
data.get_batch(sess)
if args.norm == "0":
norm = CarliniL0
elif args.norm == "i":
norm = CarliniLi
else:
norm = CarliniL2
target_classes = [893, 858, 350, 71, 948, 715, 558, 408, 349, 215]
target_classes = target_classes
attack = norm(sess, model, max_iterations=1000, confidence=args.conf)
inputs, targets = generate_data(data, samples=len(target_classes),
targeted=True, target_classes=target_classes,
start=0, imagenet=True)
print("Attack constructed")
#print(tf.global_variables())
if args.model_name == "inception_v3":
variables_to_restore = slim.get_variables(scope="InceptionV3")
elif args.model_name == "resnet_v2_152":
variables_to_restore = slim.get_variables(scope="ResnetV2152")
elif args.model_name.startswith("mobilenet"):
variables_to_restore = slim.get_variables(scope="MobilenetV1")
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, args.ckpt)
print("Checkpoint restored")
print("Running attack")
timestart = time.time()
adv = attack.attack(inputs, targets)
timeend = time.time()
print("Took",timeend-timestart,"seconds to run",len(inputs),"samples.")
adv = adv.astype(np.float32)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
def topk(predictions):
top_k = predictions.argsort()#[-FLAGS.num_top_predictions:][::-1]
top_k = top_k[::-1]
count = 1
for node_id in top_k[:10]:
#print('ID {}, score {}'.format(node_id, predictions[node_id]))
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print("{}. {} (score = {})".format(count, human_string, score))
count += 1
for i in range(len(adv)):
print("Types:", inputs[i].dtype, adv[i].dtype)
print("Valid:")
show(inputs[i], name="input" + str(i) + ".png")
print("Adversarial:")
show(adv[i], name="adv" + str(i) + ".png")
pred = model.predict(tf.convert_to_tensor(inputs[i:i+1]), reuse=True)
pred_adv = model.predict(tf.convert_to_tensor(adv[i:i+1]), reuse=True)
pred = sess.run(pred)
pred_adv = sess.run(pred_adv)
pred = np.squeeze(pred)
pred_adv = np.squeeze(pred_adv)
print("Original classification:")
topk(pred)
print("Adversarial classification:")
topk(pred_adv)
print("Total distortion:", np.sum((adv[i]-inputs[i])**2)**.5)
| [
"cv2.imwrite",
"numpy.eye",
"argparse.ArgumentParser",
"setup_mnets.NodeLookup",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.train.start_queue_runners",
"numpy.argmax",
"numpy.squeeze",
"numpy.array",
"numpy.sum",
"tensorflow.convert_to_tensor",
"time.time",
"setup_mnets.Im... | [((2230, 2253), 'numpy.array', 'np.array', (['inputs_scaled'], {}), '(inputs_scaled)\n', (2238, 2253), True, 'import numpy as np\n'), ((2375, 2392), 'numpy.array', 'np.array', (['targets'], {}), '(targets)\n', (2383, 2392), True, 'import numpy as np\n'), ((2469, 2523), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Checkpoint file"""'}), "(description='Checkpoint file')\n", (2492, 2523), False, 'import argparse\n'), ((834, 855), 'cv2.imwrite', 'imwrite', (['name', 'scaled'], {}), '(name, scaled)\n', (841, 855), False, 'from cv2 import imwrite\n'), ((3129, 3141), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3139, 3141), True, 'import tensorflow as tf\n'), ((3225, 3281), 'setup_mnets.MNETSModel', 'MNETSModel', (['args.ckpt', 'args.model_name', 'batch_size', 'sess'], {}), '(args.ckpt, args.model_name, batch_size, sess)\n', (3235, 3281), False, 'from setup_mnets import MNETSModel, ImagenetTF, NodeLookup\n'), ((3298, 3343), 'setup_mnets.ImagenetTF', 'ImagenetTF', (['args.model_name', 'model.image_size'], {}), '(args.model_name, model.image_size)\n', (3308, 3343), False, 'from setup_mnets import MNETSModel, ImagenetTF, NodeLookup\n'), ((3362, 3396), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', (['sess'], {}), '(sess)\n', (3390, 3396), True, 'import tensorflow as tf\n'), ((4485, 4521), 'tensorflow.train.Saver', 'tf.train.Saver', (['variables_to_restore'], {}), '(variables_to_restore)\n', (4499, 4521), True, 'import tensorflow as tf\n'), ((4659, 4670), 'time.time', 'time.time', ([], {}), '()\n', (4668, 4670), False, 'import time\n'), ((4734, 4745), 'time.time', 'time.time', ([], {}), '()\n', (4743, 4745), False, 'import time\n'), ((4945, 4957), 'setup_mnets.NodeLookup', 'NodeLookup', ([], {}), '()\n', (4955, 4957), False, 'from setup_mnets import MNETSModel, ImagenetTF, NodeLookup\n'), ((6003, 6019), 'numpy.squeeze', 'np.squeeze', (['pred'], {}), '(pred)\n', (6013, 6019), True, 
'import numpy as np\n'), ((6043, 6063), 'numpy.squeeze', 'np.squeeze', (['pred_adv'], {}), '(pred_adv)\n', (6053, 6063), True, 'import numpy as np\n'), ((5763, 5800), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['inputs[i:i + 1]'], {}), '(inputs[i:i + 1])\n', (5783, 5800), True, 'import tensorflow as tf\n'), ((5849, 5883), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['adv[i:i + 1]'], {}), '(adv[i:i + 1])\n', (5869, 5883), True, 'import tensorflow as tf\n'), ((6263, 6296), 'numpy.sum', 'np.sum', (['((adv[i] - inputs[i]) ** 2)'], {}), '((adv[i] - inputs[i]) ** 2)\n', (6269, 6296), True, 'import numpy as np\n'), ((1852, 1890), 'numpy.argmax', 'np.argmax', (['data.test_labels[start + i]'], {}), '(data.test_labels[start + i])\n', (1861, 1890), True, 'import numpy as np\n'), ((2041, 2074), 'numpy.eye', 'np.eye', (['data.test_labels.shape[1]'], {}), '(data.test_labels.shape[1])\n', (2047, 2074), True, 'import numpy as np\n')] |
import numpy as np
import pylab as plt
import pandas as pd
data = pd.read_csv('data/data.csv')
for feature in ['nikkei', 'nasdaq', 'currency']:
dataset = data[feature]
print("[{}] Mean: {}".format(feature, np.mean(dataset)))
print("[{}] Standard deviation: {}".format(feature, np.std(dataset)))
plt.xlabel(feature, fontsize=18)
plt.hist(dataset, normed=True, bins=50)
plt.show()
| [
"numpy.mean",
"pylab.hist",
"pandas.read_csv",
"pylab.xlabel",
"numpy.std",
"pylab.show"
] | [((67, 95), 'pandas.read_csv', 'pd.read_csv', (['"""data/data.csv"""'], {}), "('data/data.csv')\n", (78, 95), True, 'import pandas as pd\n'), ((312, 344), 'pylab.xlabel', 'plt.xlabel', (['feature'], {'fontsize': '(18)'}), '(feature, fontsize=18)\n', (322, 344), True, 'import pylab as plt\n'), ((349, 388), 'pylab.hist', 'plt.hist', (['dataset'], {'normed': '(True)', 'bins': '(50)'}), '(dataset, normed=True, bins=50)\n', (357, 388), True, 'import pylab as plt\n'), ((393, 403), 'pylab.show', 'plt.show', ([], {}), '()\n', (401, 403), True, 'import pylab as plt\n'), ((215, 231), 'numpy.mean', 'np.mean', (['dataset'], {}), '(dataset)\n', (222, 231), True, 'import numpy as np\n'), ((290, 305), 'numpy.std', 'np.std', (['dataset'], {}), '(dataset)\n', (296, 305), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
n = 10
s = 1.0
x = np.linspace(0, n - 1, n + (n - 1) * 20)
def rho(r, k):
if k == 0:
y = np.exp(-(r/s)**2)
else:
e = np.exp(1)
y = (e/k**2)**(k**2) * (r/s)**(2*k**2) * np.exp(-(r/s)**2)
return y
plt.figure(figsize=(6, 3))
colors = cm.rainbow(np.linspace(1, 0, 7))
for k, c in enumerate(colors):
plt.plot(x, rho(x, k), color=c, label=r'$k = {}$'.format(k))
plt.plot(x, sum([rho(x, k) for k in range(n)]),
color='black', label=r'$\sum\rho_k$')
plt.xlabel(r'$u = r/\sigma$')
plt.xlim((0, n - 1))
plt.grid(axis='x')
plt.ylabel(r'$\rho_k(u)$')
plt.ylim((0, 1.5))
plt.yticks(np.arange(0, 2, 0.5))
plt.legend()
plt.tight_layout()
#plt.show()
#plt.savefig('basex-basis.svg')
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend"
] | [((148, 187), 'numpy.linspace', 'np.linspace', (['(0)', '(n - 1)', '(n + (n - 1) * 20)'], {}), '(0, n - 1, n + (n - 1) * 20)\n', (159, 187), True, 'import numpy as np\n'), ((364, 390), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), '(figsize=(6, 3))\n', (374, 390), True, 'import matplotlib.pyplot as plt\n'), ((628, 657), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$u = r/\\\\sigma$"""'], {}), "('$u = r/\\\\sigma$')\n", (638, 657), True, 'import matplotlib.pyplot as plt\n'), ((658, 678), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0, n - 1)'], {}), '((0, n - 1))\n', (666, 678), True, 'import matplotlib.pyplot as plt\n'), ((679, 697), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (687, 697), True, 'import matplotlib.pyplot as plt\n'), ((699, 725), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho_k(u)$"""'], {}), "('$\\\\rho_k(u)$')\n", (709, 725), True, 'import matplotlib.pyplot as plt\n'), ((726, 744), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0, 1.5)'], {}), '((0, 1.5))\n', (734, 744), True, 'import matplotlib.pyplot as plt\n'), ((779, 791), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (789, 791), True, 'import matplotlib.pyplot as plt\n'), ((792, 810), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (808, 810), True, 'import matplotlib.pyplot as plt\n'), ((412, 432), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(7)'], {}), '(1, 0, 7)\n', (423, 432), True, 'import numpy as np\n'), ((756, 776), 'numpy.arange', 'np.arange', (['(0)', '(2)', '(0.5)'], {}), '(0, 2, 0.5)\n', (765, 776), True, 'import numpy as np\n'), ((232, 253), 'numpy.exp', 'np.exp', (['(-(r / s) ** 2)'], {}), '(-(r / s) ** 2)\n', (238, 253), True, 'import numpy as np\n'), ((272, 281), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (278, 281), True, 'import numpy as np\n'), ((331, 352), 'numpy.exp', 'np.exp', (['(-(r / s) ** 2)'], {}), '(-(r / s) ** 2)\n', (337, 352), True, 'import numpy 
as np\n')] |
"""
Functionality for reading Capella SAR data into a SICD model.
"""
__classification__ = "UNCLASSIFIED"
__author__ = ("<NAME>", "<NAME>")
import logging
import json
from typing import Dict, Any, Tuple
from datetime import datetime
from collections import OrderedDict
from scipy.constants import speed_of_light
import numpy
from sarpy.compliance import string_types
from sarpy.io.general.base import BaseReader, SarpyIOError
from sarpy.io.general.tiff import TiffDetails, NativeTiffChipper
from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like
from sarpy.io.complex.base import SICDTypeReader
from sarpy.io.complex.utils import fit_position_xvalidation
from sarpy.io.complex.sicd_elements.blocks import XYZPolyType
from sarpy.io.complex.sicd_elements.SICD import SICDType
from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType
from sarpy.io.complex.sicd_elements.ImageCreation import ImageCreationType
from sarpy.io.complex.sicd_elements.ImageData import ImageDataType
from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType
from sarpy.io.complex.sicd_elements.Position import PositionType
from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType
from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, \
WaveformParametersType, ChanParametersType
from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType
from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, \
RcvChanProcType, ProcessingType
logger = logging.getLogger(__name__)
########
# base expected functionality for a module with an implemented Reader
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a Capella SAR file.
Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CapellaReader|None
`CapellaReader` instance if Capella file, `None` otherwise
"""
if is_file_like(file_name):
return None
try:
capella_details = CapellaDetails(file_name)
logger.info('File {} is determined to be a Capella file.'.format(file_name))
return CapellaReader(capella_details)
except SarpyIOError:
return None
###########
# parser and interpreter for tiff attributes
class CapellaDetails(object):
"""
Parses and converts the Cosmo Skymed metadata
"""
__slots__ = ('_tiff_details', '_img_desc_tags')
def __init__(self, file_name):
"""
Parameters
----------
file_name : str
"""
# verify that the file is a tiff file
self._tiff_details = TiffDetails(file_name)
# verify that ImageDescription tiff tag exists
if 'ImageDescription' not in self._tiff_details.tags:
raise SarpyIOError('No "ImageDescription" tag in the tiff.')
img_format = self._tiff_details.tags['ImageDescription']
# verify that ImageDescription has a reasonable format
try:
self._img_desc_tags = json.loads(img_format) # type: Dict[str, Any]
except Exception as e:
logger.error('Failed deserializing the ImageDescription tag as json with error {}'.format(e))
raise e
# verify the file is not compressed
self._tiff_details.check_compression()
# verify the file is not tiled
self._tiff_details.check_tiled()
@property
def file_name(self):
"""
str: the file name
"""
return self._tiff_details.file_name
@property
def tiff_details(self):
# type: () -> TiffDetails
"""
TiffDetails: The tiff details object.
"""
return self._tiff_details
def get_symmetry(self):
# type: () -> Tuple[bool, bool, bool]
"""
Gets the symmetry definition.
Returns
-------
Tuple[bool, bool, bool]
"""
pointing = self._img_desc_tags['collect']['radar']['pointing'].lower()
if pointing == 'left':
return False, False, False
elif pointing == 'right':
return False, True, False
else:
raise ValueError('Got unhandled pointing value {}'.format(pointing))
def get_sicd(self):
"""
Get the SICD metadata for the image.
Returns
-------
SICDType
"""
def convert_string_dict(dict_in):
# type: (dict) -> dict
dict_out = OrderedDict()
for key, val in dict_in.items():
if isinstance(val, string_types):
dict_out[key] = val
elif isinstance(val, int):
dict_out[key] = str(val)
elif isinstance(val, float):
dict_out[key] = '{0:0.16G}'.format(val)
else:
raise TypeError('Got unhandled type {}'.format(type(val)))
return dict_out
def extract_state_vector():
# type: () -> (numpy.ndarray, numpy.ndarray, numpy.ndarray)
vecs = collect['state']['state_vectors']
times = numpy.zeros((len(vecs), ), dtype=numpy.float64)
positions = numpy.zeros((len(vecs), 3), dtype=numpy.float64)
velocities = numpy.zeros((len(vecs), 3), dtype=numpy.float64)
for i, entry in enumerate(vecs):
times[i] = get_seconds(parse_timestring(entry['time'], precision='ns'), start_time, precision='ns')
positions[i, :] = entry['position']
velocities[i, :] = entry['velocity']
return times, positions, velocities
def get_collection_info():
# type: () -> CollectionInfoType
coll_name = collect['platform']
start_dt = start_time.astype('datetime64[us]').astype(datetime)
mode = collect['mode'].strip().lower()
if mode == 'stripmap':
radar_mode = RadarModeType(ModeType='STRIPMAP')
elif mode == 'sliding_spotlight':
radar_mode = RadarModeType(ModeType='DYNAMIC STRIPMAP')
else:
raise ValueError('Got unhandled radar mode {}'.format(mode))
return CollectionInfoType(
CollectorName=coll_name,
CoreName='{}{}{}'.format(start_dt.strftime('%d%b%y').upper(),
coll_name,
start_dt.strftime('%H%M%S')),
RadarMode=radar_mode,
Classification='UNCLASSIFIED',
CollectType='MONOSTATIC')
def get_image_creation():
# type: () -> ImageCreationType
from sarpy.__about__ import __version__
return ImageCreationType(
Application=self._tiff_details.tags['Software'],
DateTime=parse_timestring(self._img_desc_tags['processing_time'], precision='us'),
Profile='sarpy {}'.format(__version__),
Site='Unknown')
def get_image_data():
# type: () -> ImageDataType
img = collect['image']
rows = int(img['columns']) # capella uses flipped row/column definition?
cols = int(img['rows'])
if img['data_type'] == 'CInt16':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled data_type {}'.format(img['data_type']))
scp_pixel = (int(0.5 * rows), int(0.5 * cols))
if collect['radar']['pointing'] == 'left':
scp_pixel = (rows - scp_pixel[0] - 1, cols - scp_pixel[1] - 1)
return ImageDataType(
NumRows=rows, NumCols=cols,
FirstRow=0, FirstCol=0,
PixelType=pixel_type,
FullImage=(rows, cols),
SCPPixel=scp_pixel)
def get_geo_data():
# type: () -> GeoDataType
return GeoDataType(SCP=SCPType(ECF=collect['image']['center_pixel']['target_position']))
def get_position():
# type: () -> PositionType
px, py, pz = fit_position_xvalidation(state_time, state_position, state_velocity, max_degree=6)
return PositionType(ARPPoly=XYZPolyType(X=px, Y=py, Z=pz))
def get_grid():
    # type: () -> GridType
    """Build the SICD Grid element (row/column direction parameters)."""
    img = collect['image']
    # Default to the most generic grid; SLC products not formed by
    # backprojection get the proper slant-plane/RGZERO designation.
    image_plane = 'OTHER'
    grid_type = 'PLANE'
    if self._img_desc_tags['product_type'] == 'SLC' and img['algorithm'] != 'backprojection':
        image_plane = 'SLANT'
        grid_type = 'RGZERO'
    coa_time = parse_timestring(img['center_pixel']['center_time'], precision='ns')
    # Range impulse response bandwidth from pulse bandwidth (two-way).
    row_imp_rsp_bw = 2*bw/speed_of_light
    row = DirParamType(
        SS=img['pixel_spacing_column'],
        ImpRespBW=row_imp_rsp_bw,
        ImpRespWid=img['range_resolution'],
        KCtr=2*fc/speed_of_light,
        DeltaK1=-0.5*row_imp_rsp_bw,
        DeltaK2=0.5*row_imp_rsp_bw,
        DeltaKCOAPoly=[[0.0, ], ],
        WgtType=WgtTypeType(
            WindowName=img['range_window']['name'],
            Parameters=convert_string_dict(img['range_window']['parameters'])))
    # get timecoa value
    timecoa_value = get_seconds(coa_time, start_time)  # TODO: constant?
    # find an approximation for zero doppler spacing - necessarily rough for backprojected images
    # find velocity at coatime
    arp_velocity = position.ARPPoly.derivative_eval(timecoa_value, der_order=1)
    arp_speed = numpy.linalg.norm(arp_velocity)
    col_ss = img['pixel_spacing_row']
    dop_bw = img['processed_azimuth_bandwidth']
    # ss_zd_s = col_ss/arp_speed
    col = DirParamType(
        SS=col_ss,
        ImpRespWid=img['azimuth_resolution'],
        ImpRespBW=dop_bw/arp_speed,
        KCtr=0,
        WgtType=WgtTypeType(
            WindowName=img['azimuth_window']['name'],
            Parameters=convert_string_dict(img['azimuth_window']['parameters'])))
    # TODO: from Wade - account for numeric WgtFunct
    return GridType(
        ImagePlane=image_plane,
        Type=grid_type,
        TimeCOAPoly=[[timecoa_value, ], ],
        Row=row,
        Col=col)
def get_radar_colection():  # NOTE(review): "colection" typo kept - the call site below uses this name
    # type: () -> RadarCollectionType
    """Build the SICD RadarCollection element from the radar metadata."""
    radar = collect['radar']
    freq_min = fc - 0.5*bw
    return RadarCollectionType(
        TxPolarization=radar['transmit_polarization'],
        TxFrequency=(freq_min, freq_min + bw),
        Waveform=[WaveformParametersType(
            TxRFBandwidth=bw,
            TxPulseLength=radar['pulse_duration'],
            RcvDemodType='CHIRP',
            ADCSampleRate=radar['sampling_frequency'],
            TxFreqStart=freq_min)],
        RcvChannels=[ChanParametersType(
            TxRcvPolarization='{}:{}'.format(radar['transmit_polarization'],
                                             radar['receive_polarization']))])
def get_timeline():
    # type: () -> TimelineType
    """Build the SICD Timeline element with a single IPP set spanning the collect."""
    # prf is taken from the first prf entry of the radar metadata.
    prf = collect['radar']['prf'][0]['prf']
    return TimelineType(
        CollectStart=start_time,
        CollectDuration=duration,
        IPP=[
            IPPSetType(
                TStart=0,
                TEnd=duration,
                IPPStart=0,
                IPPEnd=duration*prf,
                IPPPoly=(0, prf)), ])
def get_image_formation():
    # type: () -> ImageFormationType
    """Build the SICD ImageFormation element."""
    radar = collect['radar']
    algo = collect['image']['algorithm'].upper()
    processings = None
    if algo == 'BACKPROJECTION':
        processings = [ProcessingType(Type='Backprojected to DEM', Applied=True), ]
    if algo not in ('PFA', 'RMA', 'RGAZCOMP'):
        # SICD recognizes only a fixed set of algorithm identifiers.
        logger.warning(
            'Image formation algorithm {} not one of the recognized SICD options, '
            'being set to "OTHER".'.format(algo))
        algo = 'OTHER'
    return ImageFormationType(
        RcvChanProc=RcvChanProcType(NumChanProc=1, PRFScaleFactor=1),
        ImageFormAlgo=algo,
        TStartProc=0,
        TEndProc=duration,
        TxRcvPolarizationProc='{}:{}'.format(radar['transmit_polarization'], radar['receive_polarization']),
        TxFrequencyProc=(
            radar_collection.TxFrequency.Min,
            radar_collection.TxFrequency.Max),
        STBeamComp='NO',
        ImageBeamComp='NO',
        AzAutofocus='NO',
        RgAutofocus='NO',
        Processings=processings)
# TODO: From Wade - Radiometric is not suitable?
# extract general use information
collect = self._img_desc_tags['collect']
start_time = parse_timestring(collect['start_timestamp'], precision='ns')
end_time = parse_timestring(collect['stop_timestamp'], precision='ns')
duration = get_seconds(end_time, start_time, precision='ns')
state_time, state_position, state_velocity = extract_state_vector()
bw = collect['radar']['pulse_bandwidth']
fc = collect['radar']['center_frequency']
# define the sicd elements
collection_info = get_collection_info()
image_creation = get_image_creation()
image_data = get_image_data()
geo_data = get_geo_data()
position = get_position()
grid = get_grid()
radar_collection = get_radar_colection()
timeline = get_timeline()
image_formation = get_image_formation()
sicd = SICDType(
CollectionInfo=collection_info,
ImageCreation=image_creation,
ImageData=image_data,
GeoData=geo_data,
Position=position,
Grid=grid,
RadarCollection=radar_collection,
Timeline=timeline,
ImageFormation=image_formation)
sicd.derive()
# this would be a rough estimate - waiting for radiometric data
# sicd.populate_rniirs(override=False)
return sicd
class CapellaReader(BaseReader, SICDTypeReader):
    """
    The Capella reader object.
    """
    __slots__ = ('_capella_details', )

    def __init__(self, capella_details):
        """
        Parameters
        ----------
        capella_details : str|CapellaDetails
        """
        if isinstance(capella_details, string_types):
            # A string argument is interpreted as a file name.
            capella_details = CapellaDetails(capella_details)
        if not isinstance(capella_details, CapellaDetails):
            raise TypeError('The input argument for capella_details must be a '
                            'filename or CapellaDetails object')
        self._capella_details = capella_details
        # Derive the SICD structure and build the pixel-access chipper.
        sicd = self.capella_details.get_sicd()
        chipper = NativeTiffChipper(self.capella_details.tiff_details, symmetry=self.capella_details.get_symmetry())
        SICDTypeReader.__init__(self, sicd)
        BaseReader.__init__(self, chipper, reader_type="SICD")

    @property
    def capella_details(self):
        # type: () -> CapellaDetails
        """
        CapellaDetails: The capella details object.
        """
        return self._capella_details

    @property
    def file_name(self):
        # Delegates to the underlying details object.
        return self.capella_details.file_name
| [
"logging.getLogger",
"sarpy.io.complex.sicd_elements.CollectionInfo.RadarModeType",
"sarpy.io.complex.sicd_elements.ImageFormation.RcvChanProcType",
"sarpy.io.complex.sicd_elements.RadarCollection.WaveformParametersType",
"numpy.linalg.norm",
"sarpy.io.complex.sicd_elements.ImageData.ImageDataType",
"sa... | [((1591, 1618), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1608, 1618), False, 'import logging\n'), ((2053, 2076), 'sarpy.io.general.utils.is_file_like', 'is_file_like', (['file_name'], {}), '(file_name)\n', (2065, 2076), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((2744, 2766), 'sarpy.io.general.tiff.TiffDetails', 'TiffDetails', (['file_name'], {}), '(file_name)\n', (2755, 2766), False, 'from sarpy.io.general.tiff import TiffDetails, NativeTiffChipper\n'), ((13364, 13424), 'sarpy.io.general.utils.parse_timestring', 'parse_timestring', (["collect['start_timestamp']"], {'precision': '"""ns"""'}), "(collect['start_timestamp'], precision='ns')\n", (13380, 13424), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((13444, 13503), 'sarpy.io.general.utils.parse_timestring', 'parse_timestring', (["collect['stop_timestamp']"], {'precision': '"""ns"""'}), "(collect['stop_timestamp'], precision='ns')\n", (13460, 13503), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((13523, 13572), 'sarpy.io.general.utils.get_seconds', 'get_seconds', (['end_time', 'start_time'], {'precision': '"""ns"""'}), "(end_time, start_time, precision='ns')\n", (13534, 13572), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((14157, 14395), 'sarpy.io.complex.sicd_elements.SICD.SICDType', 'SICDType', ([], {'CollectionInfo': 'collection_info', 'ImageCreation': 'image_creation', 'ImageData': 'image_data', 'GeoData': 'geo_data', 'Position': 'position', 'Grid': 'grid', 'RadarCollection': 'radar_collection', 'Timeline': 'timeline', 'ImageFormation': 'image_formation'}), '(CollectionInfo=collection_info, ImageCreation=image_creation,\n ImageData=image_data, GeoData=geo_data, Position=position, Grid=grid,\n RadarCollection=radar_collection, Timeline=timeline, ImageFormation=\n 
image_formation)\n', (14165, 14395), False, 'from sarpy.io.complex.sicd_elements.SICD import SICDType\n'), ((15486, 15521), 'sarpy.io.complex.base.SICDTypeReader.__init__', 'SICDTypeReader.__init__', (['self', 'sicd'], {}), '(self, sicd)\n', (15509, 15521), False, 'from sarpy.io.complex.base import SICDTypeReader\n'), ((15530, 15584), 'sarpy.io.general.base.BaseReader.__init__', 'BaseReader.__init__', (['self', 'chipper'], {'reader_type': '"""SICD"""'}), "(self, chipper, reader_type='SICD')\n", (15549, 15584), False, 'from sarpy.io.general.base import BaseReader, SarpyIOError\n'), ((2902, 2956), 'sarpy.io.general.base.SarpyIOError', 'SarpyIOError', (['"""No "ImageDescription" tag in the tiff."""'], {}), '(\'No "ImageDescription" tag in the tiff.\')\n', (2914, 2956), False, 'from sarpy.io.general.base import BaseReader, SarpyIOError\n'), ((3133, 3155), 'json.loads', 'json.loads', (['img_format'], {}), '(img_format)\n', (3143, 3155), False, 'import json\n'), ((4590, 4603), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4601, 4603), False, 'from collections import OrderedDict\n'), ((7782, 7918), 'sarpy.io.complex.sicd_elements.ImageData.ImageDataType', 'ImageDataType', ([], {'NumRows': 'rows', 'NumCols': 'cols', 'FirstRow': '(0)', 'FirstCol': '(0)', 'PixelType': 'pixel_type', 'FullImage': '(rows, cols)', 'SCPPixel': 'scp_pixel'}), '(NumRows=rows, NumCols=cols, FirstRow=0, FirstCol=0, PixelType\n =pixel_type, FullImage=(rows, cols), SCPPixel=scp_pixel)\n', (7795, 7918), False, 'from sarpy.io.complex.sicd_elements.ImageData import ImageDataType\n'), ((8256, 8342), 'sarpy.io.complex.utils.fit_position_xvalidation', 'fit_position_xvalidation', (['state_time', 'state_position', 'state_velocity'], {'max_degree': '(6)'}), '(state_time, state_position, state_velocity,\n max_degree=6)\n', (8280, 8342), False, 'from sarpy.io.complex.utils import fit_position_xvalidation\n'), ((8774, 8842), 'sarpy.io.general.utils.parse_timestring', 'parse_timestring', 
(["img['center_pixel']['center_time']"], {'precision': '"""ns"""'}), "(img['center_pixel']['center_time'], precision='ns')\n", (8790, 8842), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((9486, 9519), 'sarpy.io.general.utils.get_seconds', 'get_seconds', (['coa_time', 'start_time'], {}), '(coa_time, start_time)\n', (9497, 9519), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((9796, 9827), 'numpy.linalg.norm', 'numpy.linalg.norm', (['arp_velocity'], {}), '(arp_velocity)\n', (9813, 9827), False, 'import numpy\n'), ((10425, 10527), 'sarpy.io.complex.sicd_elements.Grid.GridType', 'GridType', ([], {'ImagePlane': 'image_plane', 'Type': 'grid_type', 'TimeCOAPoly': '[[timecoa_value]]', 'Row': 'row', 'Col': 'col'}), '(ImagePlane=image_plane, Type=grid_type, TimeCOAPoly=[[\n timecoa_value]], Row=row, Col=col)\n', (10433, 10527), False, 'from sarpy.io.complex.sicd_elements.Grid import GridType, DirParamType, WgtTypeType\n'), ((6068, 6102), 'sarpy.io.complex.sicd_elements.CollectionInfo.RadarModeType', 'RadarModeType', ([], {'ModeType': '"""STRIPMAP"""'}), "(ModeType='STRIPMAP')\n", (6081, 6102), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType\n'), ((5522, 5569), 'sarpy.io.general.utils.parse_timestring', 'parse_timestring', (["entry['time']"], {'precision': '"""ns"""'}), "(entry['time'], precision='ns')\n", (5538, 5569), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((6178, 6220), 'sarpy.io.complex.sicd_elements.CollectionInfo.RadarModeType', 'RadarModeType', ([], {'ModeType': '"""DYNAMIC STRIPMAP"""'}), "(ModeType='DYNAMIC STRIPMAP')\n", (6191, 6220), False, 'from sarpy.io.complex.sicd_elements.CollectionInfo import CollectionInfoType, RadarModeType\n'), ((6984, 7056), 'sarpy.io.general.utils.parse_timestring', 'parse_timestring', (["self._img_desc_tags['processing_time']"], {'precision': 
'"""us"""'}), "(self._img_desc_tags['processing_time'], precision='us')\n", (7000, 7056), False, 'from sarpy.io.general.utils import parse_timestring, get_seconds, is_file_like\n'), ((8097, 8161), 'sarpy.io.complex.sicd_elements.GeoData.SCPType', 'SCPType', ([], {'ECF': "collect['image']['center_pixel']['target_position']"}), "(ECF=collect['image']['center_pixel']['target_position'])\n", (8104, 8161), False, 'from sarpy.io.complex.sicd_elements.GeoData import GeoDataType, SCPType\n'), ((8379, 8408), 'sarpy.io.complex.sicd_elements.blocks.XYZPolyType', 'XYZPolyType', ([], {'X': 'px', 'Y': 'py', 'Z': 'pz'}), '(X=px, Y=py, Z=pz)\n', (8390, 8408), False, 'from sarpy.io.complex.sicd_elements.blocks import XYZPolyType\n'), ((12207, 12264), 'sarpy.io.complex.sicd_elements.ImageFormation.ProcessingType', 'ProcessingType', ([], {'Type': '"""Backprojected to DEM"""', 'Applied': '(True)'}), "(Type='Backprojected to DEM', Applied=True)\n", (12221, 12264), False, 'from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, RcvChanProcType, ProcessingType\n'), ((12604, 12652), 'sarpy.io.complex.sicd_elements.ImageFormation.RcvChanProcType', 'RcvChanProcType', ([], {'NumChanProc': '(1)', 'PRFScaleFactor': '(1)'}), '(NumChanProc=1, PRFScaleFactor=1)\n', (12619, 12652), False, 'from sarpy.io.complex.sicd_elements.ImageFormation import ImageFormationType, RcvChanProcType, ProcessingType\n'), ((10947, 11123), 'sarpy.io.complex.sicd_elements.RadarCollection.WaveformParametersType', 'WaveformParametersType', ([], {'TxRFBandwidth': 'bw', 'TxPulseLength': "radar['pulse_duration']", 'RcvDemodType': '"""CHIRP"""', 'ADCSampleRate': "radar['sampling_frequency']", 'TxFreqStart': 'freq_min'}), "(TxRFBandwidth=bw, TxPulseLength=radar[\n 'pulse_duration'], RcvDemodType='CHIRP', ADCSampleRate=radar[\n 'sampling_frequency'], TxFreqStart=freq_min)\n", (10969, 11123), False, 'from sarpy.io.complex.sicd_elements.RadarCollection import RadarCollectionType, WaveformParametersType, 
ChanParametersType\n'), ((11716, 11808), 'sarpy.io.complex.sicd_elements.Timeline.IPPSetType', 'IPPSetType', ([], {'TStart': '(0)', 'TEnd': 'duration', 'IPPStart': '(0)', 'IPPEnd': '(duration * prf)', 'IPPPoly': '(0, prf)'}), '(TStart=0, TEnd=duration, IPPStart=0, IPPEnd=duration * prf,\n IPPPoly=(0, prf))\n', (11726, 11808), False, 'from sarpy.io.complex.sicd_elements.Timeline import TimelineType, IPPSetType\n')] |
"""
Common routines for models in Chainer.
"""
__all__ = ['round_channels', 'BreakBlock', 'ReLU6', 'HSwish', 'get_activation_layer', 'GlobalAvgPool2D',
'SelectableDense', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'ConvBlock',
'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block', 'dwconv3x3_block',
'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block', 'pre_conv3x3_block',
'DeconvBlock', 'ChannelShuffle', 'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock', 'saconv3x3_block',
'PixelShuffle', 'DucBlock', 'SimpleSequential', 'DualPathSequential', 'Concurrent', 'SequentialConcurrent',
'ParametricSequential', 'ParametricConcurrent', 'Hourglass', 'SesquialteralHourglass',
'MultiOutputSequential', 'ParallelConcurent', 'DualPathParallelConcurent', 'Flatten', 'AdaptiveAvgPool2D',
'NormActivation', 'InterpolationBlock', 'HeatmapMaxDetBlock']
from inspect import isfunction
from functools import partial
import numpy as np
from chainer import Chain
import chainer.functions as F
import chainer.links as L
from chainer import link
from chainer.initializers import _get_initializer
from chainer.variable import Parameter
def round_channels(channels,
                   divisor=8):
    """
    Align a (possibly fractional) channel count to a multiple of `divisor`.

    The result is never smaller than `divisor` itself, and never shrinks the
    original value by more than roughly 10%.

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns:
    -------
    int
        Weighted number of channels.
    """
    # Round half-up to the nearest multiple of the divisor, floored at one divisor.
    aligned = max(divisor, int(channels + divisor / 2.0) // divisor * divisor)
    # Do not allow the alignment to drop the count by more than ~10%.
    if aligned < 0.9 * channels:
        aligned += divisor
    return aligned
class BreakBlock(Chain):
    """
    Hourglass helper block that deliberately severs a connection by mapping
    any input to ``None``.
    """
    def __init__(self):
        super(BreakBlock, self).__init__()

    def __call__(self, x):
        # Intentionally discard the input.
        return None

    def __repr__(self):
        return '{name}()'.format(name=self.__class__.__name__)
class ReLU6(Chain):
    """
    ReLU activation capped at 6, i.e. ``min(max(x, 0), 6)``.
    """
    def __call__(self, x):
        return F.clip(x, 0.0, 6.0)
class Swish(Chain):
    """
    Swish activation, ``x * sigmoid(x)``, from 'Searching for Activation
    Functions,' https://arxiv.org/abs/1710.05941.
    """
    def __call__(self, x):
        return F.sigmoid(x) * x
class HSigmoid(Chain):
    """
    Hard sigmoid: a piecewise-linear approximation of the sigmoid from
    'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    """
    def __call__(self, x):
        # clip(x + 3, 0, 6) / 6 == relu6(x + 3) / 6
        return F.clip(x + 3.0, 0.0, 6.0) / 6.0
class HSwish(Chain):
    """
    H-Swish activation, ``x * hsigmoid(x)``, from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    """
    def __call__(self, x):
        return x * F.clip(x + 3.0, 0.0, 6.0) / 6.0
def get_activation_layer(activation):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    activation : function or str
        Activation function or name of activation function.

    Returns:
    -------
    function
        Activation layer.
    """
    assert (activation is not None)
    if isfunction(activation):
        # A factory function: call it to obtain the activation layer.
        return activation()
    if not isinstance(activation, str):
        # Already an activation layer instance.
        return activation
    # Dispatch table: name -> factory producing the activation layer.
    factories = {
        "relu": lambda: F.relu,
        "relu6": ReLU6,
        "swish": Swish,
        # "swish" could alternatively be partial(F.swish, beta=[1.0])
        "hswish": HSwish,
        "sigmoid": lambda: F.sigmoid,
        "hsigmoid": HSigmoid,
    }
    try:
        factory = factories[activation]
    except KeyError:
        raise NotImplementedError()
    return factory()
class GlobalAvgPool2D(Chain):
    """
    Average pooling over the entire spatial extent of the input.
    """
    def __call__(self, x):
        # Use the full feature-map size (H, W) as the pooling kernel.
        return F.average_pooling_2d(x, ksize=x.shape[2:])
class SelectableDense(link.Link):
    """
    Selectable dense layer.

    Holds `num_options` independent weight matrices (and optional biases);
    at forward time a per-sample index selects which option is applied.

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    initial_weight : `types.InitializerSpec`, default None
        Initializer for the `kernel` weights matrix.
    initial_bias: `types.InitializerSpec`, default 0
        Initializer for the bias vector.
    num_options : int, default 1
        Number of selectable options.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 initial_weight=None,
                 initial_bias=0,
                 num_options=1):
        super(SelectableDense, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_bias = use_bias
        self.num_options = num_options
        with self.init_scope():
            # One (out_channels x in_channels) weight matrix per option.
            weight_initializer = _get_initializer(initial_weight)
            self.weight = Parameter(
                initializer=weight_initializer,
                shape=(num_options, out_channels, in_channels),
                name="weight")
            if use_bias:
                bias_initializer = _get_initializer(initial_bias)
                self.bias = Parameter(
                    initializer=bias_initializer,
                    shape=(num_options, out_channels),
                    name="bias")
            else:
                self.bias = None

    def forward(self, x, indices):
        # Gather one weight matrix per sample, then apply a batched
        # matrix-vector product: y_i = W[indices_i] @ x_i (+ b[indices_i]).
        weight = self.xp.take(self.weight.data, indices=indices, axis=0)
        x = F.expand_dims(x, axis=-1)
        x = F.batch_matmul(weight, x)
        x = F.squeeze(x, axis=-1)
        if self.use_bias:
            bias = self.xp.take(self.bias.data, indices=indices, axis=0)
            x += bias
        return x

    @property
    def printable_specs(self):
        # Specs reported when the link is printed/summarized.
        specs = [
            ('in_channels', self.in_channels),
            ('out_channels', self.out_channels),
            ('use_bias', self.use_bias),
            ('num_options', self.num_options),
        ]
        for spec in specs:
            yield spec
class DenseBlock(Chain):
    """
    Fully-connected layer followed by optional batch normalization and an
    optional activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu)):
        super(DenseBlock, self).__init__()
        self.use_bn = use_bn
        self.activate = (activation is not None)
        with self.init_scope():
            self.fc = L.Linear(
                in_size=in_channels,
                out_size=out_channels,
                nobias=(not use_bias))
            if self.use_bn:
                self.bn = L.BatchNormalization(size=out_channels, eps=bn_eps)
            if self.activate:
                self.activ = get_activation_layer(activation)

    def __call__(self, x):
        out = self.fc(x)
        if self.use_bn:
            out = self.bn(out)
        return self.activ(out) if self.activate else out
class ConvBlock1d(Chain):
    """
    1D convolution followed by optional batch normalization and an optional
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int
        Convolution window size.
    stride : int
        Stride of the convolution.
    pad : int
        Padding value for convolution layer.
    dilate : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu)):
        super(ConvBlock1d, self).__init__()
        self.use_bn = use_bn
        self.activate = (activation is not None)
        with self.init_scope():
            self.conv = L.Convolution1D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                dilate=dilate,
                groups=groups,
                nobias=(not use_bias))
            if self.use_bn:
                self.bn = L.BatchNormalization(size=out_channels, eps=bn_eps)
            if self.activate:
                self.activ = get_activation_layer(activation)

    def __call__(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        return self.activ(out) if self.activate else out
def conv1x1(in_channels,
            out_channels,
            stride=1,
            groups=1,
            use_bias=False):
    """
    Plain pointwise (1x1) convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return L.Convolution2D(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=1,
        stride=stride,
        groups=groups,
        nobias=(not use_bias))
def conv3x3(in_channels,
            out_channels,
            stride=1,
            pad=1,
            dilate=1,
            groups=1,
            use_bias=False):
    """
    Plain 3x3 convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return L.Convolution2D(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=3,
        stride=stride,
        pad=pad,
        dilate=dilate,
        groups=groups,
        nobias=(not use_bias))
def depthwise_conv3x3(channels,
                      stride=1,
                      pad=1,
                      dilate=1,
                      use_bias=False):
    """
    Depthwise 3x3 convolution layer: a grouped convolution with one group
    per channel, so channel count is preserved.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return L.Convolution2D(
        in_channels=channels,
        out_channels=channels,
        ksize=3,
        stride=stride,
        pad=pad,
        dilate=dilate,
        groups=channels,
        nobias=(not use_bias))
class ConvBlock(Chain):
    """
    2D convolution followed by optional batch normalization and an optional
    activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    pad : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu)):
        super(ConvBlock, self).__init__()
        self.use_bn = use_bn
        self.activate = (activation is not None)
        with self.init_scope():
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                dilate=dilate,
                groups=groups,
                nobias=(not use_bias))
            if self.use_bn:
                self.bn = L.BatchNormalization(size=out_channels, eps=bn_eps)
            if self.activate:
                self.activ = get_activation_layer(activation)

    def __call__(self, x):
        out = self.conv(x)
        if self.use_bn:
            out = self.bn(out)
        return self.activ(out) if self.activate else out
def conv1x1_block(in_channels,
                  out_channels,
                  stride=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: F.relu)):
    """
    Pointwise (1x1) specialization of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=1,
        pad=0,
        stride=stride,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv3x3_block(in_channels,
                  out_channels,
                  stride=1,
                  pad=1,
                  dilate=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: F.relu)):
    """
    3x3 specialization of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=3,
        pad=pad,
        stride=stride,
        dilate=dilate,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv5x5_block(in_channels,
                  out_channels,
                  stride=1,
                  pad=2,
                  dilate=1,
                  groups=1,
                  use_bias=False,
                  bn_eps=1e-5,
                  activation=(lambda: F.relu),
                  use_bn=True):
    """
    5x5 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    use_bn : bool, default True
        Whether to use BatchNorm layer. Appended after `activation` (unlike
        conv3x3_block/conv7x7_block) to preserve positional-call compatibility.
    """
    # Previously use_bn could not be disabled here, unlike the sibling
    # conv3x3_block/conv7x7_block helpers; forward it explicitly.
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=5,
        stride=stride,
        pad=pad,
        dilate=dilate,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def conv7x7_block(in_channels,
                  out_channels,
                  stride=1,
                  pad=3,
                  use_bias=False,
                  use_bn=True,
                  bn_eps=1e-5,
                  activation=(lambda: F.relu)):
    """
    7x7 specialization of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=7,
        pad=pad,
        stride=stride,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv_block(in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu)):
    """
    Depthwise specialization of the standard convolution block
    (groups equals the number of output channels).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    pad : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    """
    return ConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=ksize,
        stride=stride,
        pad=pad,
        dilate=dilate,
        groups=out_channels,  # one group per channel -> depthwise
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    pad=1,
                    dilate=1,
                    use_bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: F.relu),
                    use_bn=True):
    """
    3x3 depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    use_bn : bool, default True
        Whether to use BatchNorm layer. Appended after `activation` to
        preserve positional-call compatibility.
    """
    # Previously BatchNorm could not be disabled here even though
    # dwconv_block supports it; forward use_bn explicitly.
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=3,
        stride=stride,
        pad=pad,
        dilate=dilate,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
def dwconv5x5_block(in_channels,
                    out_channels,
                    stride=1,
                    pad=2,
                    dilate=1,
                    use_bias=False,
                    bn_eps=1e-5,
                    activation=(lambda: F.relu),
                    use_bn=True):
    """
    5x5 depthwise version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    stride : int or tuple/list of 2 int, default 1
        Stride of the convolution.
    pad : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
    use_bn : bool, default True
        Whether to use BatchNorm layer. Appended after `activation` to
        preserve positional-call compatibility.
    """
    # Previously BatchNorm could not be disabled here even though
    # dwconv_block supports it; forward use_bn explicitly.
    return dwconv_block(
        in_channels=in_channels,
        out_channels=out_channels,
        ksize=5,
        stride=stride,
        pad=pad,
        dilate=dilate,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_eps=bn_eps,
        activation=activation)
class DwsConvBlock(Chain):
    """
    Depthwise-separable convolution block: a depthwise convolution followed by
    a pointwise (1x1) convolution, each stage carrying its own optional
    BatchNorm and activation.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    ksize : int or tuple/list of 2 int
        Depthwise convolution window size.
    stride : int or tuple/list of 2 int
        Depthwise convolution stride.
    pad : int or tuple/list of 2 int
        Depthwise convolution padding.
    dilate : int or tuple/list of 2 int, default 1
        Depthwise convolution dilation.
    use_bias : bool, default False
        Whether bias terms are learned.
    dw_use_bn : bool, default True
        Whether the depthwise stage uses BatchNorm.
    pw_use_bn : bool, default True
        Whether the pointwise stage uses BatchNorm.
    bn_eps : float, default 1e-5
        Epsilon for the BatchNorm layers.
    dw_activation : function or str or None, default F.relu
        Activation after the depthwise stage.
    pw_activation : function or str or None, default F.relu
        Activation after the pointwise stage.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 use_bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_eps=1e-5,
                 dw_activation=(lambda: F.relu),
                 pw_activation=(lambda: F.relu)):
        super(DwsConvBlock, self).__init__()
        with self.init_scope():
            # Spatial filtering: one filter per input channel.
            self.dw_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=in_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                dilate=dilate,
                use_bias=use_bias,
                use_bn=dw_use_bn,
                bn_eps=bn_eps,
                activation=dw_activation)
            # Channel mixing: 1x1 projection to the target width.
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=pw_use_bn,
                bn_eps=bn_eps,
                activation=pw_activation)

    def __call__(self, x):
        return self.pw_conv(self.dw_conv(x))
def dwsconv3x3_block(in_channels,
                     out_channels,
                     stride=1,
                     pad=1,
                     dilate=1,
                     use_bias=False,
                     bn_eps=1e-5,
                     dw_activation=(lambda: F.relu),
                     pw_activation=(lambda: F.relu),
                     **kwargs):
    """
    Depthwise-separable convolution block with a fixed 3x3 depthwise kernel.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    stride : int or tuple/list of 2 int, default 1
        Depthwise convolution stride.
    pad : int or tuple/list of 2 int, default 1
        Depthwise convolution padding.
    dilate : int or tuple/list of 2 int, default 1
        Depthwise convolution dilation.
    use_bias : bool, default False
        Whether bias terms are learned.
    bn_eps : float, default 1e-5
        Epsilon for the BatchNorm layers.
    dw_activation : function or str or None, default F.relu
        Activation after the depthwise stage.
    pw_activation : function or str or None, default F.relu
        Activation after the pointwise stage.
    """
    # Delegate to DwsConvBlock with the depthwise kernel size pinned to 3.
    return DwsConvBlock(
        ksize=3,
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        pad=pad,
        dilate=dilate,
        use_bias=use_bias,
        bn_eps=bn_eps,
        dw_activation=dw_activation,
        pw_activation=pw_activation,
        **kwargs)
class PreConvBlock(Chain):
    """
    Convolution block with Batch normalization and ReLU pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the convolution.
    pad : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm. Added as a trailing
        parameter (positional callers unaffected) for consistency with
        ConvBlock/DeconvBlock, which already expose it; previously the
        epsilon was hard-coded to 1e-5.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 use_bias=False,
                 use_bn=True,
                 return_preact=False,
                 activate=True,
                 bn_eps=1e-5):
        super(PreConvBlock, self).__init__()
        self.return_preact = return_preact
        self.activate = activate
        self.use_bn = use_bn
        with self.init_scope():
            if self.use_bn:
                self.bn = L.BatchNormalization(
                    size=in_channels,
                    eps=bn_eps)
            if self.activate:
                self.activ = F.relu
            self.conv = L.Convolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=(not use_bias),
                dilate=dilate)

    def __call__(self, x):
        # Pre-activation order: BN -> ReLU -> convolution.
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Snapshot taken after BN+ReLU but before the convolution.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      stride=1,
                      use_bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True):
    """
    Pre-activated convolution block with a fixed 1x1 kernel.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    stride : int or tuple/list of 2 int, default 1
        Convolution stride.
    use_bias : bool, default False
        Whether a bias term is learned.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    return_preact : bool, default False
        Whether the pre-activation tensor is returned as well.
    activate : bool, default True
        Whether the block applies its activation.
    """
    # A 1x1 kernel needs no padding.
    return PreConvBlock(
        ksize=1,
        pad=0,
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        use_bias=use_bias,
        use_bn=use_bn,
        return_preact=return_preact,
        activate=activate)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      stride=1,
                      pad=1,
                      dilate=1,
                      use_bias=False,
                      use_bn=True,
                      return_preact=False,
                      activate=True):
    """
    Pre-activated convolution block with a fixed 3x3 kernel.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    stride : int or tuple/list of 2 int, default 1
        Convolution stride.
    pad : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilate : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias term is learned.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    return_preact : bool, default False
        Whether the pre-activation tensor is returned as well.
    activate : bool, default True
        Whether the block applies its activation.
    """
    return PreConvBlock(
        ksize=3,
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        pad=pad,
        dilate=dilate,
        use_bias=use_bias,
        use_bn=use_bn,
        return_preact=return_preact,
        activate=activate)
class DeconvBlock(Chain):
    """
    Deconvolution block with batch normalization and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    ksize : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Stride of the deconvolution.
    pad : int or tuple/list of 2 int
        Padding value for deconvolution layer.
    dilate : int or tuple/list of 2 int, default 1
        Dilation value for deconvolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default F.relu
        Activation function or name of activation function.
        Pass None to disable the activation sub-layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu),
                 **kwargs):
        super(DeconvBlock, self).__init__(**kwargs)
        # activation=None disables the activation sub-layer entirely.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.init_scope():
            self.conv = L.Deconvolution2D(
                in_channels=in_channels,
                out_channels=out_channels,
                ksize=ksize,
                stride=stride,
                pad=pad,
                nobias=(not use_bias),
                dilate=dilate,
                groups=groups)
            if self.use_bn:
                self.bn = L.BatchNormalization(
                    size=out_channels,
                    eps=bn_eps)
            if self.activate:
                self.activ = get_activation_layer(activation)
    def __call__(self, x):
        # Deconvolution -> (optional) BatchNorm -> (optional) activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Splits the channel axis into (groups, channels//groups), swaps the two
    factors and flattens back, interleaving channels across groups.

    Parameters:
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns:
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulted variable.
    """
    batch, channels, height, width = x.shape
    per_group = channels // groups
    y = F.reshape(x, shape=(batch, groups, per_group, height, width))
    y = F.swapaxes(y, axis1=1, axis2=2)
    return F.reshape(y, shape=(batch, channels, height, width))
class ChannelShuffle(Chain):
    """
    Layer wrapper around the `channel_shuffle` operation; its only state is
    the group count.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle, self).__init__()
        # The shuffle is only well-defined when groups divide the channels.
        assert (channels % groups == 0)
        self.groups = groups

    def __call__(self, x):
        return channel_shuffle(x, self.groups)
def channel_shuffle2(x,
                     groups):
    """
    Alternative version of the channel shuffle operation from 'ShuffleNet: An
    Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083: the per-group factor precedes the group
    factor before the swap.

    Parameters:
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns:
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulted variable.
    """
    batch, channels, height, width = x.shape
    per_group = channels // groups
    y = F.reshape(x, shape=(batch, per_group, groups, height, width))
    y = F.swapaxes(y, axis1=1, axis2=2)
    return F.reshape(y, shape=(batch, channels, height, width))
class ChannelShuffle2(Chain):
    """
    Layer wrapper around the `channel_shuffle2` operation (the alternative
    shuffle); its only state is the group count.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups):
        super(ChannelShuffle2, self).__init__()
        # The shuffle is only well-defined when groups divide the channels.
        assert (channels % groups == 0)
        self.groups = groups

    def __call__(self, x):
        return channel_shuffle2(x, self.groups)
class SEBlock(Chain):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    mid_channels : int or None, default None
        Number of middle channels.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    mid_activation : function or str, default F.relu
        Activation function after the first convolution.
    out_activation : function or str, default F.sigmoid
        Activation function after the last convolution.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 mid_channels=None,
                 round_mid=False,
                 use_conv=True,
                 mid_activation=(lambda: F.relu),
                 out_activation=(lambda: F.sigmoid)):
        super(SEBlock, self).__init__()
        self.use_conv = use_conv
        if mid_channels is None:
            mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        with self.init_scope():
            # Bottleneck stage: channels -> mid_channels, either a 1x1 conv
            # or a fully-connected layer depending on use_conv.
            if use_conv:
                self.conv1 = conv1x1(
                    in_channels=channels,
                    out_channels=mid_channels,
                    use_bias=True)
            else:
                self.fc1 = L.Linear(
                    in_size=channels,
                    out_size=mid_channels)
            self.activ = get_activation_layer(mid_activation)
            # Expansion stage: mid_channels -> channels.
            if use_conv:
                self.conv2 = conv1x1(
                    in_channels=mid_channels,
                    out_channels=channels,
                    use_bias=True)
            else:
                self.fc2 = L.Linear(
                    in_size=mid_channels,
                    out_size=channels)
            self.sigmoid = get_activation_layer(out_activation)
    def __call__(self, x):
        # Squeeze: global average pooling to a 1x1 spatial descriptor.
        w = F.average_pooling_2d(x, ksize=x.shape[2:])
        if not self.use_conv:
            # FC layers expect flat (batch, channels) vectors.
            w = F.reshape(w, shape=(w.shape[0], -1))
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # Restore (batch, channels, 1, 1) so broadcasting works below.
            w = F.expand_dims(F.expand_dims(w, axis=2), axis=3)
        # Excitation: rescale each input channel by its learned gate.
        x = x * w
        return x
class SABlock(Chain):
    """
    Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    groups : int
        Number of channel groups (cardinality, without radix).
    radix : int
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    bn_eps : float, default 1e-5
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 out_channels,
                 groups,
                 radix,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 bn_eps=1e-5):
        super(SABlock, self).__init__()
        self.groups = groups
        self.radix = radix
        self.use_conv = use_conv
        # The block receives `radix` splits stacked along the channel axis.
        in_channels = out_channels * radix
        mid_channels = max(in_channels // reduction, min_channels)
        with self.init_scope():
            if use_conv:
                self.conv1 = conv1x1(
                    in_channels=out_channels,
                    out_channels=mid_channels,
                    use_bias=True)
            else:
                self.fc1 = L.Linear(
                    in_size=out_channels,
                    out_size=mid_channels)
            self.bn = L.BatchNormalization(
                size=mid_channels,
                eps=bn_eps)
            self.activ = F.relu
            if use_conv:
                self.conv2 = conv1x1(
                    in_channels=mid_channels,
                    out_channels=in_channels,
                    use_bias=True)
            else:
                self.fc2 = L.Linear(
                    in_size=mid_channels,
                    out_size=in_channels)
            # Softmax over axis 1 (the radix axis after the swap in __call__)
            # yields the per-split attention weights.
            self.softmax = partial(
                F.softmax,
                axis=1)
    def __call__(self, x):
        batch, channels, height, width = x.shape
        # Separate the radix splits: (batch, radix, channels/radix, H, W).
        x = F.reshape(x, shape=(batch, self.radix, channels // self.radix, height, width))
        # Fuse splits by summation, then squeeze spatially (global avg pool).
        w = F.sum(x, axis=1)
        w = F.average_pooling_2d(w, ksize=w.shape[2:])
        if not self.use_conv:
            # FC layers expect flat (batch, channels) vectors.
            w = F.reshape(w, shape=(w.shape[0], -1))
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.bn(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        # Attention over splits, computed independently per cardinal group.
        w = F.reshape(w, shape=(batch, self.groups, self.radix, -1))
        w = F.swapaxes(w, axis1=1, axis2=2)
        w = self.softmax(w)
        # Broadcastable weights: (batch, radix, channels/radix, 1, 1).
        w = F.reshape(w, shape=(batch, self.radix, -1, 1, 1))
        # Weight each split and fuse by summation over the radix axis.
        x = x * w
        x = F.sum(x, axis=1)
        return x
class SAConvBlock(Chain):
    """
    Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955: a grouped convolution that emits `radix`
    channel splits, followed by a split-attention fusion.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    ksize : int or tuple/list of 2 int
        Convolution window size.
    stride : int or tuple/list of 2 int
        Convolution stride.
    pad : int or tuple/list of 2 int
        Convolution padding.
    dilate : int or tuple/list of 2 int, default 1
        Convolution dilation.
    groups : int, default 1
        Number of cardinal groups.
    use_bias : bool, default False
        Whether a bias term is learned.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_eps : float, default 1e-5
        Epsilon for the BatchNorm layers.
    activation : function or str or None, default F.relu
        Activation (or its name) applied after the convolution.
    radix : int, default 2
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether the attention uses 1x1 convolutions instead of FC layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 ksize,
                 stride,
                 pad,
                 dilate=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu),
                 radix=2,
                 reduction=4,
                 min_channels=32,
                 use_conv=True):
        super(SAConvBlock, self).__init__()
        with self.init_scope():
            # The convolution produces all `radix` splits at once, hence the
            # widened output-channel and group counts.
            self.conv = ConvBlock(
                in_channels=in_channels,
                out_channels=(out_channels * radix),
                ksize=ksize,
                stride=stride,
                pad=pad,
                dilate=dilate,
                groups=(groups * radix),
                use_bias=use_bias,
                use_bn=use_bn,
                bn_eps=bn_eps,
                activation=activation)
            self.att = SABlock(
                out_channels=out_channels,
                groups=groups,
                radix=radix,
                reduction=reduction,
                min_channels=min_channels,
                use_conv=use_conv,
                bn_eps=bn_eps)

    def __call__(self, x):
        return self.att(self.conv(x))
def saconv3x3_block(in_channels,
                    out_channels,
                    stride=1,
                    pad=1,
                    **kwargs):
    """
    Split-Attention convolution block with a fixed 3x3 kernel.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    stride : int or tuple/list of 2 int, default 1
        Convolution stride.
    pad : int or tuple/list of 2 int, default 1
        Convolution padding.
    """
    return SAConvBlock(
        ksize=3,
        in_channels=in_channels,
        out_channels=out_channels,
        stride=stride,
        pad=pad,
        **kwargs)
class PixelShuffle(Chain):
    """
    Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel
    Convolutional Neural Network,' https://arxiv.org/abs/1609.05158.

    Folds groups of `scale_factor ** 2` channels into upscaled spatial blocks,
    multiplying height and width by `scale_factor`.

    Parameters:
    ----------
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 scale_factor,
                 **kwargs):
        super(PixelShuffle, self).__init__(**kwargs)
        self.scale_factor = scale_factor

    def __call__(self, x):
        f1 = self.scale_factor
        f2 = self.scale_factor
        batch, channels, height, width = x.shape
        # Bug fix: the previous check `channels % f1 % f2 == 0` accepted any
        # channel count divisible by f1 alone (e.g. 6 with f1=f2=2), even
        # though the shuffle needs divisibility by the full f1*f2 factor.
        assert (channels % (f1 * f2) == 0)
        new_channels = channels // f1 // f2
        # Single reshape: the old intermediate 5-D reshape was immediately
        # overwritten by this one and had no effect.
        x = F.reshape(x, shape=(batch, new_channels, f1, f2, height, width))
        x = F.transpose(x, axes=(0, 1, 4, 2, 5, 3))
        x = F.reshape(x, shape=(batch, new_channels, height * f1, width * f2))
        return x
class DucBlock(Chain):
    """
    Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,'
    https://arxiv.org/abs/1702.08502.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    out_channels : int
        Count of channels in the output tensor.
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 **kwargs):
        super(DucBlock, self).__init__(**kwargs)
        # The 3x3 conv widens the channels so pixel-shuffle can fold the
        # extra scale_factor**2 factor into an upscaled spatial grid.
        expanded_channels = (scale_factor * scale_factor) * out_channels
        with self.init_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=expanded_channels)
            self.pix_shuffle = PixelShuffle(scale_factor=scale_factor)

    def __call__(self, x):
        return self.pix_shuffle(self.conv(x))
class SimpleSequential(Chain):
    """
    A sequential chain that can be used instead of Sequential.

    Child layers are registered through ordinary attribute assignment inside
    ``init_scope()``; their names are recorded in ``layer_names`` so the
    chain can be applied in insertion order.
    """
    def __init__(self):
        super(SimpleSequential, self).__init__()
        # Ordered registry of the names of child layers.
        self.layer_names = []
    def __setattr__(self, name, value):
        super(SimpleSequential, self).__setattr__(name, value)
        # Only record callables assigned inside init_scope(): those are the
        # child layers; plain attributes (like `layer_names` itself) are not.
        if self.within_init_scope and callable(value):
            self.layer_names.append(name)
    def __delattr__(self, name):
        super(SimpleSequential, self).__delattr__(name)
        try:
            self.layer_names.remove(name)
        except ValueError:
            # The attribute was not a registered layer; nothing to unregister.
            pass
    def __len__(self):
        return len(self.layer_names)
    def __call__(self, x):
        # Apply every registered layer in registration order.
        for name in self.layer_names:
            x = self[name](x)
        return x
    def el(self, index):
        # Positional access to the index-th registered layer.
        return self[self.layer_names[index]]
class DualPathSequential(SimpleSequential):
    """
    A sequential container for blocks carrying two inputs/outputs, executed in
    the order they were added. The first `first_ordinals` and the last
    `last_ordinals` blocks are treated as ordinary single-path blocks.

    Parameters:
    ----------
    return_two : bool, default True
        Whether both outputs are returned after execution.
    first_ordinals : int, default 0
        Number of leading blocks with single input/output.
    last_ordinals : int, default 0
        Number of trailing blocks with single input/output.
    dual_path_scheme : function
        How a dual-path block consumes/produces the pair.
    dual_path_scheme_ordinal : function
        How an ordinal block consumes/produces the pair.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda block, x1, x2: block(x1, x2)),
                 dual_path_scheme_ordinal=(lambda block, x1, x2: (block(x1), x2))):
        super(DualPathSequential, self).__init__()
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def __call__(self, x1, x2=None):
        num_blocks = len(self.layer_names)
        for idx, name in enumerate(self.layer_names):
            block = self[name]
            # Leading/trailing blocks run on the first path only.
            is_ordinal = (idx < self.first_ordinals) or (idx >= num_blocks - self.last_ordinals)
            scheme = self.dual_path_scheme_ordinal if is_ordinal else self.dual_path_scheme
            x1, x2 = scheme(block, x1, x2)
        return (x1, x2) if self.return_two else x1
class Concurrent(SimpleSequential):
    """
    Container that feeds the same input to every registered branch and merges
    the branch outputs.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which the outputs are merged.
    stack : bool, default False
        Whether to merge along a new dimension (ignored when merge_type set).
    merge_type : str, default None
        Branch merging mode: "cat", "stack" or "sum".
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 merge_type=None):
        super(Concurrent, self).__init__()
        assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"])
        self.axis = axis
        # Explicit merge_type wins; otherwise `stack` picks stack vs concat.
        self.merge_type = merge_type if merge_type is not None else ("stack" if stack else "cat")

    def __call__(self, x):
        branch_outs = [self[name](x) for name in self.layer_names]
        if self.merge_type == "stack":
            return F.stack(tuple(branch_outs), axis=self.axis)
        if self.merge_type == "cat":
            return F.concat(tuple(branch_outs), axis=self.axis)
        if self.merge_type == "sum":
            return F.sum(F.stack(tuple(branch_outs), axis=self.axis), self.axis)
        raise NotImplementedError()
class SequentialConcurrent(SimpleSequential):
    """
    Sequential container that also collects every intermediate output and
    merges the collection at the end.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which the outputs are merged.
    stack : bool, default False
        Whether to merge along a new dimension instead of concatenating.
    cat_input : bool, default True
        Whether the original input joins the collection.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True):
        super(SequentialConcurrent, self).__init__()
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def __call__(self, x):
        collected = [x] if self.cat_input else []
        for name in self.layer_names:
            x = self[name](x)
            collected.append(x)
        merge = F.stack if self.stack else F.concat
        return merge(tuple(collected), axis=self.axis)
class ParametricSequential(SimpleSequential):
    """
    Sequential container whose layers accept extra keyword parameters,
    forwarded unchanged to every layer in insertion order.
    """
    def __init__(self):
        super(ParametricSequential, self).__init__()

    def __call__(self, x, **kwargs):
        out = x
        for name in self.layer_names:
            layer = self[name]
            out = layer(out, **kwargs)
        return out
class ParametricConcurrent(SimpleSequential):
    """
    Concatenating container whose branches accept extra keyword parameters,
    forwarded unchanged to every branch.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which the branch outputs are concatenated.
    """
    def __init__(self, axis=1):
        super(ParametricConcurrent, self).__init__()
        self.axis = axis

    def __call__(self, x, **kwargs):
        branch_outs = [self[name](x, **kwargs) for name in self.layer_names]
        return F.concat(tuple(branch_outs), axis=self.axis)
class Hourglass(Chain):
    """
    A hourglass block.
    Parameters:
    ----------
    down_seq : SimpleSequential
        Down modules as sequential.
    up_seq : SimpleSequential
        Up modules as sequential.
    skip_seq : SimpleSequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs.
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False):
        super(Hourglass, self).__init__()
        self.depth = len(down_seq)
        assert (merge_type in ["cat", "add"])
        assert (len(up_seq) == self.depth)
        # skip_seq may carry one extra module, applied at the bottleneck.
        assert (len(skip_seq) in (self.depth, self.depth + 1))
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.extra_skip = (len(skip_seq) == self.depth + 1)
        with self.init_scope():
            self.down_seq = down_seq
            self.up_seq = up_seq
            self.skip_seq = skip_seq
    def _merge(self, x, y):
        # Combine the upsampling path with a skip branch (concat or add).
        if y is not None:
            if self.merge_type == "cat":
                x = F.concat((x, y), axis=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def __call__(self, x):
        y = None
        # Encoder pass: record the input and every downsampled feature map.
        down_outs = [x]
        for down_module_name in self.down_seq.layer_names:
            down_module = self.down_seq[down_module_name]
            x = down_module(x)
            down_outs.append(x)
        # Decoder pass: walk back up, merging matching-resolution skips.
        for i in range(len(down_outs)):
            if i != 0:
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq.el(self.depth - i)
                y = skip_module(y)
                x = self._merge(x, y)
            if i != len(down_outs) - 1:
                if (i == 0) and self.extra_skip:
                    # Optional extra transform at the bottleneck resolution.
                    skip_module = self.skip_seq.el(self.depth)
                    x = skip_module(x)
                up_module = self.up_seq.el(self.depth - 1 - i)
                x = up_module(x)
        if self.return_first_skip:
            return x, y
        else:
            return x
class SesquialteralHourglass(Chain):
    """
    A sesquialteral hourglass block.
    Parameters:
    ----------
    down1_seq : SimpleSequential
        The first down modules as sequential.
    skip1_seq : SimpleSequential
        The first skip connection modules as sequential.
    up_seq : SimpleSequential
        Up modules as sequential.
    skip2_seq : SimpleSequential
        The second skip connection modules as sequential.
    down2_seq : SimpleSequential
        The second down modules as sequential.
    merge_type : str, default 'cat'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat"):
        super(SesquialteralHourglass, self).__init__()
        # All paths must agree on depth; the skip sequences carry one extra
        # module (applied at the entry resolution of their half).
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        with self.init_scope():
            self.down1_seq = down1_seq
            self.skip1_seq = skip1_seq
            self.up_seq = up_seq
            self.skip2_seq = skip2_seq
            self.down2_seq = down2_seq
    def _merge(self, x, y):
        # Combine a path output with a skip branch (concat or add).
        if y is not None:
            if self.merge_type == "cat":
                x = F.concat((x, y), axis=1)
            elif self.merge_type == "add":
                x = x + y
        return x
    def __call__(self, x):
        # First (down) half: collect skip1 outputs at every resolution.
        y = self.skip1_seq[self.skip1_seq.layer_names[0]](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[self.down1_seq.layer_names[i]](x)
            y = self.skip1_seq[self.skip1_seq.layer_names[i + 1]](x)
            skip1_outs.append(y)
        x = skip1_outs[self.depth]
        # Middle (up) half: merge with skip1 outputs, collect skip2 outputs.
        y = self.skip2_seq[self.skip2_seq.layer_names[0]](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[self.up_seq.layer_names[i]](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(x, y)
            y = self.skip2_seq[self.skip2_seq.layer_names[i + 1]](x)
            skip2_outs.append(y)
        # Last (down) half: merge with skip2 outputs on the way back down.
        x = self.skip2_seq[self.skip2_seq.layer_names[self.depth]](x)
        for i in range(self.depth):
            x = self.down2_seq[self.down2_seq.layer_names[i]](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(x, y)
        return x
class MultiOutputSequential(SimpleSequential):
    """
    A sequential container with multiple outputs.
    Blocks will be executed in the order they are added.

    A block flagged with a truthy `do_output` attribute contributes its output
    to the extra-outputs list; a block flagged with `do_output2` must return a
    tuple `(x, extra_outputs)` whose second element is spliced into the list.

    Parameters:
    ----------
    multi_output : bool, default True
        Whether to return multiple output.
    dual_output : bool, default False
        Whether to return dual output.
    return_last : bool, default True
        Whether to forcibly return last value.
    """
    def __init__(self,
                 multi_output=True,
                 dual_output=False,
                 return_last=True):
        super(MultiOutputSequential, self).__init__()
        self.multi_output = multi_output
        self.dual_output = dual_output
        self.return_last = return_last

    def __call__(self, x):
        outs = []
        for name in self.layer_names:
            block = self[name]
            x = block(x)
            if getattr(block, "do_output", False):
                outs.append(x)
            elif getattr(block, "do_output2", False):
                # Idiom fix: isinstance instead of `type(x) == tuple` (also
                # accepts tuple subclasses); behavior otherwise unchanged.
                assert isinstance(x, tuple)
                outs.extend(x[1])
                x = x[0]
        if self.multi_output:
            return [x] + outs if self.return_last else outs
        elif self.dual_output:
            return x, outs
        else:
            return x
class ParallelConcurent(SimpleSequential):
    """
    Container with one input per branch: the i-th branch receives the i-th
    element of the input sequence; outputs are merged per `merge_type`.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which the outputs are merged.
    merge_type : str, default 'list'
        Branch merging mode: "list", "cat", "stack" or "sum".
    """
    def __init__(self,
                 axis=1,
                 merge_type="list"):
        super(ParallelConcurent, self).__init__()
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def __call__(self, x):
        branch_outs = [self[name](xi) for name, xi in zip(self.layer_names, x)]
        if self.merge_type == "list":
            return branch_outs
        if self.merge_type == "stack":
            return F.stack(tuple(branch_outs), axis=self.axis)
        if self.merge_type == "cat":
            return F.concat(tuple(branch_outs), axis=self.axis)
        if self.merge_type == "sum":
            return F.sum(F.stack(tuple(branch_outs), axis=self.axis), self.axis)
        raise NotImplementedError()
class DualPathParallelConcurent(SimpleSequential):
    """
    Container with one dual-path input per branch: the i-th branch receives
    the i-th elements of both input sequences; each path's outputs are merged
    independently per `merge_type`.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which the outputs are merged.
    merge_type : str, default 'list'
        Branch merging mode: "list", "cat", "stack" or "sum".
    """
    def __init__(self,
                 axis=1,
                 merge_type="list"):
        super(DualPathParallelConcurent, self).__init__()
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def _merge_outs(self, outs):
        # Merge one path's branch outputs according to merge_type.
        if self.merge_type == "list":
            return outs
        if self.merge_type == "stack":
            return F.stack(tuple(outs), axis=self.axis)
        if self.merge_type == "cat":
            return F.concat(tuple(outs), axis=self.axis)
        if self.merge_type == "sum":
            return F.sum(F.stack(tuple(outs), axis=self.axis), self.axis)
        raise NotImplementedError()

    def __call__(self, x1, x2):
        first_outs = []
        second_outs = []
        for name, a, b in zip(self.layer_names, x1, x2):
            ya, yb = self[name](a, b)
            first_outs.append(ya)
            second_outs.append(yb)
        return self._merge_outs(first_outs), self._merge_outs(second_outs)
class Flatten(Chain):
    """
    Block that collapses every non-batch dimension into one.
    """
    def __call__(self, x):
        batch = x.shape[0]
        return x.reshape(batch, -1)
class AdaptiveAvgPool2D(Chain):
    """
    Global average pooling over the spatial dimensions (1x1 output).
    """
    def __call__(self, x):
        spatial_size = x.shape[2:]
        return F.average_pooling_2d(x, ksize=spatial_size)
class NormActivation(Chain):
    """
    BatchNorm followed by an activation. Usable on its own, e.g. as the final
    block of PreResNet.

    Parameters:
    ----------
    in_channels : int
        Count of channels in the input tensor.
    bn_eps : float, default 1e-5
        Epsilon for the BatchNorm layer.
    activation : function or str or None, default F.relu
        Activation (or its name) applied after the normalization.
    """
    def __init__(self,
                 in_channels,
                 bn_eps=1e-5,
                 activation=(lambda: F.relu),
                 **kwargs):
        super(NormActivation, self).__init__(**kwargs)
        with self.init_scope():
            self.bn = L.BatchNormalization(
                size=in_channels,
                eps=bn_eps)
            self.activ = get_activation_layer(activation)

    def __call__(self, x):
        return self.activ(self.bn(x))
class InterpolationBlock(Chain):
    """
    Spatial interpolation block (up- or downscaling by a fixed factor, or to a
    fixed size).

    Parameters:
    ----------
    scale_factor : int
        Multiplier (or divisor, when up=False) for spatial size.
    out_size : tuple of 2 int, default None
        Fixed output spatial size; overrides scale_factor when given.
    up : bool, default True
        Whether to upsample (True) or downsample (False).
    mode : str, default 'bilinear'
        Interpolation algorithm.
    align_corners : bool, default True
        Whether corner pixels of input and output tensors are aligned.
    """
    def __init__(self,
                 scale_factor,
                 out_size=None,
                 up=True,
                 mode="bilinear",
                 align_corners=True,
                 **kwargs):
        super(InterpolationBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.out_size = out_size
        self.up = up
        self.mode = mode
        self.align_corners = align_corners

    def __call__(self, x, size=None):
        if size is None:
            size = self.calc_out_size(x)
        return F.resize_images(x, output_shape=size, mode=self.mode, align_corners=self.align_corners)

    def calc_out_size(self, x):
        # Explicit out_size takes priority over the scale factor.
        if self.out_size is not None:
            return self.out_size
        factor = self.scale_factor
        if self.up:
            return tuple(s * factor for s in x.shape[2:])
        return tuple(s // factor for s in x.shape[2:])
class HeatmapMaxDetBlock(Chain):
    """
    Heatmap maximum detector block (for human pose estimation task).

    Converts per-keypoint heatmaps of shape (batch, channels, H, W) into
    (x, y, score) triples by taking the per-channel argmax and refining the
    location with a quarter-pixel shift towards the larger neighbor.
    """
    def __init__(self,
                 **kwargs):
        super(HeatmapMaxDetBlock, self).__init__(**kwargs)
    def __call__(self, x):
        heatmap = x
        vector_dim = 2
        batch = heatmap.shape[0]
        channels = heatmap.shape[1]
        in_size = x.shape[2:]
        # Flatten each heatmap so argmax/max operate over a single axis.
        heatmap_vector = F.reshape(heatmap, shape=(batch, channels, -1))
        indices = F.cast(F.expand_dims(F.argmax(heatmap_vector, axis=vector_dim), axis=vector_dim), np.float32)
        scores = F.max(heatmap_vector, axis=vector_dim, keepdims=True)
        # Zero the coordinates of keypoints whose peak value is non-positive.
        scores_mask = (scores.array > 0.0).astype(np.float32)
        # Recover 2-D coordinates from the flat argmax index (row-major).
        pts_x = (indices.array % in_size[1]) * scores_mask
        pts_y = (indices.array // in_size[1]) * scores_mask
        pts = F.concat((pts_x, pts_y, scores), axis=vector_dim).array
        # Quarter-pixel refinement: nudge each point towards the larger of
        # its horizontal/vertical neighbors (skipped on the image border).
        for b in range(batch):
            for k in range(channels):
                hm = heatmap[b, k, :, :].array
                px = int(pts_x[b, k])
                py = int(pts_y[b, k])
                if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
                    pts[b, k, 0] += np.sign(hm[py, px + 1] - hm[py, px - 1]) * 0.25
                    pts[b, k, 1] += np.sign(hm[py + 1, px] - hm[py - 1, px]) * 0.25
        return pts
| [
"chainer.functions.max",
"chainer.functions.concat",
"chainer.functions.argmax",
"chainer.links.Convolution2D",
"chainer.functions.clip",
"chainer.functions.batch_matmul",
"chainer.functions.average_pooling_2d",
"chainer.links.Linear",
"chainer.functions.resize_images",
"chainer.initializers._get_... | [((3294, 3316), 'inspect.isfunction', 'isfunction', (['activation'], {}), '(activation)\n', (3304, 3316), False, 'from inspect import isfunction\n'), ((10640, 10771), 'chainer.links.Convolution2D', 'L.Convolution2D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': '(1)', 'stride': 'stride', 'nobias': '(not use_bias)', 'groups': 'groups'}), '(in_channels=in_channels, out_channels=out_channels, ksize=1,\n stride=stride, nobias=not use_bias, groups=groups)\n', (10655, 10771), True, 'import chainer.links as L\n'), ((11597, 11752), 'chainer.links.Convolution2D', 'L.Convolution2D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': '(3)', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate', 'groups': 'groups'}), '(in_channels=in_channels, out_channels=out_channels, ksize=3,\n stride=stride, pad=pad, nobias=not use_bias, dilate=dilate, groups=groups)\n', (11612, 11752), True, 'import chainer.links as L\n'), ((12495, 12650), 'chainer.links.Convolution2D', 'L.Convolution2D', ([], {'in_channels': 'channels', 'out_channels': 'channels', 'ksize': '(3)', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate', 'groups': 'channels'}), '(in_channels=channels, out_channels=channels, ksize=3,\n stride=stride, pad=pad, nobias=not use_bias, dilate=dilate, groups=channels\n )\n', (12510, 12650), True, 'import chainer.links as L\n'), ((36223, 36293), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, groups, channels_per_group, height, width)'}), '(x, shape=(batch, groups, channels_per_group, height, width))\n', (36232, 36293), True, 'import chainer.functions as F\n'), ((36302, 36333), 'chainer.functions.swapaxes', 'F.swapaxes', (['x'], {'axis1': '(1)', 'axis2': '(2)'}), '(x, axis1=1, axis2=2)\n', (36312, 36333), True, 'import chainer.functions as F\n'), ((36342, 36394), 'chainer.functions.reshape', 'F.reshape', (['x'], 
{'shape': '(batch, channels, height, width)'}), '(x, shape=(batch, channels, height, width))\n', (36351, 36394), True, 'import chainer.functions as F\n'), ((37576, 37646), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, channels_per_group, groups, height, width)'}), '(x, shape=(batch, channels_per_group, groups, height, width))\n', (37585, 37646), True, 'import chainer.functions as F\n'), ((37655, 37686), 'chainer.functions.swapaxes', 'F.swapaxes', (['x'], {'axis1': '(1)', 'axis2': '(2)'}), '(x, axis1=1, axis2=2)\n', (37665, 37686), True, 'import chainer.functions as F\n'), ((37695, 37747), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, channels, height, width)'}), '(x, shape=(batch, channels, height, width))\n', (37704, 37747), True, 'import chainer.functions as F\n'), ((2258, 2277), 'chainer.functions.clip', 'F.clip', (['x', '(0.0)', '(6.0)'], {}), '(x, 0.0, 6.0)\n', (2264, 2277), True, 'import chainer.functions as F\n'), ((4092, 4134), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['x'], {'ksize': 'x.shape[2:]'}), '(x, ksize=x.shape[2:])\n', (4112, 4134), True, 'import chainer.functions as F\n'), ((5859, 5884), 'chainer.functions.expand_dims', 'F.expand_dims', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (5872, 5884), True, 'import chainer.functions as F\n'), ((5897, 5922), 'chainer.functions.batch_matmul', 'F.batch_matmul', (['weight', 'x'], {}), '(weight, x)\n', (5911, 5922), True, 'import chainer.functions as F\n'), ((5935, 5956), 'chainer.functions.squeeze', 'F.squeeze', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (5944, 5956), True, 'import chainer.functions as F\n'), ((40499, 40541), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['x'], {'ksize': 'x.shape[2:]'}), '(x, ksize=x.shape[2:])\n', (40519, 40541), True, 'import chainer.functions as F\n'), ((43120, 43198), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, self.radix, channels // self.radix, height, 
width)'}), '(x, shape=(batch, self.radix, channels // self.radix, height, width))\n', (43129, 43198), True, 'import chainer.functions as F\n'), ((43211, 43227), 'chainer.functions.sum', 'F.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (43216, 43227), True, 'import chainer.functions as F\n'), ((43240, 43282), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['w'], {'ksize': 'w.shape[2:]'}), '(w, ksize=w.shape[2:])\n', (43260, 43282), True, 'import chainer.functions as F\n'), ((43547, 43603), 'chainer.functions.reshape', 'F.reshape', (['w'], {'shape': '(batch, self.groups, self.radix, -1)'}), '(w, shape=(batch, self.groups, self.radix, -1))\n', (43556, 43603), True, 'import chainer.functions as F\n'), ((43616, 43647), 'chainer.functions.swapaxes', 'F.swapaxes', (['w'], {'axis1': '(1)', 'axis2': '(2)'}), '(w, axis1=1, axis2=2)\n', (43626, 43647), True, 'import chainer.functions as F\n'), ((43688, 43737), 'chainer.functions.reshape', 'F.reshape', (['w'], {'shape': '(batch, self.radix, -1, 1, 1)'}), '(w, shape=(batch, self.radix, -1, 1, 1))\n', (43697, 43737), True, 'import chainer.functions as F\n'), ((43768, 43784), 'chainer.functions.sum', 'F.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (43773, 43784), True, 'import chainer.functions as F\n'), ((47971, 48036), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, new_channels, f1 * f2, height, width)'}), '(x, shape=(batch, new_channels, f1 * f2, height, width))\n', (47980, 48036), True, 'import chainer.functions as F\n'), ((48049, 48113), 'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, new_channels, f1, f2, height, width)'}), '(x, shape=(batch, new_channels, f1, f2, height, width))\n', (48058, 48113), True, 'import chainer.functions as F\n'), ((48126, 48165), 'chainer.functions.transpose', 'F.transpose', (['x'], {'axes': '(0, 1, 4, 2, 5, 3)'}), '(x, axes=(0, 1, 4, 2, 5, 3))\n', (48137, 48165), True, 'import chainer.functions as F\n'), ((48178, 48244), 
'chainer.functions.reshape', 'F.reshape', (['x'], {'shape': '(batch, new_channels, height * f1, width * f2)'}), '(x, shape=(batch, new_channels, height * f1, width * f2))\n', (48187, 48244), True, 'import chainer.functions as F\n'), ((64515, 64557), 'chainer.functions.average_pooling_2d', 'F.average_pooling_2d', (['x'], {'ksize': 'x.shape[2:]'}), '(x, ksize=x.shape[2:])\n', (64535, 64557), True, 'import chainer.functions as F\n'), ((66620, 66716), 'chainer.functions.resize_images', 'F.resize_images', (['x'], {'output_shape': 'out_size', 'mode': 'self.mode', 'align_corners': 'self.align_corners'}), '(x, output_shape=out_size, mode=self.mode, align_corners=\n self.align_corners)\n', (66635, 66716), True, 'import chainer.functions as F\n'), ((67414, 67461), 'chainer.functions.reshape', 'F.reshape', (['heatmap'], {'shape': '(batch, channels, -1)'}), '(heatmap, shape=(batch, channels, -1))\n', (67423, 67461), True, 'import chainer.functions as F\n'), ((67591, 67644), 'chainer.functions.max', 'F.max', (['heatmap_vector'], {'axis': 'vector_dim', 'keepdims': '(True)'}), '(heatmap_vector, axis=vector_dim, keepdims=True)\n', (67596, 67644), True, 'import chainer.functions as F\n'), ((2469, 2481), 'chainer.functions.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (2478, 2481), True, 'import chainer.functions as F\n'), ((2706, 2731), 'chainer.functions.clip', 'F.clip', (['(x + 3.0)', '(0.0)', '(6.0)'], {}), '(x + 3.0, 0.0, 6.0)\n', (2712, 2731), True, 'import chainer.functions as F\n'), ((5206, 5238), 'chainer.initializers._get_initializer', '_get_initializer', (['initial_weight'], {}), '(initial_weight)\n', (5222, 5238), False, 'from chainer.initializers import _get_initializer\n'), ((5265, 5373), 'chainer.variable.Parameter', 'Parameter', ([], {'initializer': 'weight_initializer', 'shape': '(num_options, out_channels, in_channels)', 'name': '"""weight"""'}), "(initializer=weight_initializer, shape=(num_options, out_channels,\n in_channels), name='weight')\n", (5274, 5373), 
False, 'from chainer.variable import Parameter\n'), ((7414, 7487), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'in_channels', 'out_size': 'out_channels', 'nobias': '(not use_bias)'}), '(in_size=in_channels, out_size=out_channels, nobias=not use_bias)\n', (7422, 7487), True, 'import chainer.links as L\n'), ((9390, 9554), 'chainer.links.Convolution1D', 'L.Convolution1D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate', 'groups': 'groups'}), '(in_channels=in_channels, out_channels=out_channels, ksize=\n ksize, stride=stride, pad=pad, nobias=not use_bias, dilate=dilate,\n groups=groups)\n', (9405, 9554), True, 'import chainer.links as L\n'), ((14231, 14395), 'chainer.links.Convolution2D', 'L.Convolution2D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate', 'groups': 'groups'}), '(in_channels=in_channels, out_channels=out_channels, ksize=\n ksize, stride=stride, pad=pad, nobias=not use_bias, dilate=dilate,\n groups=groups)\n', (14246, 14395), True, 'import chainer.links as L\n'), ((30239, 30384), 'chainer.links.Convolution2D', 'L.Convolution2D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate'}), '(in_channels=in_channels, out_channels=out_channels, ksize=\n ksize, stride=stride, pad=pad, nobias=not use_bias, dilate=dilate)\n', (30254, 30384), True, 'import chainer.links as L\n'), ((34928, 35094), 'chainer.links.Deconvolution2D', 'L.Deconvolution2D', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'ksize': 'ksize', 'stride': 'stride', 'pad': 'pad', 'nobias': '(not use_bias)', 'dilate': 'dilate', 'groups': 'groups'}), '(in_channels=in_channels, out_channels=out_channels, ksize\n =ksize, 
stride=stride, pad=pad, nobias=not use_bias, dilate=dilate,\n groups=groups)\n', (34945, 35094), True, 'import chainer.links as L\n'), ((40588, 40624), 'chainer.functions.reshape', 'F.reshape', (['w'], {'shape': '(w.shape[0], -1)'}), '(w, shape=(w.shape[0], -1))\n', (40597, 40624), True, 'import chainer.functions as F\n'), ((42498, 42549), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'mid_channels', 'eps': 'bn_eps'}), '(size=mid_channels, eps=bn_eps)\n', (42518, 42549), True, 'import chainer.links as L\n'), ((42971, 42997), 'functools.partial', 'partial', (['F.softmax'], {'axis': '(1)'}), '(F.softmax, axis=1)\n', (42978, 42997), False, 'from functools import partial\n'), ((43329, 43365), 'chainer.functions.reshape', 'F.reshape', (['w'], {'shape': '(w.shape[0], -1)'}), '(w, shape=(w.shape[0], -1))\n', (43338, 43365), True, 'import chainer.functions as F\n'), ((65274, 65324), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'in_channels', 'eps': 'bn_eps'}), '(size=in_channels, eps=bn_eps)\n', (65294, 65324), True, 'import chainer.links as L\n'), ((67840, 67889), 'chainer.functions.concat', 'F.concat', (['(pts_x, pts_y, scores)'], {'axis': 'vector_dim'}), '((pts_x, pts_y, scores), axis=vector_dim)\n', (67848, 67889), True, 'import chainer.functions as F\n'), ((2923, 2948), 'chainer.functions.clip', 'F.clip', (['(x + 3.0)', '(0.0)', '(6.0)'], {}), '(x + 3.0, 0.0, 6.0)\n', (2929, 2948), True, 'import chainer.functions as F\n'), ((5479, 5509), 'chainer.initializers._get_initializer', '_get_initializer', (['initial_bias'], {}), '(initial_bias)\n', (5495, 5509), False, 'from chainer.initializers import _get_initializer\n'), ((5538, 5629), 'chainer.variable.Parameter', 'Parameter', ([], {'initializer': 'bias_initializer', 'shape': '(num_options, out_channels)', 'name': '"""bias"""'}), "(initializer=bias_initializer, shape=(num_options, out_channels),\n name='bias')\n", (5547, 5629), False, 'from chainer.variable import 
Parameter\n'), ((7593, 7644), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'out_channels', 'eps': 'bn_eps'}), '(size=out_channels, eps=bn_eps)\n', (7613, 7644), True, 'import chainer.links as L\n'), ((9731, 9782), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'out_channels', 'eps': 'bn_eps'}), '(size=out_channels, eps=bn_eps)\n', (9751, 9782), True, 'import chainer.links as L\n'), ((14572, 14623), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'out_channels', 'eps': 'bn_eps'}), '(size=out_channels, eps=bn_eps)\n', (14592, 14623), True, 'import chainer.links as L\n'), ((30059, 30108), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'in_channels', 'eps': '(1e-05)'}), '(size=in_channels, eps=1e-05)\n', (30079, 30108), True, 'import chainer.links as L\n'), ((35271, 35322), 'chainer.links.BatchNormalization', 'L.BatchNormalization', ([], {'size': 'out_channels', 'eps': 'bn_eps'}), '(size=out_channels, eps=bn_eps)\n', (35291, 35322), True, 'import chainer.links as L\n'), ((39919, 39968), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'channels', 'out_size': 'mid_channels'}), '(in_size=channels, out_size=mid_channels)\n', (39927, 39968), True, 'import chainer.links as L\n'), ((40304, 40353), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'mid_channels', 'out_size': 'channels'}), '(in_size=mid_channels, out_size=channels)\n', (40312, 40353), True, 'import chainer.links as L\n'), ((40859, 40883), 'chainer.functions.expand_dims', 'F.expand_dims', (['w'], {'axis': '(2)'}), '(w, axis=2)\n', (40872, 40883), True, 'import chainer.functions as F\n'), ((42381, 42434), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'out_channels', 'out_size': 'mid_channels'}), '(in_size=out_channels, out_size=mid_channels)\n', (42389, 42434), True, 'import chainer.links as L\n'), ((42850, 42902), 'chainer.links.Linear', 'L.Linear', ([], {'in_size': 'mid_channels', 
'out_size': 'in_channels'}), '(in_size=mid_channels, out_size=in_channels)\n', (42858, 42902), True, 'import chainer.links as L\n'), ((56494, 56518), 'chainer.functions.concat', 'F.concat', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (56502, 56518), True, 'import chainer.functions as F\n'), ((59007, 59031), 'chainer.functions.concat', 'F.concat', (['(x, y)'], {'axis': '(1)'}), '((x, y), axis=1)\n', (59015, 59031), True, 'import chainer.functions as F\n'), ((67501, 67542), 'chainer.functions.argmax', 'F.argmax', (['heatmap_vector'], {'axis': 'vector_dim'}), '(heatmap_vector, axis=vector_dim)\n', (67509, 67542), True, 'import chainer.functions as F\n'), ((68200, 68240), 'numpy.sign', 'np.sign', (['(hm[py, px + 1] - hm[py, px - 1])'], {}), '(hm[py, px + 1] - hm[py, px - 1])\n', (68207, 68240), True, 'import numpy as np\n'), ((68284, 68324), 'numpy.sign', 'np.sign', (['(hm[py + 1, px] - hm[py - 1, px])'], {}), '(hm[py + 1, px] - hm[py - 1, px])\n', (68291, 68324), True, 'import numpy as np\n')] |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Isolation Forest Anomaly Detector."""
from madi.detectors.base_detector import BaseAnomalyDetectionAlgorithm
import numpy as np
import pandas as pd
import sklearn.ensemble
class IsolationForestAd(sklearn.ensemble.IsolationForest,
                        BaseAnomalyDetectionAlgorithm):
    """Wrapper class around the scikit-learn Isolation Forest Implementation."""

    def train_model(self, x_train: pd.DataFrame) -> pd.DataFrame:
        """Fit the underlying Isolation Forest on `x_train` and return it."""
        super().fit(X=x_train)
        return x_train

    def predict(self, x_test: pd.DataFrame) -> pd.DataFrame:
        """Score `x_test`, storing the result in a 'class_prob' column.

        Isolation Forest labels anomalies -1 and inliers 1; the -1 labels
        are remapped to 0 before being written back onto the frame.
        """
        raw_labels = super().predict(x_test)
        x_test['class_prob'] = np.where(raw_labels == -1, 0, raw_labels)
        return x_test
| [
"numpy.where"
] | [((1284, 1315), 'numpy.where', 'np.where', (['(preds == -1)', '(0)', 'preds'], {}), '(preds == -1, 0, preds)\n', (1292, 1315), True, 'import numpy as np\n')] |
import numpy
from .eval_splines import eval_cubic
## the functions in this file provide backward compatibility calls
##
## they can optionnally allocate memory for the result
## they work for any dimension, except the functions which compute the gradient
#######################
# Compatibility calls #
#######################
from numba import generated_jit
from .codegen import source_to_function
@generated_jit
def get_grid(a, b, n, C):
    """Numba-generated helper: build a function returning the grid spec
    ((a[0], b[0], n[0]), ..., (a[d-1], b[d-1], n[d-1])) with d = C.ndim.
    """
    # The number of dimensions is taken from the coefficient array's rank,
    # which is known at numba compile time.
    d = C.ndim
    # Literal tuple-of-tuples source: "((a[0],b[0],n[0]), ...,)".
    s = "({},)".format(str.join(", ", [f"(a[{k}],b[{k}],n[{k}])" for k in range(d)]))
    txt = "def get_grid(a,b,n,C): return {}".format(s)
    # source_to_function compiles the generated source; generated_jit then
    # uses the returned callable as the implementation for this signature.
    f = source_to_function(txt)
    return f
def eval_cubic_spline(a, b, orders, coefs, point):
    """Interpolate a cubic spline at a single point.

    Parameters
    ----------
    a : array of size d (float)
        Lower bounds of the cartesian grid.
    b : array of size d (float)
        Upper bounds of the cartesian grid.
    orders : array of size d (int)
        Number of nodes along each dimension (=(n1,...,nd)).
    coefs : array of dimension d, and size (n1+2, ..., nd+2)
        Filtered coefficients.
    point : array of size d
        Coordinate of the point where the spline must be interpolated.

    Returns
    -------
    float
        Interpolated value.
    """
    return eval_cubic(get_grid(a, b, orders, coefs), coefs, point)
def vec_eval_cubic_spline(a, b, orders, coefs, points, values=None):
    """Evaluates a cubic spline at many points

    Parameters:
    -----------
    a : array of size d (float)
        Lower bounds of the cartesian grid.
    b : array of size d (float)
        Upper bounds of the cartesian grid.
    orders : array of size d (int)
        Number of nodes along each dimension. (=(n1,...,nd))
    coefs : array of dimension d, and size (n1+2, ..., nd+2)
        Filtered coefficients.
    points : array of size N x d
        List of points where the splines must be interpolated.
    values (optional) : array of size (N)
        If not None, filled in place with the result.

    Returns
    -------
    values : array of size (N)
        Interpolated values. values[i] contains spline evaluated at point points[i,:].
    """
    grid = get_grid(a, b, orders, coefs)
    if values is None:
        return eval_cubic(grid, coefs, points)
    # Fill the caller-supplied buffer in place, and also return it so the
    # documented return value holds in both branches (previously this branch
    # implicitly returned None).
    eval_cubic(grid, coefs, points, values)
    return values
def eval_cubic_splines(a, b, orders, mcoefs, point, values=None):
    """Evaluates multi-splines at one point.

    Parameters:
    -----------
    a : array of size d (float)
        Lower bounds of the cartesian grid.
    b : array of size d (float)
        Upper bounds of the cartesian grid.
    orders : array of size d (int)
        Number of nodes along each dimension.
    mcoefs : array of dimension d+1, and size (p, n1+2, ..., nd+2)
        Filtered coefficients. For i in 1:(mcoefs.shape[0]), mcoefs[i,...] contains
        the coefficients of spline number i.
    point : array of size d
        Point where the spline must be interpolated.
    values (optional) : array of size (p)
        If not None, filled in place with the result.

    Returns
    -------
    values : array of size (p)
        Interpolated values. values[j] contains spline n-j evaluated at point `point`.
    """
    grid = get_grid(a, b, orders, mcoefs[..., 0])
    if values is None:
        return eval_cubic(grid, mcoefs, point)
    # Fill in place and also return the buffer, so the function honours its
    # documented return value in both branches (was returning None here).
    eval_cubic(grid, mcoefs, point, values)
    return values
def vec_eval_cubic_splines(a, b, orders, mcoefs, points, values=None):
    """Evaluates multi-splines on a series of points.

    Parameters:
    -----------
    a : array of size d (float)
        Lower bounds of the cartesian grid.
    b : array of size d (float)
        Upper bounds of the cartesian grid.
    orders : array of size d (int)
        Number of nodes along each dimension. ( =(n1,...nd) )
    mcoefs : array of dimension d+1, and size (n1+2, ..., nd+2, p)
        Filtered coefficients. coefs[i,...] contains the coefficients of spline number i.
    points : array of size N x d
        List of points where the splines must be interpolated.
    values (optional) : array of size (N x p)
        If not None, filled in place with the result.

    Returns
    -------
    values : array of size (N x p)
        Interpolated values. values[i,j] contains spline n-j evaluated at point points[i,:].
    """
    grid = get_grid(a, b, orders, mcoefs[..., 0])
    if values is None:
        return eval_cubic(grid, mcoefs, points)
    # Fill in place and also return the buffer, so the function honours its
    # documented return value in both branches (was returning None here).
    eval_cubic(grid, mcoefs, points, values)
    return values
#########
from .eval_cubic_numba import (
vec_eval_cubic_splines_G_1,
vec_eval_cubic_splines_G_2,
vec_eval_cubic_splines_G_3,
vec_eval_cubic_splines_G_4,
)
def vec_eval_cubic_splines_G(a, b, orders, mcoefs, points, values=None, dvalues=None):
    """Evaluate multi-splines and their gradients at many points.

    Parameters
    ----------
    a, b : array-like of size d (float)
        Lower / upper bounds of the cartesian grid.
    orders : array-like of size d (int)
        Number of nodes along each dimension.
    mcoefs : array of dimension d+1
        Filtered coefficients; last axis indexes the individual splines.
    points : array of size N x d
        Points where the splines must be interpolated.
    values (optional) : array of size (N, n_sp)
        If not None, filled in place with the interpolated values.
    dvalues (optional) : array of size (N, d, n_sp)
        If not None, filled in place with the gradients.

    Returns
    -------
    [values, dvalues] : list of two arrays
        Interpolated values and gradients.

    Raises
    ------
    ValueError
        If the grid dimension d is not in 1..4 (only those specialized
        kernels exist); previously such a call silently returned
        uninitialized buffers.
    """
    a = numpy.array(a, dtype=float)
    b = numpy.array(b, dtype=float)
    orders = numpy.array(orders, dtype=int)

    d = a.shape[0]
    N = points.shape[0]
    n_sp = mcoefs.shape[-1]

    # Allocate output buffers when the caller did not provide them.
    if values is None:
        values = numpy.empty((N, n_sp))
    if dvalues is None:
        dvalues = numpy.empty((N, d, n_sp))

    # Dispatch to the dimension-specialized kernel.
    if d == 1:
        vec_eval_cubic_splines_G_1(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 2:
        vec_eval_cubic_splines_G_2(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 3:
        vec_eval_cubic_splines_G_3(a, b, orders, mcoefs, points, values, dvalues)
    elif d == 4:
        vec_eval_cubic_splines_G_4(a, b, orders, mcoefs, points, values, dvalues)
    else:
        raise ValueError(
            "vec_eval_cubic_splines_G supports dimensions 1 to 4, got d={}".format(d))

    return [values, dvalues]
| [
"numpy.array",
"numpy.empty"
] | [((4810, 4837), 'numpy.array', 'numpy.array', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (4821, 4837), False, 'import numpy\n'), ((4846, 4873), 'numpy.array', 'numpy.array', (['b'], {'dtype': 'float'}), '(b, dtype=float)\n', (4857, 4873), False, 'import numpy\n'), ((4887, 4917), 'numpy.array', 'numpy.array', (['orders'], {'dtype': 'int'}), '(orders, dtype=int)\n', (4898, 4917), False, 'import numpy\n'), ((5031, 5053), 'numpy.empty', 'numpy.empty', (['(N, n_sp)'], {}), '((N, n_sp))\n', (5042, 5053), False, 'import numpy\n'), ((5097, 5122), 'numpy.empty', 'numpy.empty', (['(N, d, n_sp)'], {}), '((N, d, n_sp))\n', (5108, 5122), False, 'import numpy\n')] |
import numpy as np

# Six evenly spaced samples from 1.1 to 6.6, laid out as a 2 x 3 grid.
arr = np.reshape(np.linspace(1.1, 6.6, 6), (2, 3))
print(arr)

# Truncate every entry to an integer.
arr = arr.astype('int')
print(arr)
"numpy.linspace"
] | [((35, 59), 'numpy.linspace', 'np.linspace', (['(1.1)', '(6.6)', '(6)'], {}), '(1.1, 6.6, 6)\n', (46, 59), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 15 10:07:53 2016

@author: <NAME>
"""
import numpy as np

# All four weights share the same value.
# Presumably fractions used elsewhere in the study — confirm against callers.
n_1 = n_2 = n_3 = n_4 = 0.2

# Ms/Mf/As/Af look like start/finish temperature series — TODO confirm units.
Ms_list = np.array([68.74, 75.71, 82.33, 84.77, 88.27])
Mf_list = np.array([57.74, 65.39, 71.29, 74.07, 77.88])
As_list = np.array([78.47, 83.82, 88.81, 91.38, 94.78])
Af_list = np.array([88.75, 95.02, 102.15, 105.12, 108.85])
"numpy.array"
] | [((157, 202), 'numpy.array', 'np.array', (['[68.74, 75.71, 82.33, 84.77, 88.27]'], {}), '([68.74, 75.71, 82.33, 84.77, 88.27])\n', (165, 202), True, 'import numpy as np\n'), ((214, 259), 'numpy.array', 'np.array', (['[57.74, 65.39, 71.29, 74.07, 77.88]'], {}), '([57.74, 65.39, 71.29, 74.07, 77.88])\n', (222, 259), True, 'import numpy as np\n'), ((271, 316), 'numpy.array', 'np.array', (['[78.47, 83.82, 88.81, 91.38, 94.78]'], {}), '([78.47, 83.82, 88.81, 91.38, 94.78])\n', (279, 316), True, 'import numpy as np\n'), ((328, 376), 'numpy.array', 'np.array', (['[88.75, 95.02, 102.15, 105.12, 108.85]'], {}), '([88.75, 95.02, 102.15, 105.12, 108.85])\n', (336, 376), True, 'import numpy as np\n')] |
from typing import Any
import numpy as np
from matplotlib import pyplot as plt
from time import perf_counter
from scipy import integrate
from .study_configuration import StudyConfiguration
class FatigueIntegrator:
    """Integrate the dynamics of every fatigue model of a study and
    collect, report and plot the resulting state trajectories.
    """

    def __init__(self, study_configuration: StudyConfiguration):
        self.study = study_configuration
        # Set to True once perform() completes; guards the reporting methods.
        self._has_run: bool = False
        # One (n_states x n_time) array per fatigue model, filled by perform().
        self._results: list[np.ndarray] = []
        # Wall-clock integration time in seconds, one entry per model.
        self._performing_time: list[float] = []
        self.axes = plt.axes()

    def perform(self):
        """
        Perform the integration for all the fatigue_models
        """
        t_span = (self.study.t[0], self.study.t[-1])
        t_eval = self.study.t
        for fatigue, x0, plot_options in zip(self.study.fatigue_models.models, self.study.x0, self.study.plot_options):
            starting_time = perf_counter()
            # The lambda closes over the loop variable `fatigue`; this is safe
            # because solve_ivp consumes it before the next iteration starts.
            out: Any = integrate.solve_ivp(lambda t, x: self._dynamics(t, x, fatigue), t_span, x0, t_eval=t_eval)
            self._performing_time.append(perf_counter() - starting_time)
            self._results.append(out.y)
            self._add_result_to_plot(out.t, out.y, plot_options)
        self._has_run = True

    def plot_results(self):
        """Show the accumulated plot (requires perform() to have run)."""
        if not self._has_run:
            raise RuntimeError("run() must be called before plotting the results")
        self.axes.set_ylim((0, 1))
        plt.show()

    def print_final_sum(self):
        """Print, per model, the sum of the state components at the last index."""
        if not self._has_run:
            raise RuntimeError("run() must be called before printing the results")

        print("Sum of components at the final index")
        for model, results in zip(self.study.fatigue_models.models, self._results):
            print(f"\t{type(model).__name__}: {np.sum(results, axis=0)[0]}")

    def print_integration_time(self):
        """Print the per-model and total wall-clock integration times."""
        if not self._has_run:
            raise RuntimeError("run() must be called before printing the results")

        print(f"Individual integration time:")
        for model, t in zip(self.study.fatigue_models.models, self._performing_time):
            print(f"\t{type(model).__name__}: {t:1.3f} seconds")
        print(f"Total integration time: {sum(self._performing_time):1.3f} seconds")

    def print_rmse(self):
        """Print the RMSE between the trajectories of exactly two models."""
        if not self._has_run:
            raise RuntimeError("run() must be called before printing the results")
        if len(self.study.fatigue_models.models) != 2:
            raise RuntimeError("rmse must have exactly 2 models to be called")
        if self.study.rms_indices is None:
            raise ValueError("rms_indices were not provided in the study configuration")

        # Get aliases
        models = self.study.fatigue_models.models
        idx = self.study.rms_indices

        # NOTE(review): both results are indexed with idx[0]; if rms_indices is
        # meant to hold one index set per model, the second operand should
        # probably use idx[1] — confirm against the study configurations.
        e = self._results[0][idx[0], :] - self._results[1][idx[0], :]
        se = e**2
        mse = np.sum(se, axis=1) / self.study.n_points
        rmse = np.sqrt(mse)
        print(f"The RMSE between {type(models[0]).__name__} and {type(models[1]).__name__} is {rmse}")

    def _dynamics(self, t, x, fatigue):
        # Evaluate the model dynamics at time t for state x, driving it with
        # the (scaled) target function; [:, 0] flattens the column result.
        return np.array(fatigue.apply_dynamics(self.study.target_function.function(t) / fatigue.scaling, *x))[:, 0]

    def _add_result_to_plot(self, t: np.ndarray, out: np.ndarray, plot_options: Any):
        """Plot each state component, the target function, and the component sum."""
        plt.plot(t, out[0, :], color="tab:green", **plot_options)
        if out.shape[0] > 1:
            plt.plot(t, out[1, :], color="tab:orange", **plot_options)
            plt.plot(t, out[2, :], color="tab:red", **plot_options)
        if out.shape[0] > 3:
            # NOTE(review): "tab:gray" is passed positionally (as a plot format
            # string) unlike the color= keyword used above — confirm intended.
            plt.plot(t, out[3, :], "tab:gray", **plot_options)
        # Target function, drawn semi-transparent for reference.
        plt.plot(t, [self.study.target_function.function(_t) for _t in t], color="tab:blue", alpha=0.5, **plot_options)
        # Sum of the first (up to) four state components.
        plt.plot(t, np.sum(out[:4, :], axis=0), color="black", **plot_options)
| [
"numpy.sqrt",
"matplotlib.pyplot.plot",
"time.perf_counter",
"numpy.sum",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.show"
] | [((483, 493), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (491, 493), True, 'from matplotlib import pyplot as plt\n'), ((1356, 1366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1364, 1366), True, 'from matplotlib import pyplot as plt\n'), ((2840, 2852), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (2847, 2852), True, 'import numpy as np\n'), ((3209, 3266), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'out[0, :]'], {'color': '"""tab:green"""'}), "(t, out[0, :], color='tab:green', **plot_options)\n", (3217, 3266), True, 'from matplotlib import pyplot as plt\n'), ((833, 847), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (845, 847), False, 'from time import perf_counter\n'), ((2784, 2802), 'numpy.sum', 'np.sum', (['se'], {'axis': '(1)'}), '(se, axis=1)\n', (2790, 2802), True, 'import numpy as np\n'), ((3308, 3366), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'out[1, :]'], {'color': '"""tab:orange"""'}), "(t, out[1, :], color='tab:orange', **plot_options)\n", (3316, 3366), True, 'from matplotlib import pyplot as plt\n'), ((3379, 3434), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'out[2, :]'], {'color': '"""tab:red"""'}), "(t, out[2, :], color='tab:red', **plot_options)\n", (3387, 3434), True, 'from matplotlib import pyplot as plt\n'), ((3476, 3526), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'out[3, :]', '"""tab:gray"""'], {}), "(t, out[3, :], 'tab:gray', **plot_options)\n", (3484, 3526), True, 'from matplotlib import pyplot as plt\n'), ((3667, 3693), 'numpy.sum', 'np.sum', (['out[:4, :]'], {'axis': '(0)'}), '(out[:4, :], axis=0)\n', (3673, 3693), True, 'import numpy as np\n'), ((1003, 1017), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1015, 1017), False, 'from time import perf_counter\n'), ((1698, 1721), 'numpy.sum', 'np.sum', (['results'], {'axis': '(0)'}), '(results, axis=0)\n', (1704, 1721), True, 'import numpy as np\n')] |
################################################################################
# The Neural Network (NN) based Speech Synthesis System
# https://github.com/CSTR-Edinburgh/merlin
#
# Centre for Speech Technology Research
# University of Edinburgh, UK
# Copyright (c) 2014-2015
# All Rights Reserved.
#
# The system as a whole and most of the files in it are distributed
# under the following copyright and conditions
#
# Permission is hereby granted, free of charge, to use and distribute
# this software and its documentation without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of this work, and to
# permit persons to whom this work is furnished to do so, subject to
# the following conditions:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# - The authors' names may not be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THE UNIVERSITY OF EDINBURGH AND THE CONTRIBUTORS TO THIS WORK
# DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
# SHALL THE UNIVERSITY OF EDINBURGH NOR THE CONTRIBUTORS BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
# AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
# THIS SOFTWARE.
################################################################################
import pickle
import gzip
import os, sys, errno
import time
import math
import subprocess
import socket # only for socket.getfqdn()
import multiprocessing
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
#import gnumpy as gnp
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from utils.data_augmentation import DataAugmentation
from frontend.label_normalisation import HTSLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.label_modifier import HTSLabelModification
from frontend.merge_features import MergeFeat
from frontend.score_analysis import ScoreAnalyzer
import configuration
from models.deep_rnn import DeepRecurrentNetwork
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.acous_feat_extraction import acous_feat_extraction
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
# our custom logging class that can also plot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import io
from utils.file_paths import FilePaths
from utils.utils import read_file_list, prepare_file_path_list
def extract_file_id_list(file_list):
    """Return the bare file id for each path in *file_list*.

    The id is the basename with its extension stripped, e.g.
    '/data/lab/utt001.lab' -> 'utt001'.

    :param file_list: iterable of file path strings
    :return: list of id strings, in the same order as the input
    """
    # basename(splitext(p)[0]) drops both the directory and the extension
    return [os.path.basename(os.path.splitext(file_name)[0]) for file_name in file_list]
def make_output_file_list(out_dir, in_file_lists):
    """Map each input path to a path with the same basename under *out_dir*.

    :param out_dir: target directory (joined with '/', matching the
        toolkit's existing path convention)
    :param in_file_lists: iterable of input file path strings
    :return: list of output path strings, one per input, same order
    """
    return [out_dir + '/' + os.path.basename(in_file_name) for in_file_name in in_file_lists]
def visualize_dnn(dnn):
    """Plot the activation weight matrix of every layer in *dnn*.

    Iterates over ``dnn.params``, keeps only true weight matrices
    (bias vectors satisfy ``aa.size == aa.shape[0]`` and are skipped),
    and renders each one through the plotting logger.

    :param dnn: trained network whose ``params`` are theano shared
        variables with ``get_value()`` and ``name`` attributes
    """
    plotlogger = logging.getLogger("plotting")

    # reference activation weights in layers
    W = list(); layer_name = list()
    for i in range(len(dnn.params)):
        aa = dnn.params[i].get_value(borrow=True).T
        print(aa.shape, aa.size)
        # a bias vector has size == shape[0]; only keep 2-D weight matrices
        if aa.size > aa.shape[0]:
            W.append(aa)
            layer_name.append(dnn.params[i].name)

    ## plot activation weights including input and output
    layer_num = len(W)
    for i_layer in range(layer_num):
        fig_name = 'Activation weights W' + str(i_layer) + '_' + layer_name[i_layer]
        fig_title = 'Activation weights of W' + str(i_layer)
        xlabel = 'Neuron index of hidden layer ' + str(i_layer)
        ylabel = 'Neuron index of hidden layer ' + str(i_layer+1)
        if i_layer == 0:
            xlabel = 'Input feature index'
        if i_layer == layer_num-1:
            ylabel = 'Output feature index'
        # BUGFIX: was `logger.create_plot(...)` but no `logger` is bound in this
        # function (NameError at runtime); the plotting logger owns plot creation,
        # matching the add_plot_point/save_plot calls below.
        plotlogger.create_plot(fig_name, SingleWeightMatrixPlot)
        plotlogger.add_plot_point(fig_name, fig_name, W[i_layer])
        plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def load_covariance(var_file_dict, out_dimension_dict):
    """Load per-stream variance vectors from disk.

    :param var_file_dict: mapping feature name -> variance file path
    :param out_dimension_dict: mapping feature name -> stream dimension
    :return: dict mapping feature name -> numpy column vector of shape
        (out_dimension_dict[name], 1)
    """
    io_funcs = BinaryIOCollection()
    var = {}
    for feature_name, var_file in var_file_dict.items():
        loaded_values, _dimension = io_funcs.load_binary_file_frame(var_file, 1)
        var[feature_name] = numpy.reshape(loaded_values, (out_dimension_dict[feature_name], 1))
    return var
def train_DNN(train_xy_file_list, valid_xy_file_list, \
              nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None,
              cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None):
    """Train the neural network and pickle the best model to *nnets_file_name*.

    Parameters
    ----------
    train_xy_file_list : (list, list)
        Pair of (input, output) binary feature file lists for training.
    valid_xy_file_list : (list, list)
        Pair of (input, output) binary feature file lists for validation.
    nnets_file_name : str
        Path where the best-so-far model is pickled during training.
    n_ins, n_outs : int
        Input / output feature dimensionality.
    ms_outs : list of int
        Multi-stream output dimensions; asserted to sum to ``n_outs``.
    hyper_params : dict
        Training hyper-parameters (learning rate, epochs, batch size, ...).
    buffer_size : int
        Frames loaded per data partition (rounded down to whole minibatches).
    plot : bool
        If True, plot training/validation convergence via the plotting logger.
    var_dict, cmp_mean_vector, cmp_std_vector
        Accepted for interface compatibility; not read in this function body.
    init_dnn_model_file : str
        Existing pickled model to adapt/fine-tune from; "_" means train
        from randomly initialised weights.

    Returns
    -------
    float
        Best validation loss reached.

    NOTE(review): reads the module-level ``cfg`` configuration object
    (rnn_batch_training, optimizer, lr_decay, use_lhuc, freeze_layers, ...).
    """
    # get loggers for this function
    # this one writes to both console and file
    logger = logging.getLogger("main.train_DNN")
    logger.debug('Starting train_DNN')

    if plot:
        # this one takes care of plotting duties
        plotlogger = logging.getLogger("plotting")
        # create an (empty) plot of training convergence, ready to receive data points
        logger.create_plot('training convergence',MultipleSeriesPlot)
    try:
        assert numpy.sum(ms_outs) == n_outs
    except AssertionError:
        logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
        raise

    ####parameters#####
    finetune_lr = float(hyper_params['learning_rate'])
    training_epochs = int(hyper_params['training_epochs'])
    batch_size = int(hyper_params['batch_size'])
    l1_reg = float(hyper_params['l1_reg'])
    l2_reg = float(hyper_params['l2_reg'])
    warmup_epoch = int(hyper_params['warmup_epoch'])
    momentum = float(hyper_params['momentum'])
    warmup_momentum = float(hyper_params['warmup_momentum'])

    hidden_layer_size = hyper_params['hidden_layer_size']

    buffer_utt_size = buffer_size
    early_stop_epoch = int(hyper_params['early_stop_epochs'])

    hidden_activation = hyper_params['hidden_activation']
    output_activation = hyper_params['output_activation']

    model_type = hyper_params['model_type']
    hidden_layer_type = hyper_params['hidden_layer_type']

    ## use a switch to turn on pretraining
    ## pretraining may not help too much, if this case, we turn it off to save time
    do_pretraining = hyper_params['do_pretraining']
    pretraining_epochs = int(hyper_params['pretraining_epochs'])
    pretraining_lr = float(hyper_params['pretraining_lr'])

    sequential_training = hyper_params['sequential_training']
    dropout_rate = hyper_params['dropout_rate']

    # round the buffer down to a whole number of minibatches
    buffer_size = int(buffer_size / batch_size) * batch_size

    ###################
    (train_x_file_list, train_y_file_list) = train_xy_file_list
    (valid_x_file_list, valid_y_file_list) = valid_xy_file_list

    logger.debug('Creating training data provider')
    train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list,
                                         n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size,
                                         sequential = sequential_training, shuffle = True)

    logger.debug('Creating validation data provider')
    valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list,
                                         n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size,
                                         sequential = sequential_training, shuffle = False)

    if cfg.rnn_batch_training:
        train_data_reader.set_rnn_params(training_algo=cfg.training_algo, batch_size=cfg.batch_size, seq_length=cfg.seq_length, merge_size=cfg.merge_size, bucket_range=cfg.bucket_range)
        valid_data_reader.reshape_input_output()

    # load one partition up-front so the theano shared variables exist before
    # the fine-tune functions are compiled against them
    shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
    train_set_x, train_set_y = shared_train_set_xy
    shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
    valid_set_x, valid_set_y = shared_valid_set_xy
    train_data_reader.reset()
    valid_data_reader.reset()

    ##temporally we use the training set as pretrain_set_x.
    ##we need to support any data for pretraining

    # numpy random generator
    numpy_rng = numpy.random.RandomState(123)
    logger.info('building the model')

    dnn_model = None
    pretrain_fn = None ## not all the model support pretraining right now
    train_fn = None
    valid_fn = None
    valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion
    if model_type == 'DNN':
        dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs,
                                         L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, output_type = cfg.output_layer_type,
                                         dropout_rate = dropout_rate, optimizer = cfg.optimizer, rnn_batch_training = cfg.rnn_batch_training)
    else:
        logger.critical('%s type NN model is not supported!' %(model_type))
        raise

    ## Model adaptation -- fine tuning the existing model
    ## We can't just unpickle the old model and use that because fine-tune functions
    ## depend on opt_l2e option used in construction of initial model. One way around this
    ## would be to unpickle, manually set unpickled_dnn_model.opt_l2e=True and then call
    ## unpickled_dnn_model.build_finetne_function() again. This is another way, construct
    ## new model from scratch with opt_l2e=True, then copy existing weights over:
    use_lhuc = cfg.use_lhuc
    if init_dnn_model_file != "_":
        logger.info('load parameters from existing model: %s' %(init_dnn_model_file))
        if not os.path.isfile(init_dnn_model_file):
            sys.exit('Model file %s does not exist'%(init_dnn_model_file))
        existing_dnn_model = pickle.load(open(init_dnn_model_file, 'rb'))
        if not use_lhuc and not len(existing_dnn_model.params) == len(dnn_model.params):
            sys.exit('Old and new models have different numbers of weight matrices')
        elif use_lhuc and len(dnn_model.params) < len(existing_dnn_model.params):
            sys.exit('In LHUC adaptation new model must have more parameters than old model.')

        # assign the existing dnn model parameters to the new dnn model
        # k indexes the old model's params, i the new model's; they drift apart
        # when LHUC inserts extra 'c' scaling parameters into the new model
        k = 0
        for i in range(len(dnn_model.params)):
            ## Added for LHUC ##
            # In LHUC, we keep all the old parameters intact and learn only a small set of new
            # parameters
            if dnn_model.params[i].name == 'c':
                continue
            else:
                old_val = existing_dnn_model.params[k].get_value()
                new_val = dnn_model.params[i].get_value()
                if numpy.shape(old_val) == numpy.shape(new_val):
                    dnn_model.params[i].set_value(old_val)
                else:
                    sys.exit('old and new weight matrices have different shapes')
                k = k + 1

    train_fn, valid_fn = dnn_model.build_finetune_functions(
                    (train_set_x, train_set_y), (valid_set_x, valid_set_y), use_lhuc, layer_index=cfg.freeze_layers) #, batch_size=batch_size

    logger.info('fine-tuning the %s model' %(model_type))

    start_time = time.time()

    best_dnn_model = dnn_model
    best_validation_loss = sys.float_info.max
    previous_loss = sys.float_info.max

    lr_decay = cfg.lr_decay
    if lr_decay>0:
        early_stop_epoch *= lr_decay
    early_stop = 0
    val_loss_counter = 0

    previous_finetune_lr = finetune_lr

    epoch = 0
    while (epoch < training_epochs):
        epoch = epoch + 1

        # learning-rate schedule selected by cfg.lr_decay:
        #   0  -> fixed LR; <0 -> halve every epoch after warmup;
        #   >0 -> halve after every lr_decay consecutive validation-loss increases
        if lr_decay==0:
            # fixed learning rate
            reduce_lr = False
        elif lr_decay<0:
            # exponential decay
            reduce_lr = False if epoch <= warmup_epoch else True
        elif val_loss_counter > 0:
            # linear decay
            reduce_lr = False
            if val_loss_counter%lr_decay==0:
                reduce_lr = True
                val_loss_counter = 0
        else:
            # no decay
            reduce_lr = False

        if reduce_lr:
            current_finetune_lr = previous_finetune_lr * 0.5
            current_momentum = momentum
        else:
            current_finetune_lr = previous_finetune_lr
            current_momentum = warmup_momentum

        previous_finetune_lr = current_finetune_lr

        train_error = []
        sub_start_time = time.time()

        logger.debug("training params -- learning rate: %f, early_stop: %d/%d" % (current_finetune_lr, early_stop, early_stop_epoch))
        while (not train_data_reader.is_finish()):
            _, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()

            # if sequential training, the batch size will be the number of frames in an utterance
            # batch_size for sequential training is considered only when rnn_batch_training is set to True
            if sequential_training == True:
                batch_size = temp_train_set_x.shape[0]

            n_train_batches = temp_train_set_x.shape[0] // batch_size
            for index in range(n_train_batches):
                ## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function
                train_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
                train_set_y.set_value(numpy.asarray(temp_train_set_y[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)

                this_train_error = train_fn(current_finetune_lr, current_momentum)

                train_error.append(this_train_error)

        train_data_reader.reset()

        logger.debug('calculating validation loss')
        validation_losses = []
        while (not valid_data_reader.is_finish()):
            shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
            valid_set_x.set_value(numpy.asarray(temp_valid_set_x, dtype=theano.config.floatX), borrow=True)
            valid_set_y.set_value(numpy.asarray(temp_valid_set_y, dtype=theano.config.floatX), borrow=True)

            this_valid_loss = valid_fn()

            validation_losses.append(this_valid_loss)
        valid_data_reader.reset()

        this_validation_loss = numpy.mean(validation_losses)

        this_train_valid_loss = numpy.mean(numpy.asarray(train_error))

        sub_end_time = time.time()

        loss_difference = this_validation_loss - previous_loss

        logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
        if plot:
            plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
            plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
            plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')

        if this_validation_loss < best_validation_loss:
            # checkpoint the model whenever validation loss improves
            pickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))

            best_dnn_model = dnn_model
            best_validation_loss = this_validation_loss

        if this_validation_loss >= previous_loss:
            logger.debug('validation loss increased')
            val_loss_counter+=1
            early_stop+=1

        # early stopping only kicks in after a minimum of 15 epochs
        if epoch > 15 and early_stop > early_stop_epoch:
            logger.debug('stopping early')
            break

        # abort if training has diverged
        if math.isnan(this_validation_loss):
            break

        previous_loss = this_validation_loss

    end_time = time.time()

    logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))

    if plot:
        plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')

    return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, reshape_io=False):
    """Run a trained model over binary label files and write predicted features.

    :param valid_file_list: input binary label files (float32, row-major,
        n_ins values per frame)
    :param nnets_file_name: path of the pickled trained model
    :param n_ins: input feature dimension
    :param n_outs: output feature dimension
    :param out_file_list: output file path per input file, same order
    :param reshape_io: if True, feed the input as a (1, frames, n_ins)
        3-D array (for models trained with rnn_batch_training)
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')

    # `with` ensures the model file handle is closed (was leaked before)
    with open(nnets_file_name, 'rb') as fid:
        dnn_model = pickle.load(fid)

    file_number = len(valid_file_list)

    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # drop any trailing partial frame before reshaping
        features = features[:(n_ins * (features.size // n_ins))]
        test_set_x = features.reshape((-1, n_ins))
        n_rows = test_set_x.shape[0]

        if reshape_io:
            test_set_x = numpy.reshape(test_set_x, (1, test_set_x.shape[0], n_ins))
            test_set_x = numpy.array(test_set_x, 'float32')

        predicted_parameter = dnn_model.parameter_prediction(test_set_x)
        predicted_parameter = predicted_parameter.reshape(-1, n_outs)
        # truncate back to the original frame count in case the model padded
        predicted_parameter = predicted_parameter[0:n_rows]

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
##generate bottleneck layer as features
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list, bottleneck_index):
    """Generate bottleneck (hidden-layer) activations as output features.

    :param valid_file_list: input binary label files (float32, n_ins per frame)
    :param nnets_file_name: path of the pickled trained model
    :param n_ins: input feature dimension
    :param n_outs: kept for interface symmetry with dnn_generation; unused here
    :param out_file_list: output file path per input file, same order
    :param bottleneck_index: index of the hidden layer whose activations
        are extracted
    """
    logger = logging.getLogger("dnn_generation")
    logger.debug('Starting dnn_generation')

    # `with` ensures the model file handle is closed (was leaked before)
    with open(nnets_file_name, 'rb') as fid:
        dnn_model = pickle.load(fid)

    file_number = len(valid_file_list)

    for i in range(file_number):
        logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
        with open(valid_file_list[i], 'rb') as fid_lab:
            features = numpy.fromfile(fid_lab, dtype=numpy.float32)
        # drop any trailing partial frame before reshaping
        features = features[:(n_ins * (features.size // n_ins))]
        features = features.reshape((-1, n_ins))
        temp_set_x = features.tolist()
        test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))

        predicted_parameter = dnn_model.generate_hidden_layer(test_set_x, bottleneck_index)

        ### write to cmp file
        predicted_parameter = numpy.array(predicted_parameter, 'float32')
        with open(out_file_list[i], 'wb') as fid:
            predicted_parameter.tofile(fid)
        logger.debug('saved to %s' % out_file_list[i])
def perform_acoustic_composition_on_split(args):
    """ Performs acoustic composition on one chunk of data.

    This is used as input for Pool.map to allow parallel acoustic composition.
    The single *args* tuple holds (delta_win, acc_win, in_file_list_dict,
    nn_cmp_file_list, in_dimension_dict, out_dimension_dict) because
    Pool.map passes exactly one argument per call.
    """
    delta_win, acc_win, in_file_list_dict, nn_cmp_file_list, in_dimension_dict, out_dimension_dict = args
    worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
    worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, in_dimension_dict, out_dimension_dict)
def perform_acoustic_composition(delta_win, acc_win, in_file_list_dict, nn_cmp_file_list, cfg, parallel=True):
    """ Runs acoustic composition from in_file_list_dict to nn_cmp_file_list.

    If parallel is true, splits the data into multiple chunks and calls
    perform_acoustic_composition_on_split for each chunk.
    """
    if not parallel:
        # serial fallback: one worker processes the full file lists
        worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
        worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
        return

    num_splits = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(num_splits)

    # build one argument tuple per split; split i takes every num_splits-th
    # file starting at offset i, so the chunks interleave over the file lists
    splits_full = []
    for i in range(num_splits):
        chunk_file_dict = {stream: in_file_list_dict[stream][i::num_splits] for stream in in_file_list_dict}
        splits_full.append((delta_win,
                            acc_win,
                            chunk_file_dict,
                            nn_cmp_file_list[i::num_splits],
                            cfg.in_dimension_dict,
                            cfg.out_dimension_dict))

    pool.map(perform_acoustic_composition_on_split, splits_full)
    # close() + join() waits for all workers to finish cleanly
    pool.close()
    pool.join()
def main_function(cfg):
file_paths = FilePaths(cfg)
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
# create plot dir if set to True
if not os.path.exists(cfg.plot_dir) and cfg.plot:
os.makedirs(cfg.plot_dir)
#### parameter setting########
hidden_layer_size = cfg.hyper_params['hidden_layer_size']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
assert cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number == total_file_number, 'check train, valid, test file number'
data_dir = cfg.data_dir
inter_data_dir = cfg.inter_data_dir
nn_cmp_dir = file_paths.nn_cmp_dir
nn_cmp_norm_dir = file_paths.nn_cmp_norm_dir
model_dir = file_paths.model_dir
gen_dir = file_paths.gen_dir
in_file_list_dict = {}
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = file_paths.get_nn_cmp_file_list()
nn_cmp_norm_file_list = file_paths.get_nn_cmp_norm_file_list()
###normalisation information
norm_info_file = file_paths.norm_info_file
### normalise input full context label
# currently supporting two different forms of lingustic features
# later, we should generalise this
assert cfg.label_style == 'HTS', 'Only HTS-style labels are now supported as input to Merlin'
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
add_feat_dim = sum(cfg.additional_features.values())
lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim
if cfg.VoiceConversion:
lab_dim = cfg.cmp_dim
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
if cfg.process_labels_in_work_dir:
inter_data_dir = cfg.work_dir
# the number can be removed
file_paths.set_label_dir(label_normaliser.dimension, suffix, lab_dim)
file_paths.set_label_file_list()
binary_label_dir = file_paths.binary_label_dir
nn_label_dir = file_paths.nn_label_dir
nn_label_norm_dir = file_paths.nn_label_norm_dir
in_label_align_file_list = file_paths.in_label_align_file_list
binary_label_file_list = file_paths.binary_label_file_list
nn_label_file_list = file_paths.nn_label_file_list
nn_label_norm_file_list = file_paths.nn_label_norm_file_list
min_max_normaliser = None
label_norm_file = file_paths.label_norm_file
test_id_list = file_paths.test_id_list
# Debug:----------------------------------
if cfg.ACFTEXTR:
logger.info('acoustic feature extraction')
acous_feat_extraction(cfg.nat_wav_dir, file_id_list, cfg)
#generate_wav(gen_dir, file_id_list, cfg) # generated speech
#-----------------------------------------
if cfg.NORMLAB:
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list, label_type=cfg.label_type)
if cfg.additional_features:
out_feat_file_list = file_paths.out_feat_file_list
in_dim = label_normaliser.dimension
for new_feature, new_feature_dim in cfg.additional_features.items():
new_feat_dir = os.path.join(data_dir, new_feature)
new_feat_file_list = prepare_file_path_list(file_id_list, new_feat_dir, '.'+new_feature)
merger = MergeFeat(lab_dim = in_dim, feat_dim = new_feature_dim)
merger.merge_data(binary_label_file_list, new_feat_file_list, out_feat_file_list)
in_dim += new_feature_dim
binary_label_file_list = out_feat_file_list
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
DATAAUG = False
if DATAAUG:
data_augmentor = DataAugmentation(7, 9)
augmented_lab_file_list = data_augmentor.lab_augmentation(file_paths.nn_label_dir)
augmented_dur_file_list = data_augmentor.generate_dur_filename_list(augmented_lab_file_list)
cfg.train_file_number *= 17
nn_label_file_list = [os.path.join(file_paths.nn_label_dir, f) for f in augmented_lab_file_list]
nn_label_norm_file_list = [os.path.join(file_paths.nn_label_norm_dir, f) for f in augmented_lab_file_list]
file_paths.nn_label_file_list = [os.path.join(file_paths.nn_label_dir, f) for f in augmented_lab_file_list]
file_paths.nn_label_norm_file_list = [os.path.join(file_paths.nn_label_norm_dir, f) for f in augmented_lab_file_list]
file_paths.nn_cmp_norm_file_list = [os.path.join(file_paths.nn_cmp_norm_dir, f) for f in augmented_dur_file_list]
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
if cfg.GenTestList:
min_max_normaliser.load_min_max_values(label_norm_file)
else:
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
### enforce silence such that the normalization runs without removing silence: only for final synthesis
if cfg.GenTestList and cfg.enforce_silence:
min_max_normaliser.normalise_data(binary_label_file_list, nn_label_norm_file_list)
else:
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None and not cfg.GenTestList:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output duration data
if cfg.MAKEDUR:
logger.info('creating duration (output) features')
label_normaliser.prepare_dur_data(in_label_align_file_list, file_paths.dur_file_list, cfg.label_type, cfg.dur_feature_type)
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = cfg.delta_win #[-0.5, 0.0, 0.5]
acc_win = cfg.acc_win #[1.0, -2.0, 1.0]
if cfg.GenTestList:
for feature_name in list(cfg.in_dir_dict.keys()):
in_file_list_dict[feature_name] = prepare_file_path_list(test_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(test_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(test_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
if 'dur' in list(cfg.in_dir_dict.keys()) and cfg.AcousticModel:
lf0_file_list = file_paths.get_lf0_file_list()
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
acoustic_worker.make_equal_frames(dur_file_list, lf0_file_list, cfg.in_dimension_dict)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
else:
perform_acoustic_composition(delta_win, acc_win, in_file_list_dict, nn_cmp_file_list, cfg, parallel=True)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,
binary_label_file_list, lab_dim, silence_feature)
elif cfg.remove_silence_using_hts_labels:
## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(nn_cmp_file_list, in_label_align_file_list, nn_cmp_file_list) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = file_paths.var_dir
var_file_dict = file_paths.get_var_dic()
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
if cfg.GenTestList:
# load mean std values
global_mean_vector, global_std_vector = normaliser.load_mean_std_values(norm_info_file)
else:
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
# for hmpd vocoder we don't need to normalize the
# pdd values
if cfg.vocoder_type == 'hmpd':
stream_start_index = {}
dimension_index = 0
recorded_vuv = False
vuv_dimension = None
for feature_name in cfg.out_dimension_dict.keys():
if feature_name != 'vuv':
stream_start_index[feature_name] = dimension_index
else:
vuv_dimension = dimension_index
recorded_vuv = True
dimension_index += cfg.out_dimension_dict[feature_name]
logger.info('hmpd pdd values are not normalized since they are in 0 to 1')
global_mean_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 0
global_std_vector[:,stream_start_index['pdd']: stream_start_index['pdd'] + cfg.out_dimension_dict['pdd']] = 1
normaliser.feature_normalisation(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
if cfg.GenTestList:
min_max_normaliser.load_min_max_values(norm_info_file)
else:
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
if not cfg.GenTestList:
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
feature_index = 0
for feature_name in list(cfg.out_dimension_dict.keys()):
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_var_vector = feature_std_vector**2
feature_var_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list, train_y_file_list = file_paths.get_train_list_x_y()
valid_x_file_list, valid_y_file_list = file_paths.get_valid_list_x_y()
test_x_file_list, test_y_file_list = file_paths.get_test_list_x_y()
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
add_feat_dim = sum(cfg.additional_features.values())
lab_dim = label_normaliser.dimension + add_feat_dim + cfg.appended_input_dim
if cfg.VoiceConversion:
lab_dim = cfg.cmp_dim
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layer_size))
for hid_size in hidden_layer_size:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = file_paths.get_nnets_file_name()
temp_dir_name = file_paths.get_temp_nn_dir_name()
gen_dir = os.path.join(gen_dir, temp_dir_name)
    # When an alternative backend (Keras / TensorFlow) is selected, mirror the
    # dimensions and directories into cfg so the external runner classes can
    # drive training/generation from the configuration object alone.
    if cfg.switch_to_keras or cfg.switch_to_tensorflow:
        ### set configuration variables ###
        cfg.inp_dim = lab_dim
        cfg.out_dim = cfg.cmp_dim

        cfg.inp_feat_dir = nn_label_norm_dir
        cfg.out_feat_dir = nn_cmp_norm_dir
        cfg.pred_feat_dir = gen_dir

        # in test-list mode, read inputs from (and write predictions to) the
        # user-supplied synthesis directory instead
        if cfg.GenTestList and cfg.test_synth_dir!="None":
            cfg.inp_feat_dir = cfg.test_synth_dir
            cfg.pred_feat_dir = cfg.test_synth_dir

        if cfg.switch_to_keras:
            ### call kerasclass and use an instance ###
            from run_keras_with_merlin_io import KerasClass
            keras_instance = KerasClass(cfg)

        elif cfg.switch_to_tensorflow:
            ### call Tensorflowclass and use an instance ###
            from run_tensorflow_with_merlin_io import TensorflowClass
            tf_instance = TensorflowClass(cfg)
    ### DNN model training
    if cfg.TRAINDNN:

        # per-stream output variances, used by the training criterion
        var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)

        logger.info('training DNN')

        # norm_info_file holds two stacked vectors of output-normalisation
        # statistics; row 0 is used as the mean, row 1 as the std here
        fid = open(norm_info_file, 'rb')
        cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
        fid.close()
        cmp_min_max = cmp_min_max.reshape((2, -1))
        cmp_mean_vector = cmp_min_max[0, ]
        cmp_std_vector = cmp_min_max[1, ]

        try:
            os.makedirs(model_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create model directory %s' % model_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        try:
            # dispatch to the selected backend; the Theano path is the default
            if cfg.switch_to_keras:
                keras_instance.train_keras_model()
            elif cfg.switch_to_tensorflow:
                tf_instance.train_tensorflow_model()
            else:
                train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
                          valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
                          nnets_file_name = nnets_file_name, \
                          n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
                          hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
                          cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector, init_dnn_model_file=cfg.start_from_trained_model)
        except KeyboardInterrupt:
            logger.critical('train_DNN interrupted via keyboard')
            # Could 'raise' the exception further, but that causes a deep traceback to be printed
            # which we don't care about for a keyboard interrupt. So, just bail out immediately
            sys.exit(1)
        except:
            # deliberately broad: log any training failure before re-raising
            logger.critical('train_DNN threw an exception')
            raise
    if cfg.GENBNFEA:
        # Please only tune on this step when you want to generate bottleneck features from DNN
        gen_dir = file_paths.bottleneck_features

        # the bottleneck is taken to be the narrowest hidden layer; if several
        # layers share that size, the last one wins
        bottleneck_size = min(hidden_layer_size)
        bottleneck_index = 0
        for i in range(len(hidden_layer_size)):
            if hidden_layer_size[i] == bottleneck_size:
                bottleneck_index = i

        logger.info('generating bottleneck features from DNN')

        try:
            os.makedirs(gen_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create generation directory %s' % gen_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        # bottleneck features are produced for ALL utterances (train+valid+test)
        gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)

        dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)
    ### generate parameters from DNN
    # synthesis utterances follow the training set within file_id_list
    gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
    test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

    # test-list mode: synthesise the user-provided utterances instead
    if cfg.GenTestList:
        gen_file_id_list = test_id_list
        test_x_file_list = nn_label_norm_file_list
        if cfg.test_synth_dir!="None":
            gen_dir = cfg.test_synth_dir

    if cfg.DNNGEN:
        logger.info('generating from DNN')

        try:
            os.makedirs(gen_dir)
        except OSError as e:
            if e.errno == errno.EEXIST:
                # not an error - just means directory already exists
                pass
            else:
                logger.critical('Failed to create generation directory %s' % gen_dir)
                logger.critical(' OS error was: %s' % e.strerror)
                raise

        gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)

        if cfg.switch_to_keras:
            keras_instance.test_keras_model()
        elif cfg.switch_to_tensorflow:
            tf_instance.test_tensorflow_model()
        else:
            # batch-trained RNNs expect reshaped (batched) input
            reshape_io = True if cfg.rnn_batch_training else False
            dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, reshape_io)

        logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)

        # NOTE(review): the two rows of norm_info_file are read as min/max here
        # but as mean/std in the training branch; which pair is stored depends
        # on cfg.output_feature_normalisation -- verify
        fid = open(norm_info_file, 'rb')
        cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
        fid.close()
        cmp_min_max = cmp_min_max.reshape((2, -1))
        cmp_min_vector = cmp_min_max[0, ]
        cmp_max_vector = cmp_min_max[1, ]

        if cfg.output_feature_normalisation == 'MVN':
            denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
            denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
        elif cfg.output_feature_normalisation == 'MINMAX':
            denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
            denormaliser.denormalise_data(gen_file_list, gen_file_list)
        else:
            logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
            # NOTE(review): bare 'raise' with no active exception raises
            # RuntimeError, not a descriptive error -- consider raising explicitly
            raise

        if cfg.AcousticModel:
            ## perform MLPG to smooth parameter trajectory
            ## lf0 is included, the output features must have vuv.
            generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features, enforce_silence = cfg.enforce_silence)
            if cfg.singing:
                # singing mode carries extra metadata through the decomposition
                meta = pickle.load(open(os.path.join(cfg.singing_inter_data_dir, 'meta'), 'rb'))
                generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg, meta=meta)
            else:
                generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG, cfg=cfg)
        if cfg.DurationModel:
            ### Perform duration normalization(min. state dur set to 1) ###
            gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext)
            gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext)
            in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)

            # split the generated cmp stream back into per-feature duration files
            generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
            if cfg.singing:
                meta = pickle.load(open(os.path.join(cfg.singing_inter_data_dir, 'meta'), 'rb'))
                generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, meta)
            else:
                generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)

            # rewrite the input label alignments with the predicted durations
            label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern, label_type = cfg.label_type)
            label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list)
    ### generate wav
    if cfg.GENWAV:
        logger.info('reconstructing waveform(s)')
        generate_wav(gen_dir, gen_file_id_list, cfg)     # generated speech
        # generate_wav(nn_cmp_dir, gen_file_id_list, cfg)  # reference copy synthesis speech

    ### setting back to original conditions before calculating objective scores ###
    # test-list mode overrode these lists above; restore the standard splits
    # so the objective evaluation below scores the held-out corpus utterances
    if cfg.GenTestList:
        in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
        binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
        gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
    ### evaluation: RMSE and CORR for duration
    if cfg.CALMCD and cfg.DurationModel:
        logger.info('calculating MCD')

        ref_data_dir = os.path.join(inter_data_dir, 'ref_data')
        ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext)

        in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        calculator = IndividualDistortionComp()

        valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
        test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        if cfg.remove_silence_using_binary_labels:
            # NOTE(review): untrimmed_test_labels, lab_dim (recomputed) and
            # silence_feature are only assigned in the acoustic-eval branch
            # below; taking this path for a duration model likely raises
            # NameError -- verify
            untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
            trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \
                         untrimmed_test_labels, lab_dim, silence_feature)
        else:
            # strip silence frames from reference durations using HTS labels
            remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type, remove_frame_features = cfg.add_frame_features)
            remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list)

        valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
        test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)

        logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
                    %(valid_dur_rmse, valid_dur_corr))
        logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
                    %(test_dur_rmse, test_dur_corr))
    ### evaluation: calculate distortion
    if cfg.CALMCD and cfg.AcousticModel:
        logger.info('calculating MCD')

        ref_data_dir = os.path.join(inter_data_dir, 'ref_data')

        # reference file lists, one per possible vocoder stream
        ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
        # for straight or world vocoders
        ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
        ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
        # for magphase vocoder
        ref_mag_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mag_ext)
        ref_real_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.real_ext)
        ref_imag_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.imag_ext)
        # for GlottDNN vocoder
        ref_lsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lsf_ext)
        ref_slsf_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.slsf_ext)
        ref_gain_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.gain_ext)
        ref_hnr_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.hnr_ext)
        # for pulsemodel vocoder
        ref_pdd_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.pdd_ext)

        in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        calculator = IndividualDistortionComp()

        # defaults; per-stream branches below overwrite the relevant metrics
        spectral_distortion = 0.0
        bap_mse = 0.0
        f0_mse = 0.0
        vuv_error = 0.0

        valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
        test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]

        if cfg.remove_silence_using_binary_labels:
            ## get lab_dim:
            label_composer = LabelComposer()
            label_composer.load_label_configuration(cfg.label_config_file)
            lab_dim=label_composer.compute_label_dimension()

            ## use first feature in label -- hardcoded for now
            silence_feature = 0

            ## Use these to trim silence:
            untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
        if 'mgc' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            elif cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
            else:
                # no silence removal: score against the untrimmed originals
                ref_data_dir = os.path.join(data_dir, 'mgc')
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
            # scale cepstral distance into mel-cepstral distortion (dB)
            valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)    ##MCD
            test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0)    ##MCD

        if 'bap' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            elif cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
            else:
                ref_data_dir = os.path.join(data_dir, 'bap')
            valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
            test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
            valid_bap_mse = valid_bap_mse / 10.0    ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
            test_bap_mse = test_bap_mse / 10.0    ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC

        if 'lf0' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            elif cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
            else:
                # magphase keeps lf0 alongside the other features in 'feats'
                if cfg.vocoder_type == 'MAGPHASE':
                    ref_data_dir = os.path.join(data_dir, 'feats')
                else:
                    ref_data_dir = os.path.join(data_dir, 'lf0')
            valid_f0_mse, valid_f0_corr, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
            test_f0_mse , test_f0_corr, test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
        # magphase vocoder streams: mag / real / imag (MSE reported in dB)
        if 'mag' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.mag_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['mag'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mag_list)
            else:
                ref_data_dir = os.path.join(data_dir, 'feats')
            valid_mag_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mag_ext, cfg.mag_dim)
            test_mag_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mag_ext, cfg.mag_dim)
            valid_mag_mse = 10.0*numpy.log10(valid_mag_mse)
            test_mag_mse = 10.0*numpy.log10(test_mag_mse)

        if 'real' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.real_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['real'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_real_list)
            else:
                ref_data_dir = os.path.join(data_dir, 'feats')
            valid_real_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.real_ext, cfg.real_dim)
            test_real_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.real_ext, cfg.real_dim)
            valid_real_mse = 10.0*numpy.log10(valid_real_mse)
            test_real_mse = 10.0*numpy.log10(test_real_mse)

        if 'imag' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_hts_labels:
                remover = SilenceRemover(n_cmp = cfg.imag_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['imag'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_imag_list)
            else:
                ref_data_dir = os.path.join(data_dir, 'feats')
            valid_imag_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.imag_ext, cfg.imag_dim)
            test_imag_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.imag_ext, cfg.imag_dim)
            valid_imag_mse = 10.0*numpy.log10(valid_imag_mse)
            test_imag_mse = 10.0*numpy.log10(test_imag_mse)
        # GlottDNN (lsf/slsf/gain/hnr) and pulsemodel (pdd) streams.
        # NOTE(review): unlike the mgc/bap branches these have no hts/else
        # split, so when neither silence-removal flag is set ref_data_dir keeps
        # whatever value an earlier branch assigned -- verify
        if 'lsf' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['lsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_lsf_list, cfg.lsf_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.lsf_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['lsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lsf_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lsf_ext, cfg.lsf_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lsf_ext, cfg.lsf_dim)

        if 'slsf' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['slsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_slsf_list, cfg.slsf_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.slsf_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['slsf'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_slsf_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.slsf_ext, cfg.slsf_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.slsf_ext, cfg.slsf_dim)

        if 'hnr' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['hnr'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_hnr_list, cfg.hnr_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.hnr_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['hnr'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_hnr_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.hnr_ext, cfg.hnr_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.hnr_ext, cfg.hnr_dim)

        if 'gain' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['gain'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_gain_list, cfg.gain_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.gain_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['gain'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_gain_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.gain_ext, cfg.gain_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.gain_ext, cfg.gain_dim)

        if 'pdd' in cfg.in_dimension_dict:
            if cfg.remove_silence_using_binary_labels:
                untrimmed_reference_data = in_file_list_dict['pdd'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
                trim_silence(untrimmed_reference_data, ref_pdd_list, cfg.pdd_dim, \
                             untrimmed_test_labels, lab_dim, silence_feature)
            else:
                remover = SilenceRemover(n_cmp = cfg.pdd_dim, silence_pattern = cfg.silence_pattern, label_type=cfg.label_type)
                remover.remove_silence(in_file_list_dict['pdd'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_pdd_list)
            valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.pdd_ext, cfg.pdd_dim)
            test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.pdd_ext, cfg.pdd_dim)
        # report the objective scores for the development and test splits
        if cfg.vocoder_type == 'MAGPHASE':
            logger.info('Develop: DNN -- MAG: %.3f dB; REAL: %.3f dB; IMAG: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                        %(valid_mag_mse, valid_real_mse, valid_imag_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.))
            logger.info('Test : DNN -- MAG: %.3f dB; REAL: %.3f dB; IMAG: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                        %(test_mag_mse, test_real_mse, test_imag_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.))
        else:
            logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                        %(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.))
            logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
                        %(test_spectral_distortion , test_bap_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.))
if __name__ == '__main__':

    # these things should be done even before trying to parse the command line

    # create a configuration instance
    # and get a short name for this instance
    cfg = configuration.cfg

    # set up logging to use our custom class
    logging.setLoggerClass(LoggerPlotter)

    # get a logger for this main function
    logger = logging.getLogger("main")

    if len(sys.argv) != 2:
        logger.critical('usage: run_merlin.sh [config file name]')
        sys.exit(1)

    config_file = sys.argv[1]

    config_file = os.path.abspath(config_file)
    cfg.configure(config_file)

    # log installation / environment details for reproducibility
    logger.info('Installation information:')
    logger.info(' Merlin directory: '+os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
    logger.info(' PATH:')
    env_PATHs = os.getenv('PATH')
    if env_PATHs:
        env_PATHs = env_PATHs.split(':')
        for p in env_PATHs:
            if len(p)>0: logger.info(' '+p)
    logger.info(' LD_LIBRARY_PATH:')
    env_LD_LIBRARY_PATHs = os.getenv('LD_LIBRARY_PATH')
    if env_LD_LIBRARY_PATHs:
        env_LD_LIBRARY_PATHs = env_LD_LIBRARY_PATHs.split(':')
        for p in env_LD_LIBRARY_PATHs:
            if len(p)>0: logger.info(' '+p)
    logger.info(' Python version: '+sys.version.replace('\n',''))
    logger.info(' PYTHONPATH:')
    env_PYTHONPATHs = os.getenv('PYTHONPATH')
    if env_PYTHONPATHs:
        env_PYTHONPATHs = env_PYTHONPATHs.split(':')
        for p in env_PYTHONPATHs:
            if len(p)>0:
                logger.info(' '+p)
    logger.info(' Numpy version: '+numpy.version.version)
    logger.info(' Theano version: '+theano.version.version)
    logger.info(' THEANO_FLAGS: '+os.getenv('THEANO_FLAGS'))
    logger.info(' device: '+theano.config.device)

    # Check for the presence of git
    ret = os.system('git status > /dev/null')
    if ret==0:
        logger.info(' Git is available in the working directory:')
        git_describe = subprocess.Popen(['git', 'describe', '--tags', '--always'], stdout=subprocess.PIPE).communicate()[0][:-1]
        git_branch = subprocess.Popen(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=subprocess.PIPE).communicate()[0][:-1]
        git_diff = subprocess.Popen(['git', 'diff', '--name-status'], stdout=subprocess.PIPE).communicate()[0]
        if sys.version_info.major >= 3:
            # bug fix: subprocess pipes return bytes on Python 3; previously
            # only git_diff was decoded, so git_describe and git_branch were
            # logged as b'...' literals
            git_describe = git_describe.decode('utf-8')
            git_branch = git_branch.decode('utf-8')
            git_diff = git_diff.decode('utf-8')
        logger.info(' Merlin version: {}'.format(git_describe))
        logger.info(' branch: {}'.format(git_branch))
        git_diff = git_diff.replace('\t',' ').split('\n')
        logger.info(' diff to Merlin version:')
        for filediff in git_diff:
            if len(filediff)>0: logger.info(' '+filediff)
        logger.info(' (all diffs logged in '+os.path.basename(cfg.log_file)+'.gitdiff'+')')
        os.system('git diff > '+cfg.log_file+'.gitdiff')

    logger.info('Execution information:')
    logger.info(' HOSTNAME: '+socket.getfqdn())
    logger.info(' USER: '+os.getenv('USER'))
    logger.info(' PID: '+str(os.getpid()))
    PBS_JOBID = os.getenv('PBS_JOBID')
    if PBS_JOBID:
        logger.info(' PBS_JOBID: '+PBS_JOBID)

    if cfg.profile:
        logger.info('profiling is activated')
        import cProfile, pstats
        cProfile.run('main_function(cfg)', 'mainstats')

        # create a stream for the profiler to write to
        profiling_output = io.StringIO()
        p = pstats.Stats('mainstats', stream=profiling_output)

        # print stats to that stream
        # here we just report the top 10 functions, sorted by total amount of time spent in each
        p.strip_dirs().sort_stats('tottime').print_stats(10)

        # print the result to the log
        logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
        profiling_output.close()
        logger.info('---End of profiling result---')
    else:
        main_function(cfg)

    sys.exit(0)
| [
"logging.getLogger",
"numpy.fromfile",
"numpy.log10",
"numpy.sqrt",
"numpy.log",
"multiprocessing.cpu_count",
"frontend.label_composer.LabelComposer",
"numpy.array",
"frontend.parameter_generation.ParameterGeneration",
"frontend.min_max_norm.MinMaxNormalisation",
"run_keras_with_merlin_io.KerasC... | [((4534, 4563), 'logging.getLogger', 'logging.getLogger', (['"""plotting"""'], {}), "('plotting')\n", (4551, 4563), False, 'import logging\n'), ((5721, 5741), 'io_funcs.binary_io.BinaryIOCollection', 'BinaryIOCollection', ([], {}), '()\n', (5739, 5741), False, 'from io_funcs.binary_io import BinaryIOCollection\n'), ((6388, 6423), 'logging.getLogger', 'logging.getLogger', (['"""main.train_DNN"""'], {}), "('main.train_DNN')\n", (6405, 6423), False, 'import logging\n'), ((8483, 8669), 'utils.providers.ListDataProvider', 'ListDataProvider', ([], {'x_file_list': 'train_x_file_list', 'y_file_list': 'train_y_file_list', 'n_ins': 'n_ins', 'n_outs': 'n_outs', 'buffer_size': 'buffer_size', 'sequential': 'sequential_training', 'shuffle': '(True)'}), '(x_file_list=train_x_file_list, y_file_list=\n train_y_file_list, n_ins=n_ins, n_outs=n_outs, buffer_size=buffer_size,\n sequential=sequential_training, shuffle=True)\n', (8499, 8669), False, 'from utils.providers import ListDataProvider\n'), ((8810, 8997), 'utils.providers.ListDataProvider', 'ListDataProvider', ([], {'x_file_list': 'valid_x_file_list', 'y_file_list': 'valid_y_file_list', 'n_ins': 'n_ins', 'n_outs': 'n_outs', 'buffer_size': 'buffer_size', 'sequential': 'sequential_training', 'shuffle': '(False)'}), '(x_file_list=valid_x_file_list, y_file_list=\n valid_y_file_list, n_ins=n_ins, n_outs=n_outs, buffer_size=buffer_size,\n sequential=sequential_training, shuffle=False)\n', (8826, 8997), False, 'from utils.providers import ListDataProvider\n'), ((9849, 9878), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(123)'], {}), '(123)\n', (9873, 9878), False, 'import numpy\n'), ((12928, 12939), 'time.time', 'time.time', ([], {}), '()\n', (12937, 12939), False, 'import time\n'), ((17462, 17473), 'time.time', 'time.time', ([], {}), '()\n', (17471, 17473), False, 'import time\n'), ((18076, 18111), 'logging.getLogger', 'logging.getLogger', (['"""dnn_generation"""'], {}), 
"('dnn_generation')\n", (18093, 18111), False, 'import logging\n'), ((18174, 18203), 'logging.getLogger', 'logging.getLogger', (['"""plotting"""'], {}), "('plotting')\n", (18191, 18203), False, 'import logging\n'), ((19572, 19607), 'logging.getLogger', 'logging.getLogger', (['"""dnn_generation"""'], {}), "('dnn_generation')\n", (19589, 19607), False, 'import logging\n'), ((19670, 19699), 'logging.getLogger', 'logging.getLogger', (['"""plotting"""'], {}), "('plotting')\n", (19687, 19699), False, 'import logging\n'), ((21034, 21091), 'frontend.acoustic_composition.AcousticComposition', 'AcousticComposition', ([], {'delta_win': 'delta_win', 'acc_win': 'acc_win'}), '(delta_win=delta_win, acc_win=acc_win)\n', (21053, 21091), False, 'from frontend.acoustic_composition import AcousticComposition\n'), ((22520, 22534), 'utils.file_paths.FilePaths', 'FilePaths', (['cfg'], {}), '(cfg)\n', (22529, 22534), False, 'from utils.file_paths import FilePaths\n'), ((22591, 22616), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (22608, 22616), False, 'import logging\n'), ((22686, 22715), 'logging.getLogger', 'logging.getLogger', (['"""plotting"""'], {}), "('plotting')\n", (22703, 22715), False, 'import logging\n'), ((24726, 24877), 'frontend.label_normalisation.HTSLabelNormalisation', 'HTSLabelNormalisation', ([], {'question_file_name': 'cfg.question_file_name', 'add_frame_features': 'cfg.add_frame_features', 'subphone_feats': 'cfg.subphone_feats'}), '(question_file_name=cfg.question_file_name,\n add_frame_features=cfg.add_frame_features, subphone_feats=cfg.\n subphone_feats)\n', (24747, 24877), False, 'from frontend.label_normalisation import HTSLabelNormalisation\n'), ((36848, 36999), 'frontend.label_normalisation.HTSLabelNormalisation', 'HTSLabelNormalisation', ([], {'question_file_name': 'cfg.question_file_name', 'add_frame_features': 'cfg.add_frame_features', 'subphone_feats': 'cfg.subphone_feats'}), '(question_file_name=cfg.question_file_name,\n 
add_frame_features=cfg.add_frame_features, subphone_feats=cfg.\n subphone_feats)\n', (36869, 36999), False, 'from frontend.label_normalisation import HTSLabelNormalisation\n'), ((37509, 37545), 'os.path.join', 'os.path.join', (['gen_dir', 'temp_dir_name'], {}), '(gen_dir, temp_dir_name)\n', (37521, 37545), False, 'import os, sys, errno\n'), ((64583, 64620), 'logging.setLoggerClass', 'logging.setLoggerClass', (['LoggerPlotter'], {}), '(LoggerPlotter)\n', (64605, 64620), False, 'import logging\n'), ((64677, 64702), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (64694, 64702), False, 'import logging\n'), ((64869, 64897), 'os.path.abspath', 'os.path.abspath', (['config_file'], {}), '(config_file)\n', (64884, 64897), False, 'import os, sys, errno\n'), ((65145, 65162), 'os.getenv', 'os.getenv', (['"""PATH"""'], {}), "('PATH')\n", (65154, 65162), False, 'import os, sys, errno\n'), ((65364, 65392), 'os.getenv', 'os.getenv', (['"""LD_LIBRARY_PATH"""'], {}), "('LD_LIBRARY_PATH')\n", (65373, 65392), False, 'import os, sys, errno\n'), ((65697, 65720), 'os.getenv', 'os.getenv', (['"""PYTHONPATH"""'], {}), "('PYTHONPATH')\n", (65706, 65720), False, 'import os, sys, errno\n'), ((66181, 66216), 'os.system', 'os.system', (['"""git status > /dev/null"""'], {}), "('git status > /dev/null')\n", (66190, 66216), False, 'import os, sys, errno\n'), ((67440, 67462), 'os.getenv', 'os.getenv', (['"""PBS_JOBID"""'], {}), "('PBS_JOBID')\n", (67449, 67462), False, 'import os, sys, errno\n'), ((68298, 68309), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (68306, 68309), False, 'import os, sys, errno\n'), ((4339, 4369), 'os.path.basename', 'os.path.basename', (['in_file_name'], {}), '(in_file_name)\n', (4355, 4369), False, 'import os, sys, errno\n'), ((5912, 5976), 'numpy.reshape', 'numpy.reshape', (['var_values', '(out_dimension_dict[feature_name], 1)'], {}), '(var_values, (out_dimension_dict[feature_name], 1))\n', (5925, 5976), False, 'import numpy\n'), ((6547, 
6576), 'logging.getLogger', 'logging.getLogger', (['"""plotting"""'], {}), "('plotting')\n", (6564, 6576), False, 'import logging\n'), ((10212, 10512), 'models.deep_rnn.DeepRecurrentNetwork', 'DeepRecurrentNetwork', ([], {'n_in': 'n_ins', 'hidden_layer_size': 'hidden_layer_size', 'n_out': 'n_outs', 'L1_reg': 'l1_reg', 'L2_reg': 'l2_reg', 'hidden_layer_type': 'hidden_layer_type', 'output_type': 'cfg.output_layer_type', 'dropout_rate': 'dropout_rate', 'optimizer': 'cfg.optimizer', 'rnn_batch_training': 'cfg.rnn_batch_training'}), '(n_in=n_ins, hidden_layer_size=hidden_layer_size, n_out\n =n_outs, L1_reg=l1_reg, L2_reg=l2_reg, hidden_layer_type=\n hidden_layer_type, output_type=cfg.output_layer_type, dropout_rate=\n dropout_rate, optimizer=cfg.optimizer, rnn_batch_training=cfg.\n rnn_batch_training)\n', (10232, 10512), False, 'from models.deep_rnn import DeepRecurrentNetwork\n'), ((14140, 14151), 'time.time', 'time.time', ([], {}), '()\n', (14149, 14151), False, 'import time\n'), ((16086, 16115), 'numpy.mean', 'numpy.mean', (['validation_losses'], {}), '(validation_losses)\n', (16096, 16115), False, 'import numpy\n'), ((16212, 16223), 'time.time', 'time.time', ([], {}), '()\n', (16221, 16223), False, 'import time\n'), ((17348, 17380), 'math.isnan', 'math.isnan', (['this_validation_loss'], {}), '(this_validation_loss)\n', (17358, 17380), False, 'import math\n'), ((18507, 18551), 'numpy.fromfile', 'numpy.fromfile', (['fid_lab'], {'dtype': 'numpy.float32'}), '(fid_lab, dtype=numpy.float32)\n', (18521, 18551), False, 'import numpy\n'), ((19162, 19205), 'numpy.array', 'numpy.array', (['predicted_parameter', '"""float32"""'], {}), "(predicted_parameter, 'float32')\n", (19173, 19205), False, 'import numpy\n'), ((19989, 20033), 'numpy.fromfile', 'numpy.fromfile', (['fid_lab'], {'dtype': 'numpy.float32'}), '(fid_lab, dtype=numpy.float32)\n', (20003, 20033), False, 'import numpy\n'), ((20455, 20498), 'numpy.array', 'numpy.array', (['predicted_parameter', '"""float32"""'], {}), 
"(predicted_parameter, 'float32')\n", (20466, 20498), False, 'import numpy\n'), ((21583, 21610), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (21608, 21610), False, 'import multiprocessing\n'), ((21626, 21658), 'multiprocessing.Pool', 'multiprocessing.Pool', (['num_splits'], {}), '(num_splits)\n', (21646, 21658), False, 'import multiprocessing\n'), ((22291, 22348), 'frontend.acoustic_composition.AcousticComposition', 'AcousticComposition', ([], {'delta_win': 'delta_win', 'acc_win': 'acc_win'}), '(delta_win=delta_win, acc_win=acc_win)\n', (22310, 22348), False, 'from frontend.acoustic_composition import AcousticComposition\n'), ((23053, 23078), 'os.makedirs', 'os.makedirs', (['cfg.plot_dir'], {}), '(cfg.plot_dir)\n', (23064, 23078), False, 'import os, sys, errno\n'), ((23238, 23269), 'utils.utils.read_file_list', 'read_file_list', (['cfg.file_id_scp'], {}), '(cfg.file_id_scp)\n', (23252, 23269), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((24120, 24238), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['file_id_list', 'cfg.in_dir_dict[feature_name]', 'cfg.file_extension_dict[feature_name]', '(False)'], {}), '(file_id_list, cfg.in_dir_dict[feature_name], cfg.\n file_extension_dict[feature_name], False)\n', (24142, 24238), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((26053, 26110), 'utils.acous_feat_extraction.acous_feat_extraction', 'acous_feat_extraction', (['cfg.nat_wav_dir', 'file_id_list', 'cfg'], {}), '(cfg.nat_wav_dir, file_id_list, cfg)\n', (26074, 26110), False, 'from utils.acous_feat_extraction import acous_feat_extraction\n'), ((27196, 27378), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'lab_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type', 'remove_frame_features': 'cfg.add_frame_features', 'subphone_feats': 'cfg.subphone_feats'}), '(n_cmp=lab_dim, 
silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type, remove_frame_features=cfg.add_frame_features,\n subphone_feats=cfg.subphone_feats)\n', (27210, 27378), False, 'from frontend.silence_remover import SilenceRemover\n'), ((28453, 28531), 'frontend.min_max_norm.MinMaxNormalisation', 'MinMaxNormalisation', ([], {'feature_dimension': 'lab_dim', 'min_value': '(0.01)', 'max_value': '(0.99)'}), '(feature_dimension=lab_dim, min_value=0.01, max_value=0.99)\n', (28472, 28531), False, 'from frontend.min_max_norm import MinMaxNormalisation\n'), ((29482, 29545), 'numpy.concatenate', 'numpy.concatenate', (['(label_min_vector, label_max_vector)'], {'axis': '(0)'}), '((label_min_vector, label_max_vector), axis=0)\n', (29499, 29545), False, 'import numpy\n'), ((29573, 29612), 'numpy.array', 'numpy.array', (['label_norm_info', '"""float32"""'], {}), "(label_norm_info, 'float32')\n", (29584, 29612), False, 'import numpy\n'), ((38162, 38177), 'run_keras_with_merlin_io.KerasClass', 'KerasClass', (['cfg'], {}), '(cfg)\n', (38172, 38177), False, 'from run_keras_with_merlin_io import KerasClass\n'), ((38605, 38645), 'numpy.fromfile', 'numpy.fromfile', (['fid'], {'dtype': 'numpy.float32'}), '(fid, dtype=numpy.float32)\n', (38619, 38645), False, 'import numpy\n'), ((41522, 41584), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'gen_dir', 'cfg.cmp_ext'], {}), '(gen_file_id_list, gen_dir, cfg.cmp_ext)\n', (41544, 41584), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((42686, 42748), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'gen_dir', 'cfg.cmp_ext'], {}), '(gen_file_id_list, gen_dir, cfg.cmp_ext)\n', (42708, 42748), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((43279, 43319), 'numpy.fromfile', 'numpy.fromfile', (['fid'], {'dtype': 'numpy.float32'}), '(fid, dtype=numpy.float32)\n', (43293, 43319), False, 'import numpy\n'), ((46162, 
46206), 'utils.generate.generate_wav', 'generate_wav', (['gen_dir', 'gen_file_id_list', 'cfg'], {}), '(gen_dir, gen_file_id_list, cfg)\n', (46174, 46206), False, 'from utils.generate import generate_wav\n'), ((46465, 46550), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['file_id_list', 'cfg.in_label_align_dir', 'cfg.lab_ext', '(False)'], {}), '(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False\n )\n', (46487, 46550), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((46581, 46648), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['file_id_list', 'binary_label_dir', 'cfg.lab_ext'], {}), '(file_id_list, binary_label_dir, cfg.lab_ext)\n', (46603, 46648), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((46929, 46969), 'os.path.join', 'os.path.join', (['inter_data_dir', '"""ref_data"""'], {}), "(inter_data_dir, 'ref_data')\n", (46941, 46969), False, 'import os, sys, errno\n'), ((46994, 47061), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.dur_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.dur_ext)\n', (47016, 47061), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((47236, 47262), 'utils.compute_distortion.IndividualDistortionComp', 'IndividualDistortionComp', ([], {}), '()\n', (47260, 47262), False, 'from utils.compute_distortion import DistortionComputation, IndividualDistortionComp\n'), ((48960, 49000), 'os.path.join', 'os.path.join', (['inter_data_dir', '"""ref_data"""'], {}), "(inter_data_dir, 'ref_data')\n", (48972, 49000), False, 'import os, sys, errno\n'), ((49024, 49091), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.lf0_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.lf0_ext)\n', (49046, 49091), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49156, 49223), 
'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.mgc_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.mgc_ext)\n', (49178, 49223), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49247, 49314), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.bap_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.bap_ext)\n', (49269, 49314), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49369, 49436), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.mag_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.mag_ext)\n', (49391, 49436), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49461, 49529), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.real_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.real_ext)\n', (49483, 49529), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49554, 49622), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.imag_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.imag_ext)\n', (49576, 49622), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49677, 49744), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.lsf_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.lsf_ext)\n', (49699, 49744), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49769, 49837), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.slsf_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.slsf_ext)\n', (49791, 49837), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49862, 49930), 
'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.gain_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.gain_ext)\n', (49884, 49930), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((49954, 50021), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.hnr_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.hnr_ext)\n', (49976, 50021), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((50078, 50145), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'ref_data_dir', 'cfg.pdd_ext'], {}), '(gen_file_id_list, ref_data_dir, cfg.pdd_ext)\n', (50100, 50145), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((50320, 50346), 'utils.compute_distortion.IndividualDistortionComp', 'IndividualDistortionComp', ([], {}), '()\n', (50344, 50346), False, 'from utils.compute_distortion import DistortionComputation, IndividualDistortionComp\n'), ((64807, 64818), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (64815, 64818), False, 'import os, sys, errno\n'), ((67193, 67245), 'os.system', 'os.system', (["('git diff > ' + cfg.log_file + '.gitdiff')"], {}), "('git diff > ' + cfg.log_file + '.gitdiff')\n", (67202, 67245), False, 'import os, sys, errno\n'), ((67636, 67683), 'cProfile.run', 'cProfile.run', (['"""main_function(cfg)"""', '"""mainstats"""'], {}), "('main_function(cfg)', 'mainstats')\n", (67648, 67683), False, 'import cProfile, pstats\n'), ((67767, 67780), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (67778, 67780), False, 'import io\n'), ((67793, 67843), 'pstats.Stats', 'pstats.Stats', (['"""mainstats"""'], {'stream': 'profiling_output'}), "('mainstats', stream=profiling_output)\n", (67805, 67843), False, 'import cProfile, pstats\n'), ((6759, 6777), 'numpy.sum', 'numpy.sum', (['ms_outs'], {}), '(ms_outs)\n', (6768, 6777), False, 'import numpy\n'), 
((11355, 11390), 'os.path.isfile', 'os.path.isfile', (['init_dnn_model_file'], {}), '(init_dnn_model_file)\n', (11369, 11390), False, 'import os, sys, errno\n'), ((11404, 11466), 'sys.exit', 'sys.exit', (["('Model file %s does not exist' % init_dnn_model_file)"], {}), "('Model file %s does not exist' % init_dnn_model_file)\n", (11412, 11466), False, 'import os, sys, errno\n'), ((11642, 11714), 'sys.exit', 'sys.exit', (['"""Old and new models have different numbers of weight matrices"""'], {}), "('Old and new models have different numbers of weight matrices')\n", (11650, 11714), False, 'import os, sys, errno\n'), ((16160, 16186), 'numpy.asarray', 'numpy.asarray', (['train_error'], {}), '(train_error)\n', (16173, 16186), False, 'import numpy\n'), ((18778, 18836), 'numpy.reshape', 'numpy.reshape', (['test_set_x', '(1, test_set_x.shape[0], n_ins)'], {}), '(test_set_x, (1, test_set_x.shape[0], n_ins))\n', (18791, 18836), False, 'import numpy\n'), ((18862, 18896), 'numpy.array', 'numpy.array', (['test_set_x', '"""float32"""'], {}), "(test_set_x, 'float32')\n", (18873, 18896), False, 'import numpy\n'), ((20246, 20299), 'numpy.asarray', 'numpy.asarray', (['temp_set_x'], {'dtype': 'theano.config.floatX'}), '(temp_set_x, dtype=theano.config.floatX)\n', (20259, 20299), False, 'import numpy\n'), ((23002, 23030), 'os.path.exists', 'os.path.exists', (['cfg.plot_dir'], {}), '(cfg.plot_dir)\n', (23016, 23030), False, 'import os, sys, errno\n'), ((27555, 27577), 'utils.data_augmentation.DataAugmentation', 'DataAugmentation', (['(7)', '(9)'], {}), '(7, 9)\n', (27571, 27577), False, 'from utils.data_augmentation import DataAugmentation\n'), ((30554, 30615), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['test_id_list', 'nn_cmp_dir', 'cfg.cmp_ext'], {}), '(test_id_list, nn_cmp_dir, cfg.cmp_ext)\n', (30576, 30615), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((30652, 30718), 'utils.utils.prepare_file_path_list', 
'prepare_file_path_list', (['test_id_list', 'nn_cmp_norm_dir', 'cfg.cmp_ext'], {}), '(test_id_list, nn_cmp_norm_dir, cfg.cmp_ext)\n', (30674, 30718), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((30881, 30938), 'frontend.acoustic_composition.AcousticComposition', 'AcousticComposition', ([], {'delta_win': 'delta_win', 'acc_win': 'acc_win'}), '(delta_win=delta_win, acc_win=acc_win)\n', (30900, 30938), False, 'from frontend.acoustic_composition import AcousticComposition\n'), ((31422, 31437), 'frontend.label_composer.LabelComposer', 'LabelComposer', ([], {}), '()\n', (31435, 31437), False, 'from frontend.label_composer import LabelComposer\n'), ((31818, 31933), 'frontend.silence_remover.trim_silence', 'trim_silence', (['nn_cmp_file_list', 'nn_cmp_file_list', 'cfg.cmp_dim', 'binary_label_file_list', 'lab_dim', 'silence_feature'], {}), '(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,\n binary_label_file_list, lab_dim, silence_feature)\n', (31830, 31933), False, 'from frontend.silence_remover import trim_silence\n'), ((32840, 32887), 'frontend.mean_variance_norm.MeanVarianceNorm', 'MeanVarianceNorm', ([], {'feature_dimension': 'cfg.cmp_dim'}), '(feature_dimension=cfg.cmp_dim)\n', (32856, 32887), False, 'from frontend.mean_variance_norm import MeanVarianceNorm\n'), ((34639, 34705), 'numpy.concatenate', 'numpy.concatenate', (['(global_mean_vector, global_std_vector)'], {'axis': '(0)'}), '((global_mean_vector, global_std_vector), axis=0)\n', (34656, 34705), False, 'import numpy\n'), ((35604, 35641), 'numpy.array', 'numpy.array', (['cmp_norm_info', '"""float32"""'], {}), "(cmp_norm_info, 'float32')\n", (35615, 35641), False, 'import numpy\n'), ((38359, 38379), 'run_tensorflow_with_merlin_io.TensorflowClass', 'TensorflowClass', (['cfg'], {}), '(cfg)\n', (38374, 38379), False, 'from run_tensorflow_with_merlin_io import TensorflowClass\n'), ((38830, 38852), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (38841, 38852), 
False, 'import os, sys, errno\n'), ((40897, 40917), 'os.makedirs', 'os.makedirs', (['gen_dir'], {}), '(gen_dir)\n', (40908, 40917), False, 'import os, sys, errno\n'), ((42289, 42309), 'os.makedirs', 'os.makedirs', (['gen_dir'], {}), '(gen_dir)\n', (42300, 42309), False, 'import os, sys, errno\n'), ((43557, 43604), 'frontend.mean_variance_norm.MeanVarianceNorm', 'MeanVarianceNorm', ([], {'feature_dimension': 'cfg.cmp_dim'}), '(feature_dimension=cfg.cmp_dim)\n', (43573, 43604), False, 'from frontend.mean_variance_norm import MeanVarianceNorm\n'), ((44327, 44427), 'frontend.parameter_generation.ParameterGeneration', 'ParameterGeneration', ([], {'gen_wav_features': 'cfg.gen_wav_features', 'enforce_silence': 'cfg.enforce_silence'}), '(gen_wav_features=cfg.gen_wav_features, enforce_silence=\n cfg.enforce_silence)\n', (44346, 44427), False, 'from frontend.parameter_generation import ParameterGeneration\n'), ((45062, 45124), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'gen_dir', 'cfg.dur_ext'], {}), '(gen_file_id_list, gen_dir, cfg.dur_ext)\n', (45084, 45124), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((45154, 45216), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'gen_dir', 'cfg.lab_ext'], {}), '(gen_file_id_list, gen_dir, cfg.lab_ext)\n', (45176, 45216), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((45260, 45349), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['gen_file_id_list', 'cfg.in_label_align_dir', 'cfg.lab_ext', '(False)'], {}), '(gen_file_id_list, cfg.in_label_align_dir, cfg.\n lab_ext, False)\n', (45282, 45349), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((45370, 45428), 'frontend.parameter_generation.ParameterGeneration', 'ParameterGeneration', ([], {'gen_wav_features': 'cfg.gen_wav_features'}), '(gen_wav_features=cfg.gen_wav_features)\n', (45389, 45428), False, 
'from frontend.parameter_generation import ParameterGeneration\n'), ((45863, 45952), 'frontend.label_modifier.HTSLabelModification', 'HTSLabelModification', ([], {'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(silence_pattern=cfg.silence_pattern, label_type=cfg.\n label_type)\n', (45883, 45952), False, 'from frontend.label_modifier import HTSLabelModification\n'), ((47741, 47859), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_dur_list', 'cfg.dur_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_dur_list, cfg.dur_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (47753, 47859), False, 'from frontend.silence_remover import trim_silence\n'), ((47926, 48073), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.dur_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type', 'remove_frame_features': 'cfg.add_frame_features'}), '(n_cmp=cfg.dur_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type, remove_frame_features=cfg.add_frame_features)\n', (47940, 48073), False, 'from frontend.silence_remover import SilenceRemover\n'), ((50855, 50870), 'frontend.label_composer.LabelComposer', 'LabelComposer', ([], {}), '()\n', (50868, 50870), False, 'from frontend.label_composer import LabelComposer\n'), ((65610, 65639), 'sys.version.replace', 'sys.version.replace', (['"""\n"""', '""""""'], {}), "('\\n', '')\n", (65629, 65639), False, 'import os, sys, errno\n'), ((66054, 66079), 'os.getenv', 'os.getenv', (['"""THEANO_FLAGS"""'], {}), "('THEANO_FLAGS')\n", (66063, 66079), False, 'import os, sys, errno\n'), ((67316, 67332), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (67330, 67332), False, 'import socket\n'), ((67361, 67378), 'os.getenv', 'os.getenv', (['"""USER"""'], {}), "('USER')\n", (67370, 67378), False, 'import os, sys, errno\n'), ((4110, 4137), 'os.path.splitext', 
'os.path.splitext', (['file_name'], {}), '(file_name)\n', (4126, 4137), False, 'import os, sys, errno\n'), ((11809, 11896), 'sys.exit', 'sys.exit', (['"""In LHUC adaptation new model must have more parameters than old model."""'], {}), "(\n 'In LHUC adaptation new model must have more parameters than old model.')\n", (11817, 11896), False, 'import os, sys, errno\n'), ((15741, 15800), 'numpy.asarray', 'numpy.asarray', (['temp_valid_set_x'], {'dtype': 'theano.config.floatX'}), '(temp_valid_set_x, dtype=theano.config.floatX)\n', (15754, 15800), False, 'import numpy\n'), ((15849, 15908), 'numpy.asarray', 'numpy.asarray', (['temp_valid_set_y'], {'dtype': 'theano.config.floatX'}), '(temp_valid_set_y, dtype=theano.config.floatX)\n', (15862, 15908), False, 'import numpy\n'), ((26753, 26788), 'os.path.join', 'os.path.join', (['data_dir', 'new_feature'], {}), '(data_dir, new_feature)\n', (26765, 26788), False, 'import os, sys, errno\n'), ((26826, 26895), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['file_id_list', 'new_feat_dir', "('.' + new_feature)"], {}), "(file_id_list, new_feat_dir, '.' 
+ new_feature)\n", (26848, 26895), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((26920, 26971), 'frontend.merge_features.MergeFeat', 'MergeFeat', ([], {'lab_dim': 'in_dim', 'feat_dim': 'new_feature_dim'}), '(lab_dim=in_dim, feat_dim=new_feature_dim)\n', (26929, 26971), False, 'from frontend.merge_features import MergeFeat\n'), ((27852, 27892), 'os.path.join', 'os.path.join', (['file_paths.nn_label_dir', 'f'], {}), '(file_paths.nn_label_dir, f)\n', (27864, 27892), False, 'import os, sys, errno\n'), ((27966, 28011), 'os.path.join', 'os.path.join', (['file_paths.nn_label_norm_dir', 'f'], {}), '(file_paths.nn_label_norm_dir, f)\n', (27978, 28011), False, 'import os, sys, errno\n'), ((28091, 28131), 'os.path.join', 'os.path.join', (['file_paths.nn_label_dir', 'f'], {}), '(file_paths.nn_label_dir, f)\n', (28103, 28131), False, 'import os, sys, errno\n'), ((28216, 28261), 'os.path.join', 'os.path.join', (['file_paths.nn_label_norm_dir', 'f'], {}), '(file_paths.nn_label_norm_dir, f)\n', (28228, 28261), False, 'import os, sys, errno\n'), ((28344, 28387), 'os.path.join', 'os.path.join', (['file_paths.nn_cmp_norm_dir', 'f'], {}), '(file_paths.nn_cmp_norm_dir, f)\n', (28356, 28387), False, 'import os, sys, errno\n'), ((30404, 30522), 'utils.utils.prepare_file_path_list', 'prepare_file_path_list', (['test_id_list', 'cfg.in_dir_dict[feature_name]', 'cfg.file_extension_dict[feature_name]', '(False)'], {}), '(test_id_list, cfg.in_dir_dict[feature_name], cfg.\n file_extension_dict[feature_name], False)\n', (30426, 30522), False, 'from utils.utils import read_file_list, prepare_file_path_list\n'), ((32096, 32282), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.cmp_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type', 'remove_frame_features': 'cfg.add_frame_features', 'subphone_feats': 'cfg.subphone_feats'}), '(n_cmp=cfg.cmp_dim, silence_pattern=cfg.silence_pattern,\n 
label_type=cfg.label_type, remove_frame_features=cfg.add_frame_features,\n subphone_feats=cfg.subphone_feats)\n', (32110, 32282), False, 'from frontend.silence_remover import SilenceRemover\n'), ((34799, 34885), 'frontend.min_max_norm.MinMaxNormalisation', 'MinMaxNormalisation', ([], {'feature_dimension': 'cfg.cmp_dim', 'min_value': '(0.01)', 'max_value': '(0.99)'}), '(feature_dimension=cfg.cmp_dim, min_value=0.01,\n max_value=0.99)\n', (34818, 34885), False, 'from frontend.min_max_norm import MinMaxNormalisation\n'), ((35341, 35400), 'numpy.concatenate', 'numpy.concatenate', (['(cmp_min_vector, cmp_max_vector)'], {'axis': '(0)'}), '((cmp_min_vector, cmp_max_vector), axis=0)\n', (35358, 35400), False, 'import numpy\n'), ((35988, 36105), 'numpy.array', 'numpy.array', (['global_std_vector[:, feature_index:feature_index + cfg.out_dimension_dict[\n feature_name]]', '"""float32"""'], {}), "(global_std_vector[:, feature_index:feature_index + cfg.\n out_dimension_dict[feature_name]], 'float32')\n", (35999, 36105), False, 'import numpy\n'), ((40313, 40324), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (40321, 40324), False, 'import os, sys, errno\n'), ((43805, 43928), 'frontend.min_max_norm.MinMaxNormalisation', 'MinMaxNormalisation', (['cfg.cmp_dim'], {'min_value': '(0.01)', 'max_value': '(0.99)', 'min_vector': 'cmp_min_vector', 'max_vector': 'cmp_max_vector'}), '(cfg.cmp_dim, min_value=0.01, max_value=0.99, min_vector\n =cmp_min_vector, max_vector=cmp_max_vector)\n', (43824, 43928), False, 'from frontend.min_max_norm import MinMaxNormalisation\n'), ((51565, 51683), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_mgc_list', 'cfg.mgc_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (51577, 51683), False, 'from frontend.silence_remover import trim_silence\n'), ((52517, 52532), 'numpy.sqrt', 
'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (52527, 52532), False, 'import numpy\n'), ((52605, 52620), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (52615, 52620), False, 'import numpy\n'), ((52902, 53020), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_bap_list', 'cfg.bap_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_bap_list, cfg.bap_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (52914, 53020), False, 'from frontend.silence_remover import trim_silence\n'), ((54317, 54435), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_lf0_list', 'cfg.lf0_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (54329, 54435), False, 'from frontend.silence_remover import trim_silence\n'), ((55513, 55614), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.mag_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.mag_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (55527, 55614), False, 'from frontend.silence_remover import SilenceRemover\n'), ((55861, 55892), 'os.path.join', 'os.path.join', (['data_dir', '"""feats"""'], {}), "(data_dir, 'feats')\n", (55873, 55892), False, 'import os, sys, errno\n'), ((56180, 56206), 'numpy.log10', 'numpy.log10', (['valid_mag_mse'], {}), '(valid_mag_mse)\n', (56191, 56206), False, 'import numpy\n'), ((56240, 56265), 'numpy.log10', 'numpy.log10', (['test_mag_mse'], {}), '(test_mag_mse)\n', (56251, 56265), False, 'import numpy\n'), ((56389, 56491), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.real_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.real_dim, silence_pattern=cfg.silence_pattern,\n 
label_type=cfg.label_type)\n', (56403, 56491), False, 'from frontend.silence_remover import SilenceRemover\n'), ((56740, 56771), 'os.path.join', 'os.path.join', (['data_dir', '"""feats"""'], {}), "(data_dir, 'feats')\n", (56752, 56771), False, 'import os, sys, errno\n'), ((57065, 57092), 'numpy.log10', 'numpy.log10', (['valid_real_mse'], {}), '(valid_real_mse)\n', (57076, 57092), False, 'import numpy\n'), ((57127, 57153), 'numpy.log10', 'numpy.log10', (['test_real_mse'], {}), '(test_real_mse)\n', (57138, 57153), False, 'import numpy\n'), ((57277, 57379), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.imag_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.imag_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (57291, 57379), False, 'from frontend.silence_remover import SilenceRemover\n'), ((57628, 57659), 'os.path.join', 'os.path.join', (['data_dir', '"""feats"""'], {}), "(data_dir, 'feats')\n", (57640, 57659), False, 'import os, sys, errno\n'), ((57954, 57981), 'numpy.log10', 'numpy.log10', (['valid_imag_mse'], {}), '(valid_imag_mse)\n', (57965, 57981), False, 'import numpy\n'), ((58016, 58042), 'numpy.log10', 'numpy.log10', (['test_imag_mse'], {}), '(test_imag_mse)\n', (58027, 58042), False, 'import numpy\n'), ((58314, 58432), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_lsf_list', 'cfg.lsf_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_lsf_list, cfg.lsf_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (58326, 58432), False, 'from frontend.silence_remover import trim_silence\n'), ((58511, 58612), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.lsf_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.lsf_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', 
(58525, 58612), False, 'from frontend.silence_remover import SilenceRemover\n'), ((59361, 59481), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_slsf_list', 'cfg.slsf_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_slsf_list, cfg.slsf_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (59373, 59481), False, 'from frontend.silence_remover import trim_silence\n'), ((59560, 59662), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.slsf_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.slsf_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (59574, 59662), False, 'from frontend.silence_remover import SilenceRemover\n'), ((60415, 60533), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_hnr_list', 'cfg.hnr_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_hnr_list, cfg.hnr_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (60427, 60533), False, 'from frontend.silence_remover import trim_silence\n'), ((60612, 60713), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.hnr_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.hnr_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (60626, 60713), False, 'from frontend.silence_remover import SilenceRemover\n'), ((61462, 61582), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_gain_list', 'cfg.gain_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_gain_list, cfg.gain_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (61474, 61582), False, 'from frontend.silence_remover import trim_silence\n'), ((61661, 61763), 
'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.gain_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.gain_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (61675, 61763), False, 'from frontend.silence_remover import SilenceRemover\n'), ((62516, 62634), 'frontend.silence_remover.trim_silence', 'trim_silence', (['untrimmed_reference_data', 'ref_pdd_list', 'cfg.pdd_dim', 'untrimmed_test_labels', 'lab_dim', 'silence_feature'], {}), '(untrimmed_reference_data, ref_pdd_list, cfg.pdd_dim,\n untrimmed_test_labels, lab_dim, silence_feature)\n', (62528, 62634), False, 'from frontend.silence_remover import trim_silence\n'), ((62713, 62814), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.pdd_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.pdd_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (62727, 62814), False, 'from frontend.silence_remover import SilenceRemover\n'), ((67410, 67421), 'os.getpid', 'os.getpid', ([], {}), '()\n', (67419, 67421), False, 'import os, sys, errno\n'), ((12413, 12433), 'numpy.shape', 'numpy.shape', (['old_val'], {}), '(old_val)\n', (12424, 12433), False, 'import numpy\n'), ((12437, 12457), 'numpy.shape', 'numpy.shape', (['new_val'], {}), '(new_val)\n', (12448, 12457), False, 'import numpy\n'), ((12560, 12621), 'sys.exit', 'sys.exit', (['"""old and new weight matrices have different shapes"""'], {}), "('old and new weight matrices have different shapes')\n", (12568, 12621), False, 'import os, sys, errno\n'), ((15022, 15130), 'numpy.asarray', 'numpy.asarray', (['temp_train_set_x[index * batch_size:(index + 1) * batch_size]'], {'dtype': 'theano.config.floatX'}), '(temp_train_set_x[index * batch_size:(index + 1) * batch_size],\n dtype=theano.config.floatX)\n', (15035, 15130), False, 'import numpy\n'), ((15175, 15283), 'numpy.asarray', 
'numpy.asarray', (['temp_train_set_y[index * batch_size:(index + 1) * batch_size]'], {'dtype': 'theano.config.floatX'}), '(temp_train_set_y[index * batch_size:(index + 1) * batch_size],\n dtype=theano.config.floatX)\n', (15188, 15283), False, 'import numpy\n'), ((51798, 51899), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.mgc_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.mgc_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (51812, 51899), False, 'from frontend.silence_remover import SilenceRemover\n'), ((52146, 52175), 'os.path.join', 'os.path.join', (['data_dir', '"""mgc"""'], {}), "(data_dir, 'mgc')\n", (52158, 52175), False, 'import os, sys, errno\n'), ((52500, 52513), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (52509, 52513), False, 'import numpy\n'), ((52588, 52601), 'numpy.log', 'numpy.log', (['(10)'], {}), '(10)\n', (52597, 52601), False, 'import numpy\n'), ((53135, 53236), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.bap_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.bap_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (53149, 53236), False, 'from frontend.silence_remover import SilenceRemover\n'), ((53483, 53512), 'os.path.join', 'os.path.join', (['data_dir', '"""bap"""'], {}), "(data_dir, 'bap')\n", (53495, 53512), False, 'import os, sys, errno\n'), ((54550, 54651), 'frontend.silence_remover.SilenceRemover', 'SilenceRemover', ([], {'n_cmp': 'cfg.lf0_dim', 'silence_pattern': 'cfg.silence_pattern', 'label_type': 'cfg.label_type'}), '(n_cmp=cfg.lf0_dim, silence_pattern=cfg.silence_pattern,\n label_type=cfg.label_type)\n', (54564, 54651), False, 'from frontend.silence_remover import SilenceRemover\n'), ((66702, 66776), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'diff', '--name-status']"], {'stdout': 'subprocess.PIPE'}), 
"(['git', 'diff', '--name-status'], stdout=subprocess.PIPE)\n", (66718, 66776), False, 'import subprocess\n'), ((44495, 44543), 'os.path.join', 'os.path.join', (['cfg.singing_inter_data_dir', '"""meta"""'], {}), "(cfg.singing_inter_data_dir, 'meta')\n", (44507, 44543), False, 'import os, sys, errno\n'), ((45499, 45547), 'os.path.join', 'os.path.join', (['cfg.singing_inter_data_dir', '"""meta"""'], {}), "(cfg.singing_inter_data_dir, 'meta')\n", (45511, 45547), False, 'import os, sys, errno\n'), ((54953, 54984), 'os.path.join', 'os.path.join', (['data_dir', '"""feats"""'], {}), "(data_dir, 'feats')\n", (54965, 54984), False, 'import os, sys, errno\n'), ((55042, 55071), 'os.path.join', 'os.path.join', (['data_dir', '"""lf0"""'], {}), "(data_dir, 'lf0')\n", (55054, 55071), False, 'import os, sys, errno\n'), ((65060, 65086), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (65076, 65086), False, 'import os, sys, errno\n'), ((66323, 66411), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'describe', '--tags', '--always']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'describe', '--tags', '--always'], stdout=\n subprocess.PIPE)\n", (66339, 66411), False, 'import subprocess\n'), ((66517, 66608), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'rev-parse', '--abbrev-ref', 'HEAD']"], {'stdout': 'subprocess.PIPE'}), "(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stdout=\n subprocess.PIPE)\n", (66533, 66608), False, 'import subprocess\n'), ((67138, 67168), 'os.path.basename', 'os.path.basename', (['cfg.log_file'], {}), '(cfg.log_file)\n', (67154, 67168), False, 'import os, sys, errno\n')] |
# Imports
import torch
from itertools import count
from torch.autograd import Variable
from utils import *
import random
import numpy as np
# True when a CUDA-capable GPU is present; used below to move batches to the GPU.
USE_CUDA = torch.cuda.is_available()
# Default float tensor type: CUDA tensor when a GPU is available, CPU otherwise.
dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
# Device on which the Q networks and tensors are allocated.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def deep_Q_learning(env, optimizer_spec, exploration_params, replay_buffer_size=100000,
                    start_learning=50000, batch_size=128, gamma=0.99, target_update_freq=10000,
                    save_fig=True, save_model=False):
    """
    Implementation of the DQN learning procedure with a prioritized replay buffer.

    :param env: gym environment
    :param optimizer_spec: optimizer constructor and its params (fields: constructor, kwargs)
    :param exploration_params: dict. with final epsilon ("final_eps") and num. of time steps
        until final epsilon ("timesteps")
    :param replay_buffer_size: size of replay memory
    :param start_learning: num. iterations before start learning (filling the buffer)
    :param batch_size: batch size for optimization steps
    :param gamma: discount factor of MDP
    :param target_update_freq: num. of iterations between target network update
    :param save_fig: flag for saving plots
    :param save_model: flag for saving optimal weights of the net at the end of training session
    Algorithm saves a trained network
    """
    def select_epsilon_greedy_action(model, state, exploration_params, t):
        """
        Epsilon-greedy action selection with a linearly annealed epsilon.

        :param model: Q network
        :param state: current state of env - in 3D image difference
        :param exploration_params: final epsilon and num. timesteps until final epsilon
        :param t: current timestep
        :return: (action, epsilon) - action chosen by an epsilon greedy policy and
            the epsilon value used at this timestep
        """
        # Compute current epsilon: anneals linearly from 1.0 down to
        # exploration_params["final_eps"] over exploration_params["timesteps"] steps.
        fraction = min(1.0, float(t) /exploration_params["timesteps"])
        epsilon = 1 + fraction * (exploration_params["final_eps"] - 1)
        num_actions = model.head.out_features  # output size of Q network is as action space
        sample = random.random()
        if sample <= epsilon:
            # Explore: uniform random action.
            return random.randrange(num_actions), epsilon
        else:
            # Exploit: greedy action w.r.t. the current Q estimates.
            return int(model(Variable(state)).data.argmax()), epsilon

    num_actions = env.action_space.n
    # Initialize network and target network
    Q = DQN(num_actions).to(device)
    Q_target = DQN(num_actions).to(device)
    # Construct optimizer
    optimizer = optimizer_spec.constructor(Q.parameters(), **optimizer_spec.kwargs)
    # Construct the replay buffer
    replay_buffer = PriorReplayMemory(replay_buffer_size)
    # Initialize episodic reward list and running statistics
    episodic_rewards = []
    avg_episodic_rewards = []
    stdev_episodic_rewards = []
    best_avg_episodic_reward = -np.inf
    acc_episodic_reward = 0.0

    num_param_updates = 0
    episodes_passed = 0
    stopping_counter = 0
    _ = env.reset()
    current_screen = get_screen(env)
    # NOTE(review): the very first state is the raw screen (not a frame
    # difference like later states) - confirm this is intentional.
    state = current_screen

    for t in count():
        # Stop if last average accumulated episodic reward over 10 episodes is above -150.
        # NOTE(review): the counter is incremented per environment step (not per
        # episode), so 11 consecutive steps above -115 trigger the stop.
        if len(avg_episodic_rewards) > 0:
            if avg_episodic_rewards[-1] > -115:
                stopping_counter += 1
                if stopping_counter >= 11:
                    if save_model:
                        torch.save(Q, 'stable_trained_Acrobot_model_v4')
                    break
            else:
                stopping_counter = 0
        # Choose random action if not yet start learning
        if t > start_learning:
            action, eps_val = select_epsilon_greedy_action(Q, state, exploration_params, t)
        else:
            action = random.randrange(num_actions)
            eps_val = 1.0
        # Advance one step
        _, reward, done, _ = env.step(action)
        last_screen = current_screen
        current_screen = get_screen(env)
        # Subsequent states are frame differences to expose motion.
        next_state = current_screen - last_screen

        # Construct priority for the current sample
        # Q value for state-action pair that were taken
        current_Q_value = Q(state)[0][action]
        # Best Q value from next state - using Q_target as estimator
        next_Q_value = Q_target(next_state).detach().max(1)[0]
        # Compute estimated Q values (based on Q_target)
        target_Q_value = reward + (gamma * next_Q_value)
        # Compute Bellman error (used as the sample's priority magnitude below)
        bellman_error = target_Q_value - current_Q_value.squeeze()
        # document accumulated reward
        acc_episodic_reward = acc_episodic_reward + reward

        # Save and insert transition to replay buffer, prioritized by |TD error|
        transition = Transition(state=state, action=action, reward=reward, next_state=next_state, done=int(done))
        replay_buffer.insert(transition, np.abs(bellman_error.data))

        # Resets the environment when reaching an episode boundary.
        if done:
            # Resets the environment when finishing an episode
            _ = env.reset()
            current_screen = get_screen(env)
            next_state = current_screen

            # Document statistics
            episodic_rewards.append(acc_episodic_reward)
            acc_episodic_reward = 0.0
            episodes_passed += 1

            # Compute average reward and variance (standard deviation) over a
            # sliding window of the last 10 episodes.
            if len(episodic_rewards) <= 10:
                avg_episodic_rewards.append(np.mean(np.array(episodic_rewards)))
                if len(episodic_rewards) >= 2:
                    stdev_episodic_rewards.append(np.std(np.array(episodic_rewards)))
            else:
                avg_episodic_rewards.append(np.mean(np.array(episodic_rewards[-10:])))
                stdev_episodic_rewards.append(np.std(np.array(episodic_rewards[-10:])))

            # Check if average acc. reward has improved; snapshot the model if so.
            if avg_episodic_rewards[-1] > best_avg_episodic_reward:
                best_avg_episodic_reward = avg_episodic_rewards[-1]
                if save_model:
                    torch.save(Q, 'trained_DQN_model')

            # Update plot of acc. rewards every 20 episodes and print
            # training details
            if episodes_passed % 20 == 0:
                plot_rewards(np.array(episodic_rewards), np.array(avg_episodic_rewards),
                             np.array(stdev_episodic_rewards), save_fig)
                print('Episode {}\tAvg. Reward: {:.2f}\tEpsilon: {:.4f}\t'.format(
                    episodes_passed, avg_episodic_rewards[-1], eps_val))
                print('Best avg. episodic reward:', best_avg_episodic_reward)

        state = next_state
        # Perform experience replay and train the network.
        if t > start_learning and replay_buffer.can_sample(batch_size):
            # Sample from experience buffer (prioritized; is_weight holds the
            # importance-sampling weights, idxs_batch the buffer positions)
            state_batch, action_batch, reward_batch, next_state_batch, done_mask, idxs_batch, is_weight = \
                replay_buffer.sample(batch_size)
            # Convert numpy nd_array to torch variables for calculation
            state_batch = torch.cat(state_batch)
            action_batch = Variable(torch.tensor(action_batch).long())
            reward_batch = Variable(torch.tensor(reward_batch, device=device)).type(dtype)
            next_state_batch = torch.cat(next_state_batch)
            # not_done_mask is 1 for non-terminal transitions, 0 for terminal ones
            not_done_mask = Variable(1 - torch.tensor(done_mask)).type(dtype)
            is_weight = Variable(torch.tensor(is_weight)).type(dtype)
            # Case GPU is available
            if USE_CUDA:
                action_batch = action_batch.cuda()
                reward_batch = reward_batch.cuda()
            # Q values for state-action pair that were taken
            current_Q_values = Q(state_batch).gather(1, action_batch.unsqueeze(1)).squeeze()
            # Best Q values from next state - using Q_target as estimator
            Q_max_next_state = Q_target(next_state_batch).detach().max(1)[0]
            # Update only when episode not terminated
            next_Q_values = not_done_mask * Q_max_next_state
            # Compute estimated Q values (based on Q_target)
            target_Q_values = reward_batch + (gamma * next_Q_values)
            # Compute TD error, weighted by the importance-sampling weights
            loss = (current_Q_values - target_Q_values.detach()).pow(2) * is_weight
            # Small constant keeps every sample's priority strictly positive
            prios = loss + 1e-5
            loss = loss.mean()
            # Clear previous gradients before backward pass
            optimizer.zero_grad()
            # Run backward pass
            loss.backward()
            # update priority of each sampled transition with its new TD error
            for i in range(batch_size):
                idx = idxs_batch[i]
                replay_buffer.update(idx, prios[i].data.cpu().numpy())
            # Perform the update
            optimizer.step()
            num_param_updates += 1

            # Periodically update the target network by Q network to Q_target network
            if num_param_updates % target_update_freq == 0:
                Q_target.load_state_dict(Q.state_dict())
| [
"numpy.abs",
"random.randrange",
"numpy.array",
"torch.tensor",
"torch.cuda.is_available",
"itertools.count",
"torch.save",
"random.random",
"torch.autograd.Variable",
"torch.cat"
] | [((154, 179), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (177, 179), False, 'import torch\n'), ((214, 239), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (237, 239), False, 'import torch\n'), ((3088, 3095), 'itertools.count', 'count', ([], {}), '()\n', (3093, 3095), False, 'from itertools import count\n'), ((295, 320), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (318, 320), False, 'import torch\n'), ((2163, 2178), 'random.random', 'random.random', ([], {}), '()\n', (2176, 2178), False, 'import random\n'), ((3763, 3792), 'random.randrange', 'random.randrange', (['num_actions'], {}), '(num_actions)\n', (3779, 3792), False, 'import random\n'), ((4828, 4854), 'numpy.abs', 'np.abs', (['bellman_error.data'], {}), '(bellman_error.data)\n', (4834, 4854), True, 'import numpy as np\n'), ((7083, 7105), 'torch.cat', 'torch.cat', (['state_batch'], {}), '(state_batch)\n', (7092, 7105), False, 'import torch\n'), ((7299, 7326), 'torch.cat', 'torch.cat', (['next_state_batch'], {}), '(next_state_batch)\n', (7308, 7326), False, 'import torch\n'), ((2228, 2257), 'random.randrange', 'random.randrange', (['num_actions'], {}), '(num_actions)\n', (2244, 2257), False, 'import random\n'), ((6049, 6083), 'torch.save', 'torch.save', (['Q', '"""trained_DQN_model"""'], {}), "(Q, 'trained_DQN_model')\n", (6059, 6083), False, 'import torch\n'), ((6257, 6283), 'numpy.array', 'np.array', (['episodic_rewards'], {}), '(episodic_rewards)\n', (6265, 6283), True, 'import numpy as np\n'), ((6285, 6315), 'numpy.array', 'np.array', (['avg_episodic_rewards'], {}), '(avg_episodic_rewards)\n', (6293, 6315), True, 'import numpy as np\n'), ((6346, 6378), 'numpy.array', 'np.array', (['stdev_episodic_rewards'], {}), '(stdev_episodic_rewards)\n', (6354, 6378), True, 'import numpy as np\n'), ((3417, 3465), 'torch.save', 'torch.save', (['Q', '"""stable_trained_Acrobot_model_v4"""'], {}), "(Q, 'stable_trained_Acrobot_model_v4')\n", 
(3427, 3465), False, 'import torch\n'), ((5449, 5475), 'numpy.array', 'np.array', (['episodic_rewards'], {}), '(episodic_rewards)\n', (5457, 5475), True, 'import numpy as np\n'), ((5682, 5714), 'numpy.array', 'np.array', (['episodic_rewards[-10:]'], {}), '(episodic_rewards[-10:])\n', (5690, 5714), True, 'import numpy as np\n'), ((5770, 5802), 'numpy.array', 'np.array', (['episodic_rewards[-10:]'], {}), '(episodic_rewards[-10:])\n', (5778, 5802), True, 'import numpy as np\n'), ((7142, 7168), 'torch.tensor', 'torch.tensor', (['action_batch'], {}), '(action_batch)\n', (7154, 7168), False, 'import torch\n'), ((7213, 7254), 'torch.tensor', 'torch.tensor', (['reward_batch'], {'device': 'device'}), '(reward_batch, device=device)\n', (7225, 7254), False, 'import torch\n'), ((7438, 7461), 'torch.tensor', 'torch.tensor', (['is_weight'], {}), '(is_weight)\n', (7450, 7461), False, 'import torch\n'), ((5582, 5608), 'numpy.array', 'np.array', (['episodic_rewards'], {}), '(episodic_rewards)\n', (5590, 5608), True, 'import numpy as np\n'), ((7368, 7391), 'torch.tensor', 'torch.tensor', (['done_mask'], {}), '(done_mask)\n', (7380, 7391), False, 'import torch\n'), ((2310, 2325), 'torch.autograd.Variable', 'Variable', (['state'], {}), '(state)\n', (2318, 2325), False, 'from torch.autograd import Variable\n')] |
"""Testing utilities for the MNE BIDS converter."""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
# This is here to handle mne-python <0.20
import warnings
from datetime import datetime
from pathlib import Path
import pytest
from numpy.random import random
with warnings.catch_warnings():
warnings.filterwarnings(action='ignore',
message="can't resolve package",
category=ImportWarning)
import mne
from mne_bids import BIDSPath
from mne_bids.utils import (_check_types, _age_on_date,
_infer_eeg_placement_scheme, _handle_datatype,
_get_ch_type_mapping)
from mne_bids.path import _path_to_str
# Directory containing mne's IO test data (sample recordings used below).
base_path = op.join(op.dirname(mne.__file__), 'io')

# Shared BIDS entities used to build a template path for the tests in this module.
subject_id = '01'
session_id = '01'
run = '01'
acq = None
task = 'testing'

# Template BIDSPath assembled from the entities above.
bids_path = BIDSPath(
    subject=subject_id, session=session_id, run=run, acquisition=acq,
    task=task)
def test_get_ch_type_mapping():
    """Ensure an unknown mapping request raises a ValueError."""
    expected_msg = 'specified from "bogus" to "mne"'
    with pytest.raises(ValueError, match=expected_msg):
        _get_ch_type_mapping(fro='bogus', to='mne')
def test_handle_datatype():
    """Check that the datatype is inferred correctly from channel types."""
    n_ch = 1
    sfreq = 100
    signal = random((n_ch, sfreq))
    # Each (channel type, expected datatype) pair must be recognized.
    for ch_type, expected in [('grad', 'meg'), ('eeg', 'eeg'), ('ecog', 'ieeg')]:
        raw = mne.io.RawArray(
            signal, mne.create_info(n_ch, sfreq, ch_types=[ch_type]))
        assert _handle_datatype(raw) == expected
    # Mixing EEG and iEEG channels is ambiguous and must raise.
    with pytest.raises(ValueError, match='Both EEG and iEEG channels found'):
        mixed_info = mne.create_info(2, sfreq, ch_types=['eeg', 'ecog'])
        mixed_raw = mne.io.RawArray(random((2, sfreq)), mixed_info)
        _handle_datatype(mixed_raw)
    # A recording without MEG/EEG/iEEG channels must raise as well.
    with pytest.raises(ValueError, match='Neither MEG/EEG/iEEG channels'):
        misc_info = mne.create_info(n_ch, sfreq, ch_types=['misc'])
        misc_raw = mne.io.RawArray(signal, misc_info)
        _handle_datatype(misc_raw)
def test_check_types():
    """Verify that only str or None values pass the type check."""
    assert _check_types(['foo', 'bar', None]) is None
    bad_values = [None, 1, 3.14, 'meg', [1, 2]]
    with pytest.raises(ValueError):
        _check_types(bad_values)
def test_path_to_str():
    """Check that _path_to_str always yields a plain string."""
    name = 'foo'
    assert _path_to_str(name) == name
    assert _path_to_str(Path(name)) == name
    # Anything that is neither a str nor a Path must be rejected.
    with pytest.raises(ValueError):
        _path_to_str(1)
def test_age_on_date():
    """Check the age computation around a birthday boundary."""
    birthday = datetime(1994, 1, 26)
    # Day before, on, and after the 24th birthday.
    assert _age_on_date(birthday, datetime(2018, 1, 25)) == 23
    assert _age_on_date(birthday, datetime(2018, 1, 26)) == 24
    assert _age_on_date(birthday, datetime(2018, 1, 27)) == 24
    # An experiment date before the birth date is invalid.
    with pytest.raises(ValueError):
        _age_on_date(birthday, datetime(1990, 1, 1))
def test_infer_eeg_placement_scheme():
    """Check EEG placement scheme inference on several recordings."""
    # MEG-only recording (BTi): no EEG channels at all -> 'n/a'.
    bti_dir = op.join(base_path, 'bti', 'tests', 'data')
    raw = mne.io.read_raw_bti(op.join(bti_dir, 'test_pdf_linux'),
                              op.join(bti_dir, 'test_config_linux'),
                              op.join(bti_dir, 'test_hs_linux'))
    assert _infer_eeg_placement_scheme(raw) == 'n/a'
    # BrainVision recording with standard 10/20 electrode names.
    bv_dir = op.join(base_path, 'brainvision', 'tests', 'data')
    raw = mne.io.read_raw_brainvision(op.join(bv_dir, 'test.vhdr'))
    assert _infer_eeg_placement_scheme(raw) == 'based on the extended 10/20 system'
    # Renaming one electrode makes the montage unrecognizable again.
    raw.rename_channels({'P3': 'foo'})
    assert _infer_eeg_placement_scheme(raw) == 'n/a'
| [
"mne.io.read_raw_bti",
"mne_bids.utils._get_ch_type_mapping",
"mne.io.RawArray",
"datetime.datetime",
"mne_bids.BIDSPath",
"numpy.random.random",
"pathlib.Path",
"mne_bids.path._path_to_str",
"mne_bids.utils._age_on_date",
"mne.io.read_raw_brainvision",
"mne_bids.utils._check_types",
"os.path.... | [((943, 1032), 'mne_bids.BIDSPath', 'BIDSPath', ([], {'subject': 'subject_id', 'session': 'session_id', 'run': 'run', 'acquisition': 'acq', 'task': 'task'}), '(subject=subject_id, session=session_id, run=run, acquisition=acq,\n task=task)\n', (951, 1032), False, 'from mne_bids import BIDSPath\n'), ((351, 376), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (374, 376), False, 'import warnings\n'), ((382, 483), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'message': '"""can\'t resolve package"""', 'category': 'ImportWarning'}), '(action=\'ignore\', message="can\'t resolve package",\n category=ImportWarning)\n', (405, 483), False, 'import warnings\n'), ((823, 847), 'os.path.dirname', 'op.dirname', (['mne.__file__'], {}), '(mne.__file__)\n', (833, 847), True, 'import os.path as op\n'), ((1427, 1462), 'numpy.random.random', 'random', (['(n_channels, sampling_rate)'], {}), '((n_channels, sampling_rate))\n', (1433, 1462), False, 'from numpy.random import random\n'), ((3072, 3093), 'datetime.datetime', 'datetime', (['(1994)', '(1)', '(26)'], {}), '(1994, 1, 26)\n', (3080, 3093), False, 'from datetime import datetime\n'), ((3105, 3126), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(25)'], {}), '(2018, 1, 25)\n', (3113, 3126), False, 'from datetime import datetime\n'), ((3138, 3159), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(26)'], {}), '(2018, 1, 26)\n', (3146, 3159), False, 'from datetime import datetime\n'), ((3171, 3192), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(27)'], {}), '(2018, 1, 27)\n', (3179, 3192), False, 'from datetime import datetime\n'), ((3204, 3224), 'datetime.datetime', 'datetime', (['(1990)', '(1)', '(1)'], {}), '(1990, 1, 1)\n', (3212, 3224), False, 'from datetime import datetime\n'), ((3578, 3620), 'os.path.join', 'op.join', (['base_path', '"""bti"""', '"""tests"""', '"""data"""'], {}), "(base_path, 'bti', 'tests', 'data')\n", (3585, 3620), 
True, 'import os.path as op\n'), ((3637, 3673), 'os.path.join', 'op.join', (['data_path', '"""test_pdf_linux"""'], {}), "(data_path, 'test_pdf_linux')\n", (3644, 3673), True, 'import os.path as op\n'), ((3693, 3732), 'os.path.join', 'op.join', (['data_path', '"""test_config_linux"""'], {}), "(data_path, 'test_config_linux')\n", (3700, 3732), True, 'import os.path as op\n'), ((3755, 3790), 'os.path.join', 'op.join', (['data_path', '"""test_hs_linux"""'], {}), "(data_path, 'test_hs_linux')\n", (3762, 3790), True, 'import os.path as op\n'), ((3801, 3862), 'mne.io.read_raw_bti', 'mne.io.read_raw_bti', (['raw_fname', 'config_fname', 'headshape_fname'], {}), '(raw_fname, config_fname, headshape_fname)\n', (3820, 3862), False, 'import mne\n'), ((3886, 3918), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (3913, 3918), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((3989, 4039), 'os.path.join', 'op.join', (['base_path', '"""brainvision"""', '"""tests"""', '"""data"""'], {}), "(base_path, 'brainvision', 'tests', 'data')\n", (3996, 4039), True, 'import os.path as op\n'), ((4056, 4087), 'os.path.join', 'op.join', (['data_path', '"""test.vhdr"""'], {}), "(data_path, 'test.vhdr')\n", (4063, 4087), True, 'import os.path as op\n'), ((4098, 4136), 'mne.io.read_raw_brainvision', 'mne.io.read_raw_brainvision', (['raw_fname'], {}), '(raw_fname)\n', (4125, 4136), False, 'import mne\n'), ((4160, 4192), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (4187, 4192), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((4388, 4420), 'mne_bids.utils._infer_eeg_placement_scheme', '_infer_eeg_placement_scheme', (['raw'], {}), '(raw)\n', (4415, 4420), False, 'from mne_bids.utils import _check_types, _age_on_date, 
_infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((1131, 1197), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""specified from "bogus" to "mne\\""""'}), '(ValueError, match=\'specified from "bogus" to "mne"\')\n', (1144, 1197), False, 'import pytest\n'), ((1207, 1250), 'mne_bids.utils._get_ch_type_mapping', '_get_ch_type_mapping', ([], {'fro': '"""bogus"""', 'to': '"""mne"""'}), "(fro='bogus', to='mne')\n", (1227, 1250), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((1698, 1759), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': '[chtype]'}), '(n_channels, sampling_rate, ch_types=[chtype])\n', (1713, 1759), False, 'import mne\n'), ((1774, 1801), 'mne.io.RawArray', 'mne.io.RawArray', (['data', 'info'], {}), '(data, info)\n', (1789, 1801), False, 'import mne\n'), ((1939, 2006), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Both EEG and iEEG channels found"""'}), "(ValueError, match='Both EEG and iEEG channels found')\n", (1952, 2006), False, 'import pytest\n'), ((2023, 2082), 'mne.create_info', 'mne.create_info', (['(2)', 'sampling_rate'], {'ch_types': "['eeg', 'ecog']"}), "(2, sampling_rate, ch_types=['eeg', 'ecog'])\n", (2038, 2082), False, 'import mne\n'), ((2186, 2207), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw'], {}), '(raw)\n', (2202, 2207), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((2283, 2347), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Neither MEG/EEG/iEEG channels"""'}), "(ValueError, match='Neither MEG/EEG/iEEG channels')\n", (2296, 2347), False, 'import pytest\n'), ((2364, 2425), 'mne.create_info', 'mne.create_info', (['n_channels', 'sampling_rate'], {'ch_types': "['misc']"}), "(n_channels, sampling_rate, ch_types=['misc'])\n", (2379, 
2425), False, 'import mne\n'), ((2440, 2467), 'mne.io.RawArray', 'mne.io.RawArray', (['data', 'info'], {}), '(data, info)\n', (2455, 2467), False, 'import mne\n'), ((2476, 2497), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw'], {}), '(raw)\n', (2492, 2497), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((2590, 2624), 'mne_bids.utils._check_types', '_check_types', (["['foo', 'bar', None]"], {}), "(['foo', 'bar', None])\n", (2602, 2624), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((2642, 2667), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2655, 2667), False, 'import pytest\n'), ((2677, 2721), 'mne_bids.utils._check_types', '_check_types', (["[None, 1, 3.14, 'meg', [1, 2]]"], {}), "([None, 1, 3.14, 'meg', [1, 2]])\n", (2689, 2721), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((2831, 2853), 'mne_bids.path._path_to_str', '_path_to_str', (['path_str'], {}), '(path_str)\n', (2843, 2853), False, 'from mne_bids.path import _path_to_str\n'), ((2928, 2953), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2941, 2953), False, 'import pytest\n'), ((2963, 2978), 'mne_bids.path._path_to_str', '_path_to_str', (['(1)'], {}), '(1)\n', (2975, 2978), False, 'from mne_bids.path import _path_to_str\n'), ((3236, 3260), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp1'], {}), '(bday, exp1)\n', (3248, 3260), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((3278, 3302), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp2'], {}), '(bday, exp2)\n', (3290, 3302), False, 'from mne_bids.utils import _check_types, _age_on_date, 
_infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((3320, 3344), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp3'], {}), '(bday, exp3)\n', (3332, 3344), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((3360, 3385), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3373, 3385), False, 'import pytest\n'), ((3395, 3419), 'mne_bids.utils._age_on_date', '_age_on_date', (['bday', 'exp4'], {}), '(bday, exp4)\n', (3407, 3419), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((1817, 1838), 'mne_bids.utils._handle_datatype', '_handle_datatype', (['raw'], {}), '(raw)\n', (1833, 1838), False, 'from mne_bids.utils import _check_types, _age_on_date, _infer_eeg_placement_scheme, _handle_datatype, _get_ch_type_mapping\n'), ((2144, 2170), 'numpy.random.random', 'random', (['(2, sampling_rate)'], {}), '((2, sampling_rate))\n', (2150, 2170), False, 'from numpy.random import random\n'), ((2890, 2904), 'pathlib.Path', 'Path', (['path_str'], {}), '(path_str)\n', (2894, 2904), False, 'from pathlib import Path\n')] |
import os
import json
import numpy as np
from experiment_handler.time_synchronisation import convert_timestamps
from experiment_handler.finder import find_all_imu_files
def load_imu_file(filepath):
    """Read an IMU log file and return its content split into lines.

    On a decode failure the error is printed and an empty list is returned.
    """
    with open(filepath, 'r') as log_file:
        try:
            raw_text = log_file.read()
        except UnicodeDecodeError as err:
            print(err)
            return []
    return raw_text.split("\n")
def _get_beacon_id(ble_data, use_uuid=True):
if use_uuid:
return ble_data['uuid'] + "-" + str(ble_data['major']) + "-" + str(ble_data['minor'])
else:
return ble_data['macAdd']
def find_and_categorize_beacon_ids(experiments, threshold=45, save_ids=True):
    """
    Count bluetooth beacon id detections over the given experiments and split
    the ids into two groups.

    Observation showed that some beacon ids randomly appear only a few times
    during the experiments, so ids seen often are considered static while
    rarely seen ids are considered changing.

    Parameters
    ----------
    experiments: list of str
        List of pathes to the experiment roots
    threshold: int
        Ids with more detections than this are static; ids with this many or
        fewer detections are changing
    save_ids: boolean
        If True the categorized ids are written to 'beacon_ids.json'

    Returns
    -------
    static_ids: list of str
        IDs with more detection than the threshold
    changing_ids: list of str
        IDs with less or equal detection than the threshold
    ids_detection_counts: dict
        Dictionary with id as key and count of detection as value
    """
    # Tally every beacon detection across all experiments.
    ids_detection_counts = {}
    for experiment_root in experiments:
        for imu_file in find_all_imu_files(experiment_root):
            for line in load_imu_file(imu_file):
                try:
                    entry = json.loads(line)
                except json.decoder.JSONDecodeError:
                    continue
                if entry['type'] != 'ble':
                    continue
                for beacon in entry['beacons']:
                    beacon_id = _get_beacon_id(beacon)
                    ids_detection_counts[beacon_id] = ids_detection_counts.get(beacon_id, 0) + 1

    # Separate the ids by detection count.
    static_ids = []
    changing_ids = []
    for beacon_id, n_detections in ids_detection_counts.items():
        (static_ids if n_detections > threshold else changing_ids).append(beacon_id)

    # Persist the categorization next to the current working directory.
    if save_ids:
        sorted_ids = {
            "static_ids": static_ids,
            "changing_ids": changing_ids
        }
        with open('beacon_ids.json', 'w') as fp:
            json.dump(sorted_ids, fp)

    return static_ids, changing_ids, ids_detection_counts
def load_beacon_ids():
    """Load the categorized beacon ids stored next to this module.

    Returns the dict written by find_and_categorize_beacon_ids with the keys
    "static_ids" and "changing_ids".
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    ids_file = os.path.join(module_dir, 'beacon_ids.json')
    with open(ids_file, 'r') as fp:
        return json.load(fp)
def get_ble_data(experiment_path, source, start=None, end=None, reference_time=None, convert_time=True):
    """
    Read beacon data for a given source (e.g. P3 left hand) in a time interval

    Parameters
    ----------
    experiment_path: str
        Root of the experiment (e.g. /data/igroups/experiment_8)
    source: str
        Name of the IMU file without extension e.g. P3_imu_right
    start: float
        Return values from this timestamp (if reference_time is set, the value is interpreted as time on that channel)
    end: float
        Return values until this timestamp (if reference_time is set, the value is interpreted as time on that channel)
    reference_time: str
        Use this signal channel's time for reference (convert start and end values to correspond with IMU time)
    convert_time: bool
        If set the returned array will contain timestamp in reference_time's values

    Returns
    -------
    parsed_data_rssi: numpy array
        beacon's rssi data with columns order: <timestamp>, <rssi value of unique id 0>, ...
    parsed_data_tx: numpy array
        beacon's tx data with columns order: <timestamp>, <tx value of unique id 0>, ...
    unique_ids: list of strings
        Containing unique beacon ids in the same order as the columns above (offseted by timestamp)
    """
    filepath = os.path.join(experiment_path, "imu", source + ".log")
    imu_lines = load_imu_file(filepath)

    # Convert start and end time into this source's IMU clock:
    imu_reference_time = source.split("_")[0] + "_IMU"
    if start is not None:
        start_timestamp = convert_timestamps(experiment_path, start, reference_time, imu_reference_time)
    if end is not None:
        end_timestamp = convert_timestamps(experiment_path, end, reference_time, imu_reference_time)

    ids = load_beacon_ids()
    unique_ids = ids['static_ids']
    n_cols = len(unique_ids) + 1

    # Collect rows in Python lists and stack once at the end; np.append in a
    # loop copies the whole array on every iteration (accidental O(n^2)).
    rows_tx = []
    rows_rssi = []

    # Parse lines:
    for imu_line in imu_lines:
        try:
            data = json.loads(imu_line)
        except json.decoder.JSONDecodeError:
            continue
        if data['type'] != 'ble':
            continue

        new_data_entry_tx = np.zeros(n_cols)
        new_data_entry_rssi = np.zeros(n_cols)
        new_data_entry_rssi[0] = data['time']
        new_data_entry_tx[0] = data['time']

        # Fill the column of each known (static) beacon id; unknown ids are
        # ignored, missing beacons keep their zero entries.
        for beacon in data['beacons']:
            current_id = _get_beacon_id(beacon)
            if current_id in unique_ids:
                column_index = unique_ids.index(current_id)
                new_data_entry_rssi[column_index + 1] = beacon['rssi']
                new_data_entry_tx[column_index + 1] = beacon['tx']

        rows_tx.append(new_data_entry_tx)
        rows_rssi.append(new_data_entry_rssi)

    if rows_tx:
        parsed_data_tx = np.vstack(rows_tx)
        parsed_data_rssi = np.vstack(rows_rssi)
    else:
        parsed_data_tx = np.zeros((0, n_cols))
        parsed_data_rssi = np.zeros((0, n_cols))

    # Crop to the requested interval (timestamps are in IMU time here):
    if start is not None:
        parsed_data_tx = parsed_data_tx[parsed_data_tx[:, 0] >= start_timestamp, :]
        parsed_data_rssi = parsed_data_rssi[parsed_data_rssi[:, 0] >= start_timestamp, :]
    if end is not None:
        parsed_data_tx = parsed_data_tx[parsed_data_tx[:, 0] <= end_timestamp, :]
        parsed_data_rssi = parsed_data_rssi[parsed_data_rssi[:, 0] <= end_timestamp, :]

    if convert_time:
        parsed_data_rssi[:, 0] = convert_timestamps(experiment_path, parsed_data_rssi[:, 0], imu_reference_time,
                                                    reference_time)
        parsed_data_tx[:, 0] = convert_timestamps(experiment_path, parsed_data_tx[:, 0], imu_reference_time,
                                                  reference_time)

    return parsed_data_rssi, parsed_data_tx, unique_ids
def get_imu_data(experiment_path, source, start=None, end=None, reference_time=None, convert_time=True):
    """
    Read imu data for a given source (e.g. P3 left hand) in a time interval

    Parameters
    ----------
    experiment_path: str
        Root of the experiment (e.g. /data/igroups/experiment_8)
    source: str
        Name of the IMU file without extension e.g. P3_imu_right
    start: float
        Return values from this timestamp (if reference_time is set, the value is interpreted as time on that channel)
    end: float
        Return values until this timestamp (if reference_time is set, the value is interpreted as time on that channel)
    reference_time: str
        Use this signal channel's time for reference (convert start and end values to correspond with IMU time)
    convert_time: bool
        If set the returned array will contain timestamp in reference_time's values

    Returns
    -------
    parsed_data: numpy array
        IMU data with columns order: <timestamp>, <ax>, <ay>, <az>, <gx>, <gy>, <gz>, <mx>, <my>, <mz>, <roll>, <pitch>, <yaw>, <qx>, <qy>, <gz>, <qw>
    """
    # Use the cached .npy matrix when present; otherwise parse the .log file
    # (which also creates the cache for the next call).
    npy_filepath = os.path.join(experiment_path, "imu", source + "_movement-data.npy")
    if os.path.exists(npy_filepath):
        parsed_data = np.load(npy_filepath)
    else:
        log_filepath = os.path.join(experiment_path, "imu", source + ".log")
        parsed_data = create_imu_log_file_movement_data(log_filepath, npy_filepath)

    # Crop to the requested interval in this source's IMU clock:
    imu_reference_time = source.split("_")[0] + "_IMU"
    if start is not None:
        lower_bound = convert_timestamps(experiment_path, start, reference_time, imu_reference_time)
        parsed_data = parsed_data[parsed_data[:, 0] >= lower_bound, :]
    if end is not None:
        upper_bound = convert_timestamps(experiment_path, end, reference_time, imu_reference_time)
        parsed_data = parsed_data[parsed_data[:, 0] <= upper_bound, :]

    if convert_time:
        parsed_data[:, 0] = convert_timestamps(experiment_path, parsed_data[:, 0], imu_reference_time, reference_time)

    return parsed_data
def create_imu_log_file_movement_data(log_file_path, npy_file_path):
    """Parse a raw IMU .log file into a numeric array and cache it as .npy.

    Each usable line of the log is a JSON record of type 'imu-bosch'; its
    timestamp and 16 measurement channels become one row of the output.
    Rows corresponding to unparsable or non-'imu-bosch' lines are dropped.

    Parameters
    ----------
    log_file_path: str
        Path of the raw .log file to parse.
    npy_file_path: str
        Path where the parsed array is saved in numpy binary format.

    Returns
    -------
    numpy array with column order:
        <timestamp>, <ax>, <ay>, <az>, <gx>, <gy>, <gz>, <mx>, <my>, <mz>,
        <roll>, <pitch>, <yaw>, <qx>, <qy>, <qz>, <qw>
    """
    # Column order of the measurement channels (column 0 holds the timestamp).
    measurement_keys = ('ax', 'ay', 'az', 'gx', 'gy', 'gz',
                        'mx', 'my', 'mz', 'roll', 'pitch', 'yaw',
                        'qx', 'qy', 'qz', 'qw')
    raw_lines = load_imu_file(log_file_path)
    table = np.full((len(raw_lines), 17), np.nan)
    for row, raw_line in enumerate(raw_lines):
        try:
            record = json.loads(raw_line)
        except json.decoder.JSONDecodeError:
            continue  # malformed line: its row stays NaN and is dropped below
        if record['type'] != 'imu-bosch':
            continue
        table[row, 0] = record['time']
        measurement = record['measurement']
        for col, key in enumerate(measurement_keys, start=1):
            table[row, col] = measurement[key]
    # Drop every row that still contains a NaN (skipped / incomplete lines).
    table = table[~np.isnan(table).any(axis=1)]
    np.save(npy_file_path, table)
    return table
if __name__ == '__main__':
    # Ad-hoc smoke test: load BLE data for one head-mounted IMU in a time
    # window expressed in video time, then visualize the signal matrix.
    # (A disabled `if False:` beacon-id demo and an immediately-overwritten
    # exp_root assignment were removed as dead code.)
    exp_root = "/Users/hevesi/ownCloud/Datasets/igroups_experiment_8"
    data = get_ble_data(exp_root, "P2_imu_head", start=1800, end=2100, reference_time="video", convert_time=True)
    #data = get_imu_data(exp_root, "P3_imu_left", start=1800, end=2368, reference_time="video", convert_time=True)
    print(data[2])

    import matplotlib.pyplot as plt
    plt.imshow(data[0][:, 1:].T)
    plt.show()
| [
"matplotlib.pyplot.imshow",
"experiment_handler.finder.find_all_imu_files",
"os.path.exists",
"json.loads",
"json.dump",
"os.path.join",
"experiment_handler.time_synchronisation.convert_timestamps",
"os.path.realpath",
"numpy.append",
"numpy.isnan",
"json.load",
"numpy.load",
"numpy.save",
... | [((4624, 4677), 'os.path.join', 'os.path.join', (['experiment_path', '"""imu"""', "(source + '.log')"], {}), "(experiment_path, 'imu', source + '.log')\n", (4636, 4677), False, 'import os\n'), ((8368, 8435), 'os.path.join', 'os.path.join', (['experiment_path', '"""imu"""', "(source + '_movement-data.npy')"], {}), "(experiment_path, 'imu', source + '_movement-data.npy')\n", (8380, 8435), False, 'import os\n'), ((10861, 10896), 'numpy.save', 'np.save', (['npy_file_path', 'parsed_data'], {}), '(npy_file_path, parsed_data)\n', (10868, 10896), True, 'import numpy as np\n'), ((11710, 11738), 'matplotlib.pyplot.imshow', 'plt.imshow', (['data[0][:, 1:].T'], {}), '(data[0][:, 1:].T)\n', (11720, 11738), True, 'import matplotlib.pyplot as plt\n'), ((11743, 11753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11751, 11753), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1853), 'experiment_handler.finder.find_all_imu_files', 'find_all_imu_files', (['exp_root'], {}), '(exp_root)\n', (1843, 1853), False, 'from experiment_handler.finder import find_all_imu_files\n'), ((3101, 3127), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3117, 3127), False, 'import os\n'), ((3219, 3232), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3228, 3232), False, 'import json\n'), ((4860, 4938), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'start', 'reference_time', 'imu_reference_time'], {}), '(experiment_path, start, reference_time, imu_reference_time)\n', (4878, 4938), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((4987, 5063), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'end', 'reference_time', 'imu_reference_time'], {}), '(experiment_path, end, reference_time, imu_reference_time)\n', (5005, 5063), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), 
((6052, 6104), 'numpy.append', 'np.append', (['parsed_data_tx', 'new_data_entry_tx'], {'axis': '(0)'}), '(parsed_data_tx, new_data_entry_tx, axis=0)\n', (6061, 6104), True, 'import numpy as np\n'), ((6132, 6188), 'numpy.append', 'np.append', (['parsed_data_rssi', 'new_data_entry_rssi'], {'axis': '(0)'}), '(parsed_data_rssi, new_data_entry_rssi, axis=0)\n', (6141, 6188), True, 'import numpy as np\n'), ((6639, 6738), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'parsed_data_rssi[:, 0]', 'imu_reference_time', 'reference_time'], {}), '(experiment_path, parsed_data_rssi[:, 0],\n imu_reference_time, reference_time)\n', (6657, 6738), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((6813, 6910), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'parsed_data_tx[:, 0]', 'imu_reference_time', 'reference_time'], {}), '(experiment_path, parsed_data_tx[:, 0],\n imu_reference_time, reference_time)\n', (6831, 6910), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((8448, 8476), 'os.path.exists', 'os.path.exists', (['npy_filepath'], {}), '(npy_filepath)\n', (8462, 8476), False, 'import os\n'), ((8501, 8554), 'os.path.join', 'os.path.join', (['experiment_path', '"""imu"""', "(source + '.log')"], {}), "(experiment_path, 'imu', source + '.log')\n", (8513, 8554), False, 'import os\n'), ((8671, 8692), 'numpy.load', 'np.load', (['npy_filepath'], {}), '(npy_filepath)\n', (8678, 8692), True, 'import numpy as np\n'), ((8835, 8913), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'start', 'reference_time', 'imu_reference_time'], {}), '(experiment_path, start, reference_time, imu_reference_time)\n', (8853, 8913), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((8962, 9038), 
'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'end', 'reference_time', 'imu_reference_time'], {}), '(experiment_path, end, reference_time, imu_reference_time)\n', (8980, 9038), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((9288, 9382), 'experiment_handler.time_synchronisation.convert_timestamps', 'convert_timestamps', (['experiment_path', 'parsed_data[:, 0]', 'imu_reference_time', 'reference_time'], {}), '(experiment_path, parsed_data[:, 0], imu_reference_time,\n reference_time)\n', (9306, 9382), False, 'from experiment_handler.time_synchronisation import convert_timestamps\n'), ((2960, 2985), 'json.dump', 'json.dump', (['sorted_ids', 'fp'], {}), '(sorted_ids, fp)\n', (2969, 2985), False, 'import json\n'), ((3143, 3184), 'os.path.join', 'os.path.join', (['dir_path', '"""beacon_ids.json"""'], {}), "(dir_path, 'beacon_ids.json')\n", (3155, 3184), False, 'import os\n'), ((5325, 5345), 'json.loads', 'json.loads', (['imu_line'], {}), '(imu_line)\n', (5335, 5345), False, 'import json\n'), ((9718, 9738), 'json.loads', 'json.loads', (['imu_line'], {}), '(imu_line)\n', (9728, 9738), False, 'import json\n'), ((2025, 2045), 'json.loads', 'json.loads', (['imu_line'], {}), '(imu_line)\n', (2035, 2045), False, 'import json\n'), ((10822, 10843), 'numpy.isnan', 'np.isnan', (['parsed_data'], {}), '(parsed_data)\n', (10830, 10843), True, 'import numpy as np\n')] |
# Plot, for every base pair around annotated TSSs, how many known RegulonDB
# TF binding sites overlap that position (Fig. S6).
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from IPython.core.pylabtools import figsize
from matplotlib.ticker import FormatStrFormatter
from matplotlib.patches import Rectangle
# Seaborn, useful for graphics
import seaborn as sns
sns.set_palette("deep", color_codes=True)
utils.set_plotting_style1()

#===============================================================================
# Set output directory
#===============================================================================
output = 'output_figs/'

#------------------------------------------------------------------------------#
# Load in the summary csv file of known binding sites
#------------------------------------------------------------------------------#
df = pd.read_csv('RegulonDB_20170630_BindingSiteSet.csv', comment='#')

# Site length and the right edge of each site relative to the TSS.
df['len'] = df['TF_sequence'].str.len()
df['len_half'] = df['TF_sequence'].str.len()/2
df['right_side_bs'] = df['center_position_bs'] + df['len_half']

# Enumerate every base-pair position covered by each binding site.
# BUGFIX: the previous code seeded the accumulator with np.zeros(1), which
# injected a spurious 0.0 data point into the histogram and the window
# fraction below.  Also, np.append inside the loop was quadratic; we now
# collect per-site ranges in a list and concatenate once.
segments = []
for index, row in df.iterrows():
    if pd.isnull(row['right_side_bs']):
        continue  # sites without a resolvable position are skipped
    segments.append(np.arange(row['right_side_bs'] - row['len'], row['right_side_bs']))
data = np.concatenate(segments) if segments else np.zeros(0)

# Histogram with 1-bp bins: counts = number of sites overlapping each position.
fig, ax = plt.subplots()
counts, bins, patches = plt.hist(data,
            bins=np.arange(min(data), max(data) + 1, 1),
            linewidth=0)

# Calculate the fraction of covered positions inside a 150 bp window
# (-134, 16) around the TSS.  We want to capture the region downstream of the
# start site, stopping near the approximate RBS region since Sort-Seq will
# probably not be useful there.
window_150 = 0.0
for i in data:
    if -134 < i < 16:
        window_150 += 1.0
window_150_frac = window_150/len(data)

# Grey out the histogram bars that lie outside the 150 bp window.
for patch, rightside, leftside in zip(patches, bins[1:], bins[:-1]):
    if leftside > 16:
        patch.set_facecolor('grey')
    if rightside < -134:
        patch.set_facecolor('grey')

# Create legend (a dummy rectangle carries the window-fraction label).
handles = [Rectangle((0, 0), 1, 1, ec="none")]
labels = ["150 bp window ("+ str(window_150_frac*100)[:4] + "% \n known TF binding sites)"]
plt.legend(handles, labels)

ax.set_xlim(-400, 300)
ax.set_xlabel('position relative to TSS')
ax.set_ylabel('number of binding sites \n overlapping at a base pair')
plt.tight_layout()

figname_out = 'figS6_RegulonDB_bindingsites_summary.pdf'
fig.savefig(output + figname_out, format='pdf')
| [
"pandas.isnull",
"sys.path.insert",
"seaborn.set_palette",
"matplotlib.patches.Rectangle",
"pandas.read_csv",
"numpy.arange",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"numpy.percentile",
"NB_sortseq_utils.set_plotting_style1",
"matplotlib.pyplot.legend"
] | [((152, 177), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (167, 177), False, 'import sys\n'), ((458, 499), 'seaborn.set_palette', 'sns.set_palette', (['"""deep"""'], {'color_codes': '(True)'}), "('deep', color_codes=True)\n", (473, 499), True, 'import seaborn as sns\n'), ((500, 527), 'NB_sortseq_utils.set_plotting_style1', 'utils.set_plotting_style1', ([], {}), '()\n', (525, 527), True, 'import NB_sortseq_utils as utils\n'), ((938, 1003), 'pandas.read_csv', 'pd.read_csv', (['"""RegulonDB_20170630_BindingSiteSet.csv"""'], {'comment': '"""#"""'}), "('RegulonDB_20170630_BindingSiteSet.csv', comment='#')\n", (949, 1003), True, 'import pandas as pd\n'), ((1166, 1177), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (1174, 1177), True, 'import numpy as np\n'), ((1471, 1485), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1483, 1485), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2178), 'numpy.percentile', 'np.percentile', (['data', '[25, 75]'], {}), '(data, [25, 75])\n', (2162, 2178), True, 'import numpy as np\n'), ((2558, 2585), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {}), '(handles, labels)\n', (2568, 2585), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2741), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2739, 2741), True, 'import matplotlib.pyplot as plt\n'), ((1263, 1294), 'pandas.isnull', 'pd.isnull', (["row['right_side_bs']"], {}), "(row['right_side_bs'])\n", (1272, 1294), True, 'import pandas as pd\n'), ((2435, 2469), 'matplotlib.patches.Rectangle', 'Rectangle', (['(0, 0)', '(1)', '(1)'], {'ec': '"""none"""'}), "((0, 0), 1, 1, ec='none')\n", (2444, 2469), False, 'from matplotlib.patches import Rectangle\n'), ((1339, 1405), 'numpy.arange', 'np.arange', (["(row['right_side_bs'] - row['len'])", "row['right_side_bs']"], {}), "(row['right_side_bs'] - row['len'], row['right_side_bs'])\n", (1348, 1405), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import random
import sys
from collections import Counter
import json
from argparse import ArgumentParser
from json_utils import load_json_file, load_json_stream
def get_leaves(node, leaves):
    """Collect the leaf nodes of a (full) binary tree in left-to-right order.

    Leaves are appended to *leaves*, which is also returned so the function
    can be invoked as ``get_leaves(tree, [])``.
    """
    if node["left"] is None:
        leaves.append(node)
    else:
        get_leaves(node["left"], leaves)
        get_leaves(node["right"], leaves)
    return leaves
def upgma(nodes, distf):
    """Build a rooted binary tree over *nodes* by UPGMA agglomerative clustering.

    Parameters
    ----------
    nodes : list of dict
        Leaf nodes; each gets a "members" list of leaf indices assigned here.
    distf : callable(node, node) -> float
        Pairwise distance between two leaves.

    Returns
    -------
    dict with keys "left", "right", "members": the root of the merge tree.

    Note: assumes len(nodes) >= 2; with a single node the final merge below
    would fail.
    """
    N = len(nodes)
    # Upper-triangular leaf-to-leaf distance matrix; unused cells stay +inf
    # so argmin never selects them.
    dmat = np.inf * np.ones((N, N))
    for i in range(N):
        nodes[i]["members"] = [i]
        for j in range(i + 1, N):
            dmat[i,j] = distf(nodes[i], nodes[j])
    # cdmat / cnodes are the distances and nodes of the *current* clustering
    # level; dmat keeps the original leaf distances for averaging.
    cdmat = np.copy(dmat)
    cnodes = nodes
    while len(cnodes) > 2:
        # Closest pair of current clusters (idx[0] < idx[1] since the lower
        # triangle is +inf).
        idx = np.unravel_index(np.argmin(cdmat, axis=None), cdmat.shape)
        N2 = cdmat.shape[0] - 1
        cnodes2 = []
        for i in range(len(cdmat)):
            if i == idx[0]:
                # Replace the first member of the pair by the merged node ...
                pnode = {
                    "left": cnodes[idx[0]],
                    "right": cnodes[idx[1]],
                    "members": cnodes[idx[0]]["members"] + cnodes[idx[1]]["members"]
                }
                cnodes2.append(pnode)
            elif i == idx[1]:
                # ... and drop the second.
                pass
            else:
                cnodes2.append(cnodes[i])
        # Recompute all pairwise cluster distances as the mean of the original
        # leaf distances (unweighted average = UPGMA).
        # TODO: reuse old vals for efficiency
        cdmat2 = np.inf * np.ones((N2, N2))
        for i in range(N2):
            for j in range(i + 1, N2):
                dist = 0.0
                for k in cnodes2[i]["members"]:
                    for l in cnodes2[j]["members"]:
                        # dmat is upper-triangular: order the indices.
                        dist += dmat[k,l] if k < l else dmat[l,k]
                dist /= len(cnodes2[i]["members"]) * len(cnodes2[j]["members"])
                cdmat2[i][j] = dist
        cdmat = cdmat2
        cnodes = cnodes2
    # Join the last two clusters into the root.
    pnode = {
        "left": cnodes[0],
        "right": cnodes[1],
        "members": cnodes[0]["members"] + cnodes[1]["members"]
    }
    return pnode
def get_geoclusters(geotree, leaves, min_size=2):
    """Enumerate the clusters induced by a UPGMA tree, in pre-order.

    Every node of *geotree* carries a "members" list of leaf indices; the
    cluster of a node is the sorted list of the corresponding leaf names.
    Only clusters with at least *min_size* members are returned.
    """
    clusters = []
    # Explicit-stack pre-order traversal (node, left subtree, right subtree).
    stack = [geotree]
    while stack:
        node = stack.pop()
        names = sorted(leaves[idx]["name"] for idx in node["members"])
        if len(names) >= min_size:
            clusters.append(names)
        if node["left"] is not None:
            # Push right first so the left subtree is visited first.
            stack.append(node["right"])
            stack.append(node["left"])
    return clusters
def get_clusters(tree, min_size=2):
    """Enumerate the clusters induced by a tree whose nodes carry names.

    The cluster of a node is the sorted list of the names of the node itself
    and all of its descendants.  Clusters are emitted in post-order (children
    before their parent); only those with at least *min_size* members are kept.
    """
    clusters = []

    def collect(node):
        # Names in this subtree: the node itself, then its two children's
        # subtrees (inner nodes are assumed to have both children).
        names = [node["name"]]
        if node["left"] is not None:
            names.extend(collect(node["left"]))
            names.extend(collect(node["right"]))
        ordered = sorted(names)
        if len(ordered) >= min_size:
            clusters.append(ordered)
        return names

    collect(tree)
    return clusters
def merge_clusters(clusters1, clusters2):
    """Union of two cluster lists with exact-duplicate clusters removed.

    Clusters are sorted name lists; duplicates are detected via a tab-joined
    signature.  First-seen order is preserved (dicts keep insertion order).
    """
    signatures = {}
    for cluster in list(clusters1) + list(clusters2):
        signatures["\t".join(cluster)] = True
    return [signature.split("\t") for signature in signatures]
def max_jaccard(sysclusters, refclusters):
    """Mean, over *sysclusters*, of the best Jaccard overlap with *refclusters*.

    For every system cluster, the maximum Jaccard index against all reference
    clusters is taken (-1.0 when *refclusters* is empty), and the scores are
    averaged over the system clusters.
    """
    total = 0.0
    for syscluster in sysclusters:
        members = set(syscluster)
        total += max(
            (len(members & set(ref)) / float(len(members | set(ref)))
             for ref in refclusters),
            default=-1.0)
    return total / len(sysclusters)
def _threshold_report(model, cprobs, leaves, treeclusters, geoclusters,
                      combinedclusters):
    """Sweep membership thresholds 0.1, 0.2, ... over a K x L matrix of
    component probabilities and score the induced clusters.

    For each threshold, language *lid* joins component *k*'s cluster when
    cprobs[k, lid] >= threshold.  Each cluster set is scored against the
    tree-, geo-, and combined reference clusterings with max_jaccard.
    The sweep stops early once no non-empty cluster remains.

    Returns the list of per-threshold result dicts.
    """
    report = []
    thres = 0.1
    while thres < 1.0:
        K = cprobs.shape[0]
        clusters = []
        for k in range(K):
            cluster = [leaves[lid]["name"]
                       for lid, prob in enumerate(cprobs[k]) if prob >= thres]
            if len(cluster) > 0:
                clusters.append(cluster)
        if len(clusters) <= 0:
            break
        treescore = max_jaccard(clusters, treeclusters)
        geoscore = max_jaccard(clusters, geoclusters)
        combinedscore = max_jaccard(clusters, combinedclusters)
        report.append({
            "model": model,
            "K": K,
            "thres": thres,
            "treescore": treescore,
            "geoscore": geoscore,
            "combinedscore": combinedscore,
        })
        sys.stderr.write("{}\t{}\t{}\t{}\n".format(thres, treescore, geoscore, combinedscore))
        thres += 0.1
    return report

def main():
    """Evaluate inferred language clusters against tree/geo baselines.

    Loads a phylogenetic tree and a component-probability file, builds three
    reference clusterings (tree clades, geographic UPGMA clusters, and their
    union), sweeps cluster-membership thresholds, and prints the JSON report
    to stdout.  (The previously duplicated adm/mda sweep loops now share
    _threshold_report; behavior is unchanged.)
    """
    parser = ArgumentParser()
    parser.add_argument('--min_ratio', type=float, default=0.05)
    parser.add_argument("--model", default="adm")
    parser.add_argument("tree", metavar="LANG", default=None)
    parser.add_argument("bins", metavar="LANG", default=None)
    args = parser.parse_args()
    sys.stderr.write("args\t{}\n".format(args))

    tree = load_json_file(args.tree)
    leaves = get_leaves(tree, [])
    # Clusters must contain at least this fraction of all languages.
    min_size = len(leaves) * args.min_ratio
    treeclusters = get_clusters(tree, min_size=min_size)
    # Geographic baseline: UPGMA over Euclidean distance of (x, y) coordinates.
    geotree = upgma(leaves, lambda x, y: np.sqrt((x["x"] - y["x"]) ** 2 + (x["y"] - y["y"]) ** 2))
    geoclusters = get_geoclusters(geotree, leaves, min_size=min_size)
    combinedclusters = merge_clusters(treeclusters, geoclusters)

    bins = load_json_file(args.bins)
    if args.model == "adm":
        # L x K component weights; normalize each language's row, then
        # transpose to the K x L layout expected by _threshold_report.
        bins = np.array(bins, dtype=np.float64)
        bins /= bins.sum(axis=1, keepdims=True)
        cprobs = bins.T
    elif args.model == "mda":
        cprobs = np.array(bins["avg_zmat"])  # K x L
    else:
        raise NotImplementedError
    report = _threshold_report(args.model, cprobs, leaves,
                               treeclusters, geoclusters, combinedclusters)
    print(json.dumps(report))
if __name__ == "__main__":
main()
| [
"numpy.copy",
"json_utils.load_json_file",
"numpy.sqrt",
"numpy.ones",
"argparse.ArgumentParser",
"json.dumps",
"numpy.array",
"numpy.argmin"
] | [((644, 657), 'numpy.copy', 'np.copy', (['dmat'], {}), '(dmat)\n', (651, 657), True, 'import numpy as np\n'), ((4205, 4221), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4219, 4221), False, 'from argparse import ArgumentParser\n'), ((4552, 4577), 'json_utils.load_json_file', 'load_json_file', (['args.tree'], {}), '(args.tree)\n', (4566, 4577), False, 'from json_utils import load_json_file, load_json_stream\n'), ((5017, 5042), 'json_utils.load_json_file', 'load_json_file', (['args.bins'], {}), '(args.bins)\n', (5031, 5042), False, 'from json_utils import load_json_file, load_json_stream\n'), ((475, 490), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (482, 490), True, 'import numpy as np\n'), ((5102, 5134), 'numpy.array', 'np.array', (['bins'], {'dtype': 'np.float64'}), '(bins, dtype=np.float64)\n', (5110, 5134), True, 'import numpy as np\n'), ((7373, 7391), 'json.dumps', 'json.dumps', (['report'], {}), '(report)\n', (7383, 7391), False, 'import json\n'), ((735, 762), 'numpy.argmin', 'np.argmin', (['cdmat'], {'axis': 'None'}), '(cdmat, axis=None)\n', (744, 762), True, 'import numpy as np\n'), ((1333, 1350), 'numpy.ones', 'np.ones', (['(N2, N2)'], {}), '((N2, N2))\n', (1340, 1350), True, 'import numpy as np\n'), ((4782, 4838), 'numpy.sqrt', 'np.sqrt', (["((x['x'] - y['x']) ** 2 + (x['y'] - y['y']) ** 2)"], {}), "((x['x'] - y['x']) ** 2 + (x['y'] - y['y']) ** 2)\n", (4789, 4838), True, 'import numpy as np\n'), ((6252, 6278), 'numpy.array', 'np.array', (["bins['avg_zmat']"], {}), "(bins['avg_zmat'])\n", (6260, 6278), True, 'import numpy as np\n')] |
# Minimal Keras demo: train a 10-class MLP classifier on random data and
# persist the trained model to an HDF5 file.
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD

# Generate dummy data
import numpy as np

# Random features: 10000 samples x 20 features (train), 1000 x 20 (test).
x_train = np.random.random((10000, 20))
# Labels: random integers in [0, 10), one-hot encoded into 10 classes.
y_train = keras.utils.to_categorical(np.random.randint(10, size=(10000, 1)), num_classes=10)
x_test = np.random.random((1000, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)

# Two hidden ReLU layers with dropout, softmax output over 10 classes.
model = Sequential([
    Dense(64, activation='relu', input_dim=20),
    Dropout(0.5),
    Dense(64, activation='relu'),
    Dropout(0.5),
    Dense(10, activation='softmax'),
])

# Optimizer: SGD with decay, momentum and Nesterov acceleration.
# NOTE(review): newer Keras releases renamed `lr` to `learning_rate` --
# confirm against the installed version.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

# Configure the training setup.
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

# Train.
model.fit(x_train, y_train,
          epochs=20,
          batch_size=128)

# Create the HDF5 file 'my_model.h5'.
model.save('my_model.h5')
| [
"numpy.random.random",
"keras.models.Sequential",
"numpy.random.randint",
"keras.optimizers.SGD",
"keras.layers.Dense",
"keras.layers.Dropout"
] | [((186, 215), 'numpy.random.random', 'np.random.random', (['(10000, 20)'], {}), '((10000, 20))\n', (202, 215), True, 'import numpy as np\n'), ((330, 358), 'numpy.random.random', 'np.random.random', (['(1000, 20)'], {}), '((1000, 20))\n', (346, 358), True, 'import numpy as np\n'), ((459, 471), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (469, 471), False, 'from keras.models import Sequential\n'), ((676, 730), 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (679, 730), False, 'from keras.optimizers import SGD\n'), ((265, 303), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(10000, 1)'}), '(10, size=(10000, 1))\n', (282, 303), True, 'import numpy as np\n'), ((395, 432), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(1000, 1)'}), '(10, size=(1000, 1))\n', (412, 432), True, 'import numpy as np\n'), ((483, 525), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""', 'input_dim': '(20)'}), "(64, activation='relu', input_dim=20)\n", (488, 525), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((537, 549), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (544, 549), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((561, 589), 'keras.layers.Dense', 'Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (566, 589), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((601, 613), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (608, 613), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((625, 656), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (630, 656), False, 'from keras.layers import Dense, Dropout, Activation\n')] |
from typing import Dict, Tuple
from gym.envs.registration import register
import numpy as np
from highway_env import utils
from highway_env.envs.common.abstract import AbstractEnv, MultiAgentWrapper
from highway_env.road.lane import LineType, StraightLane, CircularLane, AbstractLane
from highway_env.road.regulation import RegulatedRoad
from highway_env.road.road import RoadNetwork
from highway_env.vehicle.kinematics import Vehicle
class IntersectionEnv(AbstractEnv):
    """A four-way intersection crossing task.

    One or more controlled vehicles approach a regulated intersection with
    randomly spawned traffic and must cross it using longitudinal actions
    only (SLOWER / IDLE / FASTER); steering follows the planned route.
    """

    # Discrete longitudinal meta-actions available to each controlled vehicle.
    ACTIONS: Dict[int, str] = {
        0: 'SLOWER',
        1: 'IDLE',
        2: 'FASTER'
    }
    # Reverse mapping: action name -> action index.
    ACTIONS_INDEXES = {v: k for k, v in ACTIONS.items()}

    @classmethod
    def default_config(cls) -> dict:
        """Default configuration: observation/action spaces, episode length,
        traffic density, rendering and reward coefficients."""
        config = super().default_config()
        config.update({
            "observation": {
                "type": "Kinematics",
                "vehicles_count": 15,
                "features": ["presence", "x", "y", "vx", "vy", "cos_h", "sin_h"],
                "features_range": {
                    "x": [-100, 100],
                    "y": [-100, 100],
                    "vx": [-20, 20],
                    "vy": [-20, 20],
                },
                "absolute": True,
                "flatten": False,
                "observe_intentions": False
            },
            "action": {
                "type": "DiscreteMetaAction",
                "longitudinal": True,
                "lateral": False
            },
            "duration": 13,  # [s]
            "destination": "o1",
            "controlled_vehicles": 1,
            "initial_vehicle_count": 10,
            "spawn_probability": 0.6,
            "screen_width": 600,
            "screen_height": 600,
            "centering_position": [0.5, 0.6],
            "scaling": 5.5 * 1.3,
            "collision_reward": -5,
            "high_speed_reward": 1,
            "arrived_reward": 1,
            "normalize_reward": False
        })
        return config

    def _reward(self, action: int) -> float:
        """Average of the per-agent rewards over all controlled vehicles."""
        # Cooperative multi-agent reward
        return sum(self._agent_reward(action, vehicle) for vehicle in self.controlled_vehicles) \
               / len(self.controlled_vehicles)

    def _agent_reward(self, action: int, vehicle: Vehicle) -> float:
        """Per-vehicle reward: collision penalty plus a bonus when driving at
        the top speed index; arrival at the destination overrides both.
        Optionally rescaled to [0, 1] when config["normalize_reward"] is set."""
        reward = self.config["collision_reward"] * vehicle.crashed \
                 + self.config["high_speed_reward"] * (vehicle.speed_index == vehicle.SPEED_COUNT - 1)
        reward = self.config["arrived_reward"] if self.has_arrived(vehicle) else reward
        if self.config["normalize_reward"]:
            reward = utils.lmap(reward, [self.config["collision_reward"], self.config["arrived_reward"]], [0, 1])
        return reward

    def _is_terminal(self) -> bool:
        """End the episode on any crash, when every agent has arrived, or
        when the time budget (duration * policy_frequency steps) runs out."""
        return any(vehicle.crashed for vehicle in self.controlled_vehicles) \
               or all(self.has_arrived(vehicle) for vehicle in self.controlled_vehicles) \
               or self.steps >= self.config["duration"] * self.config["policy_frequency"]

    def _agent_is_terminal(self, vehicle: Vehicle) -> bool:
        """The episode is over when a collision occurs or when the access ramp has been passed."""
        return vehicle.crashed \
               or self.steps >= self.config["duration"] * self.config["policy_frequency"] \
               or self.has_arrived(vehicle)

    def _info(self, obs: np.ndarray, action: int) -> dict:
        """Extend the base info dict with per-agent rewards and done flags."""
        info = super()._info(obs, action)
        info["agents_rewards"] = tuple(self._agent_reward(action, vehicle) for vehicle in self.controlled_vehicles)
        info["agents_dones"] = tuple(self._agent_is_terminal(vehicle) for vehicle in self.controlled_vehicles)
        return info

    def _reset(self) -> None:
        """Rebuild the road network and repopulate it with vehicles."""
        self._make_road()
        self._make_vehicles(self.config["initial_vehicle_count"])

    def step(self, action: int) -> Tuple[np.ndarray, float, bool, dict]:
        """Advance the simulation, then prune vehicles that left the scene
        and possibly spawn a new incoming vehicle."""
        obs, reward, done, info = super().step(action)
        self._clear_vehicles()
        self._spawn_vehicle(spawn_probability=self.config["spawn_probability"])
        return obs, reward, done, info

    def _make_road(self) -> None:
        """
        Make an 4-way intersection.

        The horizontal road has the right of way. More precisely, the levels of priority are:
        - 3 for horizontal straight lanes and right-turns
        - 1 for vertical straight lanes and right-turns
        - 2 for horizontal left-turns
        - 0 for vertical left-turns

        The code for nodes in the road network is:
        (o:outer | i:inner + [r:right, l:left]) + (0:south | 1:west | 2:north | 3:east)

        :return: the intersection road
        """
        lane_width = AbstractLane.DEFAULT_WIDTH
        right_turn_radius = lane_width + 5  # [m]
        left_turn_radius = right_turn_radius + lane_width  # [m]
        outer_distance = right_turn_radius + lane_width / 2
        access_length = 50 + 50  # [m]

        net = RoadNetwork()
        n, c, s = LineType.NONE, LineType.CONTINUOUS, LineType.STRIPED
        # Build the four approaches by rotating one approach by 0/90/180/270 deg.
        for corner in range(4):
            angle = np.radians(90 * corner)
            is_horizontal = corner % 2
            priority = 3 if is_horizontal else 1
            rotation = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]])
            # Incoming
            start = rotation @ np.array([lane_width / 2, access_length + outer_distance])
            end = rotation @ np.array([lane_width / 2, outer_distance])
            net.add_lane("o" + str(corner), "ir" + str(corner),
                         StraightLane(start, end, line_types=[s, c], priority=priority, speed_limit=10))
            # Right turn
            r_center = rotation @ (np.array([outer_distance, outer_distance]))
            net.add_lane("ir" + str(corner), "il" + str((corner - 1) % 4),
                         CircularLane(r_center, right_turn_radius, angle + np.radians(180), angle + np.radians(270),
                                      line_types=[n, c], priority=priority, speed_limit=10))
            # Left turn
            l_center = rotation @ (np.array([-left_turn_radius + lane_width / 2, left_turn_radius - lane_width / 2]))
            net.add_lane("ir" + str(corner), "il" + str((corner + 1) % 4),
                         CircularLane(l_center, left_turn_radius, angle + np.radians(0), angle + np.radians(-90),
                                      clockwise=False, line_types=[n, n], priority=priority - 1, speed_limit=10))
            # Straight
            start = rotation @ np.array([lane_width / 2, outer_distance])
            end = rotation @ np.array([lane_width / 2, -outer_distance])
            net.add_lane("ir" + str(corner), "il" + str((corner + 2) % 4),
                         StraightLane(start, end, line_types=[s, n], priority=priority, speed_limit=10))
            # Exit
            start = rotation @ np.flip([lane_width / 2, access_length + outer_distance], axis=0)
            end = rotation @ np.flip([lane_width / 2, outer_distance], axis=0)
            net.add_lane("il" + str((corner - 1) % 4), "o" + str((corner - 1) % 4),
                         StraightLane(end, start, line_types=[n, c], priority=priority, speed_limit=10))

        road = RegulatedRoad(network=net, np_random=self.np_random, record_history=self.config["show_trajectories"])
        self.road = road

    def _make_vehicles(self, n_vehicles: int = 10) -> None:
        """
        Populate a road with several vehicles on the highway and on the merging lane

        :return: the ego-vehicle
        """
        # Configure vehicles
        vehicle_type = utils.class_from_path(self.config["other_vehicles_type"])
        vehicle_type.DISTANCE_WANTED = 7  # Low jam distance
        vehicle_type.COMFORT_ACC_MAX = 6
        vehicle_type.COMFORT_ACC_MIN = -3

        # Random vehicles: spawn them spread along the approaches, stepping the
        # simulation between spawns so the traffic spaces out.
        simulation_steps = 3
        for t in range(n_vehicles - 1):
            self._spawn_vehicle(np.linspace(0, 80, n_vehicles)[t])
            for _ in range(simulation_steps):
                [(self.road.act(), self.road.step(1 / self.config["simulation_frequency"])) for _ in range(self.config["simulation_frequency"])]

        # Challenger vehicle: always spawned, going straight, to force an interaction.
        self._spawn_vehicle(60, spawn_probability=1, go_straight=True, position_deviation=0.1, speed_deviation=0)

        # Controlled vehicles: one per agent, each entering from a different approach.
        self.controlled_vehicles = []
        for ego_id in range(0, self.config["controlled_vehicles"]):
            ego_lane = self.road.network.get_lane(("o{}".format(ego_id % 4), "ir{}".format(ego_id % 4), 0))
            destination = self.config["destination"] or "o" + str(self.np_random.randint(1, 4))
            ego_vehicle = self.action_type.vehicle_class(
                             self.road,
                             ego_lane.position(60 + 5*self.np_random.randn(1), 0),
                             speed=ego_lane.speed_limit,
                             heading=ego_lane.heading_at(60)) \
                .plan_route_to(destination)
            # Restrict the ego speed grid to 3 levels in [0, 9] m/s.
            ego_vehicle.SPEED_MIN = 0
            ego_vehicle.SPEED_MAX = 9
            ego_vehicle.SPEED_COUNT = 3
            ego_vehicle.speed_index = ego_vehicle.speed_to_index(ego_lane.speed_limit)
            ego_vehicle.target_speed = ego_vehicle.index_to_speed(ego_vehicle.speed_index)

            self.road.vehicles.append(ego_vehicle)
            self.controlled_vehicles.append(ego_vehicle)
            # NOTE(review): removing from the list being iterated may skip
            # elements -- confirm this is acceptable here.
            for v in self.road.vehicles:  # Prevent early collisions
                if v is not ego_vehicle and np.linalg.norm(v.position - ego_vehicle.position) < 20:
                    self.road.vehicles.remove(v)

    def _spawn_vehicle(self,
                       longitudinal: float = 0,
                       position_deviation: float = 1.,
                       speed_deviation: float = 1.,
                       spawn_probability: float = 0.6,
                       go_straight: bool = False) -> None:
        """With probability *spawn_probability*, create one vehicle on a random
        approach, route it to a random (or straight-ahead) exit, and add it to
        the road unless it would start within 15 m of an existing vehicle.

        NOTE(review): annotated -> None, but the vehicle is returned on success.
        """
        if self.np_random.rand() > spawn_probability:
            return

        route = self.np_random.choice(range(4), size=2, replace=False)
        route[1] = (route[0] + 2) % 4 if go_straight else route[1]
        vehicle_type = utils.class_from_path(self.config["other_vehicles_type"])
        vehicle = vehicle_type.make_on_lane(self.road, ("o" + str(route[0]), "ir" + str(route[0]), 0),
                                            longitudinal=longitudinal + 5 + self.np_random.randn() * position_deviation,
                                            speed=8 + self.np_random.randn() * speed_deviation)
        for v in self.road.vehicles:
            if np.linalg.norm(v.position - vehicle.position) < 15:
                return
        vehicle.plan_route_to("o" + str(route[1]))
        vehicle.randomize_behavior()
        self.road.vehicles.append(vehicle)
        return vehicle

    def _clear_vehicles(self) -> None:
        """Remove vehicles that have (almost) reached the end of an exit lane,
        and any non-controlled vehicle without a route."""
        is_leaving = lambda vehicle: "il" in vehicle.lane_index[0] and "o" in vehicle.lane_index[1] \
                                     and vehicle.lane.local_coordinates(vehicle.position)[0] \
                                     >= vehicle.lane.length - 4 * vehicle.LENGTH
        self.road.vehicles = [vehicle for vehicle in self.road.vehicles if
                              vehicle in self.controlled_vehicles or not (is_leaving(vehicle) or vehicle.route is None)]

    def has_arrived(self, vehicle: Vehicle, exit_distance: float = 25) -> bool:
        """Whether *vehicle* is on an exit lane and has travelled at least
        *exit_distance* metres along it."""
        return "il" in vehicle.lane_index[0] \
               and "o" in vehicle.lane_index[1] \
               and vehicle.lane.local_coordinates(vehicle.position)[0] >= exit_distance

    def _cost(self, action: int) -> float:
        """The constraint signal is the occurrence of collisions."""
        return float(self.vehicle.crashed)
class MultiAgentIntersectionEnv(IntersectionEnv):
    """Intersection variant with two controlled vehicles, multi-agent
    longitudinal-only actions and per-agent kinematic observations."""

    @classmethod
    def default_config(cls) -> dict:
        config = super().default_config()
        multi_agent_overrides = {
            "action": {
                "type": "MultiAgentAction",
                "action_config": {
                    "type": "DiscreteMetaAction",
                    # Agents only choose speed; lane changes are disabled.
                    "lateral": False,
                    "longitudinal": True,
                },
            },
            "observation": {
                "type": "MultiAgentObservation",
                "observation_config": {
                    "type": "Kinematics",
                },
            },
            "controlled_vehicles": 2,
        }
        config.update(multi_agent_overrides)
        return config
# Tuple-observation variant obtained by wrapping the multi-agent environment.
TupleMultiAgentIntersectionEnv = MultiAgentWrapper(MultiAgentIntersectionEnv)
# Register the three environment flavours so they can be created by id.
register(
    id='intersection-v0',
    entry_point='highway_env.envs:IntersectionEnv',
)
register(
    id='intersection-multi-agent-v0',
    entry_point='highway_env.envs:MultiAgentIntersectionEnv',
)
register(
    id='intersection-multi-agent-v1',
    entry_point='highway_env.envs:TupleMultiAgentIntersectionEnv',
)
| [
"highway_env.road.regulation.RegulatedRoad",
"highway_env.road.road.RoadNetwork",
"numpy.radians",
"numpy.flip",
"numpy.linalg.norm",
"highway_env.utils.lmap",
"highway_env.utils.class_from_path",
"numpy.array",
"highway_env.road.lane.StraightLane",
"numpy.linspace",
"numpy.cos",
"highway_env.... | [((12436, 12480), 'highway_env.envs.common.abstract.MultiAgentWrapper', 'MultiAgentWrapper', (['MultiAgentIntersectionEnv'], {}), '(MultiAgentIntersectionEnv)\n', (12453, 12480), False, 'from highway_env.envs.common.abstract import AbstractEnv, MultiAgentWrapper\n'), ((12483, 12561), 'gym.envs.registration.register', 'register', ([], {'id': '"""intersection-v0"""', 'entry_point': '"""highway_env.envs:IntersectionEnv"""'}), "(id='intersection-v0', entry_point='highway_env.envs:IntersectionEnv')\n", (12491, 12561), False, 'from gym.envs.registration import register\n'), ((12574, 12679), 'gym.envs.registration.register', 'register', ([], {'id': '"""intersection-multi-agent-v0"""', 'entry_point': '"""highway_env.envs:MultiAgentIntersectionEnv"""'}), "(id='intersection-multi-agent-v0', entry_point=\n 'highway_env.envs:MultiAgentIntersectionEnv')\n", (12582, 12679), False, 'from gym.envs.registration import register\n'), ((12687, 12797), 'gym.envs.registration.register', 'register', ([], {'id': '"""intersection-multi-agent-v1"""', 'entry_point': '"""highway_env.envs:TupleMultiAgentIntersectionEnv"""'}), "(id='intersection-multi-agent-v1', entry_point=\n 'highway_env.envs:TupleMultiAgentIntersectionEnv')\n", (12695, 12797), False, 'from gym.envs.registration import register\n'), ((4898, 4911), 'highway_env.road.road.RoadNetwork', 'RoadNetwork', ([], {}), '()\n', (4909, 4911), False, 'from highway_env.road.road import RoadNetwork\n'), ((7184, 7290), 'highway_env.road.regulation.RegulatedRoad', 'RegulatedRoad', ([], {'network': 'net', 'np_random': 'self.np_random', 'record_history': "self.config['show_trajectories']"}), "(network=net, np_random=self.np_random, record_history=self.\n config['show_trajectories'])\n", (7197, 7290), False, 'from highway_env.road.regulation import RegulatedRoad\n'), ((7567, 7624), 'highway_env.utils.class_from_path', 'utils.class_from_path', (["self.config['other_vehicles_type']"], {}), 
"(self.config['other_vehicles_type'])\n", (7588, 7624), False, 'from highway_env import utils\n'), ((10101, 10158), 'highway_env.utils.class_from_path', 'utils.class_from_path', (["self.config['other_vehicles_type']"], {}), "(self.config['other_vehicles_type'])\n", (10122, 10158), False, 'from highway_env import utils\n'), ((2562, 2659), 'highway_env.utils.lmap', 'utils.lmap', (['reward', "[self.config['collision_reward'], self.config['arrived_reward']]", '[0, 1]'], {}), "(reward, [self.config['collision_reward'], self.config[\n 'arrived_reward']], [0, 1])\n", (2572, 2659), False, 'from highway_env import utils\n'), ((5035, 5058), 'numpy.radians', 'np.radians', (['(90 * corner)'], {}), '(90 * corner)\n', (5045, 5058), True, 'import numpy as np\n'), ((5300, 5358), 'numpy.array', 'np.array', (['[lane_width / 2, access_length + outer_distance]'], {}), '([lane_width / 2, access_length + outer_distance])\n', (5308, 5358), True, 'import numpy as np\n'), ((5388, 5430), 'numpy.array', 'np.array', (['[lane_width / 2, outer_distance]'], {}), '([lane_width / 2, outer_distance])\n', (5396, 5430), True, 'import numpy as np\n'), ((5520, 5598), 'highway_env.road.lane.StraightLane', 'StraightLane', (['start', 'end'], {'line_types': '[s, c]', 'priority': 'priority', 'speed_limit': '(10)'}), '(start, end, line_types=[s, c], priority=priority, speed_limit=10)\n', (5532, 5598), False, 'from highway_env.road.lane import LineType, StraightLane, CircularLane, AbstractLane\n'), ((5660, 5702), 'numpy.array', 'np.array', (['[outer_distance, outer_distance]'], {}), '([outer_distance, outer_distance])\n', (5668, 5702), True, 'import numpy as np\n'), ((6048, 6133), 'numpy.array', 'np.array', (['[-left_turn_radius + lane_width / 2, left_turn_radius - lane_width / 2]'], {}), '([-left_turn_radius + lane_width / 2, left_turn_radius - lane_width /\n 2])\n', (6056, 6133), True, 'import numpy as np\n'), ((6488, 6530), 'numpy.array', 'np.array', (['[lane_width / 2, outer_distance]'], {}), 
'([lane_width / 2, outer_distance])\n', (6496, 6530), True, 'import numpy as np\n'), ((6560, 6603), 'numpy.array', 'np.array', (['[lane_width / 2, -outer_distance]'], {}), '([lane_width / 2, -outer_distance])\n', (6568, 6603), True, 'import numpy as np\n'), ((6704, 6782), 'highway_env.road.lane.StraightLane', 'StraightLane', (['start', 'end'], {'line_types': '[s, n]', 'priority': 'priority', 'speed_limit': '(10)'}), '(start, end, line_types=[s, n], priority=priority, speed_limit=10)\n', (6716, 6782), False, 'from highway_env.road.lane import LineType, StraightLane, CircularLane, AbstractLane\n'), ((6834, 6899), 'numpy.flip', 'np.flip', (['[lane_width / 2, access_length + outer_distance]'], {'axis': '(0)'}), '([lane_width / 2, access_length + outer_distance], axis=0)\n', (6841, 6899), True, 'import numpy as np\n'), ((6929, 6978), 'numpy.flip', 'np.flip', (['[lane_width / 2, outer_distance]'], {'axis': '(0)'}), '([lane_width / 2, outer_distance], axis=0)\n', (6936, 6978), True, 'import numpy as np\n'), ((7088, 7166), 'highway_env.road.lane.StraightLane', 'StraightLane', (['end', 'start'], {'line_types': '[n, c]', 'priority': 'priority', 'speed_limit': '(10)'}), '(end, start, line_types=[n, c], priority=priority, speed_limit=10)\n', (7100, 7166), False, 'from highway_env.road.lane import LineType, StraightLane, CircularLane, AbstractLane\n'), ((10531, 10576), 'numpy.linalg.norm', 'np.linalg.norm', (['(v.position - vehicle.position)'], {}), '(v.position - vehicle.position)\n', (10545, 10576), True, 'import numpy as np\n'), ((7897, 7927), 'numpy.linspace', 'np.linspace', (['(0)', '(80)', 'n_vehicles'], {}), '(0, 80, n_vehicles)\n', (7908, 7927), True, 'import numpy as np\n'), ((5181, 5194), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5187, 5194), True, 'import numpy as np\n'), ((5214, 5227), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5220, 5227), True, 'import numpy as np\n'), ((5229, 5242), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', 
(5235, 5242), True, 'import numpy as np\n'), ((5854, 5869), 'numpy.radians', 'np.radians', (['(180)'], {}), '(180)\n', (5864, 5869), True, 'import numpy as np\n'), ((5879, 5894), 'numpy.radians', 'np.radians', (['(270)'], {}), '(270)\n', (5889, 5894), True, 'import numpy as np\n'), ((6280, 6293), 'numpy.radians', 'np.radians', (['(0)'], {}), '(0)\n', (6290, 6293), True, 'import numpy as np\n'), ((6303, 6318), 'numpy.radians', 'np.radians', (['(-90)'], {}), '(-90)\n', (6313, 6318), True, 'import numpy as np\n'), ((9462, 9511), 'numpy.linalg.norm', 'np.linalg.norm', (['(v.position - ego_vehicle.position)'], {}), '(v.position - ego_vehicle.position)\n', (9476, 9511), True, 'import numpy as np\n'), ((5197, 5210), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5203, 5210), True, 'import numpy as np\n')] |
"""
Implements Genetic algorithms for black-box optimisation.
--<EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=no-member
from argparse import Namespace
from numpy.random import choice
# Local imports
from .blackbox_optimiser import BlackboxOptimiser, blackbox_opt_args
from ..utils.general_utils import sample_according_to_exp_probs
from ..utils.option_handler import get_option_specs, load_options
from ..utils.reporters import get_reporter
# Options specific to the genetic-algorithm optimiser.
ga_specific_opt_args = [
  get_option_specs('num_mutations_per_epoch', False, 5,
    'Number of mutations per epoch.'),
  get_option_specs('num_candidates_to_mutate_from', False, -1,
    'The number of candidates to choose the mutations from.'),
  get_option_specs('fitness_sampler_scaling_const', False, 2,
    'The scaling constant for sampling according to exp_probs.'),
  ]
# Full option set: GA-specific options plus the generic black-box optimiser options.
ga_opt_args = ga_specific_opt_args + blackbox_opt_args
class GAOptimiser(BlackboxOptimiser):
  """ Class for optimisation based on Genetic algorithms. """

  def __init__(self, func_caller, worker_manager, mutation_op, crossover_op=None,
               options=None, reporter=None):
    """ Constructor.
        mutation_op: A function which takes in a list of objects and modifies them.
        crossover_op: A function which takes in two objects and performs a cross-over
          operation. So far we have not implemented cross-over but included here in
          case we want to include it in the future.
        For other arguments, see BlackboxOptimiser.
    """
    # TODO: implement cross-over operation
    if options is None:
      reporter = get_reporter(reporter)
      options = load_options(ga_opt_args, reporter=reporter)
    super(GAOptimiser, self).__init__(func_caller, worker_manager, model=None,
                                      options=options, reporter=reporter)
    self.mutation_op = mutation_op
    self.crossover_op = crossover_op
    # Queue of generated-but-not-yet-evaluated points, consumed by
    # _determine_next_query.
    self.to_eval_points = []

  def _opt_method_set_up(self):
    """ Additional set up. """
    # pylint: disable=attribute-defined-outside-init
    # Set up parameters for the mutations
    self.method_name = 'GA'
    self.num_mutations_per_epoch = self.options.num_mutations_per_epoch
    self.num_candidates_to_mutate_from = self.options.num_candidates_to_mutate_from

  def _opt_method_optimise_initialise(self):
    """ No initialisation for GA. """
    self.generate_new_eval_points()

  def _add_data_to_model(self, qinfos):
    """ Update the optimisation model. """
    # GA keeps no surrogate model, so there is nothing to update.
    pass

  def _child_build_new_model(self):
    """ Build new optimisation model. """
    # GA keeps no surrogate model.
    pass

  def _get_candidates_to_mutate_from(self, num_mutations, num_candidates_to_mutate_from):
    """ Returns the candidates to mutate from.

        Returns a pair (candidates, num_mutations_arg) suitable for passing to
        self.mutation_op: either all previous points with a per-point mutation
        count, or a fitness-sampled subset with a total count.
    """
    all_prev_eval_points = self.prev_eval_points + self.history.query_points
    all_prev_eval_vals = self.prev_eval_vals + self.history.query_vals
    if num_candidates_to_mutate_from <= 0:
      # Sample (with replacement) the indices of the points to mutate,
      # weighting points by an exponential of their fitness values.
      idxs_to_mutate_from = sample_according_to_exp_probs(all_prev_eval_vals,
        num_mutations, replace=True,
        scaling_const=self.options.fitness_sampler_scaling_const,
        sample_uniformly_if_fail=True)
      # Per-point mutation counts: how often each previous point was drawn.
      num_mutations_arg_to_mutation_op = [(idxs_to_mutate_from == i).sum() for i
                                          in range(len(all_prev_eval_points))]
      candidates_to_mutate_from = all_prev_eval_points
    else:
      # Sample (without replacement) a fixed-size pool of candidates.
      cand_idxs_to_mutate_from = sample_according_to_exp_probs(all_prev_eval_vals,
        num_candidates_to_mutate_from, replace=False,
        scaling_const=self.options.fitness_sampler_scaling_const)
      candidates_to_mutate_from = [all_prev_eval_points[i] for i in
                                   cand_idxs_to_mutate_from]
      num_mutations_arg_to_mutation_op = num_mutations
    return candidates_to_mutate_from, num_mutations_arg_to_mutation_op

  def generate_new_eval_points(self, num_mutations=None,
                               num_candidates_to_mutate_from=None):
    """ Generates the mutations.

        Repeatedly calls the mutation operator until at least one generated
        point lies in the domain, growing the number of attempted mutations by
        20% on each failure; warns every 10 failed tries and gives up (raises
        ValueError) after 35. Successful points are appended to
        self.to_eval_points.
    """
    new_candidates = []
    num_tries = 0
    num_mutations_to_try = self.num_mutations_per_epoch if num_mutations is None \
                           else num_mutations
    while len(new_candidates) == 0:
      num_tries += 1
      generated_from_mutation_op = self.generate_new_eval_points_from_mutation_op(
        num_mutations_to_try, num_candidates_to_mutate_from)
      # Keep only the mutated points that are valid members of the domain.
      points_in_domain = [elem for elem in generated_from_mutation_op if
                          self.domain.is_a_member(elem)]
      new_candidates.extend(points_in_domain)
      if len(points_in_domain) == 0:
        if num_tries % 10 == 0:
          error_msg = ('Could not generate any points in domain from given mutation ' +
                       'operator despite %d tries with up to %d candidates.')%(num_tries,
                       num_mutations_to_try)
          self.reporter.writeln(error_msg)
        if num_tries == 35:
          error_msg = ('Could not generate any points in domain from given mutation ' +
                       'operator despite %d tries with up to %d candidates. Quitting now.')%(
                       num_tries, num_mutations_to_try)
          raise ValueError(error_msg)
        # Try a larger number of mutations the next time
        num_mutations_to_try = int(num_mutations_to_try * 1.2 + 1)
    # Truncate to the requested count (a None bound keeps everything).
    new_candidates = new_candidates[:num_mutations]
    self.to_eval_points.extend(new_candidates)

  def generate_new_eval_points_from_mutation_op(self, num_mutations=None,
                                                num_candidates_to_mutate_from=None):
    """ Generates the mutations. Arguments default to the values configured in
        the options. """
    num_mutations = self.num_mutations_per_epoch if num_mutations is None else \
                    num_mutations
    num_candidates_to_mutate_from = self.num_candidates_to_mutate_from if \
      num_candidates_to_mutate_from is None else num_candidates_to_mutate_from
    candidates_to_mutate_from, num_mutations_arg_to_mutation_op = \
      self._get_candidates_to_mutate_from(num_mutations, num_candidates_to_mutate_from)
    new_eval_points = self.mutation_op(candidates_to_mutate_from,
                                       num_mutations_arg_to_mutation_op)
    return new_eval_points

  def _determine_next_query(self):
    """ Determine the next point for evaluation. """
    # Refill the queue lazily when it runs dry.
    if len(self.to_eval_points) == 0:
      self.generate_new_eval_points()
    ret = self.to_eval_points.pop(0)
    return Namespace(point=ret)

  def _determine_next_batch_of_queries(self, batch_size):
    """ Determines the next batch of eval points. Not implementing for now. """
    qinfos = [self._determine_next_query() for _ in range(batch_size)]
    return qinfos

  def _get_method_str(self):
    """ Returns a string describing the method. """
    return 'ga'

  def is_an_mf_method(self):
    """ Returns False. """
    return False
# A GA optimiser with random fitness values ----------------------------------------------
class GARandOptimiser(GAOptimiser):
  """ Same as the GA optimiser, but the candidates to mutate from are picked randomly.
      This is used in the RAND baseline.
  """
  # pylint: disable=abstract-method

  def _child_set_up(self):
    """ Additional set up. """
    super(GARandOptimiser, self)._child_set_up()
    self.method_name = 'randGA'

  def _get_candidates_to_mutate_from(self, num_mutations, num_candidates_to_mutate_from):
    """ Returns a random list of points from the evaluations to mutate from. """
    all_prev_eval_points = self.prev_eval_points + self.history.query_points
    # NOTE(review): the num_candidates_to_mutate_from *argument* is ignored here in
    # favour of the attribute self.num_candidates_to_mutate_from -- confirm intended.
    candidates_to_mutate_from = choice(all_prev_eval_points,
                                       self.num_candidates_to_mutate_from,
                                       replace=False)
    return candidates_to_mutate_from, num_mutations
# APIs
# ======================================================================================
def ga_optimise_from_args(func_caller, worker_manager, max_capital, mode, mutation_op,
                          is_rand=False, crossover_op=None, options=None,
                          reporter='default'):
  """ GA optimisation from args: build a GA (or random-GA when ``is_rand``)
      optimiser with default options if none are given, and run it up to
      ``max_capital``. """
  if options is None:
    reporter = get_reporter(reporter)
    options = load_options(ga_opt_args, reporter=reporter)
  options.mode = mode
  if is_rand:
    optimiser_class = GARandOptimiser
  else:
    optimiser_class = GAOptimiser
  optimiser = optimiser_class(func_caller, worker_manager, mutation_op, crossover_op,
                              options, reporter)
  return optimiser.optimise(max_capital)
| [
"numpy.random.choice",
"argparse.Namespace"
] | [((6471, 6491), 'argparse.Namespace', 'Namespace', ([], {'point': 'ret'}), '(point=ret)\n', (6480, 6491), False, 'from argparse import Namespace\n'), ((7612, 7691), 'numpy.random.choice', 'choice', (['all_prev_eval_points', 'self.num_candidates_to_mutate_from'], {'replace': '(False)'}), '(all_prev_eval_points, self.num_candidates_to_mutate_from, replace=False)\n', (7618, 7691), False, 'from numpy.random import choice\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import re
import numpy as np
import proto.framework_pb2 as framework_pb2
from . import core
import unique_name
# Public API of this module.
__all__ = [
    'Block',
    'Variable',
    'Program',
    'Operator',
    'default_startup_program',
    'default_main_program',
    'program_guard',
    'switch_startup_program',
    'switch_main_program',
    'get_var',
]

# Well-known variable-name markers, mirrored from the C++ core.
EMPTY_VAR_NAME = core.kEmptyVarName()
TEMP_VAR_NAME = core.kTempVarName()
GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
def grad_var_name(var_name):
    """Return the gradient-variable name paired with ``var_name``
    (the name with the core gradient suffix appended)."""
    gradient_name = var_name + GRAD_VAR_SUFFIX
    return gradient_name
def convert_np_dtype_to_dtype_(np_dtype):
    """
    Convert the data type in numpy to the data type in Paddle

    Args:
        np_dtype(np.dtype): the data type in numpy

    Returns(core.VarDesc.VarType): the data type in Paddle

    Raises:
        ValueError: when the numpy dtype has no Paddle counterpart.
    """
    dtype = np.dtype(np_dtype)
    # Guard-clause chain; each candidate is only evaluated if the earlier ones
    # did not match (order preserved from the original implementation).
    if dtype == np.float32:
        return core.VarDesc.VarType.FP32
    if dtype == np.float64:
        return core.VarDesc.VarType.FP64
    if dtype == np.float16:
        return core.VarDesc.VarType.FP16
    if dtype == np.int32:
        return core.VarDesc.VarType.INT32
    if dtype == np.int16:
        return core.VarDesc.VarType.INT16
    if dtype == np.int64:
        return core.VarDesc.VarType.INT64
    if dtype == np.bool:
        return core.VarDesc.VarType.BOOL
    if dtype == np.uint8:
        return core.VarDesc.VarType.UINT8
    raise ValueError("Not supported numpy dtype " + str(dtype))
def dtype_is_floating(dtype):
    """
    Check the data type is floating or not.
    Args:
        dtype(np.dtype|core.VarDesc.VarType): data type.
            Could be numpy format or Paddle format

    Returns(bool): True if data type is a float value
    """
    # Normalize numpy dtypes to Paddle's enum first.
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)
    floating_types = (core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,
                      core.VarDesc.VarType.FP64)
    return dtype in floating_types
def _debug_string_(proto, throw_on_error=True):
"""
Get the debug string of a protobuf message. The message could be not
initialized.
Args:
proto(google.protobuf.message.Message): The protobuf message
throw_on_error(bool): True if raise an error when the protobuf message
is not initialized.
Returns(str): The debug string of the protobuf message
"""
error_fields = list()
if not proto.IsInitialized(error_fields) and throw_on_error:
raise ValueError("{0} are not initialized.\nThe message is {1}:\n".
format(error_fields, proto))
return proto.__str__()
class Variable(object):
    """
    Python variable. Every input and output of an operator is a variable. Every
    variable belongs to a block. The variable has a name and two variables in
    different blocks could have the same name.

    There are many kinds of variables. Please reference the framework.proto for
    details.

    Notes: The constructor of Variable should not be invoked directly. Please
    use `Block.create_var` to create a variable.

    >>> cur_program = Program()
    >>> cur_block = cur_program.current_block()
    >>> new_variable = cur_block.create_var(
    >>>                    name="X", shape=[-1, 23, 48], dtype='float32')

    Args:
        block(Block): The associated block. It will be passed by
            `Block.create_var` automatically.
        type(core.VarDesc.VarType): Variable type. Please reference the
            framework.proto for details.
        shape(tuple|list|None): The shape of variable. -1 means the batch size.
            Some kinds of variable do not contain shape, just set it to None.
        dtype(np.dtype|core.VarDesc.VarType|str): The data type of variable.
        lod_level(int): The level of lod tensor. 0 means it is not a time
            series data.
        capacity(int): The capacity of Channel variable. Ignored
            for other types.
        persistable(bool): True if the variable should be saved as check point.
            Defaults to False.
        stop_gradient(bool): True if the variable will stop to calculate
            gradients when backward. Defaults to False.
    """

    def __init__(self,
                 block,
                 type=core.VarDesc.VarType.LOD_TENSOR,
                 name=None,
                 shape=None,
                 dtype=None,
                 lod_level=None,
                 capacity=None,
                 persistable=None,
                 error_clip=None,
                 stop_gradient=False,
                 is_data=False,
                 **kwargs):
        self.block = block
        self.error_clip = error_clip

        if name is None:
            name = unique_name.generate('_generated_var')
        # Reuse the existing VarDesc when a variable with this name already
        # exists in the block; otherwise create a fresh one.
        is_new_var = False
        self.desc = self.block.desc.find_var(name)

        if self.desc is None:
            self.desc = self.block.desc.var(name)
            is_new_var = True

        # For an existing variable every explicitly passed property must agree
        # with what was recorded before; otherwise raise.
        if is_new_var:
            self.desc.set_type(type)
        elif self.desc.type() != type:
            raise ValueError("Variable {0} has been created before. The "
                             "previous type is {1}; the new type is {2}. They"
                             " are not matched".format(self.name,
                                                       self.desc.type(), type))

        if shape is not None:
            if is_new_var:
                self.desc.set_shape(shape)
            else:
                old_shape = self.shape
                shape = tuple(shape)
                if shape != old_shape:
                    raise ValueError(
                        "Variable {0} has been created before. the previous "
                        "shape is {1}; the new shape is {2}. They are not "
                        "matched.".format(self.name, old_shape, shape))
        if dtype is not None:
            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)
            if is_new_var:
                self.desc.set_dtype(dtype)
            else:
                old_dtype = self.dtype
                if dtype != old_dtype:
                    raise ValueError("Variable {0} has been created before. "
                                     "The previous data type is {1}; the new "
                                     "data type is {2}. They are not "
                                     "matched.".format(self.name, old_dtype,
                                                       dtype))

        if lod_level is not None:
            if is_new_var:
                self.desc.set_lod_level(lod_level)
            else:
                if lod_level != self.lod_level:
                    raise ValueError("Variable {0} has been created before. "
                                     "The previous lod_level is {1}; the new "
                                     "lod_level is {2}. They are not "
                                     "matched".format(self.name, self.lod_level,
                                                      lod_level))
        if persistable is not None:
            if is_new_var:
                self.desc.set_persistable(persistable)
            else:
                if persistable != self.persistable:
                    raise ValueError(
                        "Variable {0} has been created before."
                        "The previous persistable is {1}; the new "
                        "persistable is {2}. They are not matched".format(
                            self.name, self.persistable, persistable))

        if capacity is not None:
            if is_new_var:
                self.desc.set_capacity(capacity)
            else:
                # TODO(abhinavarora) : Compare with set capacity once,
                # get_capacity is implemented
                pass

        # Register this variable in the block and initialize runtime bookkeeping.
        self.block.vars[name] = self
        self.op = None
        self.stop_gradient = stop_gradient
        self.is_data = is_data

    def __str__(self):
        return self.to_string(True)

    def to_string(self, throw_on_error, with_details=False):
        """
        Get debug string.

        Args:
            throw_on_error(bool): True if raise an exception when self is not
                intialized.
            with_details(bool): more details about variables and parameters
                (e.g. trainable, optimize_attr, ...) will be printed when with_details is True

        Returns(str): The debug string.
        """
        assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                               bool)
        protostr = self.desc.serialize_to_string()
        proto = framework_pb2.VarDesc.FromString(str(protostr))
        res_str = _debug_string_(proto, throw_on_error)
        if with_details:
            # Append Python-side-only attributes that the proto does not carry.
            additional_attr = ("error_clip", "stop_gradient")
            for attr_name in additional_attr:
                res_str += "%s: %s\n" % (attr_name,
                                         str(getattr(self, attr_name)))
        return res_str

    __repr__ = __str__

    def set_desc(self, input):
        # Replace the underlying C++ VarDesc handle.
        self.desc = input

    @property
    def persistable(self):
        return self.desc.persistable()

    @persistable.setter
    def persistable(self, p):
        self.desc.set_persistable(p)

    @property
    def name(self):
        return self.desc.name()

    @name.setter
    def name(self, new_name):
        self.desc.set_name(new_name)

    @property
    def shape(self):
        # convert to tuple, make it as same as numpy API.
        return tuple(self.desc.shape())

    @property
    def dtype(self):
        return self.desc.dtype()

    @property
    def lod_level(self):
        return self.desc.lod_level()

    @property
    def type(self):
        return self.desc.type()

    def set_error_clip(self, error_clip):
        # Attach a gradient-clipping attribute used during backward.
        self.error_clip = error_clip
def get_all_op_protos():
    """
    Get all registered op proto from PaddlePaddle C++ end.

    Returns(list): list of OpProto
    """
    # Each entry from the C++ side is a serialized OpProto string.
    protostrs = core.get_all_op_protos()
    return [framework_pb2.OpProto.FromString(str(pbstr)) for pbstr in protostrs]
class OpProtoHolder(object):
    """
    A global variable to hold all OpProtos from C++ as a map
    """

    @classmethod
    def instance(cls):
        """Return the lazily created process-wide singleton."""
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        assert not hasattr(
            self.__class__,
            '_instance'), 'Please use `instance()` to get OpProtoHolder object!'
        # Index every registered proto by its operator type string.
        self.op_proto_map = {proto.type: proto for proto in get_all_op_protos()}

    def get_op_proto(self, type):
        """
        Get OpProto by a type string.
        Args:
            type(str): The type that operator registered in C++ side.

        Returns(framework_pb2.OpProto): The OpProto
        """
        if type in self.op_proto_map:
            return self.op_proto_map[type]
        raise ValueError("Operator \"%s\" has not been registered." % type)
class Operator(object):
    """
    Python Operator class. The operator represents the build in instructions in a
    Block. Users can use the build in instructions to describe their neural
    network.
    """

    def __init__(self,
                 block,
                 desc,
                 type=None,
                 inputs=None,
                 outputs=None,
                 attrs=None):
        """
        Constructor.

        Notes: The constructor of operator should not be invoked directly. Use
        Block.append_op or Block.prepend_op instead.

        >>> cur_program = Program()
        >>> cur_block = cur_program.current_block()
        >>> # var1 += var2 + var3
        >>> cur_block.append_op(type="sum",
        >>>                     inputs={"X": [var1, var2, var3]},
        >>>                     outputs={"Out": [var1]})

        Args:
            block(Block): The block has the current operator.
            desc(core.OpDesc): The protobuf description.
            type(str): The type of operator.
            inputs(dict): The input dictionary. Key is the input parameter name.
                Value is a list of variables.
            outputs(dict): The output dictionary which has the same format with
                inputs.
            attrs(dict): The attributes dictionary. Key is attribute name. Value
                is the attribute value. The attribute type should be as same as
                the type registered in C++
        """
        self.block = block
        self.desc = desc
        self.attrs = attrs
        # A desc whose type is already set was fully constructed elsewhere;
        # nothing more to fill in.
        if len(self.desc.type()) != 0:
            return
        if type is None:
            raise ValueError(
                "`type` to initilized an Operator can not be None.")
        self.desc.set_type(type)
        proto = OpProtoHolder.instance().get_op_proto(type)

        def find_name(var_list, name):
            # Whether `name` maps to a non-None entry in `var_list`.
            for var_name in var_list:
                if var_list[var_name] is not None and var_name == name:
                    return True
            return False

        # Wire inputs: each input slot declared by the proto must be present
        # (unless dispensable) and respect its duplicability.
        if inputs is not None:
            for in_proto in proto.inputs:
                found = find_name(inputs, in_proto.name)
                assert found or in_proto.dispensable, "Input {} not found".format(
                    in_proto.name)

                if found:
                    in_args = inputs[in_proto.name]
                    if not isinstance(in_args, list):
                        in_args = [in_args]
                    if not in_proto.duplicable and len(in_args) > 1:
                        raise ValueError(
                            "Input %s expects only one input, but %d are given."
                            % (in_proto.name, len(in_args)))
                    in_arg_names = []
                    for arg in in_args:
                        # Inputs may be given as names or as Variable objects.
                        if isinstance(arg, basestring):
                            in_arg_names.append(arg)
                        else:
                            in_arg_names.append(arg.name)
                    self.desc.set_input(in_proto.name, in_arg_names)
                else:
                    self.desc.set_input(in_proto.name, [])

        # Wire outputs: the set of given output slots must exactly match the
        # slots declared by the proto.
        if outputs is not None:
            given = set()
            need = set()
            for n in outputs:
                given.add(n)
            for m in proto.outputs:
                need.add(m.name)
            if not given == need:
                raise ValueError(("Incorrect setting for output(s) of "
                                  "operator \"%s\". Need: [%s] Given: [%s]") %
                                 (type, ", ".join(str(e) for e in need),
                                  ", ".join(str(e) for e in given)))

            for out_proto in proto.outputs:
                out_args = outputs[out_proto.name]
                if not isinstance(out_args, list):
                    out_args = [out_args]
                if not out_proto.duplicable and len(out_args) > 1:
                    raise ValueError(
                        "Output %s expects only one output, but %d are given." %
                        (out_proto.name, len(out_args)))
                out_arg_names = []
                for arg in out_args:
                    out_arg_names.append(arg.name)
                    # Record this operator as the producer of the output var.
                    arg.op = self
                self.desc.set_output(out_proto.name, out_arg_names)

        # Copy attributes into the desc, dispatching on the attribute's type.
        if attrs is not None:
            if not isinstance(attrs, dict):
                raise TypeError("'attrs' should be a dict.")
            for attr in proto.attrs:
                attr_name = attr.name
                if (attr_name not in attrs) or (attrs[attr_name] is None):
                    continue
                if isinstance(attrs[attr_name], Block):
                    self.desc.set_block_attr(attr_name, attrs[attr_name].desc)
                elif isinstance(attrs[attr_name], core.BlockDesc) or \
                        isinstance(attrs[attr_name], core.ProgramDesc):
                    self.desc.set_serialized_attr(
                        attr_name, attrs[attr_name].serialize_to_string())
                else:
                    self.desc.set_attr(attr_name, attrs[attr_name])

        self.desc.check_attrs()
        # Operators without a compute kernel skip var-type/shape inference.
        no_kernel_op_set = {
            'feed', 'fetch', 'save', 'load', 'recurrent', 'go',
            'rnn_memory_helper_grad', 'conditional_block', 'while', 'send',
            'recv', 'listen_and_serv', 'parallel_do', 'save_combine',
            'load_combine', 'ncclInit', 'channel_create', 'channel_close',
            'channel_send', 'channel_recv', 'select', 'gen_nccl_id'
        }
        if type not in no_kernel_op_set:
            self.desc.infer_var_type(self.block.desc)
            self.desc.infer_shape(self.block.desc)

    def to_string(self, throw_on_error):
        """
        To debug string.
        Args:
            throw_on_error(bool): raise exception when self is not initialized
                when throw_on_error is True

        Returns(str): The debug string.
        """
        protostr = self.desc.serialize_to_string()
        proto = framework_pb2.OpDesc.FromString(str(protostr))
        return _debug_string_(proto, throw_on_error)

    def __str__(self):
        return self.to_string(True)

    __repr__ = __str__

    @property
    def type(self):
        return self.desc.type()

    def input(self, name):
        """
        Get input arguments by the input parameter name
        Args:
            name(str): The input parameter name

        Returns(list): return the list of argument names associated with the
            specific parameter name.
        """
        return self.desc.input(name)

    def rename_input(self, old_name, new_name):
        # Rename an input argument in the underlying desc.
        self.desc.rename_input(old_name, new_name)

    def rename_output(self, old_name, new_name):
        # Rename an output argument in the underlying desc.
        self.desc.rename_output(old_name, new_name)

    @property
    def input_names(self):
        """
        Get all input parameter names
        Returns(list): return a list of input parameter names
        """
        return self.desc.input_names()

    @property
    def input_arg_names(self):
        return self.desc.input_arg_names()

    @property
    def output_arg_names(self):
        return self.desc.output_arg_names()

    def output(self, name):
        """
        Get output arguments by the output parameter name
        Args:
            name(str): The output parameter name

        Returns(list): return the list of argument names associated with the
            specific parameter name.
        """
        return self.desc.output(name)

    @property
    def output_names(self):
        """
        Get all output parameter names
        Returns(list): return a list of output parameter names
        """
        return self.desc.output_names()

    @property
    def idx(self):
        """
        Return the array index of current operator.
        Returns(int): The array index in block.ops array
        Raises:
            ValueError: when the operator is not found.
        """
        for i, op in enumerate(self.block.ops):
            if op == self:
                return i
        raise ValueError(
            "Can't find op itself in it's block. It could be a bug of Paddle.")

    def has_attr(self, name):
        """
        operator has the attribute with name or not.
        Args:
            name(str): the attribute name

        Returns(bool): True if has this attribute.
        """
        return self.desc.has_attr(name)

    def attr_type(self, name):
        """
        Get the type of attribute by attribute name
        Args:
            name(str): the attribute name

        Returns(core.AttrType): the attribute type
        """
        return self.desc.attr_type(name)

    @property
    def attr_names(self):
        """
        Get all attribute names
        Returns(list): The list of attribute name
        """
        return self.desc.attr_names()

    def attr(self, name):
        """
        Get attribute by name
        Args:
            name(str): the attribute name

        Returns(bool|int|str|float|list): The attribute value. The return value
            can be any valid attribute type.
        """
        return self.desc.attr(name)

    def block_attr(self, name):
        """
        Get the block attribute by name
        Args:
            name(str): the attribute name

        Returns(int): the block index
        """
        return self.desc.block_attr(name)

    def all_attrs(self):
        """
        Get the attribute dict
        Returns(dict): The Operator's attribute dict
        """
        attr_names = self.attr_names
        attr_map = {}
        for n in attr_names:
            # Block-valued attributes must be fetched via block_attr.
            if n == 'sub_block':
                attr_map[n] = self.block_attr(n)
            else:
                attr_map[n] = self.attr(n)
        return attr_map
class Block(object):
def __init__(self, program, idx):
self.desc = program.desc.block(idx)
self.vars = collections.OrderedDict() # var_name --> var
self.ops = list() # operator list
self.program = program
self.removed_vars = collections.OrderedDict()
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
re_add_indent = re.compile(r"\n(.)")
res_str = "blocks {\n idx: %d\n parent_idx: %d" % (
self.idx, self.parent_idx)
for var in self.vars.itervalues():
res_str += "\n vars {\n %s }" % re_add_indent.sub(
r"\n \1", var.to_string(throw_on_error, with_details))
for op in self.ops:
res_str += "\n ops {\n %s }" % re_add_indent.sub(
r"\n \1", op.to_string(throw_on_error))
res_str += "\n}"
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.BlockDesc.FromString(str(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
__repr__ = __str__
@property
def parent_idx(self):
return self.desc.parent
@property
def forward_block_idx(self):
return self.desc.get_forward_block_idx()
def set_forward_block_idx(self, idx):
self.desc.set_forward_block_idx(idx)
@property
def idx(self):
return self.desc.id
def var(self, name):
if not isinstance(name, basestring):
raise TypeError()
v = self.vars.get(name, None)
if v is None:
raise ValueError("var %s not in this block" % name)
return v
def var_recursive(self, name):
frontier = list()
visited = set()
frontier.append(self)
prog = self.program
while len(frontier) != 0: # BFS
cur = frontier[0]
frontier = frontier[1:]
if id(cur) in visited:
continue
if cur.has_var(name):
return cur.var(name)
if cur.parent_idx != -1:
frontier.append(prog.block(cur.parent_idx))
if cur.forward_block_idx != -1:
frontier.append(prog.block(cur.forward_block_idx))
visited.add(id(cur))
raise ValueError("Var {0} is not found recursively".format(name))
def all_parameters(self):
return list(self.iter_parameters())
def iter_parameters(self):
return (item[1] for item in self.vars.iteritems()
if isinstance(item[1], Parameter))
def create_var(self, *args, **kwargs):
var = Variable(block=self, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](var, self)
return var
def has_var(self, name):
return name in self.vars
def rename_var(self, name, new_name):
"""
Rename variable in vars and ops' inputs and outputs
"""
if not self.has_var(name):
raise ValueError("var %s is not in current" % name)
v = self.var(name)
if type(v) == Parameter:
var_type = "Parameter"
stop_gradient = v.stop_gradient
trainable = v.trainable
optimize_attr = v.optimize_attr
regularizer = v.regularizer
gradient_clip_attr = v.gradient_clip_attr
error_clip = v.error_clip
elif type(v) == Variable:
var_type = "Variable"
error_clip = v.error_clip
stop_gradient = v.stop_gradient
else:
raise ValueError("unsupported var type: %s", type(v))
orig_var_type = v.type
self.desc.rename_var(name, new_name)
# NOTE: v is destroyed by C++ after calling rename_var.
d = self.desc.find_var(new_name)
if var_type == "Parameter":
var = Parameter(
self,
d.shape(),
d.dtype(),
type=orig_var_type,
name=new_name,
stop_gradient=stop_gradient,
trainable=trainable,
optimize_attr=optimize_attr,
regularizer=regularizer,
gradient_clip_attr=gradient_clip_attr,
error_clip=error_clip)
elif var_type == "Variable":
var = Variable(
self,
type=orig_var_type,
name=new_name,
error_clip=error_clip,
stop_gradient=stop_gradient)
# rename the python side, sync_with_cpp will only add
# new vars/ops to python side.
self.vars[new_name] = var
del self.vars[name]
self.sync_with_cpp()
def remove_var(self, name):
self.sync_with_cpp()
self.desc.remove_var(name)
del self.vars[name]
def create_parameter(self, *args, **kwargs):
global_block = self.program.global_block()
param = Parameter(global_block, *args, **kwargs)
if 'initializer' in kwargs:
kwargs['initializer'](param, self)
return param
def append_op(self, *args, **kwargs):
op_desc = self.desc.append_op()
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.append(op)
return op
def insert_op(self, index, *args, **kwargs):
self.sync_with_cpp()
op_desc = self.desc.insert_op(index)
op = Operator(block=self, desc=op_desc, *args, **kwargs)
self.ops.insert(index, op)
return op
def remove_op(self, index):
self.sync_with_cpp()
self.desc.remove_op(index, index + 1)
del self.ops[index]
def slice_ops(self, start, end):
return self.ops[start:end]
def prepend_op(self, *args, **kwargs):
op_desc = self.desc.prepend_op()
op = Operator(self, op_desc, *args, **kwargs)
self.ops.insert(0, op)
return op
def sync_with_cpp(self):
"""
Sync from the desc on the c++ end.
This method is used to synchronize the c++ desc instance generated by backward.
"""
# sync variables from cpp
for var in self.desc.all_vars():
if not self.has_var(var.name()):
self.create_var(name=var.name(), desc=var, type=var.type())
# sync variables removed from c++ end
for var in self.vars.keys():
if not self.desc.find_var(var):
self.vars.pop(var)
# sync operators from cpp
ops_in_cpp = []
for op_idx in range(0, self.desc.op_size()):
ops_in_cpp.append(self.desc.op(op_idx))
if len(self.ops) != 0:
first_op_in_python = self.ops[0].desc
last_op_in_python = self.ops[len(self.ops) - 1].desc
start_index = None
end_index = None
for index in range(len(ops_in_cpp)):
if first_op_in_python == ops_in_cpp[index]:
start_index = index
if last_op_in_python == ops_in_cpp[index]:
end_index = index
assert start_index is not None
assert end_index is not None
assert start_index <= end_index
else:
start_index = 0
end_index = -1
# sync ops append to the head of cpp_ops
for index in range((start_index - 1 - 1), -1, -1):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.insert(0, op)
# sync ops append to the end of cpp_ops
for index in range((end_index + 1), len(ops_in_cpp)):
op_desc = ops_in_cpp[index]
op = Operator(self, op_desc)
self.ops.append(op)
# sync ops removed from c++ end
if end_index != -1 and end_index < len(self.ops):
ops_in_cpp_index = 0
ops_in_python_index = 0
while ops_in_python_index < len(
self.ops) and ops_in_cpp_index < len(ops_in_cpp):
if self.ops[ops_in_python_index].desc != ops_in_cpp[
ops_in_cpp_index]:
del self.ops[ops_in_python_index]
else:
ops_in_cpp_index += 1
ops_in_python_index += 1
assert len(self.ops) == len(ops_in_cpp)
for index in range(len(self.ops)):
assert self.ops[index].desc == ops_in_cpp[index]
def copy_param_info_from(self, other):
"""
Copy the information of parameters from the other block
Args:
other(Block): the other block
Returns:
None
"""
if not isinstance(other, Block):
raise TypeError("copy_param_info_from should be invoked with Block")
for p in other.iter_parameters():
assert isinstance(p, Parameter)
v = self.vars.get(p.name, None)
if v is None:
raise ValueError("copy_param_info_from should be invoked with "
"same topology")
assert isinstance(v, Variable)
new_p = Parameter(
block=self,
shape=v.shape,
dtype=v.dtype,
type=v.type,
lod_level=v.lod_level,
stop_gradient=p.stop_gradient,
trainable=p.trainable,
optimize_attr=p.optimize_attr,
regularizer=p.regularizer,
gradient_clip_attr=p.gradient_clip_attr,
error_clip=p.error_clip,
name=v.name)
self.vars[new_p.name] = new_p
def clone_variable(self, var):
"""
Clone a variable into current block.
Args:
var: the variable to be cloned.
Returns:
The new variable cloned from 'var' in current block.
"""
assert isinstance(var, Variable)
ret_var = None
# make STEP_SCOPES var can be safely cloned.
if var.type == core.VarDesc.VarType.STEP_SCOPES:
ret_var = self.create_var(
name=var.name, persistable=var.persistable, type=var.type)
elif var.type == core.VarDesc.VarType.SELECTED_ROWS:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
persistable=True,
is_data=var.is_data)
else:
ret_var = self.create_var(
name=var.name,
shape=var.shape,
dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True,
is_data=var.is_data)
return ret_var
class Program(object):
def __init__(self):
self.desc = core.ProgramDesc()
self.blocks = [Block(self, 0)]
self.current_block_idx = 0
self._seed = 0
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = ""
for block in self.blocks:
res_str += block.to_string(throw_on_error, with_details)
else:
protostr = self.desc.serialize_to_string()
proto = framework_pb2.ProgramDesc.FromString(str(protostr))
res_str = _debug_string_(proto, throw_on_error)
return res_str
def get_desc(self):
return self.desc
def clone(self, for_test=False):
"""Clone the Program object
Set for_test to False when we want to clone the program for training.
Set for_test to True when we want to clone the program for testing.
Args:
for_test(bool): Some operators, such as batch_norm and drop_out ops,
behave differently in training and testing. If for_test is True,
the is_test attributes in these operators will be set to True for
testing purposes, otherwise, they remain unchanged.
Returns(Program):
The cloned Program object.
"""
if for_test:
p = self.inference_optimize()
else:
p = Program()
p.desc = core.ProgramDesc(self.desc)
p.blocks = [Block(p, i) for i in xrange(self.desc.num_blocks())]
p.sync_with_cpp()
p.copy_param_info_from(self)
p.copy_data_info_from(self)
return p
def prune(self, targets):
if not isinstance(targets, list):
targets = [targets]
targets_idx = []
for t in targets:
if not isinstance(t, Operator):
if isinstance(t, Variable):
# After transpiler processing, the op that output this
# variable maybe has been changed, so t.op is not reliable
# and we need to find the current op that generate this
# variable here.
t.op = None
global_block = self.global_block()
for idx, op in enumerate(global_block.ops):
if t.name in op.output_arg_names:
t.op = op
break
t = t.op
if t is None:
raise ValueError(
"The target variable must have an "
"associated operator that generates it.")
else:
raise ValueError("All targets of prune() can only be "
"Variable or Operator.")
targets_idx.append([t.block.idx, t.idx])
res = Program()
res.desc = core.prune(self.desc, targets_idx)
res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())]
res.sync_with_cpp()
return res
def inference_optimize(self):
# this is an alternative implement before
# core.inference_optimize being fixed.
res = Program()
res.desc = core.ProgramDesc(self.desc)
for i in xrange(res.desc.num_blocks()):
block = res.desc.block(i)
for j in xrange(block.op_size()):
op = block.op(j)
if op.has_attr('is_test'):
op.set_attr('is_test', True)
res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())]
res.sync_with_cpp()
return res
@staticmethod
def parse_from_string(binary_str):
p = Program()
p.desc = core.ProgramDesc(binary_str)
p.blocks = [Block(p, i) for i in xrange(p.desc.num_blocks())]
p.sync_with_cpp()
return p
@property
def random_seed(self):
return self._seed
@property
def num_blocks(self):
return self.desc.num_blocks()
@random_seed.setter
def random_seed(self, seed):
if not isinstance(seed, int):
raise ValueError("Seed must be a integer.")
self._seed = seed
def __repr__(self):
return str(self)
def global_block(self):
return self.blocks[0]
def block(self, index):
return self.blocks[index]
def current_block(self):
return self.blocks[self.current_block_idx]
def create_block(self, parent_idx=None):
new_block_idx = len(self.blocks)
parent = self.current_block() if parent_idx is None else self.block(
parent_idx)
self.desc.append_block(parent.desc)
self.current_block_idx = new_block_idx
self.blocks.append(Block(self, self.current_block_idx))
return self.current_block()
def rollback(self):
self.current_block_idx = self.current_block().parent_idx
def sync_with_cpp(self):
for block_idx in range(len(self.blocks), self.desc.num_blocks()):
self.blocks.append(Block(self, block_idx))
for block in self.blocks:
block.sync_with_cpp()
def copy_param_info_from(self, other):
"""
Copy the information of parameters from other program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError("copy_param_info_from should be invoked with "
"Program")
if len(self.blocks) != len(other.blocks):
raise ValueError("copy_param_info_from should be invoked with two "
"program, with represent the same topology")
self.global_block().copy_param_info_from(other.global_block())
def copy_data_info_from(self, other):
"""
Copy the information of data variables from other program.
Args:
other(Program): Other program
Returns:
None
"""
if not isinstance(other, Program):
raise TypeError("copy_param_info_from should be invoked with "
"Program")
if len(self.blocks) != len(other.blocks):
raise ValueError("copy_param_info_from should be invoked with two "
"program, with represent the same topology")
for var in other.global_block().vars.itervalues():
if var.is_data:
self.global_block().var(var.name).is_data = True
def list_vars(self):
for each_block in self.blocks:
for each_var in each_block.vars.itervalues():
yield each_var
class Parameter(Variable):
def __init__(self, block, shape, dtype, **kwargs):
if shape is None or dtype is None:
raise ValueError("Parameter must set shape and dtype")
if len(shape) == 0:
raise ValueError("Parameter shape cannot be empty")
for each in shape:
if each < 0:
raise ValueError("Parameter shape should not be related with "
"batch-size")
Variable.__init__(
self, block, persistable=True, shape=shape, dtype=dtype, **kwargs)
self.trainable = kwargs.get('trainable', True)
self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})
self.regularizer = kwargs.get('regularizer', None)
self.gradient_clip_attr = kwargs.get('gradient_clip_attr', None)
self.do_model_average = kwargs.get('do_model_average', None)
def __str__(self):
return self.to_string(True)
def to_string(self, throw_on_error, with_details=False):
"""
To debug string.
Args:
throw_on_error(bool): raise exception when self is not initialized
when throw_on_error is True
with_details(bool): more details about variables and parameters
(e.g. trainable, optimize_attr, ...) will be printed when with_details is True
Returns(str): The debug string.
"""
assert isinstance(throw_on_error, bool) and isinstance(with_details,
bool)
if with_details:
res_str = Variable.to_string(self, throw_on_error, True)
additional_attr = ("trainable", "optimize_attr", "regularizer",
"gradient_clip_attr", "do_model_average")
for attr_name in additional_attr:
res_str += "%s: %s\n" % (attr_name,
str(getattr(self, attr_name)))
else:
res_str = Variable.to_string(self, throw_on_error, False)
return res_str
__repr__ = __str__
# program is a global instance.
_main_program_ = Program()
_startup_program_ = Program()
def default_startup_program():
"""
Get default startup program. In startup program, Paddle will initialize
parameters, initialize nccl handle, etc.
Returns:
Program: startup program
"""
return _startup_program_
def default_main_program():
"""
Get default main program. The main program is used for training or testing.
Returns:
Program: main program
"""
return _main_program_
def switch_main_program(program):
"""
Switch the main program to a new program.
Args:
program(Program): The new main program
Returns:
Program: The previous main program
"""
global _main_program_
prev_program = _main_program_
_main_program_ = program
return prev_program
def switch_startup_program(program):
"""
Switch the startup program to a new program
Args:
program(Program): The new startup program
Returns:
Program: The previous startup program
"""
global _startup_program_
prev_program = _startup_program_
_startup_program_ = program
return prev_program
@contextlib.contextmanager
def program_guard(main_program, startup_program=None):
"""
Switch program with `with` statement
Examples:
>>> with program_guard(Program()):
>>> data = fluid.layers.data(...)
>>> hidden = fluid.layers.fc(...)
Args:
main_program(Program): New main program inside `with` statement
startup_program(Program): New startup program inside `with` statement.
None means do not change startup program.
Returns:
None
"""
if not isinstance(main_program, Program):
raise TypeError("main_program should be Program")
main_program = switch_main_program(main_program)
if startup_program is not None:
if not isinstance(startup_program, Program):
raise TypeError("startup_program should be Program")
startup_program = switch_startup_program(startup_program)
yield
switch_main_program(main_program)
if startup_program is not None:
switch_startup_program(startup_program)
def get_var(name, program=None):
"""
Get a variable by name from the global block of a program
Args:
name(str): name of the variable
program(Program|None): program object.
If None, default_global_program() will be used.
Returns:
Variable
"""
if program is None:
program = default_main_program()
assert isinstance(name, str)
assert isinstance(program, Program)
return program.global_block().var(name)
| [
"unique_name.generate",
"numpy.dtype",
"collections.OrderedDict",
"re.compile"
] | [((1534, 1552), 'numpy.dtype', 'np.dtype', (['np_dtype'], {}), '(np_dtype)\n', (1542, 1552), True, 'import numpy as np\n'), ((21927, 21952), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21950, 21952), False, 'import collections\n'), ((22075, 22100), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (22098, 22100), False, 'import collections\n'), ((5440, 5478), 'unique_name.generate', 'unique_name.generate', (['"""_generated_var"""'], {}), "('_generated_var')\n", (5460, 5478), False, 'import unique_name\n'), ((22821, 22841), 're.compile', 're.compile', (['"""\\\\n(.)"""'], {}), "('\\\\n(.)')\n", (22831, 22841), False, 'import re\n')] |
"""Tests for module `petibmpy.grid`."""
import copy
import numpy
import pathlib
import unittest
import petibmpy
class GridIOTestCase(unittest.TestCase):
"""Tests related to the I/O grid."""
def setUp(self):
"""Setup."""
self.x = numpy.sort(numpy.random.rand(5))
self.y = numpy.sort(numpy.random.rand(10))
self.z = numpy.sort(numpy.random.rand(20))
def test_read_write_grid_hdf5(self):
"""Test I/O functions for HDF5 format."""
filepath = pathlib.Path('grid.h5')
for dim in [2, 3]:
coords = [self.x, self.y, self.z][:dim]
petibmpy.write_grid_hdf5(filepath, 'name', *coords)
coords2 = petibmpy.read_grid_hdf5(filepath, 'name')
self.assertEqual(len(coords2), len(coords))
for i in range(dim):
self.assertTrue(numpy.allclose(coords2[i], coords[i]))
filepath.unlink()
class SegmentTestCase(unittest.TestCase):
"""Tests the `Segment` class."""
def setUp(self):
"""Setup."""
pass
def test_create(self):
"""Create a segment given a configuration."""
# Check uniform segment.
start, end, width, r, num = 0.0, 1.0, 0.1, 1.0, 11
config = dict(start=start, end=end, width=width)
segment = petibmpy.Segment(config=config)
self.assertEqual(segment.start, start)
self.assertEqual(segment.end, end)
self.assertEqual(segment.r, r)
x = segment.asarray()
x_true = numpy.linspace(start, end, num=num)
self.assertTrue(numpy.allclose(x, x_true))
# Check stretched segment.
start, end, width, r = 0.0, 1.0, 0.1, 1.01
config = dict(start=start, end=end, width=width, stretchRatio=r)
segment = petibmpy.Segment(config=config)
self.assertEqual(segment.start, start)
self.assertEqual(segment.end, end)
self.assertEqual(segment.r, r)
x = segment.asarray()
self.assertTrue(abs(x[1] - x[0]) <= width)
self.assertEqual(abs(x[2] - x[1]) / abs(x[1] - x[0]), segment.r)
# Check reversed stretched segment.
start, end, width, r = 0.0, 1.0, 0.1, 1.01
config = dict(start=start, end=end, width=width, stretchRatio=r,
reverse=True)
segment = petibmpy.Segment(config=config)
self.assertEqual(segment.start, start)
self.assertEqual(segment.end, end)
self.assertAlmostEqual(segment.r, 1 / r, places=12)
x = segment.asarray()
self.assertTrue(abs(x[-1] - x[-2]) <= width)
self.assertAlmostEqual(abs(x[-2] - x[-3]) / abs(x[-1] - x[-2]), r,
places=12)
class GridLineTestCase(unittest.TestCase):
"""Tests the `GridLine` class."""
def setUp(self):
"""Setup."""
subconfig1 = dict(end=-2.0, width=0.1, stretchRatio=1.01, reverse=True)
subconfig2 = dict(end=2.0, width=0.1)
subconfig3 = dict(end=10.0, width=0.1, stretchRatio=1.02)
subDomains = [subconfig1, subconfig2, subconfig3]
self.config = dict(direction='x', start=-10.0, subDomains=subDomains)
def test_create_gridline(self):
"""Create a grid line given a configuration."""
gridline = petibmpy.GridLine(config=self.config)
x = gridline.asarray()
self.assertEqual(x[0], self.config['start'])
self.assertEqual(x[-1], self.config['subDomains'][-1]['end'])
class CartesianGridTestCase(unittest.TestCase):
"""Tests the `CartesianGrid` class."""
def setUp(self):
"""Setup."""
subconfig1 = dict(end=-2.0, width=0.1, stretchRatio=1.01, reverse=True)
subconfig2 = dict(end=2.0, width=0.1)
subconfig3 = dict(end=20.0, width=0.1, stretchRatio=1.02)
config_x = dict(direction='x', start=-10.0,
subDomains=[subconfig1, subconfig2, subconfig3])
config_y = dict(direction='y', start=-15.0,
subDomains=[subconfig1, subconfig2, subconfig3])
config_z = dict(direction='z', start=-2.0, subDomains=[subconfig2])
self.config = [config_x, config_y, config_z]
def test_grid_create(self):
"""Create a grid given a configuration."""
grid = petibmpy.CartesianGrid(config=self.config)
self.assertEqual(len(grid.gridlines), 3)
for i, x in enumerate(grid.get_gridlines()):
config = self.config[i]
self.assertAlmostEqual(x[0], config['start'], places=12)
self.assertAlmostEqual(x[-1], config['subDomains'][-1]['end'],
places=12)
def test_write_yaml(self):
"""Test the method to create the YAML configuration file."""
grid = petibmpy.CartesianGrid(config=self.config)
filepath = pathlib.Path('mesh.yaml')
grid.write_yaml(filepath, ndigits=12)
with open(filepath, 'r') as infile:
lines = infile.readlines()
datadir = pathlib.Path(__file__).absolute().parent / 'data'
filepath2 = datadir / 'mesh.yaml'
with open(filepath2, 'r') as infile:
lines2 = infile.readlines()
self.assertEqual(len(lines), len(lines2))
for line, line2 in zip(lines, lines2):
self.assertEqual(line, line2)
filepath.unlink()
def test_write_hdf5(self):
"""Test the method to write the mesh in HDF5 format."""
grid = petibmpy.CartesianGrid(config=self.config)
filepath = pathlib.Path('grid.h5')
grid.write_hdf5(filepath)
coords = petibmpy.read_grid_hdf5(filepath, 'vertex')
for x, x2 in zip(grid.get_gridlines(), coords):
self.assertTrue(numpy.allclose(x, x2))
filepath.unlink()
def test_num_cells(self):
"""Test the method to get the number of cells."""
start, end, width = -10.0, 10.0, 0.1
num = int(abs(end - start) / width)
subconfig = dict(start=start, subDomains=[dict(end=end, width=width)])
config = []
for dim, direction in zip([1, 2, 3], ['x', 'y', 'z']):
subconfig['direction'] = direction
config.append(copy.deepcopy(subconfig))
grid = petibmpy.CartesianGrid(config=config)
n_cells = grid.get_number_cells()
self.assertEqual(n_cells, num**dim)
| [
"petibmpy.Segment",
"petibmpy.read_grid_hdf5",
"numpy.allclose",
"numpy.random.rand",
"pathlib.Path",
"numpy.linspace",
"petibmpy.GridLine",
"petibmpy.write_grid_hdf5",
"petibmpy.CartesianGrid",
"copy.deepcopy"
] | [((504, 527), 'pathlib.Path', 'pathlib.Path', (['"""grid.h5"""'], {}), "('grid.h5')\n", (516, 527), False, 'import pathlib\n'), ((1307, 1338), 'petibmpy.Segment', 'petibmpy.Segment', ([], {'config': 'config'}), '(config=config)\n', (1323, 1338), False, 'import petibmpy\n'), ((1515, 1550), 'numpy.linspace', 'numpy.linspace', (['start', 'end'], {'num': 'num'}), '(start, end, num=num)\n', (1529, 1550), False, 'import numpy\n'), ((1779, 1810), 'petibmpy.Segment', 'petibmpy.Segment', ([], {'config': 'config'}), '(config=config)\n', (1795, 1810), False, 'import petibmpy\n'), ((2316, 2347), 'petibmpy.Segment', 'petibmpy.Segment', ([], {'config': 'config'}), '(config=config)\n', (2332, 2347), False, 'import petibmpy\n'), ((3264, 3301), 'petibmpy.GridLine', 'petibmpy.GridLine', ([], {'config': 'self.config'}), '(config=self.config)\n', (3281, 3301), False, 'import petibmpy\n'), ((4262, 4304), 'petibmpy.CartesianGrid', 'petibmpy.CartesianGrid', ([], {'config': 'self.config'}), '(config=self.config)\n', (4284, 4304), False, 'import petibmpy\n'), ((4749, 4791), 'petibmpy.CartesianGrid', 'petibmpy.CartesianGrid', ([], {'config': 'self.config'}), '(config=self.config)\n', (4771, 4791), False, 'import petibmpy\n'), ((4811, 4836), 'pathlib.Path', 'pathlib.Path', (['"""mesh.yaml"""'], {}), "('mesh.yaml')\n", (4823, 4836), False, 'import pathlib\n'), ((5437, 5479), 'petibmpy.CartesianGrid', 'petibmpy.CartesianGrid', ([], {'config': 'self.config'}), '(config=self.config)\n', (5459, 5479), False, 'import petibmpy\n'), ((5499, 5522), 'pathlib.Path', 'pathlib.Path', (['"""grid.h5"""'], {}), "('grid.h5')\n", (5511, 5522), False, 'import pathlib\n'), ((5574, 5617), 'petibmpy.read_grid_hdf5', 'petibmpy.read_grid_hdf5', (['filepath', '"""vertex"""'], {}), "(filepath, 'vertex')\n", (5597, 5617), False, 'import petibmpy\n'), ((269, 289), 'numpy.random.rand', 'numpy.random.rand', (['(5)'], {}), '(5)\n', (286, 289), False, 'import numpy\n'), ((319, 340), 'numpy.random.rand', 
'numpy.random.rand', (['(10)'], {}), '(10)\n', (336, 340), False, 'import numpy\n'), ((370, 391), 'numpy.random.rand', 'numpy.random.rand', (['(20)'], {}), '(20)\n', (387, 391), False, 'import numpy\n'), ((619, 670), 'petibmpy.write_grid_hdf5', 'petibmpy.write_grid_hdf5', (['filepath', '"""name"""', '*coords'], {}), "(filepath, 'name', *coords)\n", (643, 670), False, 'import petibmpy\n'), ((693, 734), 'petibmpy.read_grid_hdf5', 'petibmpy.read_grid_hdf5', (['filepath', '"""name"""'], {}), "(filepath, 'name')\n", (716, 734), False, 'import petibmpy\n'), ((1575, 1600), 'numpy.allclose', 'numpy.allclose', (['x', 'x_true'], {}), '(x, x_true)\n', (1589, 1600), False, 'import numpy\n'), ((6209, 6246), 'petibmpy.CartesianGrid', 'petibmpy.CartesianGrid', ([], {'config': 'config'}), '(config=config)\n', (6231, 6246), False, 'import petibmpy\n'), ((5702, 5723), 'numpy.allclose', 'numpy.allclose', (['x', 'x2'], {}), '(x, x2)\n', (5716, 5723), False, 'import numpy\n'), ((6164, 6188), 'copy.deepcopy', 'copy.deepcopy', (['subconfig'], {}), '(subconfig)\n', (6177, 6188), False, 'import copy\n'), ((856, 893), 'numpy.allclose', 'numpy.allclose', (['coords2[i]', 'coords[i]'], {}), '(coords2[i], coords[i])\n', (870, 893), False, 'import numpy\n'), ((4984, 5006), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (4996, 5006), False, 'import pathlib\n')] |
# -*- coding: utf-8 -*-#
'''
# Name: lDataNormalization
# Description:
# Author: super
# Date: 2020/5/13
'''
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from HelperClass.NeuralNet_1_1 import *
file_name = "../data/ch05.npz"
def ShowResult(net, reader):
# draw example points
X,Y = reader.GetWholeTrainSamples()
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(X[:,0],X[:,1],Y)
# draw fitting surface
# numpy.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0)
# Return evenly spaced numbers over a specified interval.
# 返回指定间隔内的均匀间隔的数字。
# >>> np.linspace(2.0, 3.0, num=5)
# array([2. , 2.25, 2.5 , 2.75, 3. ])
# >>> np.linspace(2.0, 3.0, num=5, endpoint=False)
# array([2. , 2.2, 2.4, 2.6, 2.8])
#
# retstep : bool, optional
# If True, return (samples, step), where step is the spacing between samples.
# >>> np.linspace(2.0, 3.0, num=5, retstep=True)
# (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
p = np.linspace(0,1)
q = np.linspace(0,1)
# Return coordinate matrices from coordinate vectors.
# 从坐标向量返回坐标矩阵。
# nx, ny = (3,2)
# x = np.linspace(0, 1, nx)
# y = np.linspace(0, 1, ny)
# print(x)
# [0. 0.5 1. ]
# print(y)
# [0. 1.]
# xv, yv = np.meshgrid(x, y)
# print(xv)
# [[0. 0.5 1. ]
# [0. 0.5 1. ]]
# print(yv)
# [[0. 0. 0.]
# [1. 1. 1.]]
#
# yv1, xv1 = np.meshgrid(y, x)
# print(xv1)
# [[0. 0. ]
# [0.5 0.5]
# [1. 1. ]]
# print(yv1)
# [[0. 1.]
# [0. 1.]
# [0. 1.]]
P,Q = np.meshgrid(p,q)
R = np.hstack((P.ravel().reshape(2500,1), Q.ravel().reshape(2500,1)))
Z = net.inference(R)
Z = Z.reshape(50,50)
ax.plot_surface(P,Q,Z, cmap='rainbow')
plt.show()
if __name__ == '__main__':
# data
reader = DataReader_1_1(file_name)
reader.ReadData()
reader.NormalizeX()
# use this setting for the first time
hp = HyperParameters_1_0(2, 1, eta=0.1, max_epoch=10, batch_size=1, eps = 1e-5)
# use this setting when you want to train more loops
#hp = HyperParameters_1_0(2, 1, eta=0.01, max_epoch=500, batch_size=10, eps = 1e-5)
net = NeuralNet_1_1(hp)
net.train(reader, checkpoint=0.1)
# inference
x1 = 15
x2 = 93
x = np.array([x1,x2]).reshape(1,2)
print("z=", net.inference(x))
ShowResult(net, reader) | [
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.meshgrid",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.show"
] | [((404, 416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (414, 416), True, 'import matplotlib.pyplot as plt\n'), ((426, 437), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (432, 437), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1091, 1108), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (1102, 1108), True, 'import numpy as np\n'), ((1116, 1133), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {}), '(0, 1)\n', (1127, 1133), True, 'import numpy as np\n'), ((1687, 1704), 'numpy.meshgrid', 'np.meshgrid', (['p', 'q'], {}), '(p, q)\n', (1698, 1704), True, 'import numpy as np\n'), ((1875, 1885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((2396, 2414), 'numpy.array', 'np.array', (['[x1, x2]'], {}), '([x1, x2])\n', (2404, 2414), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import gdspy
import picwriter.toolkit as tk
class Ring(tk.Component):
    """ Ring Resonator Cell class.
    Args:
       * **wgt** (WaveguideTemplate): WaveguideTemplate object
       * **radius** (float): Radius of the resonator
       * **coupling_gap** (float): Distance between the bus waveguide and resonator
    Keyword Args:
       * **wrap_angle** (float): Angle in *radians* between 0 and pi that determines how much the bus waveguide wraps along the resonator.  0 corresponds to a straight bus waveguide, and pi corresponds to a bus waveguide wrapped around half of the resonator.  Defaults to `0`.
       * **parity** (1 or -1): If 1, resonator to left of bus waveguide, if -1 resonator to the right
       * **port** (tuple): Cartesian coordinate of the input port (x1, y1)
       * **direction** (string): Direction that the component will point *towards*, can be of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, OR an angle (float, in radians)
       * **draw_bus_wg** (bool): If `False`, does not generate the bus waveguide.  Instead, the input/output port positions will be at the some location at the bottom of the ring, and the user can route their own bus waveguide.  Defaults to `True`.
    Members:
       * **portlist** (dict): Dictionary with the relevant port information
    Portlist format:
       * portlist['input'] = {'port': (x1,y1), 'direction': 'dir1'}
       * portlist['output'] = {'port': (x2, y2), 'direction': 'dir2'}
    Where in the above (x1,y1) is the same as the 'port' input, (x2, y2) is the end of the component, and 'dir1', 'dir2' are of type `'NORTH'`, `'WEST'`, `'SOUTH'`, `'EAST'`, *or* an angle in *radians*.
    'Direction' points *towards* the waveguide that will connect to it.
    """
    def __init__(
        self,
        wgt,
        radius,
        coupling_gap,
        wrap_angle=0,
        parity=1,
        draw_bus_wg=True,
        port=(0, 0),
        direction="EAST",
    ):
        tk.Component.__init__(self, "Ring", locals())
        self.portlist = {}
        self.port = port
        self.direction = direction
        self.radius = radius
        self.coupling_gap = coupling_gap
        self.wrap_angle = wrap_angle
        # Only wrap angles in [0, pi] are geometrically meaningful here.
        if (wrap_angle > np.pi) or (wrap_angle < 0):
            raise ValueError(
                "Warning! Wrap_angle is nor a valid angle between 0 and pi."
            )
        self.parity = parity
        self.resist = wgt.resist
        self.wgt = wgt
        # gdspy layer/datatype specs for the waveguide core and the cladding.
        self.wg_spec = {"layer": wgt.wg_layer, "datatype": wgt.wg_datatype}
        self.clad_spec = {"layer": wgt.clad_layer, "datatype": wgt.clad_datatype}
        self.draw_bus_wg = draw_bus_wg
        self.__build_cell()
        self.__build_ports()
        """ Translate & rotate the ports corresponding to this specific component object
        """
        self._auto_transform_()
    def __build_cell(self):
        # Sequentially build all the geometric shapes using gdspy path functions
        # for waveguide, then add it to the Cell
        #
        # Three layouts are possible:
        #   1) straight bus waveguide (wrap_angle == 0),
        #   2) bus waveguide that wraps partially around the ring (wrap_angle > 0),
        #   3) ring only, no bus waveguide (draw_bus_wg == False).
        # `parity` mirrors the ring above (+1) or below (-1) the bus.
        if self.draw_bus_wg:
            if self.wrap_angle == 0:
                bus_length = 2 * self.radius
                # Add bus waveguide with cladding
                path = gdspy.Path(self.wgt.wg_width, (0, 0))
                path.segment(2 * self.radius, direction="+x", **self.wg_spec)
                clad = gdspy.Path(2 * self.wgt.clad_width + self.wgt.wg_width, (0, 0))
                clad.segment(2 * self.radius, direction="+x", **self.clad_spec)
                # Ring resonator
                if self.parity == 1:
                    # Ring centered above the bus, offset by one waveguide width
                    # plus the coupling gap.
                    ring = gdspy.Round(
                        (
                            self.radius,
                            self.radius + self.wgt.wg_width + self.coupling_gap,
                        ),
                        self.radius + self.wgt.wg_width / 2.0,
                        self.radius - self.wgt.wg_width / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(2 * np.pi, self.radius),
                        **self.wg_spec
                    )
                    clad_ring = gdspy.Round(
                        (
                            self.radius,
                            self.radius + self.wgt.wg_width + self.coupling_gap,
                        ),
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(2 * np.pi, self.radius),
                        **self.clad_spec
                    )
                elif self.parity == -1:
                    # Mirror image: ring centered below the bus.
                    ring = gdspy.Round(
                        (
                            self.radius,
                            -self.radius - self.wgt.wg_width - self.coupling_gap,
                        ),
                        self.radius + self.wgt.wg_width / 2.0,
                        self.radius - self.wgt.wg_width / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(2 * np.pi, self.radius),
                        **self.wg_spec
                    )
                    clad_ring = gdspy.Round(
                        (
                            self.radius,
                            -self.radius - self.wgt.wg_width - self.coupling_gap,
                        ),
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(2 * np.pi, self.radius),
                        **self.clad_spec
                    )
                else:
                    raise ValueError(
                        "Warning! Parity value is not an acceptable value (must be +1 or -1)."
                    )
            elif self.wrap_angle > 0:
                # The bus bends towards the ring by `theta` on each side of the
                # coupling region; (dx, dy) is the in-plane extent of one bend.
                theta = self.wrap_angle / 2.0
                rp = self.radius + self.wgt.wg_width + self.coupling_gap
                dx, dy = rp * np.sin(theta), rp - rp * np.cos(theta)
                bus_length = 2 * self.radius if (4 * dx < 2 * self.radius) else 4 * dx
                # Add bus waveguide with cladding that wraps
                path = gdspy.Path(self.wgt.wg_width, (0, 0))
                clad = gdspy.Path(2 * self.wgt.clad_width + self.wgt.wg_width, (0, 0))
                if 4 * dx < bus_length:
                    # Straight lead-in segment so the total bus length is symmetric.
                    path.segment(
                        (bus_length - 4 * dx) / 2.0, direction="+x", **self.wg_spec
                    )
                    clad.segment(
                        (bus_length - 4 * dx) / 2.0, direction="+x", **self.clad_spec
                    )
                    xcenter = self.radius
                else:
                    xcenter = 2 * dx
                if self.parity == 1:
                    # Three arcs: bend up towards the ring, wrap along it, bend back.
                    path.arc(
                        rp,
                        np.pi / 2.0,
                        np.pi / 2.0 - theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    path.arc(
                        rp,
                        -np.pi / 2.0 - theta,
                        -np.pi / 2.0 + theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    path.arc(
                        rp,
                        np.pi / 2.0 + theta,
                        np.pi / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    clad.arc(
                        rp,
                        np.pi / 2.0,
                        np.pi / 2.0 - theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    clad.arc(
                        rp,
                        -np.pi / 2.0 - theta,
                        -np.pi / 2.0 + theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    clad.arc(
                        rp,
                        np.pi / 2.0 + theta,
                        np.pi / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    # Make the ring resonator
                    ring = gdspy.Round(
                        (
                            xcenter,
                            self.radius
                            + self.wgt.wg_width
                            + self.coupling_gap
                            - 2 * dy,
                        ),
                        self.radius + self.wgt.wg_width / 2.0,
                        self.radius - self.wgt.wg_width / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(
                            2 * np.pi, self.radius + self.wgt.wg_width / 2.0
                        ),
                        **self.wg_spec
                    )
                    clad_ring = gdspy.Round(
                        (
                            xcenter,
                            self.radius
                            + self.wgt.wg_width
                            + self.coupling_gap
                            - 2 * dy,
                        ),
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(
                            2 * np.pi,
                            self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        ),
                        **self.clad_spec
                    )
                elif self.parity == -1:
                    # Mirror of the parity == +1 case: arcs bend downwards.
                    path.arc(
                        rp,
                        -np.pi / 2.0,
                        -np.pi / 2.0 + theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    path.arc(
                        rp,
                        np.pi / 2.0 + theta,
                        np.pi / 2.0 - theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    path.arc(
                        rp,
                        -np.pi / 2.0 - theta,
                        -np.pi / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.wg_spec
                    )
                    clad.arc(
                        rp,
                        -np.pi / 2.0,
                        -np.pi / 2.0 + theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    clad.arc(
                        rp,
                        np.pi / 2.0 + theta,
                        np.pi / 2.0 - theta,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    clad.arc(
                        rp,
                        -np.pi / 2.0 - theta,
                        -np.pi / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(self.wrap_angle, rp),
                        **self.clad_spec
                    )
                    # Make the ring resonator
                    ring = gdspy.Round(
                        (
                            xcenter,
                            -self.radius
                            - self.wgt.wg_width
                            - self.coupling_gap
                            + 2 * dy,
                        ),
                        self.radius + self.wgt.wg_width / 2.0,
                        self.radius - self.wgt.wg_width / 2.0,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(
                            2 * np.pi, self.radius + self.wgt.wg_width / 2.0
                        ),
                        **self.wg_spec
                    )
                    clad_ring = gdspy.Round(
                        (
                            xcenter,
                            -self.radius
                            - self.wgt.wg_width
                            - self.coupling_gap
                            + 2 * dy,
                        ),
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                        number_of_points=2
                        * self.wgt.get_num_points_curve(
                            2 * np.pi,
                            self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                        ),
                        **self.clad_spec
                    )
                else:
                    raise ValueError(
                        "Warning! Parity value is not an acceptable value (must be +1 or -1)."
                    )
                if 4 * dx < bus_length:
                    # Matching straight lead-out segment on the far side.
                    path.segment((bus_length - 4 * dx) / 2.0, **self.wg_spec)
                    clad.segment((bus_length - 4 * dx) / 2.0, **self.clad_spec)
        else:
            # Ring resonator
            # No bus waveguide: both ports collapse to the origin (bus_length = 0).
            bus_length = 0
            if self.parity == 1:
                ring = gdspy.Round(
                    (0, self.radius + self.wgt.wg_width + self.coupling_gap),
                    self.radius + self.wgt.wg_width / 2.0,
                    self.radius - self.wgt.wg_width / 2.0,
                    number_of_points=2
                    * self.wgt.get_num_points_curve(
                        2 * np.pi, self.radius + self.wgt.wg_width / 2.0
                    ),
                    **self.wg_spec
                )
                clad_ring = gdspy.Round(
                    (0, self.radius + self.wgt.wg_width + self.coupling_gap),
                    self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                    self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                    number_of_points=2
                    * self.wgt.get_num_points_curve(
                        2 * np.pi,
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                    ),
                    **self.clad_spec
                )
            elif self.parity == -1:
                ring = gdspy.Round(
                    (0, -self.radius - self.wgt.wg_width - self.coupling_gap),
                    self.radius + self.wgt.wg_width / 2.0,
                    self.radius - self.wgt.wg_width / 2.0,
                    number_of_points=2
                    * self.wgt.get_num_points_curve(
                        2 * np.pi, self.radius + self.wgt.wg_width / 2.0
                    ),
                    **self.wg_spec
                )
                clad_ring = gdspy.Round(
                    (0, -self.radius - self.wgt.wg_width - self.coupling_gap),
                    self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                    self.radius - self.wgt.wg_width / 2.0 - self.wgt.clad_width,
                    number_of_points=2
                    * self.wgt.get_num_points_curve(
                        2 * np.pi,
                        self.radius + self.wgt.wg_width / 2.0 + self.wgt.clad_width,
                    ),
                    **self.clad_spec
                )
            else:
                raise ValueError(
                    "Warning! Parity value is not an acceptable value (must be +1 or -1)."
                )
        # Ports in local (untransformed) coordinates; _auto_transform_ will
        # later move them to the user-specified port/direction.
        self.port_input = (0, 0)
        self.port_output = (bus_length, 0)
        if not self.draw_bus_wg:
            self.port_output = (0, 0)
        self.add(ring)
        self.add(clad_ring)
        if self.draw_bus_wg:
            self.add(path)
            self.add(clad)
    def __build_ports(self):
        # Portlist format:
        # example: example: {'port':(x_position, y_position), 'direction': 'NORTH'}
        self.portlist["input"] = {"port": self.port_input, "direction": "WEST"}
        self.portlist["output"] = {"port": self.port_output, "direction": "EAST"}
if __name__ == "__main__":
    # Demo: render a ring resonator attached to two waveguides and write a GDS file.
    from . import *
    top = gdspy.Cell("top")
    wgt = WaveguideTemplate(bend_radius=50, resist="+")
    wg1 = Waveguide([(0, -100.0), (0, 0), (50, 0)], wgt)
    tk.add(top, wg1)
    # NOTE(review): radius=90000.0 looks suspicious next to the commented-out
    # examples below (radii 40-50); possibly a typo for 90.0 — confirm.
    r1 = Ring(wgt, 90000.0, 1.0, parity=1, draw_bus_wg=True, **wg1.portlist["output"])
    tk.add(top, r1)
    print(r1.portlist)
    wg2 = Waveguide(
        [
            r1.portlist["output"]["port"],
            (r1.portlist["output"]["port"][0] + 100, r1.portlist["output"]["port"][1]),
        ],
        wgt,
    )
    tk.add(top, wg2)
    # r2 = Ring(wgt, 50.0, 0.8, wrap_angle=np.pi, parity=-1, **wg2.portlist["output"])
    #
    # wg3=Waveguide([r2.portlist["output"]["port"], (r2.portlist["output"]["port"][0]+100, r2.portlist["output"]["port"][1])], wgt)
    # tk.add(top, wg3)
    #
    # r3 = Ring(wgt, 40.0, 0.6, parity=1, **wg3.portlist["output"])
    #
    # wg4=Waveguide([r3.portlist["output"]["port"], (r3.portlist["output"]["port"][0]+100, r3.portlist["output"]["port"][1])], wgt)
    # tk.add(top, wg4)
    #
    # tk.add(top, r1)
    # tk.add(top, r2)
    # tk.add(top, r3)
    # gdspy.LayoutViewer()
    gdspy.write_gds("ring.gds", unit=1.0e-6, precision=1.0e-9)
| [
"numpy.sin",
"picwriter.toolkit.add",
"gdspy.write_gds",
"numpy.cos",
"gdspy.Cell",
"gdspy.Path"
] | [((17612, 17629), 'gdspy.Cell', 'gdspy.Cell', (['"""top"""'], {}), "('top')\n", (17622, 17629), False, 'import gdspy\n'), ((17748, 17764), 'picwriter.toolkit.add', 'tk.add', (['top', 'wg1'], {}), '(top, wg1)\n', (17754, 17764), True, 'import picwriter.toolkit as tk\n'), ((17857, 17872), 'picwriter.toolkit.add', 'tk.add', (['top', 'r1'], {}), '(top, r1)\n', (17863, 17872), True, 'import picwriter.toolkit as tk\n'), ((18093, 18109), 'picwriter.toolkit.add', 'tk.add', (['top', 'wg2'], {}), '(top, wg2)\n', (18099, 18109), True, 'import picwriter.toolkit as tk\n'), ((18728, 18784), 'gdspy.write_gds', 'gdspy.write_gds', (['"""ring.gds"""'], {'unit': '(1e-06)', 'precision': '(1e-09)'}), "('ring.gds', unit=1e-06, precision=1e-09)\n", (18743, 18784), False, 'import gdspy\n'), ((3390, 3427), 'gdspy.Path', 'gdspy.Path', (['self.wgt.wg_width', '(0, 0)'], {}), '(self.wgt.wg_width, (0, 0))\n', (3400, 3427), False, 'import gdspy\n'), ((3529, 3592), 'gdspy.Path', 'gdspy.Path', (['(2 * self.wgt.clad_width + self.wgt.wg_width)', '(0, 0)'], {}), '(2 * self.wgt.clad_width + self.wgt.wg_width, (0, 0))\n', (3539, 3592), False, 'import gdspy\n'), ((6568, 6605), 'gdspy.Path', 'gdspy.Path', (['self.wgt.wg_width', '(0, 0)'], {}), '(self.wgt.wg_width, (0, 0))\n', (6578, 6605), False, 'import gdspy\n'), ((6629, 6692), 'gdspy.Path', 'gdspy.Path', (['(2 * self.wgt.clad_width + self.wgt.wg_width)', '(0, 0)'], {}), '(2 * self.wgt.clad_width + self.wgt.wg_width, (0, 0))\n', (6639, 6692), False, 'import gdspy\n'), ((6357, 6370), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6363, 6370), True, 'import numpy as np\n'), ((6382, 6395), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6388, 6395), True, 'import numpy as np\n')] |
import logging
import numpy as np
from os.path import join
from types import ModuleType
from inspect import getmembers, isclass
from pyquaternion import Quaternion
from pyrep import PyRep
from pyrep.backend.utils import suppress_std_out_and_err
from pyrep.errors import IKError
from pyrep.robots.arms.panda import Panda
from pyrep.robots.end_effectors.panda_gripper import PandaGripper
from rlbench import tasks
from rlbench.backend.const import *
from rlbench.backend.robot import Robot
from rlbench.backend.scene import Scene
from rlbench.environment import DIR_PATH
from rlbench.observation_config import ObservationConfig
from rlbench.action_modes import ActionMode, ArmActionMode
from rlbench.backend.exceptions import BoundaryError, WaypointError
from rlbench.task_environment import InvalidActionError, TaskEnvironmentError,\
TORQUE_MAX_VEL, DT, MAX_RESET_ATTEMPTS
from rlbench.backend.task import Task
from core.common import StepDict, Type
from core.environment.environment import Environment
# TaskClass: child class inherited from Task
TaskClass = Type[Task]
__all__ = ["EnvironmentImpl"]
class EnvironmentImpl(Environment):
    """RLBench-backed environment: owns a PyRep simulator instance, a robot
    (arm + gripper) and a Scene, and exposes reset/step for a single task."""
    @staticmethod
    def all_task_names():
        # Every class defined in rlbench.tasks is a task; return their names.
        return tuple(o[0] for o in getmembers(tasks) if isclass(o[1]))
    def __init__(self, task_name: str, obs_config: ObservationConfig = ObservationConfig(task_low_dim_state=True),
                 action_mode: ActionMode = ActionMode(),
                 arm_name: str = "Panda", gripper_name: str = "Panda_gripper"):
        # NOTE(review): the ObservationConfig()/ActionMode() defaults are mutable
        # objects created once at import time and shared across instances.
        super(EnvironmentImpl, self).__init__(task_name)
        self._arm_name = arm_name
        self._gripper_name = gripper_name
        self._action_mode = action_mode
        self._obs_config = obs_config
        # TODO: modify the task/robot/arm/gripper to support early instantiation before v-rep launched
        self._task = None
        self._pyrep = None
        self._robot = None
        self._scene = None
        self._variation_number = 0
        self._reset_called = False
        self._prev_ee_velocity = None
        self._update_info_dict()
    def init(self, display=False):
        """Launch (or relaunch) the simulator and load the task scene.

        Args:
            display: if True, run V-REP with its GUI; otherwise headless.
        """
        if self._pyrep is not None:
            self.finalize()
        with suppress_std_out_and_err():
            self._pyrep = PyRep()
        # TODO: TTT_FILE should be defined by robot, but now robot depends on launched pyrep
        self._pyrep.launch(join(DIR_PATH, TTT_FILE), headless=not display)
        self._pyrep.set_simulation_timestep(0.005)
        # TODO: Load arm and gripper from name
        self._robot = Robot(Panda(), PandaGripper())
        self._scene = Scene(self._pyrep, self._robot, self._obs_config)
        self._set_arm_control_action()
        # Str comparison because sometimes class comparison doesn't work.
        if self._task is not None:
            self._task.unload()
        self._task = self._get_class_by_name(self._task_name, tasks)(self._pyrep, self._robot)
        self._scene.load(self._task)
        self._pyrep.start()
    def finalize(self):
        # Shut the simulator down quietly and drop the handle.
        with suppress_std_out_and_err():
            self._pyrep.shutdown()
        self._pyrep = None
    def reset(self, random: bool = True) -> StepDict:
        """Reset the scene and (re)initialise the current task episode.

        Returns a StepDict with the first low-dim observation under 's' and
        the episode descriptions under 'opt'.
        """
        logging.info('Resetting task: %s' % self._task.get_name())
        self._scene.reset()
        try:
            # TODO: let desc be constant
            desc = self._scene.init_episode(self._variation_number, max_attempts=MAX_RESET_ATTEMPTS, randomly_place=random)
        except (BoundaryError, WaypointError) as e:
            raise TaskEnvironmentError(
                'Could not place the task %s in the scene. This should not '
                'happen, please raise an issues on this task.'
                % self._task.get_name()) from e
        # Save and restore the arm's control-loop/motor-lock flags around the reset.
        ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled()
        locked = self._robot.arm.joints[0].is_motor_locked_at_zero_velocity()
        self._robot.arm.set_control_loop_enabled(False)
        self._robot.arm.set_motor_locked_at_zero_velocity(True)
        self._reset_called = True
        self._robot.arm.set_control_loop_enabled(ctr_loop)
        self._robot.arm.set_motor_locked_at_zero_velocity(locked)
        # Returns a list of descriptions and the first observation
        return {'s': self._scene.get_observation().get_low_dim_data(), "opt": desc}
    def step(self, last_step: StepDict) -> (StepDict, bool):
        # returns observation, reward, done, info
        if not self._reset_called:
            raise RuntimeError("Call 'reset' before calling 'step' on a task.")
        assert 'a' in last_step, "Key 'a' for action not in last_step, maybe you passed a wrong dict ?"
        # action should contain 1 extra value for gripper open close state
        arm_action = np.array(last_step['a'][:-1])
        ee_action = last_step['a'][-1]
        # Current gripper state: 1.0 == open (open amount > 0.9), else 0.0.
        current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
        # Binarise the requested gripper action; exactly 0.0 is left as-is.
        if ee_action > 0.0:
            ee_action = 1.0
        elif ee_action < -0.0:
            ee_action = 0.0
        # Dispatch the arm part of the action according to the configured mode.
        if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
            self._assert_action_space(arm_action, (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_velocities(arm_action)
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
            self._assert_action_space(arm_action, (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_velocities())
            self._robot.arm.set_joint_target_velocities(cur + arm_action)
        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
            self._assert_action_space(arm_action, (len(self._robot.arm.joints),))
            self._robot.arm.set_joint_target_positions(arm_action)
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
            self._assert_action_space(arm_action, (len(self._robot.arm.joints),))
            cur = np.array(self._robot.arm.get_joint_positions())
            self._robot.arm.set_joint_target_positions(cur + arm_action)
        elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE:
            self._assert_action_space(arm_action, (7,))
            self._ee_action(list(arm_action))
        elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE:
            self._assert_action_space(arm_action, (7,))
            # Compose the delta rotation with the current tip orientation.
            a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
            x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
            new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(qw, qx, qy, qz)
            qw, qx, qy, qz = list(new_rot)
            new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
            self._ee_action(list(new_pose))
        elif self._action_mode.arm == ArmActionMode.ABS_EE_VELOCITY:
            self._assert_action_space(arm_action, (7,))
            # Integrate the velocity over one control timestep DT.
            pose = self._robot.arm.get_tip().get_pose()
            new_pos = np.array(pose) + (arm_action * DT)
            self._ee_action(list(new_pos))
        elif self._action_mode.arm == ArmActionMode.DELTA_EE_VELOCITY:
            self._assert_action_space(arm_action, (7,))
            if self._prev_ee_velocity is None:
                self._prev_ee_velocity = np.zeros((7,))
            # Accumulate velocity deltas across calls, then integrate over DT.
            self._prev_ee_velocity += arm_action
            pose = self._robot.arm.get_tip().get_pose()
            pose = np.array(pose)
            new_pose = pose + (self._prev_ee_velocity * DT)
            self._ee_action(list(new_pose))
        elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
            self._assert_action_space(arm_action, (len(self._robot.arm.joints),))
            self._torque_action(arm_action)
        elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
            cur = np.array(self._robot.arm.get_joint_forces())
            new_action = cur + arm_action
            self._torque_action(new_action)
        else:
            raise RuntimeError('Unrecognised action mode.')
        # Actuate the gripper only when the requested state differs from the
        # current one; step the sim until the gripper finishes moving.
        if current_ee != ee_action:
            done = False
            while not done:
                done = self._robot.gripper.actuate(ee_action, velocity=0.04)
                self._pyrep.step()
                self._task.step()
            if ee_action == 0.0:
                # If gripper close action, the check for grasp.
                for g_obj in self._task.get_graspable_objects():
                    self._robot.gripper.grasp(g_obj)
            else:
                # If gripper open action, the check for ungrasp.
                self._robot.gripper.release()
        self._scene.step()
        success, terminate = self._task.success()
        # Sparse reward: 1 on task success, else 0.
        last_step['r'] = int(success)
        next_step = {'s': self._scene.get_observation().get_low_dim_data(), "opt": None}
        return last_step, next_step, terminate
    def name(self) -> str:
        return self._task_name
    # ------------- private methods ------------- #
    def _update_info_dict(self):
        # update info dict
        self._info["action mode"] = self._action_mode
        self._info["observation mode"] = self._obs_config
        # TODO: action dim should related to robot, not action mode, here we fixed it temporally
        self._info["action dim"] = (8,)
        self._info["action low"] = np.zeros(self._info["action dim"], dtype=np.float32) - 1.
        self._info["action high"] = np.zeros(self._info["action dim"], dtype=np.float32) + 1.
        self._info["state dim"] = (73,)
        self._info["state low"] = np.zeros(self._info["state dim"], dtype=np.float32) - 100.
        self._info["state high"] = np.zeros(self._info["state dim"], dtype=np.float32) + 100.
        self._info["reward low"] = -np.inf
        self._info["reward high"] = np.inf
    def _set_arm_control_action(self):
        # Velocity/torque modes drive motors directly (control loop off);
        # position/pose modes rely on the built-in control loop.
        self._robot.arm.set_control_loop_enabled(True)
        if self._action_mode.arm in (ArmActionMode.ABS_JOINT_VELOCITY, ArmActionMode.DELTA_JOINT_VELOCITY):
            self._robot.arm.set_control_loop_enabled(False)
            self._robot.arm.set_motor_locked_at_zero_velocity(True)
        elif self._action_mode.arm in (ArmActionMode.ABS_JOINT_POSITION, ArmActionMode.DELTA_JOINT_POSITION,
                                       ArmActionMode.ABS_EE_POSE, ArmActionMode.DELTA_EE_POSE,
                                       ArmActionMode.ABS_EE_VELOCITY, ArmActionMode.DELTA_EE_VELOCITY):
            self._robot.arm.set_control_loop_enabled(True)
        elif self._action_mode.arm in (ArmActionMode.ABS_JOINT_TORQUE, ArmActionMode.DELTA_JOINT_TORQUE):
            self._robot.arm.set_control_loop_enabled(False)
        else:
            raise RuntimeError('Unrecognised action mode.')
    def sample_variation(self) -> int:
        # Pick a random task variation; subsequent resets use it.
        self._variation_number = np.random.randint(0, self._task.variation_count())
        return self._variation_number
    def _assert_action_space(self, action, expected_shape):
        if np.shape(action) != expected_shape:
            raise RuntimeError(
                'Expected the action shape to be: %s, but was shape: %s' % (
                    str(expected_shape), str(np.shape(action))))
    def _torque_action(self, action):
        # Drive each joint at max velocity opposite to the torque sign, then
        # cap the applied force at |torque|.
        self._robot.arm.set_joint_target_velocities([(TORQUE_MAX_VEL if t < 0 else -TORQUE_MAX_VEL) for t in action])
        self._robot.arm.set_joint_forces(np.abs(action))
    def _ee_action(self, action):
        # action = [x, y, z, qx, qy, qz, qw]; solve IK and command joint targets.
        try:
            joint_positions = self._robot.arm.solve_ik(action[:3], quaternion=action[3:])
            self._robot.arm.set_joint_target_positions(joint_positions)
        except IKError as e:
            raise InvalidActionError("Could not find a path.") from e
        self._pyrep.step()
    @staticmethod
    def _get_class_by_name(class_name: str, model: ModuleType) -> TaskClass:
        # Look up a class by name inside a module (e.g. a Task in rlbench.tasks).
        all_class_dict = {}
        for o in getmembers(model):
            if isclass(o[1]):
                all_class_dict[o[0]] = o[1]
        if class_name not in all_class_dict:
            raise NotImplementedError(f"No class {class_name} found in {model.__name__} !")
        return all_class_dict[class_name]
if __name__ == "__main__":
    # Smoke test: run 100 random-action steps on the CloseMicrowave task with GUI.
    from time import sleep
    e = EnvironmentImpl("CloseMicrowave")
    e.init(True)
    e.reset()
    for i in range(100):
        e.step({'a': np.random.randn(8)})
        sleep(0.1)
| [
"numpy.abs",
"pyquaternion.Quaternion",
"inspect.getmembers",
"rlbench.backend.scene.Scene",
"rlbench.observation_config.ObservationConfig",
"pyrep.backend.utils.suppress_std_out_and_err",
"pyrep.robots.arms.panda.Panda",
"pyrep.robots.end_effectors.panda_gripper.PandaGripper",
"os.path.join",
"rl... | [((1405, 1447), 'rlbench.observation_config.ObservationConfig', 'ObservationConfig', ([], {'task_low_dim_state': '(True)'}), '(task_low_dim_state=True)\n', (1422, 1447), False, 'from rlbench.observation_config import ObservationConfig\n'), ((1492, 1504), 'rlbench.action_modes.ActionMode', 'ActionMode', ([], {}), '()\n', (1502, 1504), False, 'from rlbench.action_modes import ActionMode, ArmActionMode\n'), ((4864, 4893), 'numpy.array', 'np.array', (["last_step['a'][:-1]"], {}), "(last_step['a'][:-1])\n", (4872, 4893), True, 'import numpy as np\n'), ((11914, 11931), 'inspect.getmembers', 'getmembers', (['model'], {}), '(model)\n', (11924, 11931), False, 'from inspect import getmembers, isclass\n'), ((12391, 12401), 'time.sleep', 'sleep', (['(0.1)'], {}), '(0.1)\n', (12396, 12401), False, 'from time import sleep\n'), ((2264, 2290), 'pyrep.backend.utils.suppress_std_out_and_err', 'suppress_std_out_and_err', ([], {}), '()\n', (2288, 2290), False, 'from pyrep.backend.utils import suppress_std_out_and_err\n'), ((2318, 2325), 'pyrep.PyRep', 'PyRep', ([], {}), '()\n', (2323, 2325), False, 'from pyrep import PyRep\n'), ((2692, 2741), 'rlbench.backend.scene.Scene', 'Scene', (['self._pyrep', 'self._robot', 'self._obs_config'], {}), '(self._pyrep, self._robot, self._obs_config)\n', (2697, 2741), False, 'from rlbench.backend.scene import Scene\n'), ((3149, 3175), 'pyrep.backend.utils.suppress_std_out_and_err', 'suppress_std_out_and_err', ([], {}), '()\n', (3173, 3175), False, 'from pyrep.backend.utils import suppress_std_out_and_err\n'), ((9376, 9428), 'numpy.zeros', 'np.zeros', (["self._info['action dim']"], {'dtype': 'np.float32'}), "(self._info['action dim'], dtype=np.float32)\n", (9384, 9428), True, 'import numpy as np\n'), ((9470, 9522), 'numpy.zeros', 'np.zeros', (["self._info['action dim']"], {'dtype': 'np.float32'}), "(self._info['action dim'], dtype=np.float32)\n", (9478, 9522), True, 'import numpy as np\n'), ((9602, 9653), 'numpy.zeros', 'np.zeros', 
(["self._info['state dim']"], {'dtype': 'np.float32'}), "(self._info['state dim'], dtype=np.float32)\n", (9610, 9653), True, 'import numpy as np\n'), ((9696, 9747), 'numpy.zeros', 'np.zeros', (["self._info['state dim']"], {'dtype': 'np.float32'}), "(self._info['state dim'], dtype=np.float32)\n", (9704, 9747), True, 'import numpy as np\n'), ((11013, 11029), 'numpy.shape', 'np.shape', (['action'], {}), '(action)\n', (11021, 11029), True, 'import numpy as np\n'), ((11421, 11435), 'numpy.abs', 'np.abs', (['action'], {}), '(action)\n', (11427, 11435), True, 'import numpy as np\n'), ((11948, 11961), 'inspect.isclass', 'isclass', (['o[1]'], {}), '(o[1])\n', (11955, 11961), False, 'from inspect import getmembers, isclass\n'), ((2454, 2478), 'os.path.join', 'join', (['DIR_PATH', 'TTT_FILE'], {}), '(DIR_PATH, TTT_FILE)\n', (2458, 2478), False, 'from os.path import join\n'), ((2641, 2648), 'pyrep.robots.arms.panda.Panda', 'Panda', ([], {}), '()\n', (2646, 2648), False, 'from pyrep.robots.arms.panda import Panda\n'), ((2650, 2664), 'pyrep.robots.end_effectors.panda_gripper.PandaGripper', 'PandaGripper', ([], {}), '()\n', (2662, 2664), False, 'from pyrep.robots.end_effectors.panda_gripper import PandaGripper\n'), ((11694, 11738), 'rlbench.task_environment.InvalidActionError', 'InvalidActionError', (['"""Could not find a path."""'], {}), "('Could not find a path.')\n", (11712, 11738), False, 'from rlbench.task_environment import InvalidActionError, TaskEnvironmentError, TORQUE_MAX_VEL, DT, MAX_RESET_ATTEMPTS\n'), ((12362, 12380), 'numpy.random.randn', 'np.random.randn', (['(8)'], {}), '(8)\n', (12377, 12380), True, 'import numpy as np\n'), ((1297, 1314), 'inspect.getmembers', 'getmembers', (['tasks'], {}), '(tasks)\n', (1307, 1314), False, 'from inspect import getmembers, isclass\n'), ((1318, 1331), 'inspect.isclass', 'isclass', (['o[1]'], {}), '(o[1])\n', (1325, 1331), False, 'from inspect import getmembers, isclass\n'), ((11203, 11219), 'numpy.shape', 'np.shape', (['action'], 
{}), '(action)\n', (11211, 11219), True, 'import numpy as np\n'), ((6620, 6654), 'pyquaternion.Quaternion', 'Quaternion', (['a_qw', 'a_qx', 'a_qy', 'a_qz'], {}), '(a_qw, a_qx, a_qy, a_qz)\n', (6630, 6654), False, 'from pyquaternion import Quaternion\n'), ((6657, 6683), 'pyquaternion.Quaternion', 'Quaternion', (['qw', 'qx', 'qy', 'qz'], {}), '(qw, qx, qy, qz)\n', (6667, 6683), False, 'from pyquaternion import Quaternion\n'), ((7044, 7058), 'numpy.array', 'np.array', (['pose'], {}), '(pose)\n', (7052, 7058), True, 'import numpy as np\n'), ((7476, 7490), 'numpy.array', 'np.array', (['pose'], {}), '(pose)\n', (7484, 7490), True, 'import numpy as np\n'), ((7337, 7351), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (7345, 7351), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# file: data_utils_for_inferring.py
# time: 2021/4/22 0022
# author: yangheng <<EMAIL>>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
import numpy as np
from pyabsa.utils.pyabsa_utils import check_and_fix_labels, validate_example
from torch.utils.data import Dataset
from tqdm import tqdm
from .apc_utils import (build_sentiment_window,
build_spc_mask_vec,
load_apc_datasets,
prepare_input_for_apc,
LABEL_PADDING, configure_spacy_model)
from .apc_utils_for_dlcf_dca import prepare_input_for_dlcf_dca, configure_dlcf_spacy_model
class ABSADataset(Dataset):
    """Dataset for aspect-based sentiment inference: parses '[ASP]'-tagged
    sentences (optionally with '!sent!' reference labels) into model inputs."""
    def __init__(self, tokenizer, opt):
        configure_spacy_model(opt)
        self.tokenizer = tokenizer
        self.opt = opt
        # List of per-aspect feature dicts, filled by process_data().
        self.all_data = []
    def parse_sample(self, text):
        """Split one raw input line into one sample per '[ASP]'-marked aspect.

        Each returned sample keeps exactly one aspect wrapped in '[ASP]' tags;
        a '!sent!'-suffixed reference sentiment is attached when the number of
        labels matches the number of aspects.
        """
        _text = text
        samples = []
        if '!sent!' not in text:
            text += '!sent!'
        text, _, ref_sent = text.partition('!sent!')
        ref_sent = ref_sent.split(',') if ref_sent else None
        # Pad so an aspect at the very start/end of the sentence still has context.
        text = '[PADDING] ' + text + ' [PADDING]'
        splits = text.split('[ASP]')
        if ref_sent and int((len(splits) - 1) / 2) == len(ref_sent):
            # One reference label per aspect: keep the i-th aspect tagged
            # (via the [TEMP] placeholder) and strip all other [ASP] tags.
            for i in range(0, len(splits) - 1, 2):
                sample = text.replace('[ASP]' + splits[i + 1] + '[ASP]',
                                      '[TEMP]' + splits[i + 1] + '[TEMP]', 1).replace('[ASP]', '')
                sample += ' !sent! ' + str(ref_sent[int(i / 2)])
                samples.append(sample.replace('[TEMP]', '[ASP]'))
        elif not ref_sent or int((len(splits) - 1) / 2) != len(ref_sent):
            # Missing or mismatched labels: emit unlabeled samples.
            if not ref_sent:
                print(_text, ' -> No the reference sentiment found')
            else:
                print(_text, ' -> Unequal length of reference sentiment and aspects, ignore the reference sentiment.')
            for i in range(0, len(splits) - 1, 2):
                sample = text.replace('[ASP]' + splits[i + 1] + '[ASP]',
                                      '[TEMP]' + splits[i + 1] + '[TEMP]', 1).replace('[ASP]', '')
                samples.append(sample.replace('[TEMP]', '[ASP]'))
        else:
            raise ValueError('Invalid Input:{}'.format(text))
        return samples
    def prepare_infer_sample(self, text: str):
        # Build features for a single raw input string.
        self.process_data(self.parse_sample(text))
    def prepare_infer_dataset(self, infer_file, ignore_error):
        # Build features for every line of an inference file.
        lines = load_apc_datasets(infer_file)
        samples = []
        for sample in lines:
            if sample:
                samples.extend(self.parse_sample(sample))
        self.process_data(samples, ignore_error)
    def process_data(self, samples, ignore_error=True):
        """Convert parsed samples into model-input dicts stored in self.all_data.

        Args:
            samples: list of '[ASP]'-tagged sample strings from parse_sample().
            ignore_error: if True, print and skip malformed samples instead of
                raising.
        """
        all_data = []
        label_set = set()
        ex_id = 0
        if len(samples) > 1:
            it = tqdm(samples, postfix='building word indices...')
        else:
            it = samples
        for i, text in enumerate(it):
            try:
                # handle for empty lines in inferring_tutorials dataset_utils
                if text is None or '' == text.strip():
                    raise RuntimeError('Invalid Input!')
                # check for given polarity
                if '!sent!' in text:
                    text, polarity = text.split('!sent!')[0].strip(), text.split('!sent!')[1].strip()
                    polarity = polarity if polarity else LABEL_PADDING
                    text = text.replace('[PADDING]', '')
                else:
                    polarity = str(LABEL_PADDING)
                # simply add padding in case of some aspect is at the beginning or ending of a sentence
                text_left, aspect, text_right = text.split('[ASP]')
                text_left = text_left.replace('[PADDING] ', '')
                text_right = text_right.replace(' [PADDING]', '')
                text = text_left + ' ' + aspect + ' ' + text_right
                # Tokenize and build the LCF (local context focus) input vectors.
                prepared_inputs = prepare_input_for_apc(self.opt, self.tokenizer, text_left, text_right, aspect, input_demands=self.opt.inputs_cols)
                text_raw = prepared_inputs['text_raw']
                aspect = prepared_inputs['aspect']
                aspect_position = prepared_inputs['aspect_position']
                text_bert_indices = prepared_inputs['text_bert_indices']
                text_raw_bert_indices = prepared_inputs['text_raw_bert_indices']
                aspect_bert_indices = prepared_inputs['aspect_bert_indices']
                lcfs_vec = prepared_inputs['lcfs_vec']
                lcf_vec = prepared_inputs['lcf_vec']
                validate_example(text_raw, aspect, polarity)
                # DLCF-DCA models additionally need dependency-based vectors.
                if self.opt.model_name == 'dlcf_dca_bert' or self.opt.model_name == 'dlcfs_dca_bert':
                    configure_dlcf_spacy_model(self.opt)
                    prepared_inputs = prepare_input_for_dlcf_dca(self.opt, self.tokenizer, text_left, text_right, aspect)
                    dlcf_vec = prepared_inputs['dlcf_cdm_vec'] if self.opt.lcf == 'cdm' else prepared_inputs['dlcf_cdw_vec']
                    dlcfs_vec = prepared_inputs['dlcfs_cdm_vec'] if self.opt.lcf == 'cdm' else prepared_inputs['dlcfs_cdw_vec']
                    depend_vec = prepared_inputs['depend_vec']
                    depended_vec = prepared_inputs['depended_vec']
                # Only features listed in opt.inputs_cols are kept; others are 0.
                data = {
                    'ex_id': ex_id,
                    'text_raw': text_raw,
                    'aspect': aspect,
                    'aspect_position': aspect_position,
                    'lca_ids': lcf_vec,  # the lca indices are the same as the refactored CDM (lcf != CDW or Fusion) lcf vec
                    'lcf_vec': lcf_vec if 'lcf_vec' in self.opt.inputs_cols else 0,
                    'lcfs_vec': lcfs_vec if 'lcfs_vec' in self.opt.inputs_cols else 0,
                    'dlcf_vec': dlcf_vec if 'dlcf_vec' in self.opt.inputs_cols else 0,
                    'dlcfs_vec': dlcfs_vec if 'dlcfs_vec' in self.opt.inputs_cols else 0,
                    'depend_vec': depend_vec if 'depend_vec' in self.opt.inputs_cols else 0,
                    'depended_vec': depended_vec if 'depended_vec' in self.opt.inputs_cols else 0,
                    'spc_mask_vec': build_spc_mask_vec(self.opt, text_raw_bert_indices)
                    if 'spc_mask_vec' in self.opt.inputs_cols else 0,
                    'text_bert_indices': text_bert_indices
                    if 'text_bert_indices' in self.opt.inputs_cols else 0,
                    'aspect_bert_indices': aspect_bert_indices
                    if 'aspect_bert_indices' in self.opt.inputs_cols else 0,
                    'text_raw_bert_indices': text_raw_bert_indices
                    if 'text_raw_bert_indices' in self.opt.inputs_cols else 0,
                    'polarity': polarity,
                }
                label_set.add(polarity)
                ex_id += 1
                all_data.append(data)
            except Exception as e:
                if ignore_error:
                    print('Ignore error while processing: {} Error info:{}'.format(text, e))
                else:
                    raise RuntimeError('Catch Exception: {}, use ignore_error=True to remove error samples.'.format(e))
        self.opt.polarities_dim = len(label_set)
        # Models with side-window LCF inputs need aspect clustering per sentence.
        if 'left_lcf_vec' in self.opt.inputs_cols or 'right_lcf_vec' in self.opt.inputs_cols \
                or 'left_lcfs_vec' in self.opt.inputs_cols or 'right_lcfs_vec' in self.opt.inputs_cols:
            all_data = build_sentiment_window(all_data, self.tokenizer, self.opt.similarity_threshold)
            for data in all_data:
                cluster_ids = []
                for pad_idx in range(self.opt.max_seq_len):
                    if pad_idx in data['cluster_ids']:
                        # print(data['polarity'])
                        cluster_ids.append(self.opt.label_to_index.get(self.opt.index_to_label.get(data['polarity'], 'N.A.'), -999))
                    else:
                        # -100 is the conventional ignore-index for loss masking.
                        cluster_ids.append(-100)
                        # cluster_ids.append(3)
                data['cluster_ids'] = np.asarray(cluster_ids, dtype=np.int64)
                data['side_ex_ids'] = np.array(0)
                data['aspect_position'] = np.array(0)
        else:
            for data in all_data:
                data['aspect_position'] = np.array(0)
        self.all_data = all_data
        return all_data
    def __getitem__(self, index):
        return self.all_data[index]
    def __len__(self):
        return len(self.all_data)
| [
"pyabsa.utils.pyabsa_utils.validate_example",
"numpy.array",
"tqdm.tqdm",
"numpy.asarray"
] | [((2899, 2948), 'tqdm.tqdm', 'tqdm', (['samples'], {'postfix': '"""building word indices..."""'}), "(samples, postfix='building word indices...')\n", (2903, 2948), False, 'from tqdm import tqdm\n'), ((4669, 4713), 'pyabsa.utils.pyabsa_utils.validate_example', 'validate_example', (['text_raw', 'aspect', 'polarity'], {}), '(text_raw, aspect, polarity)\n', (4685, 4713), False, 'from pyabsa.utils.pyabsa_utils import check_and_fix_labels, validate_example\n'), ((8185, 8224), 'numpy.asarray', 'np.asarray', (['cluster_ids'], {'dtype': 'np.int64'}), '(cluster_ids, dtype=np.int64)\n', (8195, 8224), True, 'import numpy as np\n'), ((8263, 8274), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (8271, 8274), True, 'import numpy as np\n'), ((8317, 8328), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (8325, 8328), True, 'import numpy as np\n'), ((8420, 8431), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (8428, 8431), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#encoding: utf-8
import os
import time
import numpy as np
import LED
import pandas as pd
import fixedsizes as fx
import pickle
import lirc
def blank_display():
for i in range(LED.DRIVER_COUNT*24):
LED.tlc5947[i] = 0
LED.tlc5947.write()
def apply_pattern(filename):
global LED, pattern_selected, old_ircode, new_ircode
if pattern_selected != filename:
pattern_selected = filename
Render = 0
f=open(filename,'rb')
Render = pickle.load(f)
f.close
Form = np.shape(Render)
Render.shape = (int(Form[0]/960),960)
Form = np.shape(Render)
LengthFrame = Form[1]
NumberFrames =Form[0]
for FrameCount in range(1,NumberFrames):
codeIR = lirc.nextcode()
if codeIR != []:
new_ircode = codeIR[0]
old_ircode = "update"
pattern_selected = ""
blank_display()
break
#Frame = Render[FrameCount]
for FrameAdress in range(0,LengthFrame):
LED.tlc5947[FrameAdress]=Render[FrameCount,FrameAdress]
LED.tlc5947.write()
def act_on_code(code):
if code == "KEY_UP":
apply_pattern("/home/pi/hex_display/pattern1.rnd")
elif code == "KEY_DOWN":
apply_pattern("/home/pi/hex_display/pattern3.rnd")
elif code == "KEY_LEFT":
apply_pattern("/home/pi/hex_display/pattern4.rnd")
elif code == "KEY_RIGHT":
apply_pattern("/home/pi/hex_display/pattern2.rnd")
elif code == "KEY_OK":
apply_pattern("/home/pi/hex_display/default.rnd")
elif code == "KEY_MENU":
print(str(code))
elif code == "KEY_PLAYPAUSE":
print(str(code))
if __name__ == '__main__':
global Render, LED, pattern_selected, old_ircode, new_ircode
pattern_selected = ""
old_ircode = ""
new_ircode = ""
#initialisieren GPIO's und LED's
LED.Init_Panel()
sockid=lirc.init("appleremote", blocking = False)
try:
apply_pattern("/home/pi/hex_display/pattern0.rnd")
pattern_selected = "/home/pi/hex_display/default.rnd"
except Exception as e:
print(str(e.args))
while True:
try:
old_ircode = new_ircode
codeIR = lirc.nextcode()
if codeIR != []:
new_ircode = codeIR[0]
act_on_code(new_ircode)
elif pattern_selected != "":
apply_pattern(pattern_selected)
if old_ircode == "update":
act_on_code(new_ircode)
time.sleep(0.02)
except KeyboardInterrupt:
raise
except Exception as e:
print(str(e.args))
pattern_selected = ""
old_ircode = ""
new_ircode = ""
| [
"lirc.nextcode",
"pickle.load",
"time.sleep",
"LED.tlc5947.write",
"lirc.init",
"LED.Init_Panel",
"numpy.shape"
] | [((244, 263), 'LED.tlc5947.write', 'LED.tlc5947.write', ([], {}), '()\n', (261, 263), False, 'import LED\n'), ((457, 471), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (468, 471), False, 'import pickle\n'), ((489, 505), 'numpy.shape', 'np.shape', (['Render'], {}), '(Render)\n', (497, 505), True, 'import numpy as np\n'), ((553, 569), 'numpy.shape', 'np.shape', (['Render'], {}), '(Render)\n', (561, 569), True, 'import numpy as np\n'), ((1658, 1674), 'LED.Init_Panel', 'LED.Init_Panel', ([], {}), '()\n', (1672, 1674), False, 'import LED\n'), ((1685, 1725), 'lirc.init', 'lirc.init', (['"""appleremote"""'], {'blocking': '(False)'}), "('appleremote', blocking=False)\n", (1694, 1725), False, 'import lirc\n'), ((669, 684), 'lirc.nextcode', 'lirc.nextcode', ([], {}), '()\n', (682, 684), False, 'import lirc\n'), ((942, 961), 'LED.tlc5947.write', 'LED.tlc5947.write', ([], {}), '()\n', (959, 961), False, 'import LED\n'), ((1947, 1962), 'lirc.nextcode', 'lirc.nextcode', ([], {}), '()\n', (1960, 1962), False, 'import lirc\n'), ((2167, 2183), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (2177, 2183), False, 'import time\n')] |
from ScopeFoundry.data_browser import DataBrowser, HyperSpectralBaseView
import numpy as np
class HyperSpecNPZView(HyperSpectralBaseView):
name = 'hyperspec_npz'
def is_file_supported(self, fname):
return "_spec_scan.npz" in fname
def load_data(self, fname):
self.dat = np.load(fname)
self.spec_map = self.dat['spec_map']
self.integrated_count_map = self.dat['integrated_count_map']
self.hyperspec_data = self.spec_map
self.display_image = self.integrated_count_map
self.spec_x_array = self.dat['wls']
def scan_specific_setup(self):
self.spec_plot.setLabel('left', 'Intensity', units='counts')
self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
def spectral_median(spec,wls, count_min=200):
int_spec = np.cumsum(spec)
total_sum = int_spec[-1]
if total_sum > count_min:
pos = int_spec.searchsorted( 0.5*total_sum)
wl = wls[pos]
else:
wl = np.NaN
return wl
class HyperSpecSpecMedianNPZView(HyperSpectralBaseView):
name = 'hyperspec_spec_median_npz'
def is_file_supported(self, fname):
return "_spec_scan.npz" in fname
def load_data(self, fname):
self.dat = np.load(fname)
self.spec_map = self.dat['spec_map']
self.wls = self.dat['wls']
self.integrated_count_map = self.dat['integrated_count_map']
self.spec_median_map = np.apply_along_axis(spectral_median, 2,
self.spec_map[:,:,:],
self.wls, 0)
self.hyperspec_data = self.spec_map
self.display_image = self.spec_median_map
self.spec_x_array = self.wls
def scan_specific_setup(self):
self.spec_plot.setLabel('left', 'Intensity', units='counts')
self.spec_plot.setLabel('bottom', 'Wavelength', units='nm')
if __name__ == '__main__':
import sys
app = DataBrowser(sys.argv)
app.load_view(HyperSpecNPZView(app))
sys.exit(app.exec_()) | [
"numpy.cumsum",
"ScopeFoundry.data_browser.DataBrowser",
"numpy.load",
"numpy.apply_along_axis"
] | [((848, 863), 'numpy.cumsum', 'np.cumsum', (['spec'], {}), '(spec)\n', (857, 863), True, 'import numpy as np\n'), ((2054, 2075), 'ScopeFoundry.data_browser.DataBrowser', 'DataBrowser', (['sys.argv'], {}), '(sys.argv)\n', (2065, 2075), False, 'from ScopeFoundry.data_browser import DataBrowser, HyperSpectralBaseView\n'), ((314, 328), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (321, 328), True, 'import numpy as np\n'), ((1293, 1307), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (1300, 1307), True, 'import numpy as np\n'), ((1497, 1573), 'numpy.apply_along_axis', 'np.apply_along_axis', (['spectral_median', '(2)', 'self.spec_map[:, :, :]', 'self.wls', '(0)'], {}), '(spectral_median, 2, self.spec_map[:, :, :], self.wls, 0)\n', (1516, 1573), True, 'import numpy as np\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main script to launch AugMix training on CIFAR-10/100.
Supports WideResNet, AllConv, ResNeXt models on CIFAR-10 and CIFAR-100 as well
as evaluation on CIFAR-10-C and CIFAR-100-C.
Example usage:
`python cifar.py`
"""
from __future__ import print_function
import argparse
import os
import shutil
import time
import augmentations
from models.cifar.allconv import AllConvNet
import numpy as np
from third_party.ResNeXt_DenseNet.models.densenet import densenet
from third_party.ResNeXt_DenseNet.models.resnext import resnext29
from third_party.WideResNet_pytorch.wideresnet import WideResNet
import torch.nn as nn
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
parser = argparse.ArgumentParser(
description='Trains a CIFAR Classifier',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--dataset',
type=str,
default='cifar10',
choices=['cifar10', 'cifar100'],
help='Choose between CIFAR-10, CIFAR-100.')
parser.add_argument(
'--model',
'-m',
type=str,
default='wrn',
choices=['wrn', 'allconv', 'densenet', 'resnext'],
help='Choose architecture.')
# Optimization options
parser.add_argument(
'--epochs', '-e', type=int, default=100, help='Number of epochs to train.')
parser.add_argument(
'--learning-rate',
'-lr',
type=float,
default=0.1,
help='Initial learning rate.')
parser.add_argument(
'--batch-size', '-b', type=int, default=128, help='Batch size.')
parser.add_argument('--eval-batch-size', type=int, default=1000)
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')
parser.add_argument(
'--decay',
'-wd',
type=float,
default=0.0005,
help='Weight decay (L2 penalty).')
# WRN Architecture options
parser.add_argument(
'--layers', default=40, type=int, help='total number of layers')
parser.add_argument('--widen-factor', default=2, type=int, help='Widen factor')
parser.add_argument(
'--droprate', default=0.0, type=float, help='Dropout probability')
# AugMix options
parser.add_argument(
'--mixture-width',
default=3,
type=int,
help='Number of augmentation chains to mix per augmented example')
parser.add_argument(
'--mixture-depth',
default=-1,
type=int,
help='Depth of augmentation chains. -1 denotes stochastic depth in [1, 3]')
parser.add_argument(
'--aug-severity',
default=3,
type=int,
help='Severity of base augmentation operators')
parser.add_argument(
'--no-jsd',
'-nj',
action='store_true',
help='Turn off JSD consistency loss.')
parser.add_argument(
'--all-ops',
'-all',
action='store_true',
help='Turn on all operations (+brightness,contrast,color,sharpness).')
# Checkpointing options
parser.add_argument(
'--save',
'-s',
type=str,
default='./snapshots',
help='Folder to save checkpoints.')
parser.add_argument(
'--resume',
'-r',
type=str,
default='',
help='Checkpoint path for resume / test.')
parser.add_argument('--evaluate', action='store_true', help='Eval only.')
parser.add_argument(
'--print-freq',
type=int,
default=50,
help='Training loss print frequency (batches).')
# Acceleration
parser.add_argument(
'--num-workers',
type=int,
default=4,
help='Number of pre-fetching threads.')
args = parser.parse_args()
CORRUPTIONS = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
def get_lr(step, total_steps, lr_max, lr_min):
"""Compute learning rate according to cosine annealing schedule."""
return lr_min + (lr_max - lr_min) * 0.5 * (1 +
np.cos(step / total_steps * np.pi))
def aug(image1, image2, preprocess):
"""Perform AugMix augmentations and compute mixture.
Args:
image: PIL.Image input image
preprocess: Preprocessing function which should return a torch tensor.
Returns:
mixed: Augmented and mixed image.
"""
m = np.float32(np.random.beta(1, 1))
mixed = (1 - m) * preprocess(image1) + m * preprocess(image2)
return mixed, m
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix augmentation."""
def __init__(self, dataset, preprocess, no_jsd=False):
self.dataset = dataset
self.preprocess = preprocess
self.no_jsd = no_jsd
self.index = torch.randperm(len(self.dataset))
def __getitem__(self, i):
x1, y1 = self.dataset[i]
x2, y2 = self.dataset[self.index[i]]
return aug(x1, x2, self.preprocess), y1, y2
def __len__(self):
return len(self.dataset)
def mixup_criterion(pred, y_a, y_b, lam):
criterion = nn.CrossEntropyLoss()
return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
def train(net, train_loader, optimizer, scheduler):
"""Train for one epoch."""
net.train()
loss_ema = 0.
for i, ((images, lam), targets1, targets2) in enumerate(train_loader):
optimizer.zero_grad()
images = images.cuda()
targets1 = targets1.cuda()
targets2 = targets2.cuda()
logits = net(images)
loss = mixup_criterion(logits, targets1, targets2, lam)
loss.backward()
optimizer.step()
scheduler.step()
loss_ema = loss_ema * 0.9 + float(loss) * 0.1
if i % args.print_freq == 0:
print('Train Loss {:.3f}'.format(loss_ema))
return loss_ema
def test(net, test_loader):
"""Evaluate network on given dataset."""
net.eval()
total_loss = 0.
total_correct = 0
with torch.no_grad():
for images, targets in test_loader:
images, targets = images.cuda(), targets.cuda()
logits = net(images)
loss = F.cross_entropy(logits, targets)
pred = logits.data.max(1)[1]
total_loss += float(loss.data)
total_correct += pred.eq(targets.data).sum().item()
return total_loss / len(test_loader.dataset), total_correct / len(
test_loader.dataset)
def test_c(net, test_data, base_path):
"""Evaluate network on given corrupted dataset."""
corruption_accs = []
for corruption in CORRUPTIONS:
# Reference to original data is mutated
test_data.data = np.load(base_path + corruption + '.npy')
test_data.targets = torch.LongTensor(np.load(base_path + 'labels.npy'))
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
test_loss, test_acc = test(net, test_loader)
corruption_accs.append(test_acc)
print('{}\n\tTest Loss {:.3f} | Test Error {:.3f}'.format(
corruption, test_loss, 100 - 100. * test_acc))
return np.mean(corruption_accs)
def main():
torch.manual_seed(1)
np.random.seed(1)
# Load datasets
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4)])
preprocess = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)])
test_transform = preprocess
if args.dataset == 'cifar10':
train_data = datasets.CIFAR10(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR10(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-10-C/'
num_classes = 10
else:
train_data = datasets.CIFAR100(
'./data/cifar', train=True, transform=train_transform, download=True)
test_data = datasets.CIFAR100(
'./data/cifar', train=False, transform=test_transform, download=True)
base_c_path = './data/cifar/CIFAR-100-C/'
num_classes = 100
train_data = AugMixDataset(train_data, preprocess, args.no_jsd)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers,
pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.eval_batch_size,
shuffle=False,
num_workers=args.num_workers,
pin_memory=True)
# Create model
if args.model == 'densenet':
net = densenet(num_classes=num_classes)
elif args.model == 'wrn':
net = WideResNet(args.layers, num_classes, args.widen_factor, args.droprate)
elif args.model == 'allconv':
net = AllConvNet(num_classes)
elif args.model == 'resnext':
net = resnext29(num_classes=num_classes)
optimizer = torch.optim.SGD(
net.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.decay,
nesterov=True)
# Distribute model across all visible GPUs
net = torch.nn.DataParallel(net).cuda()
cudnn.benchmark = True
start_epoch = 0
if args.resume:
if os.path.isfile(args.resume):
checkpoint = torch.load(args.resume)
start_epoch = checkpoint['epoch'] + 1
best_acc = checkpoint['best_acc']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print('Model restored from epoch:', start_epoch)
if args.evaluate:
# Evaluate clean accuracy first because test_c mutates underlying data
test_loss, test_acc = test(net, test_loader)
print('Clean\n\tTest Loss {:.3f} | Test Error {:.2f}'.format(
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
return
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: get_lr( # pylint: disable=g-long-lambda
step,
args.epochs * len(train_loader),
1, # lr_lambda computes multiplicative factor
1e-6 / args.learning_rate))
if not os.path.exists(args.save):
os.makedirs(args.save)
if not os.path.isdir(args.save):
raise Exception('%s is not a dir' % args.save)
log_path = os.path.join(args.save,
args.dataset + '_' + args.model + '_training_log.csv')
with open(log_path, 'w') as f:
f.write('epoch,time(s),train_loss,test_loss,test_error(%)\n')
best_acc = 0
print('Beginning training from epoch:', start_epoch + 1)
for epoch in range(start_epoch, args.epochs):
begin_time = time.time()
train_loss_ema = train(net, train_loader, optimizer, scheduler)
test_loss, test_acc = test(net, test_loader)
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
checkpoint = {
'epoch': epoch,
'dataset': args.dataset,
'model': args.model,
'state_dict': net.state_dict(),
'best_acc': best_acc,
'optimizer': optimizer.state_dict(),
}
save_path = os.path.join(args.save, 'checkpoint.pth.tar')
torch.save(checkpoint, save_path)
if is_best:
shutil.copyfile(save_path, os.path.join(args.save, 'model_best.pth.tar'))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' % (
(epoch + 1),
time.time() - begin_time,
train_loss_ema,
test_loss,
100 - 100. * test_acc,
))
print(
'Epoch {0:3d} | Time {1:5d} | Train Loss {2:.4f} | Test Loss {3:.3f} |'
' Test Error {4:.2f}'
.format((epoch + 1), int(time.time() - begin_time), train_loss_ema,
test_loss, 100 - 100. * test_acc))
test_c_acc = test_c(net, test_data, base_c_path)
print('Mean Corruption Error: {:.3f}'.format(100 - 100. * test_c_acc))
with open(log_path, 'a') as f:
f.write('%03d,%05d,%0.6f,%0.5f,%0.2f\n' %
(args.epochs + 1, 0, 0, 0, 100 - 100 * test_c_acc))
if __name__ == '__main__':
main()
| [
"torchvision.datasets.CIFAR100",
"torch.nn.CrossEntropyLoss",
"third_party.WideResNet_pytorch.wideresnet.WideResNet",
"models.cifar.allconv.AllConvNet",
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"third_party.ResNeXt_DenseNet.models.densenet.densenet",
"os.path.isdir",
"numpy.rando... | [((1434, 1558), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trains a CIFAR Classifier"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Trains a CIFAR Classifier',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (1457, 1558), False, 'import argparse\n'), ((5550, 5571), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5569, 5571), True, 'import torch.nn as nn\n'), ((7527, 7551), 'numpy.mean', 'np.mean', (['corruption_accs'], {}), '(corruption_accs)\n', (7534, 7551), True, 'import numpy as np\n'), ((7568, 7588), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (7585, 7588), False, 'import torch\n'), ((7591, 7608), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (7605, 7608), True, 'import numpy as np\n'), ((8614, 8747), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_data'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(train_data, batch_size=args.batch_size, shuffle\n =True, num_workers=args.num_workers, pin_memory=True)\n', (8641, 8747), False, 'import torch\n'), ((8791, 8928), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(test_data, batch_size=args.eval_batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n', (8818, 8928), False, 'import torch\n'), ((10792, 10870), 'os.path.join', 'os.path.join', (['args.save', "(args.dataset + '_' + args.model + '_training_log.csv')"], {}), "(args.save, args.dataset + '_' + args.model + '_training_log.csv')\n", (10804, 10870), False, 'import os\n'), ((4891, 4911), 'numpy.random.beta', 'np.random.beta', (['(1)', '(1)'], {}), '(1, 1)\n', (4905, 4911), True, 'import numpy as np\n'), ((6375, 6390), 
'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6388, 6390), False, 'import torch\n'), ((7001, 7041), 'numpy.load', 'np.load', (["(base_path + corruption + '.npy')"], {}), "(base_path + corruption + '.npy')\n", (7008, 7041), True, 'import numpy as np\n'), ((7137, 7274), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_data'], {'batch_size': 'args.eval_batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(test_data, batch_size=args.eval_batch_size,\n shuffle=False, num_workers=args.num_workers, pin_memory=True)\n', (7164, 7274), False, 'import torch\n'), ((7953, 8043), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data/cifar"""'], {'train': '(True)', 'transform': 'train_transform', 'download': '(True)'}), "('./data/cifar', train=True, transform=train_transform,\n download=True)\n", (7969, 8043), False, 'from torchvision import datasets\n'), ((8065, 8155), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', (['"""./data/cifar"""'], {'train': '(False)', 'transform': 'test_transform', 'download': '(True)'}), "('./data/cifar', train=False, transform=test_transform,\n download=True)\n", (8081, 8155), False, 'from torchvision import datasets\n'), ((8252, 8343), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', (['"""./data/cifar"""'], {'train': '(True)', 'transform': 'train_transform', 'download': '(True)'}), "('./data/cifar', train=True, transform=train_transform,\n download=True)\n", (8269, 8343), False, 'from torchvision import datasets\n'), ((8365, 8456), 'torchvision.datasets.CIFAR100', 'datasets.CIFAR100', (['"""./data/cifar"""'], {'train': '(False)', 'transform': 'test_transform', 'download': '(True)'}), "('./data/cifar', train=False, transform=test_transform,\n download=True)\n", (8382, 8456), False, 'from torchvision import datasets\n'), ((9015, 9048), 'third_party.ResNeXt_DenseNet.models.densenet.densenet', 'densenet', ([], {'num_classes': 'num_classes'}), 
'(num_classes=num_classes)\n', (9023, 9048), False, 'from third_party.ResNeXt_DenseNet.models.densenet import densenet\n'), ((9623, 9650), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (9637, 9650), False, 'import os\n'), ((10638, 10663), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (10652, 10663), False, 'import os\n'), ((10669, 10691), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (10680, 10691), False, 'import os\n'), ((10701, 10725), 'os.path.isdir', 'os.path.isdir', (['args.save'], {}), '(args.save)\n', (10714, 10725), False, 'import os\n'), ((11136, 11147), 'time.time', 'time.time', ([], {}), '()\n', (11145, 11147), False, 'import time\n'), ((11583, 11628), 'os.path.join', 'os.path.join', (['args.save', '"""checkpoint.pth.tar"""'], {}), "(args.save, 'checkpoint.pth.tar')\n", (11595, 11628), False, 'import os\n'), ((11633, 11666), 'torch.save', 'torch.save', (['checkpoint', 'save_path'], {}), '(checkpoint, save_path)\n', (11643, 11666), False, 'import torch\n'), ((6526, 6558), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'targets'], {}), '(logits, targets)\n', (6541, 6558), True, 'import torch.nn.functional as F\n'), ((7083, 7116), 'numpy.load', 'np.load', (["(base_path + 'labels.npy')"], {}), "(base_path + 'labels.npy')\n", (7090, 7116), True, 'import numpy as np\n'), ((7675, 7708), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (7706, 7708), False, 'from torchvision import transforms\n'), ((7717, 7753), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (7738, 7753), False, 'from torchvision import transforms\n'), ((7798, 7819), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7817, 7819), False, 'from torchvision import transforms\n'), ((7828, 7870), 'torchvision.transforms.Normalize', 'transforms.Normalize', 
(['([0.5] * 3)', '([0.5] * 3)'], {}), '([0.5] * 3, [0.5] * 3)\n', (7848, 7870), False, 'from torchvision import transforms\n'), ((9087, 9157), 'third_party.WideResNet_pytorch.wideresnet.WideResNet', 'WideResNet', (['args.layers', 'num_classes', 'args.widen_factor', 'args.droprate'], {}), '(args.layers, num_classes, args.widen_factor, args.droprate)\n', (9097, 9157), False, 'from third_party.WideResNet_pytorch.wideresnet import WideResNet\n'), ((9519, 9545), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (9540, 9545), False, 'import torch\n'), ((9671, 9694), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (9681, 9694), False, 'import torch\n'), ((4573, 4607), 'numpy.cos', 'np.cos', (['(step / total_steps * np.pi)'], {}), '(step / total_steps * np.pi)\n', (4579, 4607), True, 'import numpy as np\n'), ((9200, 9223), 'models.cifar.allconv.AllConvNet', 'AllConvNet', (['num_classes'], {}), '(num_classes)\n', (9210, 9223), False, 'from models.cifar.allconv import AllConvNet\n'), ((11716, 11761), 'os.path.join', 'os.path.join', (['args.save', '"""model_best.pth.tar"""'], {}), "(args.save, 'model_best.pth.tar')\n", (11728, 11761), False, 'import os\n'), ((9266, 9300), 'third_party.ResNeXt_DenseNet.models.resnext.resnext29', 'resnext29', ([], {'num_classes': 'num_classes'}), '(num_classes=num_classes)\n', (9275, 9300), False, 'from third_party.ResNeXt_DenseNet.models.resnext import resnext29\n'), ((12152, 12163), 'time.time', 'time.time', ([], {}), '()\n', (12161, 12163), False, 'import time\n'), ((11882, 11893), 'time.time', 'time.time', ([], {}), '()\n', (11891, 11893), False, 'import time\n')] |
from pandas_datareader import data
start_date = '2014-01-01'
end_date = '2018-01-01'
goog_data = data.DataReader('GOOG', 'yahoo', start_date, end_date)
import numpy as np
import pandas as pd
goog_data_signal = pd.DataFrame(index=goog_data.index)
goog_data_signal['price'] = goog_data['Adj Close']
goog_data_signal['daily_difference'] = goog_data_signal['price'].diff()
goog_data_signal['signal'] = 0.0
goog_data_signal['signal'][:] = np.where(goog_data_signal['daily_difference'][:] > 0, 1.0, 0.0)
goog_data_signal['positions'] = goog_data_signal['signal'].diff()
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Google price in $')
goog_data_signal['price'].plot(ax=ax1, color='r', lw=2.)
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == 1.0].index,
goog_data_signal.price[goog_data_signal.positions == 1.0],
'^', markersize=5, color='m')
ax1.plot(goog_data_signal.loc[goog_data_signal.positions == -1.0].index,
goog_data_signal.price[goog_data_signal.positions == -1.0],
'v', markersize=5, color='k')
#plt.show()
# Set the initial capital
initial_capital= float(1000.0)
positions = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)
portfolio = pd.DataFrame(index=goog_data_signal.index).fillna(0.0)
positions['GOOG'] = goog_data_signal['signal']
portfolio['positions'] = (positions.multiply(goog_data_signal['price'], axis=0))
portfolio['cash'] = initial_capital - (positions.diff().multiply(goog_data_signal['price'], axis=0)).cumsum()
portfolio['total'] = portfolio['positions'] + portfolio['cash']
portfolio.plot()
plt.show()
fig = plt.figure()
ax1 = fig.add_subplot(111, ylabel='Portfolio value in $')
portfolio['total'].plot(ax=ax1, lw=2.)
ax1.plot(portfolio.loc[goog_data_signal.positions == 1.0].index,portfolio.total[goog_data_signal.positions == 1.0],'^', markersize=10, color='m')
ax1.plot(portfolio.loc[goog_data_signal.positions == -1.0].index,portfolio.total[goog_data_signal.positions == -1.0],'v', markersize=10, color='k')
plt.show() | [
"pandas_datareader.data.DataReader",
"numpy.where",
"matplotlib.pyplot.figure",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((97, 151), 'pandas_datareader.data.DataReader', 'data.DataReader', (['"""GOOG"""', '"""yahoo"""', 'start_date', 'end_date'], {}), "('GOOG', 'yahoo', start_date, end_date)\n", (112, 151), False, 'from pandas_datareader import data\n'), ((213, 248), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'goog_data.index'}), '(index=goog_data.index)\n', (225, 248), True, 'import pandas as pd\n'), ((437, 500), 'numpy.where', 'np.where', (["(goog_data_signal['daily_difference'][:] > 0)", '(1.0)', '(0.0)'], {}), "(goog_data_signal['daily_difference'][:] > 0, 1.0, 0.0)\n", (445, 500), True, 'import numpy as np\n'), ((607, 619), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (617, 619), True, 'import matplotlib.pyplot as plt\n'), ((1622, 1632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1630, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1651, 1653), True, 'import matplotlib.pyplot as plt\n'), ((2045, 2055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2053, 2055), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1221), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'goog_data_signal.index'}), '(index=goog_data_signal.index)\n', (1191, 1221), True, 'import pandas as pd\n'), ((1246, 1288), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'goog_data_signal.index'}), '(index=goog_data_signal.index)\n', (1258, 1288), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
from tools import *
from sys import argv
from os.path import join
import h5py
import matplotlib.pylab as plt
from matplotlib.patches import Wedge
import numpy as np
if len(argv) > 1:
pathToSimFolder = argv[1]
else:
pathToSimFolder = "../data/"
parameters, electrodes = readParameters(pathToSimFolder)
electrodeNumber = len(electrodes)
acceptorPos = np.zeros((int(parameters["acceptorNumber"]), 2))
try:
donorPos = np.zeros((int(parameters["donorNumber"]), 2))
except KeyError:
donorPos = np.zeros(
(int(parameters["acceptorNumber"] * parameters["compensationFactor"]), 2)
)
with open(join(pathToSimFolder, "device.txt")) as deviceFile:
line = next(deviceFile)
line = next(deviceFile)
for i in range(acceptorPos.shape[0]):
acceptorPos[i] = next(deviceFile).split(" ")
line = next(deviceFile)
line = next(deviceFile)
for i in range(donorPos.shape[0]):
donorPos[i] = next(deviceFile).split(" ")
# print(acceptorPos)
# print(donorPos)
electrodePositions = np.empty((len(electrodes), 2))
for i in range(len(electrodes)):
if parameters["geometry"] == "rect":
if electrodes[i][1] == 0:
electrodePositions[i] = [0, electrodes[i][0] * parameters["lenY"]]
if electrodes[i][1] == 1:
electrodePositions[i] = [
parameters["lenX"],
electrodes[i][0] * parameters["lenY"],
]
if electrodes[i][1] == 2:
electrodePositions[i] = [electrodes[i][0] * parameters["lenX"], 0]
if electrodes[i][1] == 3:
electrodePositions[i] = [
electrodes[i][0] * parameters["lenX"],
parameters["lenY"],
]
elif parameters["geometry"] == "circle":
electrodePositions[i] = [
parameters["radius"] * np.cos(electrodes[i][0] / 360 * 2 * np.pi),
parameters["radius"] * np.sin(electrodes[i][0] / 360 * 2 * np.pi),
]
# print(electrodePositions)
def colorMaker(x):
    """Map x in [0, 1] to an RGBA array linearly interpolated between
    dark red (x=0) and dark green (x=1); alpha is always 1."""
    from matplotlib import colors
    from scipy.interpolate import interp1d

    anchor_names = ["darkred", "darkgreen"]
    anchor_rgba = np.array([colors.to_rgba(name) for name in anchor_names])
    positions = np.linspace(0, 1, len(anchor_names))
    # Interpolate each of the R, G, B channels independently at x.
    channels = [interp1d(positions, anchor_rgba[:, c])(x) for c in range(3)]
    return np.array(channels + [1])
# Input-pattern labels used in the output filenames, one per swap-track file.
inp = ["0_0", "0_1", "1_0", "1_1"]
for fileNumber in [1, 2, 3, 4]:
    # for fileNumber in [1]:
    # Each row of the track file is a (site_i, site_j) pair for one swap event.
    data = np.genfromtxt(
        join(pathToSimFolder, f"swapTrackFile{fileNumber}.txt"),
        delimiter=";",
        dtype=int,
    )
    # Count swaps per ordered (i, j) pair by flattening the pair into a single
    # index and using bincount, then reshaping into a square matrix.
    maxIndex = np.max(data)
    added = (maxIndex + 1) * data[:, 0] + data[:, 1]
    bins = np.bincount(added)
    bins.resize(maxIndex + 1, maxIndex + 1)
    # Symmetrize (swaps in either direction) and normalize to the busiest pair.
    absBins = bins + bins.T
    absBins = absBins / np.max(absBins)
    distances = []
    swapps = []
    # Indices < acceptorNumber are acceptor sites; the rest map to electrodes.
    for i in range(bins.shape[0]):
        if i >= parameters["acceptorNumber"]:
            x1, y1 = (
                electrodePositions[i - int(parameters["acceptorNumber"])][0],
                electrodePositions[i - int(parameters["acceptorNumber"])][1],
            )
        else:
            x1, y1 = acceptorPos[i, 0], acceptorPos[i, 1]
        for j in range(i):
            if j >= parameters["acceptorNumber"]:
                x2, y2 = (
                    electrodePositions[j - int(parameters["acceptorNumber"])][0],
                    electrodePositions[j - int(parameters["acceptorNumber"])][1],
                )
            else:
                x2, y2 = acceptorPos[j, 0], acceptorPos[j, 1]
            # ax.plot([x1,x2],[y1,y2],"k-",alpha=bins[i,j])
            # Record the pair distance weighted by its normalized swap count.
            if (bins[i, j] + bins[j, i]) != 0:
                # NOTE(review): currentRatio is computed but never used — dead code?
                currentRatio = bins[i, j] / (bins[i, j] + bins[j, i])
                distances.append(np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2))
                swapps.append(absBins[i, j])
    # Linear-scale histogram of swap probability vs. hop distance.
    fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
    ax.hist(
        distances,
        weights=swapps,
        bins=50,
        density=True,
        histtype="step",
        color="k",
        range=[0, 100],
    )
    ax.set_xlabel(r"d [nm]")
    ax.set_ylabel(r"P(swapp)")
    plt.savefig(
        join(pathToSimFolder, f"swapHist_{inp[fileNumber-1]}.png"),
        bbox_inches="tight",
        dpi=300,
    )
    # plt.show()
    plt.close(fig)
    # Same histogram on a logarithmic y-axis.
    fig, ax = plt.subplots(1, 1, figsize=(4.980614173228346, 3.2))
    ax.hist(
        distances,
        weights=swapps,
        bins=50,
        density=True,
        histtype="step",
        color="k",
        range=[0, 100],
        log=True,
    )
    ax.set_xlabel(r"d [nm]")
    ax.set_ylabel(r"P(swapp)")
    plt.savefig(
        join(pathToSimFolder, f"swapHist_{inp[fileNumber-1]}_log.png"),
        bbox_inches="tight",
        dpi=300,
    )
    # plt.show()
    plt.close(fig)
| [
"matplotlib.pylab.subplots",
"numpy.sqrt",
"matplotlib.colors.to_rgba",
"os.path.join",
"numpy.max",
"numpy.cos",
"matplotlib.pylab.close",
"numpy.sin",
"numpy.bincount"
] | [((2761, 2773), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2767, 2773), True, 'import numpy as np\n'), ((2839, 2857), 'numpy.bincount', 'np.bincount', (['added'], {}), '(added)\n', (2850, 2857), True, 'import numpy as np\n'), ((4035, 4087), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4.980614173228346, 3.2)'}), '(1, 1, figsize=(4.980614173228346, 3.2))\n', (4047, 4087), True, 'import matplotlib.pylab as plt\n'), ((4479, 4493), 'matplotlib.pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (4488, 4493), True, 'import matplotlib.pylab as plt\n'), ((4509, 4561), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(4.980614173228346, 3.2)'}), '(1, 1, figsize=(4.980614173228346, 3.2))\n', (4521, 4561), True, 'import matplotlib.pylab as plt\n'), ((4975, 4989), 'matplotlib.pylab.close', 'plt.close', (['fig'], {}), '(fig)\n', (4984, 4989), True, 'import matplotlib.pylab as plt\n'), ((637, 672), 'os.path.join', 'join', (['pathToSimFolder', '"""device.txt"""'], {}), "(pathToSimFolder, 'device.txt')\n", (641, 672), False, 'from os.path import join\n'), ((2640, 2695), 'os.path.join', 'join', (['pathToSimFolder', 'f"""swapTrackFile{fileNumber}.txt"""'], {}), "(pathToSimFolder, f'swapTrackFile{fileNumber}.txt')\n", (2644, 2695), False, 'from os.path import join\n'), ((2955, 2970), 'numpy.max', 'np.max', (['absBins'], {}), '(absBins)\n', (2961, 2970), True, 'import numpy as np\n'), ((4345, 4405), 'os.path.join', 'join', (['pathToSimFolder', 'f"""swapHist_{inp[fileNumber - 1]}.png"""'], {}), "(pathToSimFolder, f'swapHist_{inp[fileNumber - 1]}.png')\n", (4349, 4405), False, 'from os.path import join\n'), ((4837, 4901), 'os.path.join', 'join', (['pathToSimFolder', 'f"""swapHist_{inp[fileNumber - 1]}_log.png"""'], {}), "(pathToSimFolder, f'swapHist_{inp[fileNumber - 1]}_log.png')\n", (4841, 4901), False, 'from os.path import join\n'), ((2169, 2186), 'matplotlib.colors.to_rgba', 'colors.to_rgba', (['c'], {}), 
'(c)\n', (2183, 2186), False, 'from matplotlib import colors\n'), ((1846, 1888), 'numpy.cos', 'np.cos', (['(electrodes[i][0] / 360 * 2 * np.pi)'], {}), '(electrodes[i][0] / 360 * 2 * np.pi)\n', (1852, 1888), True, 'import numpy as np\n'), ((1925, 1967), 'numpy.sin', 'np.sin', (['(electrodes[i][0] / 360 * 2 * np.pi)'], {}), '(electrodes[i][0] / 360 * 2 * np.pi)\n', (1931, 1967), True, 'import numpy as np\n'), ((3933, 3973), 'numpy.sqrt', 'np.sqrt', (['((x2 - x1) ** 2 + (y2 - y1) ** 2)'], {}), '((x2 - x1) ** 2 + (y2 - y1) ** 2)\n', (3940, 3973), True, 'import numpy as np\n')] |
import sys
import numpy as np
from starfish import ImageStack
from starfish.spots import FindSpots
from starfish.types import Axes
def test_lmpf_uniform_peak():
    """A uniformly-bright 10x10 square should yield one spot per pixel with the
    default LocalMaxPeakFinder, and exactly one spot when capped at one peak
    per label."""
    image = np.zeros(shape=(1, 1, 1, 100, 100), dtype=np.float32)
    image[0, 0, 0, 45:55, 45:55] = 1
    stack = ImageStack.from_numpy(image)

    # Uncapped: every evenly illuminated pixel is reported as a spot.
    uncapped_finder = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize)
    uncapped = uncapped_finder.run(stack)[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(uncapped.spot_attrs.data) == 100

    # Capped at one peak per label: only a single spot survives.
    capped_finder = FindSpots.LocalMaxPeakFinder(1, 1, 1, sys.maxsize, num_peaks_per_label=1)
    capped = capped_finder.run(stack)[{Axes.ROUND: 0, Axes.CH: 0}]
    assert len(capped.spot_attrs.data) == 1
| [
"starfish.ImageStack.from_numpy",
"numpy.zeros",
"starfish.spots.FindSpots.LocalMaxPeakFinder"
] | [((182, 235), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, 1, 1, 100, 100)', 'dtype': 'np.float32'}), '(shape=(1, 1, 1, 100, 100), dtype=np.float32)\n', (190, 235), True, 'import numpy as np\n'), ((295, 328), 'starfish.ImageStack.from_numpy', 'ImageStack.from_numpy', (['data_array'], {}), '(data_array)\n', (316, 328), False, 'from starfish import ImageStack\n'), ((445, 495), 'starfish.spots.FindSpots.LocalMaxPeakFinder', 'FindSpots.LocalMaxPeakFinder', (['(1)', '(1)', '(1)', 'sys.maxsize'], {}), '(1, 1, 1, sys.maxsize)\n', (473, 495), False, 'from starfish.spots import FindSpots\n'), ((729, 802), 'starfish.spots.FindSpots.LocalMaxPeakFinder', 'FindSpots.LocalMaxPeakFinder', (['(1)', '(1)', '(1)', 'sys.maxsize'], {'num_peaks_per_label': '(1)'}), '(1, 1, 1, sys.maxsize, num_peaks_per_label=1)\n', (757, 802), False, 'from starfish.spots import FindSpots\n')] |
import numpy as np
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
#from scipy import stats
from shallow_model import Model
class z24Dataset(Dataset):
    """Sliding-window dataset over one Z24 bridge measurement file.

    Each item is (vibration window flattened + environmental vector,
    vibration window flattened) — input and reconstruction target for an
    autoencoder-style model.
    """

    def __init__(self, damage_case='1', filename = '40C07',window_size=100, normalize=True):
        """Args:
            damage_case: subdirectory name under ../data/z24_damage/
            filename: measurement file stem (without the _vibrations/_env suffix)
            window_size: number of consecutive vibration samples per item
            normalize: z-score both vibration and environmental data using
                precomputed statistics from ../tools/
        """
        self.window_size = window_size
        # Each raw file holds 65536 samples; items are non-overlapping windows.
        self.slices_per_file = 65536 // self.window_size
        self.normalize = normalize
        self.damage_case = damage_case
        self.filename = filename
        # Precomputed normalization statistics (means/stds from training data).
        self.env_mean = np.load('../tools/env_mean.npy')
        self.env_std = np.load('../tools/env_std.npy')
        self.vibration_mean = np.load('../tools/vibration_mean.npy')
        self.vibration_std = np.load('../tools/vibration_std.npy')

    def __len__(self):
        return self.slices_per_file

    def __getitem__(self, index):
        # NOTE(review): since __len__ == slices_per_file, valid indices are
        # always < slices_per_file, so index_to_read is always 0 and
        # index_in_dataframe reduces to index * window_size. The extra
        # arithmetic looks like a leftover from a multi-file variant — confirm.
        index_to_read = index // self.slices_per_file
        index_in_dataframe = (index - index_to_read*self.slices_per_file) * self.window_size
        file_path_vib = '../data/z24_damage/'+self.damage_case+'/'+self.filename+'_vibrations.npy'
        file_path_env = '../data/z24_damage/'+self.damage_case+'/'+self.filename+'_env.npy'
        # Memory-map the raw arrays so only the requested window is read:
        # vibrations are (65536 samples, 7 channels); environment is a flat 53-vector.
        memmap_vib = np.memmap(file_path_vib, dtype=np.float64, mode='r', shape=(65536, 7))
        memmap_env = np.memmap(file_path_env, dtype=np.float64, mode='r', shape=(53,))
        X_environmental = np.array(memmap_env[:])
        X_vibration_window = np.array(memmap_vib[index_in_dataframe:index_in_dataframe+self.window_size,:])
        if self.normalize:
            X_vibration_window = (X_vibration_window - self.vibration_mean) / self.vibration_std
            X_environmental = (X_environmental - self.env_mean) / self.env_std
        # Model input = flattened vibration window concatenated with env vector.
        X_vib_and_env = np.append(X_vibration_window.flatten(),X_environmental)
        return X_vib_and_env, X_vibration_window.flatten()
# Evaluate the trained model on every damage case; a file is classified as
# "damaged" when its accumulated out-of-confidence-interval loss exceeds a
# fixed threshold.
damage_case = '1'
for damage_case in ['healthy','1','2','3','4','5','6','7','8','9','10','11','12','13','14']:
    # One measurement-file name per line for this damage case.
    file_index = np.loadtxt('../data/z24_damage/damage_'+damage_case+'_index.txt',dtype=str)
    w_size = 200
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Model is loaded fresh for each damage case (kept on CPU).
    model = torch.load(f='../results/trained_shallow_relu200_100epochs.pt', map_location='cpu')
    loss_criterion = torch.nn.MSELoss(reduce=False)
    classification_list = []
    for damage_file in file_index:
        dataset = z24Dataset(damage_case=damage_case, filename = damage_file, window_size=w_size, normalize=True)
        dataloader = DataLoader(dataset, batch_size=100, shuffle=False, num_workers=4)
        #loss_criterion = torch.nn.MSELoss(reduce=False)
        all_window_loss = []
        for X, Y in dataloader:
            X_tensor = X.float().to(device)
            Y_tensor = Y.float()#.to(device)
            batch_size, output_size = Y.shape
            # Run the (stochastic, e.g. dropout-enabled) model N times to get a
            # Monte-Carlo mean prediction and its uncertainty per output.
            N = 100
            N_predictions = torch.zeros([N, batch_size, output_size])
            for i in range(N):
                N_predictions[i,:,:] = model(X_tensor)
            prediction_mean = torch.mean(N_predictions, dim=0)
            prediction_std = torch.std(N_predictions, dim=0)
            loss_full = loss_criterion(prediction_mean, Y_tensor)
            # Zero the loss wherever the target falls inside the mean +/- 2*std
            # band — only out-of-band errors count toward the damage score.
            lower_y = prediction_mean - 2*prediction_std
            upper_y = prediction_mean + 2*prediction_std
            within_lower = Y_tensor > lower_y
            within_upper = Y_tensor < upper_y
            within_range = within_lower & within_upper
            loss_full[within_range] = 0
            # Mean remaining loss per window in the batch.
            for j in range(batch_size):
                window_loss = torch.sum(loss_full[j,:]) / torch.numel(loss_full[j,:])
                all_window_loss.append(window_loss.item())
        all_window_loss = np.array(all_window_loss)
        # Clip per-window losses and threshold the file's total loss.
        # NOTE(review): both magic constants are presumably calibrated on
        # healthy data (clip at max healthy window loss, threshold at max
        # healthy file score) — confirm their provenance.
        all_window_loss[all_window_loss > 0.33731913566589355] = 0.33731913566589355
        s = np.sum(all_window_loss)
        classification = 42.65411979705095 < s
        classification_list.append(classification)
        #print('Damage case {}, file {}, classified as {}, total error {}'.format(damage_case, damage_file, classification, np.round(s)))
        print(s)
    # "Correct" here means classified as damaged (True); for the 'healthy'
    # case the printed ratio is therefore the false-positive rate.
    n_correct = np.sum(classification_list)
    print('Damage case: {}'.format(damage_case))
    print('Number of cases: {}'.format(len(classification_list)))
    print('Number of correct classifications: {}'.format(int(n_correct)))
print('Ratio {}'.format(n_correct/len(classification_list))) | [
"torch.mean",
"torch.load",
"numpy.memmap",
"torch.numel",
"numpy.sum",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torch.sum",
"torch.utils.data.DataLoader",
"numpy.loadtxt",
"numpy.load",
"torch.std",
"torch.zeros"
] | [((2117, 2202), 'numpy.loadtxt', 'np.loadtxt', (["('../data/z24_damage/damage_' + damage_case + '_index.txt')"], {'dtype': 'str'}), "('../data/z24_damage/damage_' + damage_case + '_index.txt', dtype=str\n )\n", (2127, 2202), True, 'import numpy as np\n'), ((2296, 2383), 'torch.load', 'torch.load', ([], {'f': '"""../results/trained_shallow_relu200_100epochs.pt"""', 'map_location': '"""cpu"""'}), "(f='../results/trained_shallow_relu200_100epochs.pt',\n map_location='cpu')\n", (2306, 2383), False, 'import torch\n'), ((2401, 2431), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduce': '(False)'}), '(reduce=False)\n', (2417, 2431), False, 'import torch\n'), ((4367, 4394), 'numpy.sum', 'np.sum', (['classification_list'], {}), '(classification_list)\n', (4373, 4394), True, 'import numpy as np\n'), ((621, 653), 'numpy.load', 'np.load', (['"""../tools/env_mean.npy"""'], {}), "('../tools/env_mean.npy')\n", (628, 653), True, 'import numpy as np\n'), ((677, 708), 'numpy.load', 'np.load', (['"""../tools/env_std.npy"""'], {}), "('../tools/env_std.npy')\n", (684, 708), True, 'import numpy as np\n'), ((739, 777), 'numpy.load', 'np.load', (['"""../tools/vibration_mean.npy"""'], {}), "('../tools/vibration_mean.npy')\n", (746, 777), True, 'import numpy as np\n'), ((807, 844), 'numpy.load', 'np.load', (['"""../tools/vibration_std.npy"""'], {}), "('../tools/vibration_std.npy')\n", (814, 844), True, 'import numpy as np\n'), ((1309, 1379), 'numpy.memmap', 'np.memmap', (['file_path_vib'], {'dtype': 'np.float64', 'mode': '"""r"""', 'shape': '(65536, 7)'}), "(file_path_vib, dtype=np.float64, mode='r', shape=(65536, 7))\n", (1318, 1379), True, 'import numpy as np\n'), ((1401, 1466), 'numpy.memmap', 'np.memmap', (['file_path_env'], {'dtype': 'np.float64', 'mode': '"""r"""', 'shape': '(53,)'}), "(file_path_env, dtype=np.float64, mode='r', shape=(53,))\n", (1410, 1466), True, 'import numpy as np\n'), ((1494, 1517), 'numpy.array', 'np.array', (['memmap_env[:]'], {}), '(memmap_env[:])\n', 
(1502, 1517), True, 'import numpy as np\n'), ((1547, 1633), 'numpy.array', 'np.array', (['memmap_vib[index_in_dataframe:index_in_dataframe + self.window_size, :]'], {}), '(memmap_vib[index_in_dataframe:index_in_dataframe + self.\n window_size, :])\n', (1555, 1633), True, 'import numpy as np\n'), ((2636, 2701), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(100)', 'shuffle': '(False)', 'num_workers': '(4)'}), '(dataset, batch_size=100, shuffle=False, num_workers=4)\n', (2646, 2701), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3927, 3952), 'numpy.array', 'np.array', (['all_window_loss'], {}), '(all_window_loss)\n', (3935, 3952), True, 'import numpy as np\n'), ((4059, 4082), 'numpy.sum', 'np.sum', (['all_window_loss'], {}), '(all_window_loss)\n', (4065, 4082), True, 'import numpy as np\n'), ((2246, 2271), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2269, 2271), False, 'import torch\n'), ((3026, 3067), 'torch.zeros', 'torch.zeros', (['[N, batch_size, output_size]'], {}), '([N, batch_size, output_size])\n', (3037, 3067), False, 'import torch\n'), ((3202, 3234), 'torch.mean', 'torch.mean', (['N_predictions'], {'dim': '(0)'}), '(N_predictions, dim=0)\n', (3212, 3234), False, 'import torch\n'), ((3264, 3295), 'torch.std', 'torch.std', (['N_predictions'], {'dim': '(0)'}), '(N_predictions, dim=0)\n', (3273, 3295), False, 'import torch\n'), ((3769, 3795), 'torch.sum', 'torch.sum', (['loss_full[j, :]'], {}), '(loss_full[j, :])\n', (3778, 3795), False, 'import torch\n'), ((3797, 3825), 'torch.numel', 'torch.numel', (['loss_full[j, :]'], {}), '(loss_full[j, :])\n', (3808, 3825), False, 'import torch\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2015 10X Genomics, Inc. All rights reserved.
#
import collections
import itertools
import json
import numpy as np
import os
import re
import sys
import tenkit.constants as tk_constants
import tenkit.fasta as tk_fasta
import tenkit.safe_json as tk_safe_json
import tenkit.seq as tk_seq
import cellranger.constants as cr_constants
import cellranger.h5_constants as h5_constants
import cellranger.utils as cr_utils
import cellranger.io as cr_io
def get_bamtofastq_defs(read_defs, destination_tags):
    """ Determine which portions of reads need to be retained.
    Args: read_defs - list(ReadDef)
          destination_tags - list((str,str)) - list of (seq_tag, qual_tag) or
                             None if the read comes from the READ/QUAL fields
    NOTE: Assumes that the read defs are contiguous - sequence in gaps between extracted seqs will be lost.
    Returns dict of {read_type (str): str} """
    assert len(read_defs) == len(destination_tags)

    # Map read defs to their destination bam tags.
    # NOTE: builtin zip() instead of the Python2-only itertools.izip, so this
    # works under both Python 2 and Python 3.
    dest_tags_dict = {read_def: dest_tags
                      for read_def, dest_tags in zip(read_defs, destination_tags)}

    # Bin read defs by read type (R1, R2, etc.)
    read_types = collections.defaultdict(list)
    for read_def in read_defs:
        if read_def.read_type is None:
            continue
        read_types[read_def.read_type].append(read_def)

    trim_defs = {}
    for read_type, read_type_defs in sorted(read_types.items()):
        # Order this read's defs by their offset into the read
        read_type_defs.sort(key=lambda rd: rd.offset)

        # Get tags that were built from this read
        read_type_dest_tags = [dest_tags_dict[rd] for rd in read_type_defs]

        # Construct bam_to_fastq entries: a TAG:TAG pair for sequence stored
        # in bam tags, or SEQ:QUAL for the read's own sequence/quality fields.
        bam_to_fastq_entries = []
        for dest_tag in read_type_dest_tags:
            if dest_tag is None:
                bam_to_fastq_entries.append('SEQ:QUAL')
            else:
                bam_to_fastq_entries.append('%s:%s' % dest_tag)

        # Interpreted by bamtofastq
        trim_defs[read_type] = '10x_bam_to_fastq:' + read_type + '(' + ','.join(bam_to_fastq_entries) + ')'

    return trim_defs
def infer_barcode_reverse_complement(barcode_whitelist, read_iter):
    """Decide whether barcode reads are reverse-complemented relative to the
    whitelist by sampling reads and comparing hit rates in both orientations.
    Returns False when no whitelist is provided or no forward hits are seen."""
    if barcode_whitelist is None:
        return False

    forward_hits = 0
    revcomp_hits = 0
    sample = itertools.islice(read_iter, cr_constants.NUM_CHECK_BARCODES_FOR_ORIENTATION)
    for _, seq, _ in sample:
        if seq in barcode_whitelist:
            forward_hits += 1
        if tk_seq.get_rev_comp(seq) in barcode_whitelist:
            revcomp_hits += 1

    if not forward_hits:
        return False
    total_hits = revcomp_hits + forward_hits
    return revcomp_hits >= total_hits * cr_constants.REVCOMP_BARCODE_THRESHOLD
class BarcodeCounter:
    """Accumulates per-barcode read counts against a barcode whitelist.

    If the whitelist cannot be loaded, counting is a no-op and to_json()
    returns an empty list.
    """

    def __init__(self, barcode_whitelist, out_counts, gem_groups=None):
        """Args:
            barcode_whitelist: whitelist identifier passed to cr_utils.load_barcode_whitelist
            out_counts (str): path the JSON counts are written to on close()
            gem_groups (list of int): gem groups used to format barcode seqs
        """
        self.barcode_counts = None
        self.barcode_index = None
        self.out_counts = out_counts

        self.barcode_seqs = cr_utils.load_barcode_whitelist(barcode_whitelist)
        if self.barcode_seqs:
            self.barcode_seqs = cr_utils.format_barcode_seqs(self.barcode_seqs, gem_groups)
            self.barcode_counts = np.zeros(len(self.barcode_seqs), dtype=np.uint32)
            self.barcode_index = {bc: index for index, bc in enumerate(self.barcode_seqs)}

    def count(self, name, seq, qual):
        """Count one barcode observation; sequences not on the whitelist are ignored."""
        if self.barcode_seqs:
            index = self.barcode_index.get(seq)
            if index is not None:
                self.barcode_counts[index] += 1

    def merge(self, gem_group, out_counts):
        """Add counts previously serialized by another BarcodeCounter for one gem group."""
        if self.barcode_seqs:
            with open(out_counts, 'r') as f:
                barcode_counts = json.load(f)

            # Each gem group occupies a contiguous slice of the count vector.
            start = (gem_group-1)*len(barcode_counts)
            end = gem_group*len(barcode_counts)
            self.barcode_counts[start:end] += np.array(barcode_counts, dtype=np.uint32)

    def to_json(self):
        """Return the counts as a JSON-serializable list (empty if no whitelist)."""
        if self.barcode_seqs:
            return list(self.barcode_counts)
        else:
            return []

    def close(self):
        """Serialize the accumulated counts to self.out_counts as JSON."""
        if self.barcode_seqs:
            with open(self.out_counts, 'w') as f:
                tk_safe_json.dump_numpy(self.to_json(), f)

    @staticmethod
    def merge_by(counter_files, keys, barcode_whitelist, gem_groups):
        """ Merge BarcodeCounters by a key.
        Args:
           counter_files (list of str): Filenames of BarcodeCounter outputs
           keys (list of str): Keys to group by
           barcode_whitelist (list of str): Same as BarcodeCounter constructor
           gem_groups (list of int): Same as BarcodeCounter constructor
        Returns:
           dict of str:dict: Keys are the group keys and dicts are serialized BarcodeCounters
        """
        distinct_keys = sorted(list(set(keys)))

        groups = {}
        for key in distinct_keys:
            groups[key] = BarcodeCounter(barcode_whitelist, None, gem_groups)

        for key, filename, gg in zip(keys, counter_files, gem_groups):
            groups[key].merge(gg, filename)

        # .items() rather than the Python2-only .iteritems(): works on both
        # Python versions and matches the dict iteration style used elsewhere
        # in this module (e.g. get_bamtofastq_defs).
        return {key: group.to_json() for key, group in groups.items()}
def extract_read_maybe_paired(read_tuple, read_def, reads_interleaved, r1_length=None, r2_length=None):
    """ Extract the sub-sequence described by a ReadDef from one FASTQ record.
    Args: read_tuple: (name, read, qual)
          read_def: ReadDef object (read_type, offset, length)
          reads_interleaved: bool - R1/R2 are interleaved in a single record
          r1_length (int): Hard trim on 3' end of input R1 (None = no trim)
          r2_length (int): Hard trim on 3' end of input R2 (None = no trim)
    Returns: (name, trimmed_seq, trimmed_qual) """
    # Pull the correct mate out of an interleaved record if necessary.
    if reads_interleaved and read_def.read_type == 'R1':
        name, seq, qual = cr_utils.get_fastq_read1(read_tuple, None, True)
    elif reads_interleaved and read_def.read_type == 'R2':
        name, seq, qual = cr_utils.get_fastq_read2(read_tuple, None, True)
    else:
        name, seq, qual = read_tuple

    # Apply hard trimming on input.
    # NOTE: sys.maxsize instead of the Python2-only sys.maxint — the two are
    # equal on CPython 2, and only maxsize exists on Python 3.
    hard_end = sys.maxsize
    if read_def.read_type == 'R1' and r1_length is not None:
        hard_end = r1_length
    elif read_def.read_type == 'R2' and r2_length is not None:
        hard_end = r2_length

    # Extract the interval requested by the read def, clipped to the hard end.
    if read_def.length is not None:
        end = min(hard_end, read_def.offset + read_def.length)
        read_slice = slice(read_def.offset, end)
    else:
        read_slice = slice(read_def.offset, hard_end)

    return (name, seq[read_slice], qual[read_slice])
def get_read_generator_fastq(fastq_open_file, read_def, reads_interleaved, r1_length=None, r2_length=None):
    """Yield (name, seq, qual) tuples for one ReadDef from an open FASTQ file."""
    # Paired-end parsing is only needed when R1/R2 are interleaved in the file.
    paired = reads_interleaved and read_def.read_type in ['R1', 'R2']
    for record in tk_fasta.read_generator_fastq(fastq_open_file, paired_end=paired):
        yield extract_read_maybe_paired(record, read_def, reads_interleaved, r1_length, r2_length)
def get_feature_generator_fastq(files, extractor, interleaved, read_types, r1_length=None, r2_length=None):
    '''Extract feature barcodes from FASTQs.
    Args:
        files (list of File): FASTQ file handles for R1, R2
        extractor (FeatureExtractor): Extracts feature barcodes
        interleaved (bool): Are R1,R2 interleaved in a single file
        read_types (list of str): List of read types (e.g. R1,R2) we need to inspect
        r1_length (int): Length to hard-trim R1 to
        r2_length (int): Length to hard-trim R2 to
    Returns:
        FeatureMatchResult: Yields the feature extraction result for a read pair
    '''
    assert len(files) == 2
    assert 'R1' in read_types or 'R2' in read_types

    # Apply hard trimming on input (sys.maxint == no trim; Python 2 only)
    r1_hard_end = sys.maxint if r1_length is None else r1_length
    r2_hard_end = sys.maxint if r2_length is None else r2_length

    if interleaved:
        f = files[0]
        assert f
        # Get R1 and R2 seqs from interleaved FASTQ: each 6-tuple record is
        # split into the R1 triple (name, seq, qual) and the R2 triple.
        pair_iter = itertools.imap(lambda x: (x[0:3], x[3:6]),
                                   tk_fasta.read_generator_fastq(f, paired_end=True))
    else:
        # Only open an iterator for the read types we actually need; pad the
        # shorter stream with None so pairs stay aligned.
        r1_iter = tk_fasta.read_generator_fastq(files[0], paired_end=False) if 'R1' in read_types else iter([])
        r2_iter = tk_fasta.read_generator_fastq(files[1], paired_end=False) if 'R2' in read_types else iter([])
        pair_iter = itertools.izip_longest(r1_iter, r2_iter)

    # Each pair element is (name, seq, qual); index 1 = seq, index 2 = qual.
    if read_types == ['R1']:
        match_func = lambda x: extractor.extract_single_end(x[0][1][0:r1_hard_end], # seq
                                                            x[0][2][0:r1_hard_end], # qual
                                                            'R1')
    elif read_types == ['R2']:
        match_func = lambda x: extractor.extract_single_end(x[1][1][0:r2_hard_end], # seq
                                                            x[1][2][0:r2_hard_end], # qual
                                                            'R2')
    elif read_types == ['R1', 'R2']:
        match_func = lambda x: extractor.extract_paired_end(x[0][1][0:r1_hard_end], # seq
                                                            x[0][2][0:r1_hard_end], # qual
                                                            x[1][1][0:r2_hard_end], # seq
                                                            x[1][2][0:r2_hard_end]) # qual

    # NOTE: itertools.imap/izip_longest are Python-2-only names.
    return itertools.imap(match_func, pair_iter)
def get_fastq_from_read_type(fastq_dict, read_def, reads_interleaved):
    '''Use a ReadDef to determine which FASTQ file to open.

    When reads are interleaved, R2 lives in the same file as R1.'''
    if reads_interleaved and read_def.read_type == 'R2':
        return fastq_dict.get('R1')
    return fastq_dict.get(read_def.read_type)
def get_fastqs_from_feature_ref(fastq_dict, reads_interleaved, read_types):
    '''Determine which FASTQ files to open for a FeatureExtractor.

    Returns a (fastq_for_R1, fastq_for_R2) pair; an entry is None when the
    corresponding read type is not requested.'''
    assert 'R1' in read_types or 'R2' in read_types

    wants_r1 = 'R1' in read_types
    wants_r2 = 'R2' in read_types

    fastq1 = fastq_dict.get('R1') if wants_r1 else None

    if not wants_r2:
        fastq2 = None
    elif reads_interleaved:
        # Interleaved: both reads come from the first FASTQ ("R1")
        fastq2 = fastq_dict.get('R1')
    else:
        # Read2 comes from the second FASTQ ("R2")
        fastq2 = fastq_dict.get('R2')

    return (fastq1, fastq2)
class FastqReader:
    # Extracts specified regions from input fastqs
    # For example, extract UMIs from the first 10 bases of the "R2" read
    def __init__(self, in_filenames, read_def, reads_interleaved, r1_length, r2_length):
        """ Open the FASTQ file named by the read def and set up extraction.
        Args:
              in_filenames - Map of paths to fastq files (by read type)
              read_def - ReadDef describing what to extract
              reads_interleaved (bool) - R1/R2 interleaved in one file
              r1_length / r2_length (int or None) - hard 3' trims
        If no matching file exists, self.in_iter is an empty iterator.
        """
        self.in_fastq = None
        self.in_iter = iter([])
        self.read_def = read_def

        if in_filenames:
            in_filename = get_fastq_from_read_type(in_filenames,
                                                     read_def,
                                                     reads_interleaved)
            if in_filename:
                # Transparently handles gzip-compressed inputs.
                self.in_fastq = cr_io.open_maybe_gzip(in_filename, 'r')
                self.in_iter = get_read_generator_fastq(self.in_fastq,
                                                        read_def=read_def,
                                                        reads_interleaved=reads_interleaved,
                                                        r1_length=r1_length,
                                                        r2_length=r2_length)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        # Safe to call multiple times; only closes an open handle once.
        if self.in_fastq:
            self.in_fastq.close()
            self.in_fastq = None
class FastqFeatureReader:
    ''' Use a FeatureReference to extract specified feature barcodes from input fastqs
        For example, extract antibody barcodes from R2. '''

    def __init__(self, in_filenames, extractor, reads_interleaved, r1_length, r2_length):
        """ Args:
              in_filenames (dict of str -> str): Map of paths to fastq files
              feature_ref (FeatureExtractor): for extracting feature barcodes
        If no relevant file exists, self.in_iter stays an empty iterator.
        """
        self.in_fastqs = None
        self.in_iter = iter([])

        # Relevant read types
        read_types = extractor.get_read_types()

        if in_filenames:
            in_filenames = get_fastqs_from_feature_ref(in_filenames,
                                                        reads_interleaved,
                                                        read_types)

            if in_filenames != (None, None):
                if reads_interleaved:
                    # Both reads live in one file; open it once as the first handle.
                    filename = in_filenames[0] if in_filenames[0] else in_filenames[1]
                    # NOTE(review): `filename[0]` tests the first *character* of
                    # the path, not the path itself — presumably `if filename`
                    # was intended. Harmless here since filename is non-empty,
                    # but worth confirming.
                    self.in_fastqs = (cr_io.open_maybe_gzip(filename, 'r') if filename[0] else None,
                                      None)
                else:
                    self.in_fastqs = (cr_io.open_maybe_gzip(in_filenames[0], 'r') if in_filenames[0] else None,
                                      cr_io.open_maybe_gzip(in_filenames[1], 'r') if in_filenames[1] else None)

                self.in_iter = get_feature_generator_fastq(files=self.in_fastqs,
                                                           extractor=extractor,
                                                           interleaved=reads_interleaved,
                                                           read_types=read_types,
                                                           r1_length=r1_length,
                                                           r2_length=r2_length)

    def close(self):
        # Close whichever handles were opened (entries may be None).
        if self.in_fastqs:
            for f in self.in_fastqs:
                if f:
                    f.close()
class ChunkedWriter:
    # Writes sequencing read-based output, splitting into chunks as specified
    # Abstract base: subclasses must provide generate_filename(), open_file()
    # and write_data().
    def __init__(self, base_path, max_reads_per_file):
        """ Args:
              base_path (str): directory the chunk files are created in
              max_reads_per_file (int): records per chunk before rotating
        """
        self.reads_per_file = 0
        self.max_reads_per_file = max_reads_per_file
        self.index = 0            # 1-based chunk counter (incremented before use)
        self.base_path = base_path
        self.file_paths = []      # every chunk path created so far
        self.curr_file = None

        cr_io.mkdir(base_path, allow_existing=True)

    def write(self, data):
        """Write one record, rotating to a new chunk file when the current
        one is full (or on the very first write)."""
        if self.reads_per_file >= self.max_reads_per_file or self.curr_file is None:
            self.close()
            self.index += 1
            out_filename = os.path.join(self.base_path, self.generate_filename())
            self.curr_file = self.open_file(out_filename)
            self.file_paths.append(out_filename)
            self.reads_per_file = 0

        self.write_data(data)
        self.reads_per_file += 1

    def get_out_paths(self, default_len=None):
        """Return the chunk paths written, or a list of default_len Nones if
        nothing was written (default_len must then be provided)."""
        assert self.file_paths or default_len
        if self.file_paths:
            return self.file_paths
        else:
            return [None] * default_len

    def close(self):
        # Idempotent: closes the current chunk if one is open.
        if self.curr_file is not None:
            self.curr_file.close()
            self.curr_file = None
class ChunkedFastqWriter(ChunkedWriter):
    """ChunkedWriter specialization that emits (optionally compressed) FASTQ chunks."""

    def __init__(self, *args, **kwargs):
        # 'compression' is required and consumed here; everything else is
        # forwarded to ChunkedWriter.
        compression = kwargs.pop('compression')
        ChunkedWriter.__init__(self, *args, **kwargs)
        self.suffix = self._suffix_for(compression)
        self.compression = compression

    @staticmethod
    def _suffix_for(compression):
        """Map a compression name to the filename suffix it implies."""
        if compression is None:
            return ''
        if compression == 'gzip':
            return h5_constants.GZIP_SUFFIX
        if compression == 'lz4':
            return h5_constants.LZ4_SUFFIX
        raise ValueError('Unknown compression type: %s' % compression)

    def generate_filename(self):
        """Name for the next chunk, e.g. '3.fastq' or '3.fastq.<suffix>'."""
        return '%d.fastq%s' % (self.index, self.suffix or '')

    def open_file(self, filename):
        """Open a chunk for writing; compression is inferred from the name."""
        return cr_io.open_maybe_gzip(filename, 'w')

    def write_data(self, data):
        """Write one record by unpacking it into tk_fasta.write_read_fastq."""
        tk_fasta.write_read_fastq(self.curr_file, *data)
class AugmentedFastqHeader:
    """ Store 10x specific tags in fastq qname.

    Tags are appended to the first whitespace-delimited word of the header as
    alternating TAG_SEP-delimited key/value fields.
    """
    TAG_SEP = '|||'
    WORD_SEP = ' '

    def __init__(self, fastq_header):
        """ Parse the fastq header (which may already carry tags) """
        words = fastq_header.split(self.WORD_SEP)

        # Assume that TAG_SEP doesn't exist in the original fastq header
        fields = words[0].split(self.TAG_SEP)
        stripped_word, tag_pairs = fields[0], fields[1:]

        self.fastq_header = AugmentedFastqHeader.WORD_SEP.join([stripped_word] + words[1:])
        # NOTE: materialize as a list — zip() returns a lazy iterator on
        # Python 3, which would break the indexing/append in set_tag().
        self.tags = list(zip(tag_pairs[::2], tag_pairs[1::2]))

    def set_tag(self, key, value):
        """Set or overwrite the value for a tag key, preserving tag order."""
        for i, (k, _) in enumerate(self.tags):
            if k == key:
                self.tags[i] = (key, value)
                return
        self.tags.append((key, value))

    def get_tag(self, key):
        """Return the value for a tag key, or None if the tag is absent."""
        for k, v in self.tags:
            if k == key:
                return v
        return None

    def to_string(self):
        """ Append a TAG_SEP-delimited tag-dict onto the first word of the fastq name """
        hdr_words = self.fastq_header.split(self.WORD_SEP)
        tag_strings = [self.TAG_SEP.join(item) for item in self.tags]
        augmented_word = self.TAG_SEP.join([hdr_words[0]] + tag_strings)
        return self.WORD_SEP.join([augmented_word] + hdr_words[1:])
# Copied from tenkit because tenkit/preflight.py is utterly broken (imports martian)
def check_sample_indices(sample_item, sample_index_key = "sample_indices"):
    """Validate and expand a sample-index specification.

    Returns (expanded_indices, None) on success or (None, error_message) on
    failure. 'any' short-circuits to the wildcard ['*']; named index sets are
    expanded via tenkit's SAMPLE_INDEX_MAP; raw nucleotide sequences pass
    through unchanged."""
    sample_indices = sample_item[sample_index_key]
    if type(sample_indices) != list:
        return None, "Sample indices must be of type list"
    if not sample_indices:
        return None, "Sample indices must be a non-empty list"

    expanded = []
    for si in sample_indices:
        if si == "any":
            # Wildcard matches everything; no point expanding the rest.
            return ['*'], None
        mapped = tk_constants.SAMPLE_INDEX_MAP.get(si)
        if mapped:
            expanded.extend(mapped)
        elif re.match("^[%s]+$" % "".join(tk_seq.NUCS), si):
            expanded.append(si)
        else:
            return None, ("Sample index '%s' is not valid. Must be one of: any, SI-<number>, "
                          "SI-<plate>-<well coordinate>, 220<part number>, or "
                          "a nucleotide sequence." % si)
    return expanded, None
class FastqSpecException(Exception):
    """Raised when a sample_def cannot be turned into a valid FastqSpec."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
def _require_sample_def_key(sd, key):
if key not in sd:
raise FastqSpecException('Sample def is missing the key "%s."' % key)
class FastqSpec(object):
    """ All info required to find FASTQ files """
    def __init__(self, fastq_mode, read_path, lanes, sample_indices, sample_names, interleaved):
        """Exactly one of sample_indices / sample_names is expected to be set,
        depending on fastq_mode (see from_sample_def)."""
        self.fastq_mode = fastq_mode
        self.read_path = read_path
        self.lanes = lanes
        self.sample_indices = sample_indices
        self.sample_names = sample_names
        self.interleaved = interleaved

    @staticmethod
    def from_sample_def(sd):
        """Build a FastqSpec from a sample_def dict, validating required keys.
        Raises FastqSpecException on missing keys, bad sample indices, or an
        unrecognized fastq_mode."""
        _require_sample_def_key(sd, 'fastq_mode')
        _require_sample_def_key(sd, 'read_path')
        _require_sample_def_key(sd, 'lanes')

        if sd['fastq_mode'] == tk_constants.ILMN_BCL2FASTQ_FASTQ_MODE:
            # bcl2fastq demux: files are found by sample name; not interleaved.
            _require_sample_def_key(sd, 'sample_names')
            return FastqSpec(fastq_mode=sd['fastq_mode'],
                             read_path=sd['read_path'],
                             lanes=sd['lanes'],
                             sample_indices=None,
                             sample_names=sd['sample_names'],
                             interleaved=False,
            )

        elif sd['fastq_mode'] == tk_constants.BCL_PROCESSOR_FASTQ_MODE:
            # BCL processor: files are found by sample index; interleaved R1/R2.
            _require_sample_def_key(sd, 'sample_indices')

            # Check and optionally translate sample index sets to
            # sample index sequences
            si_strings, msg = check_sample_indices(sd)
            if si_strings is None:
                raise FastqSpecException(msg)

            return FastqSpec(fastq_mode=sd['fastq_mode'],
                             read_path=sd['read_path'],
                             lanes=sd['lanes'],
                             sample_indices=si_strings,
                             sample_names=None,
                             interleaved=True,
            )
        else:
            raise FastqSpecException('Sample def contained unrecognized fastq_mode "%s."' % sd['fastq_mode'])

    def get_group_spec_iter(self):
        """ Each group corresponds to a single sample_name or sample_index in
        the original sample_def. This method slices on that.
        Yields: (sample_index_or_name, FastQSpec) """
        if self.sample_indices is not None:
            for si in self.sample_indices:
                yield si, FastqSpec(fastq_mode=self.fastq_mode,
                                    read_path=self.read_path,
                                    lanes=self.lanes,
                                    sample_indices=[si],
                                    sample_names=None,
                                    interleaved=self.interleaved)
        elif self.sample_names is not None:
            for sn in self.sample_names:
                yield sn, FastqSpec(fastq_mode=self.fastq_mode,
                                    read_path=self.read_path,
                                    lanes=self.lanes,
                                    sample_indices=None,
                                    sample_names=[sn],
                                    interleaved=self.interleaved)
        else:
            raise ValueError("Cannote iterate over FastqSpec with no sample indices or names specified.")

    def is_single_group(self):
        """ Returns true if this spec contains a single sample index/name """
        return (self.sample_indices is not None and len(self.sample_indices) == 1) or \
            (self.sample_names is not None and len(self.sample_names) == 1)

    def get_fastqs(self, read_type):
        """ Find the FASTQ files matching this spec for one read type.
        read_type (str) - One of RA,R1,R2,R3,R4,I1,I2
        Raises ValueError if 'RA' is requested for non-interleaved FASTQs. """
        if read_type == 'RA' and not self.interleaved:
            raise ValueError('Read type "%s" was requested but is only supported for non-interleaved FASTQs.' % read_type)

        # If interleaved, translate R1|R2 => RA
        if self.interleaved and read_type in ('R1', 'R2'):
            read_type = 'RA'

        fastqs = []
        if self.fastq_mode == tk_constants.BCL_PROCESSOR_FASTQ_MODE:
            for group, _ in self.get_group_spec_iter():
                fastqs.extend(tk_fasta.find_input_fastq_files_10x_preprocess(
                    self.read_path, read_type, group, self.lanes))

        elif self.fastq_mode == tk_constants.ILMN_BCL2FASTQ_FASTQ_MODE:
            for group, _ in self.get_group_spec_iter():
                fastqs.extend(tk_fasta.find_input_fastq_files_bcl2fastq_demult(
                    self.read_path, read_type, group, self.lanes))

        return fastqs

    def __str__(self):
        return str(self.__dict__)
| [
"cellranger.io.mkdir",
"numpy.array",
"cellranger.io.open_maybe_gzip",
"itertools.izip",
"tenkit.seq.get_rev_comp",
"cellranger.utils.load_barcode_whitelist",
"tenkit.constants.SAMPLE_INDEX_MAP.get",
"cellranger.utils.get_fastq_read1",
"tenkit.fasta.find_input_fastq_files_10x_preprocess",
"tenkit.... | [((1268, 1297), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1291, 1297), False, 'import collections\n'), ((2369, 2445), 'itertools.islice', 'itertools.islice', (['read_iter', 'cr_constants.NUM_CHECK_BARCODES_FOR_ORIENTATION'], {}), '(read_iter, cr_constants.NUM_CHECK_BARCODES_FOR_ORIENTATION)\n', (2385, 2445), False, 'import itertools\n'), ((6522, 6641), 'tenkit.fasta.read_generator_fastq', 'tk_fasta.read_generator_fastq', (['fastq_open_file'], {'paired_end': "(reads_interleaved and read_def.read_type in ['R1', 'R2'])"}), "(fastq_open_file, paired_end=reads_interleaved and\n read_def.read_type in ['R1', 'R2'])\n", (6551, 6641), True, 'import tenkit.fasta as tk_fasta\n'), ((9176, 9213), 'itertools.imap', 'itertools.imap', (['match_func', 'pair_iter'], {}), '(match_func, pair_iter)\n', (9190, 9213), False, 'import itertools\n'), ((3023, 3073), 'cellranger.utils.load_barcode_whitelist', 'cr_utils.load_barcode_whitelist', (['barcode_whitelist'], {}), '(barcode_whitelist)\n', (3054, 3073), True, 'import cellranger.utils as cr_utils\n'), ((5607, 5655), 'cellranger.utils.get_fastq_read1', 'cr_utils.get_fastq_read1', (['read_tuple', 'None', '(True)'], {}), '(read_tuple, None, True)\n', (5631, 5655), True, 'import cellranger.utils as cr_utils\n'), ((8167, 8207), 'itertools.izip_longest', 'itertools.izip_longest', (['r1_iter', 'r2_iter'], {}), '(r1_iter, r2_iter)\n', (8189, 8207), False, 'import itertools\n'), ((13870, 13913), 'cellranger.io.mkdir', 'cr_io.mkdir', (['base_path'], {'allow_existing': '(True)'}), '(base_path, allow_existing=True)\n', (13881, 13913), True, 'import cellranger.io as cr_io\n'), ((15409, 15445), 'cellranger.io.open_maybe_gzip', 'cr_io.open_maybe_gzip', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (15430, 15445), True, 'import cellranger.io as cr_io\n'), ((15487, 15535), 'tenkit.fasta.write_read_fastq', 'tk_fasta.write_read_fastq', (['self.curr_file', '*data'], {}), '(self.curr_file, 
*data)\n', (15512, 15535), True, 'import tenkit.fasta as tk_fasta\n'), ((1157, 1200), 'itertools.izip', 'itertools.izip', (['read_defs', 'destination_tags'], {}), '(read_defs, destination_tags)\n', (1171, 1200), False, 'import itertools\n'), ((2528, 2552), 'tenkit.seq.get_rev_comp', 'tk_seq.get_rev_comp', (['seq'], {}), '(seq)\n', (2547, 2552), True, 'import tenkit.seq as tk_seq\n'), ((3136, 3195), 'cellranger.utils.format_barcode_seqs', 'cr_utils.format_barcode_seqs', (['self.barcode_seqs', 'gem_groups'], {}), '(self.barcode_seqs, gem_groups)\n', (3164, 3195), True, 'import cellranger.utils as cr_utils\n'), ((3885, 3926), 'numpy.array', 'np.array', (['barcode_counts'], {'dtype': 'np.uint32'}), '(barcode_counts, dtype=np.uint32)\n', (3893, 3926), True, 'import numpy as np\n'), ((5741, 5789), 'cellranger.utils.get_fastq_read2', 'cr_utils.get_fastq_read2', (['read_tuple', 'None', '(True)'], {}), '(read_tuple, None, True)\n', (5765, 5789), True, 'import cellranger.utils as cr_utils\n'), ((7862, 7911), 'tenkit.fasta.read_generator_fastq', 'tk_fasta.read_generator_fastq', (['f'], {'paired_end': '(True)'}), '(f, paired_end=True)\n', (7891, 7911), True, 'import tenkit.fasta as tk_fasta\n'), ((7941, 7998), 'tenkit.fasta.read_generator_fastq', 'tk_fasta.read_generator_fastq', (['files[0]'], {'paired_end': '(False)'}), '(files[0], paired_end=False)\n', (7970, 7998), True, 'import tenkit.fasta as tk_fasta\n'), ((8053, 8110), 'tenkit.fasta.read_generator_fastq', 'tk_fasta.read_generator_fastq', (['files[1]'], {'paired_end': '(False)'}), '(files[1], paired_end=False)\n', (8082, 8110), True, 'import tenkit.fasta as tk_fasta\n'), ((17390, 17437), 'tenkit.constants.SAMPLE_INDEX_MAP.get', 'tk_constants.SAMPLE_INDEX_MAP.get', (['sample_index'], {}), '(sample_index)\n', (17423, 17437), True, 'import tenkit.constants as tk_constants\n'), ((3723, 3735), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3732, 3735), False, 'import json\n'), ((10811, 10850), 
'cellranger.io.open_maybe_gzip', 'cr_io.open_maybe_gzip', (['in_filename', '"""r"""'], {}), "(in_filename, 'r')\n", (10832, 10850), True, 'import cellranger.io as cr_io\n'), ((17477, 17524), 'tenkit.constants.SAMPLE_INDEX_MAP.get', 'tk_constants.SAMPLE_INDEX_MAP.get', (['sample_index'], {}), '(sample_index)\n', (17510, 17524), True, 'import tenkit.constants as tk_constants\n'), ((22210, 22306), 'tenkit.fasta.find_input_fastq_files_10x_preprocess', 'tk_fasta.find_input_fastq_files_10x_preprocess', (['self.read_path', 'read_type', 'group', 'self.lanes'], {}), '(self.read_path, read_type,\n group, self.lanes)\n', (22256, 22306), True, 'import tenkit.fasta as tk_fasta\n'), ((22484, 22582), 'tenkit.fasta.find_input_fastq_files_bcl2fastq_demult', 'tk_fasta.find_input_fastq_files_bcl2fastq_demult', (['self.read_path', 'read_type', 'group', 'self.lanes'], {}), '(self.read_path, read_type,\n group, self.lanes)\n', (22532, 22582), True, 'import tenkit.fasta as tk_fasta\n'), ((12520, 12556), 'cellranger.io.open_maybe_gzip', 'cr_io.open_maybe_gzip', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (12541, 12556), True, 'import cellranger.io as cr_io\n'), ((12687, 12730), 'cellranger.io.open_maybe_gzip', 'cr_io.open_maybe_gzip', (['in_filenames[0]', '"""r"""'], {}), "(in_filenames[0], 'r')\n", (12708, 12730), True, 'import cellranger.io as cr_io\n'), ((12799, 12842), 'cellranger.io.open_maybe_gzip', 'cr_io.open_maybe_gzip', (['in_filenames[1]', '"""r"""'], {}), "(in_filenames[1], 'r')\n", (12820, 12842), True, 'import cellranger.io as cr_io\n')] |
from __future__ import annotations
import numpy as np
from edutorch.typing import NPArray
from .module import Module
from .rnn_cell import RNNCell
class RNN(Module):
    """Vanilla multi-timestep RNN assembled from RNNCell steps.

    Processes a minibatch of N sequences, each with T timesteps of
    dimension D, producing a hidden state of dimension H per timestep.
    """

    def __init__(self, input_size: int, hidden_size: int, batch_size: int) -> None:
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        n, d, h = batch_size, input_size, hidden_size
        # Small random initialization for the initial state and all weights.
        self.h0 = np.random.normal(scale=1e-3, size=(n, h))
        self.Wx = np.random.normal(scale=1e-3, size=(d, h))
        self.Wh = np.random.normal(scale=1e-3, size=(h, h))
        self.b = np.random.normal(scale=1e-3, size=h)
        self.set_parameters("h0", "Wx", "Wh", "b")

    def forward(self, x: NPArray) -> NPArray:
        """Run the RNN forward over an entire sequence.

        Args:
            x: input data for the whole timeseries, shape (N, T, D).

        Returns:
            Hidden states for every timestep, shape (N, T, H).
        """
        # Reset the cache explicitly so repeated forward passes do not
        # accumulate stale cells.
        self.cache = ()
        num_seq, num_steps, _ = x.shape
        hidden = np.zeros((num_seq, num_steps, self.hidden_size))
        state = self.h0
        for t in range(num_steps):
            step = RNNCell(state, self.Wx, self.Wh, self.b)
            hidden[:, t, :] = step(x[:, t, :])
            state = hidden[:, t, :]
            self.cache += (step,)
        return hidden

    def backward(self, dout: NPArray) -> tuple[NPArray, ...]:
        """Backpropagate through the entire sequence.

        Args:
            dout: upstream gradients of all hidden states, shape (N, T, H).
                These are the per-timestep loss gradients; the recurrent
                gradient flowing between timesteps is threaded in here.

        Returns:
            (dx, dh0, dWx, dWh, db): gradients for the inputs (N, T, D),
            initial hidden state (N, H), input weights (D, H), recurrent
            weights (H, H) and biases (H,).
        """
        num_seq, num_steps, _ = dout.shape
        dx = np.zeros((num_seq, num_steps, self.input_size))
        dh_next = np.zeros_like(self.h0)
        dWx, dWh, db = (np.zeros_like(p) for p in (self.Wx, self.Wh, self.b))
        # Walk the timesteps in reverse, carrying the recurrent gradient.
        for t in reversed(range(num_steps)):
            dx[:, t, :], dh_next, g_wx, g_wh, g_b = self.cache[t].backward(
                dout[:, t, :] + dh_next
            )
            dWx += g_wx
            dWh += g_wh
            db += g_b
        return dx, dh_next, dWx, dWh, db
| [
"numpy.random.normal",
"numpy.zeros",
"numpy.zeros_like"
] | [((431, 473), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.001)', 'size': '(N, H)'}), '(scale=0.001, size=(N, H))\n', (447, 473), True, 'import numpy as np\n'), ((491, 533), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.001)', 'size': '(D, H)'}), '(scale=0.001, size=(D, H))\n', (507, 533), True, 'import numpy as np\n'), ((551, 593), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.001)', 'size': '(H, H)'}), '(scale=0.001, size=(H, H))\n', (567, 593), True, 'import numpy as np\n'), ((610, 647), 'numpy.random.normal', 'np.random.normal', ([], {'scale': '(0.001)', 'size': 'H'}), '(scale=0.001, size=H)\n', (626, 647), True, 'import numpy as np\n'), ((1762, 1781), 'numpy.zeros', 'np.zeros', (['(N, T, H)'], {}), '((N, T, H))\n', (1770, 1781), True, 'import numpy as np\n'), ((2934, 2953), 'numpy.zeros', 'np.zeros', (['(N, T, D)'], {}), '((N, T, D))\n', (2942, 2953), True, 'import numpy as np\n'), ((2972, 2994), 'numpy.zeros_like', 'np.zeros_like', (['self.h0'], {}), '(self.h0)\n', (2985, 2994), True, 'import numpy as np\n'), ((3009, 3031), 'numpy.zeros_like', 'np.zeros_like', (['self.Wx'], {}), '(self.Wx)\n', (3022, 3031), True, 'import numpy as np\n'), ((3046, 3068), 'numpy.zeros_like', 'np.zeros_like', (['self.Wh'], {}), '(self.Wh)\n', (3059, 3068), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.zeros_like', 'np.zeros_like', (['self.b'], {}), '(self.b)\n', (3095, 3103), True, 'import numpy as np\n')] |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import os
import random
import math
import contextlib
import paddle
import paddle.fluid as fluid
import paddle.fluid.framework as framework
import paddle.fluid.profiler as profiler
from paddle.fluid.executor import Executor
import reader
import sys
# Python 2 compatibility: force UTF-8 as the default string encoding.
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding("utf-8")
sys.path.append('../')
import os
# Silence TensorFlow C++ logging (pulled in transitively by dependencies).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from args import *
from models.model_check import check_cuda
from models.language_model import lm_model
from config import RNNConfig
import logging
import pickle
# Fixed random seed used when --enable_ce requests reproducible runs.
SEED = 123
@contextlib.contextmanager
def profile_context(profile=True):
    """Optionally wrap the body in the Paddle profiler.

    When `profile` is True, collect an all-op profile and dump it to
    /tmp/paddingrnn.profile; otherwise run the body unmodified.
    """
    if not profile:
        yield
    else:
        with profiler.profiler('All', 'total', '/tmp/paddingrnn.profile'):
            yield
def get_current_model_para(train_prog, train_exe):
    """Snapshot every parameter of `train_prog` as a dict: name -> ndarray."""
    scope = fluid.global_scope()
    vals = {}
    for param in train_prog.block(0).all_parameters():
        # Copy the tensor out of the global scope into a numpy array.
        vals[param.name] = np.array(scope.find_var(param.name).get_tensor())
    return vals
def save_para_npz(train_prog, train_exe):
    """Save every parameter of `train_prog` into mode_base.npz.

    Fixes: removed the unused local `emb` (which also raised KeyError when
    "embedding_para" was absent) and the duplicated progress print.
    """
    print("begin to save model to model_base")
    scope = fluid.global_scope()
    vals = {}
    for param in train_prog.block(0).all_parameters():
        # Copy each tensor out of the global scope into a numpy array.
        vals[param.name] = np.array(scope.find_var(param.name).get_tensor())
    np.savez("mode_base", **vals)
def main():
    """Train, validate and test the PTB LSTM language model end to end."""
    args = parse_args()
    check_cuda(args.use_gpu)

    # Log to a file when --log_path is given, otherwise to the console.
    logger = logging.getLogger("lm")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if args.log_path:
        file_handler = logging.FileHandler(args.log_path)
        file_handler.setLevel(logging.INFO)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    else:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)

    logger.info('Running with args : {}'.format(args))
    config = RNNConfig(args)

    # define train program
    main_program = fluid.Program()
    startup_program = fluid.Program()
    if args.enable_ce:
        # Deterministic initialization for continuous-evaluation runs.
        startup_program.random_seed = SEED
    with fluid.program_guard(main_program, startup_program):
        with fluid.unique_name.guard():
            res_vars = lm_model.lm_model(
                config.hidden_size,
                config.vocab_size,
                config.batch_size,
                num_layers=config.num_layers,
                num_steps=config.num_steps,
                init_scale=config.init_scale,
                dropout=config.dropout,
                rnn_model=config.rnn_model,
                use_py_reader=args.use_py_reader)

            if args.use_py_reader:
                # The py_reader handle is returned last; strip it off.
                py_reader = res_vars[-1]
                res_vars = res_vars[:-1]
            loss, last_hidden, last_cell, feed_order = res_vars

            fluid.clip.set_gradient_clip(
                clip=fluid.clip.GradientClipByGlobalNorm(
                    clip_norm=config.max_grad_norm))

            # The learning rate lives in a persistable global var so a new
            # value can be fed in every step (manual LR decay below).
            learning_rate = fluid.layers.create_global_var(
                name="learning_rate",
                shape=[1],
                value=1.0,
                dtype='float32',
                persistable=True)

            optimizer = fluid.optimizer.SGD(learning_rate=learning_rate)
            optimizer.minimize(loss)

    # define inference program
    inference_program = fluid.Program()
    inference_startup_program = fluid.Program()
    with fluid.program_guard(inference_program, inference_startup_program):
        with fluid.unique_name.guard():
            lm_model.lm_model(
                config.hidden_size,
                config.vocab_size,
                config.batch_size,
                num_layers=config.num_layers,
                num_steps=config.num_steps,
                init_scale=config.init_scale,
                dropout=config.dropout,
                rnn_model=config.rnn_model,
                use_py_reader=False)
    # Some op behaves differently for train and inference, we need to call
    # this clone function to ensure every op is right for inference.
    inference_program = inference_program.clone(for_test=True)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = Executor(place)
    exe.run(startup_program)

    device_count = len(fluid.cuda_places()) if args.use_gpu else len(
        fluid.cpu_places())

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = device_count
    exec_strategy.num_iteration_per_drop_scope = 100

    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = True
    build_strategy.memory_optimize = False
    build_strategy.fuse_all_optimizer_ops = True

    if args.parallel:
        train_program = fluid.compiler.CompiledProgram(
            main_program).with_data_parallel(
                loss_name=loss.name,
                build_strategy=build_strategy,
                exec_strategy=exec_strategy)
    else:
        train_program = fluid.compiler.CompiledProgram(main_program)

    data_path = args.data_path
    print("begin to load data")
    ptb_data = reader.get_ptb_data(data_path)
    print("finished load data")
    train_data, valid_data, test_data = ptb_data

    def generate_init_data():
        # Zero-valued initial hidden/cell states for a fresh sequence.
        init_hidden = np.zeros(
            (config.num_layers, config.batch_size, config.hidden_size),
            dtype='float32')
        init_cell = np.zeros(
            (config.num_layers, config.batch_size, config.hidden_size),
            dtype='float32')
        return init_hidden, init_cell

    def generate_new_lr(epoch_id=0, device_count=1):
        # Exponential LR decay once epoch_start_decay has been passed;
        # replicated per device for data-parallel feeding.
        new_lr = config.base_learning_rate * (config.lr_decay**max(
            epoch_id + 1 - config.epoch_start_decay, 0.0))
        lr = np.ones((device_count), dtype='float32') * new_lr
        return lr

    def prepare_input(batch,
                      init_hidden=None,
                      init_cell=None,
                      epoch_id=0,
                      with_lr=True,
                      device_count=1):
        # Build the feed dict for one step from a (x, y) batch plus the
        # recurrent state carried over from the previous step.
        x, y = batch
        x = x.reshape((-1, config.num_steps, 1))
        y = y.reshape((-1, 1))
        res = {}
        res['x'] = x
        res['y'] = y
        if init_hidden is not None:
            res['init_hidden'] = init_hidden
        if init_cell is not None:
            res['init_cell'] = init_cell
        if with_lr:
            res['learning_rate'] = generate_new_lr(epoch_id, device_count)
        return res

    # NOTE: shadows the builtin eval() inside main's scope.
    def eval(data):
        # when eval the batch_size set to 1
        eval_data_iter = reader.get_data_iter(data, config.batch_size,
                                              config.num_steps)
        total_loss = 0.0
        iters = 0

        init_hidden, init_cell = generate_init_data()
        for batch_id, batch in enumerate(eval_data_iter):
            input_data_feed = prepare_input(
                batch, init_hidden, init_cell, epoch_id=0, with_lr=False)
            fetch_outs = exe.run(
                program=inference_program,
                feed=input_data_feed,
                fetch_list=[loss.name, last_hidden.name, last_cell.name],
                use_program_cache=False)

            cost_eval = np.array(fetch_outs[0])
            # Thread the recurrent state into the next batch.
            init_hidden = np.array(fetch_outs[1])
            init_cell = np.array(fetch_outs[2])

            total_loss += cost_eval
            iters += config.num_steps

        # Perplexity = exp(mean cross-entropy per timestep).
        ppl = np.exp(total_loss / iters)
        return ppl

    def get_log_interval(data_len):
        # Log roughly 10 times per epoch.
        num_batchs = data_len // config.batch_size
        epoch_size = (num_batchs - 1) // config.num_steps
        log_interval = max(1, epoch_size // 10)
        return log_interval

    def train_an_epoch(epoch_id, batch_times):
        # get train epoch size
        log_interval = get_log_interval(len(train_data))
        train_data_iter = reader.get_data_iter(train_data, config.batch_size,
                                               config.num_steps)

        total_loss = 0
        iters = 0
        init_hidden, init_cell = generate_init_data()
        for batch_id, batch in enumerate(train_data_iter):
            input_data_feed = prepare_input(
                batch,
                init_hidden=init_hidden,
                init_cell=init_cell,
                epoch_id=epoch_id,
                with_lr=True,
                device_count=device_count)

            batch_start_time = time.time()
            fetch_outs = exe.run(train_program,
                                 feed=input_data_feed,
                                 fetch_list=[
                                     loss.name, "learning_rate",
                                     last_hidden.name, last_cell.name
                                 ],
                                 use_program_cache=True)
            batch_time = time.time() - batch_start_time
            batch_times.append(batch_time)

            cost_train = np.array(fetch_outs[0])
            lr = np.array(fetch_outs[1])
            # Carry the recurrent state into the next batch.
            init_hidden = np.array(fetch_outs[2])
            init_cell = np.array(fetch_outs[3])

            total_loss += cost_train
            iters += config.num_steps
            if batch_id > 0 and batch_id % log_interval == 0:
                ppl = np.exp(total_loss / iters)
                print(
                    "-- Epoch:[%d]; Batch:[%d]; Time: %.5f s; ppl: %.5f, lr: %.5f"
                    % (epoch_id, batch_id, batch_time, ppl[0], lr[0]))

        ppl = np.exp(total_loss / iters)
        return ppl

    def train_an_epoch_py_reader(epoch_id, batch_times):
        # get train epoch size
        log_interval = get_log_interval(len(train_data))

        init_hidden, init_cell = generate_init_data()

        total_loss = 0
        iters = 0

        py_reader.start()
        batch_id = 0
        try:
            # Loop until the reader signals end-of-epoch via EOFException.
            while True:
                data_feeds = {}
                if batch_id == 0:
                    batch_time = 0
                    batch_start_time = time.time()
                else:
                    batch_time = time.time() - batch_start_time
                    batch_times.append(batch_time)
                    batch_start_time = time.time()

                new_lr = generate_new_lr(epoch_id, device_count)
                data_feeds['learning_rate'] = new_lr
                data_feeds["init_hidden"] = init_hidden
                data_feeds["init_cell"] = init_cell

                fetch_outs = exe.run(train_program,
                                     feed=data_feeds,
                                     fetch_list=[
                                         loss.name, "learning_rate",
                                         last_hidden.name, last_cell.name
                                     ],
                                     use_program_cache=True)

                cost_train = np.array(fetch_outs[0])
                lr = np.array(fetch_outs[1])
                # Carry the recurrent state into the next batch.
                init_hidden = np.array(fetch_outs[2])
                init_cell = np.array(fetch_outs[3])

                total_loss += cost_train
                iters += config.num_steps

                if batch_id > 0 and (log_interval == 0 or
                                     batch_id % log_interval == 0):
                    ppl = np.exp(total_loss / iters)
                    print(
                        "-- Epoch:[%d]; Batch:[%d]; Time: %.5f s; ppl: %.5f, lr: %.5f"
                        % (epoch_id, batch_id, batch_time, ppl[0], lr[0]))

                batch_id += 1
        except fluid.core.EOFException:
            # Normal end of epoch: reset the reader for the next one.
            py_reader.reset()

        batch_times.append(time.time() - batch_start_time)
        ppl = np.exp(total_loss / iters)
        return ppl

    def train():
        if args.use_py_reader:

            def data_gen():
                # Each device consumes an equal slice of the batch.
                data_iter_size = config.batch_size // device_count
                train_batches = reader.get_data_iter(train_data, data_iter_size,
                                                     config.num_steps)
                for batch in train_batches:
                    x, y = batch
                    x = x.reshape((-1, config.num_steps, 1))
                    y = y.reshape((-1, 1))
                    yield x, y

            py_reader.decorate_tensor_provider(data_gen)

        total_time = 0.0
        for epoch_id in range(config.max_epoch):
            batch_times = []
            epoch_start_time = time.time()
            if args.use_py_reader:
                train_ppl = train_an_epoch_py_reader(epoch_id, batch_times)
            else:
                train_ppl = train_an_epoch(epoch_id, batch_times)
            epoch_time = time.time() - epoch_start_time
            total_time += epoch_time
            print(
                "\nTrain epoch:[%d]; epoch Time: %.5f; ppl: %.5f; avg_time: %.5f steps/s \n"
                % (epoch_id, epoch_time, train_ppl[0],
                   len(batch_times) / sum(batch_times)))

            # FIXME(zjl): ppl[0] increases as batch_size increases.
            # We should find a better way to calculate ppl by normalizing batch_size.
            if device_count == 1 and config.batch_size <= 20 and epoch_id == 0 and train_ppl[
                    0] > 1000:
                # for bad init, after first epoch, the loss is over 1000
                # no more need to continue
                print(
                    "Parameters are randomly initialized and not good this time because the loss is over 1000 after the first epoch."
                )
                print("Abort this training process and please start again.")
                return

            if epoch_id == config.max_epoch - 1 and args.enable_ce:
                # kpis
                print("ptblm\tlstm_language_model_%s_duration_card%d\t%s" %
                      (args.rnn_model, device_count,
                       total_time / config.max_epoch))
                print("ptblm\tlstm_language_model_%s_loss_card%d\t%s" %
                      (args.rnn_model, device_count, train_ppl[0]))

            # NOTE(zjl): sometimes we have not enough data for eval if batch_size is large, i.e., 2100
            # Just skip to avoid error
            def is_valid_data(data, batch_size, num_steps):
                data_len = len(data)
                batch_len = data_len // batch_size

                epoch_size = (batch_len - 1) // num_steps
                return epoch_size >= 1

            valid_data_valid = is_valid_data(valid_data, config.batch_size,
                                             config.num_steps)
            if valid_data_valid:
                valid_ppl = eval(valid_data)
                print("Valid ppl: %.5f" % valid_ppl[0])
            else:
                print(
                    'WARNING: length of valid_data is {}, which is not enough for batch_size {} and num_steps {}'.
                    format(
                        len(valid_data), config.batch_size, config.num_steps))

            # Persist a checkpoint for every epoch under save_model_dir/<epoch>.
            save_model_dir = os.path.join(args.save_model_dir, str(epoch_id))
            fluid.io.save_persistables(
                executor=exe, dirname=save_model_dir, main_program=main_program)
            print("Saved model to: %s.\n" % save_model_dir)

    with profile_context(args.profile):
        train()

    test_ppl = eval(test_data)
    print("Test ppl:", test_ppl[0])
# Standard script entry point.
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"sys.setdefaultencoding",
"logging.StreamHandler",
"paddle.fluid.cuda_places",
"models.language_model.lm_model.lm_model",
"numpy.array",
"paddle.fluid.cpu_places",
"paddle.fluid.clip.GradientClipByGlobalNorm",
"sys.path.append",
"paddle.fluid.ExecutionStrategy",
"paddle.flui... | [((1083, 1105), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (1098, 1105), False, 'import sys\n'), ((1051, 1082), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (1073, 1082), False, 'import sys\n'), ((2305, 2334), 'numpy.savez', 'np.savez', (['"""mode_base"""'], {}), "('mode_base', **vals)\n", (2313, 2334), True, 'import numpy as np\n'), ((2378, 2402), 'models.model_check.check_cuda', 'check_cuda', (['args.use_gpu'], {}), '(args.use_gpu)\n', (2388, 2402), False, 'from models.model_check import check_cuda\n'), ((2417, 2440), 'logging.getLogger', 'logging.getLogger', (['"""lm"""'], {}), "('lm')\n", (2434, 2440), False, 'import logging\n'), ((2491, 2564), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (2508, 2564), False, 'import logging\n'), ((3050, 3065), 'config.RNNConfig', 'RNNConfig', (['args'], {}), '(args)\n', (3059, 3065), False, 'from config import RNNConfig\n'), ((3113, 3128), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (3126, 3128), True, 'import paddle.fluid as fluid\n'), ((3151, 3166), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (3164, 3166), True, 'import paddle.fluid as fluid\n'), ((4475, 4490), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (4488, 4490), True, 'import paddle.fluid as fluid\n'), ((4523, 4538), 'paddle.fluid.Program', 'fluid.Program', ([], {}), '()\n', (4536, 4538), True, 'import paddle.fluid as fluid\n'), ((5336, 5351), 'paddle.fluid.executor.Executor', 'Executor', (['place'], {}), '(place)\n', (5344, 5351), False, 'from paddle.fluid.executor import Executor\n'), ((5501, 5526), 'paddle.fluid.ExecutionStrategy', 'fluid.ExecutionStrategy', ([], {}), '()\n', (5524, 5526), True, 'import paddle.fluid as fluid\n'), ((5647, 5668), 'paddle.fluid.BuildStrategy', 'fluid.BuildStrategy', ([], 
{}), '()\n', (5666, 5668), True, 'import paddle.fluid as fluid\n'), ((6214, 6244), 'reader.get_ptb_data', 'reader.get_ptb_data', (['data_path'], {}), '(data_path)\n', (6233, 6244), False, 'import reader\n'), ((2619, 2653), 'logging.FileHandler', 'logging.FileHandler', (['args.log_path'], {}), '(args.log_path)\n', (2638, 2653), False, 'import logging\n'), ((2819, 2842), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2840, 2842), False, 'import logging\n'), ((3242, 3292), 'paddle.fluid.program_guard', 'fluid.program_guard', (['main_program', 'startup_program'], {}), '(main_program, startup_program)\n', (3261, 3292), True, 'import paddle.fluid as fluid\n'), ((4548, 4613), 'paddle.fluid.program_guard', 'fluid.program_guard', (['inference_program', 'inference_startup_program'], {}), '(inference_program, inference_startup_program)\n', (4567, 4613), True, 'import paddle.fluid as fluid\n'), ((5269, 5287), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (5284, 5287), True, 'import paddle.fluid as fluid\n'), ((5309, 5325), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (5323, 5325), True, 'import paddle.fluid as fluid\n'), ((6090, 6134), 'paddle.fluid.compiler.CompiledProgram', 'fluid.compiler.CompiledProgram', (['main_program'], {}), '(main_program)\n', (6120, 6134), True, 'import paddle.fluid as fluid\n'), ((6379, 6469), 'numpy.zeros', 'np.zeros', (['(config.num_layers, config.batch_size, config.hidden_size)'], {'dtype': '"""float32"""'}), "((config.num_layers, config.batch_size, config.hidden_size), dtype=\n 'float32')\n", (6387, 6469), True, 'import numpy as np\n'), ((6510, 6600), 'numpy.zeros', 'np.zeros', (['(config.num_layers, config.batch_size, config.hidden_size)'], {'dtype': '"""float32"""'}), "((config.num_layers, config.batch_size, config.hidden_size), dtype=\n 'float32')\n", (6518, 6600), True, 'import numpy as np\n'), ((7660, 7723), 'reader.get_data_iter', 'reader.get_data_iter', (['data', 
'config.batch_size', 'config.num_steps'], {}), '(data, config.batch_size, config.num_steps)\n', (7680, 7723), False, 'import reader\n'), ((8511, 8537), 'numpy.exp', 'np.exp', (['(total_loss / iters)'], {}), '(total_loss / iters)\n', (8517, 8537), True, 'import numpy as np\n'), ((8941, 9010), 'reader.get_data_iter', 'reader.get_data_iter', (['train_data', 'config.batch_size', 'config.num_steps'], {}), '(train_data, config.batch_size, config.num_steps)\n', (8961, 9010), False, 'import reader\n'), ((10556, 10582), 'numpy.exp', 'np.exp', (['(total_loss / iters)'], {}), '(total_loss / iters)\n', (10562, 10582), True, 'import numpy as np\n'), ((12730, 12756), 'numpy.exp', 'np.exp', (['(total_loss / iters)'], {}), '(total_loss / iters)\n', (12736, 12756), True, 'import numpy as np\n'), ((1425, 1485), 'paddle.fluid.profiler.profiler', 'profiler.profiler', (['"""All"""', '"""total"""', '"""/tmp/paddingrnn.profile"""'], {}), "('All', 'total', '/tmp/paddingrnn.profile')\n", (1442, 1485), True, 'import paddle.fluid.profiler as profiler\n'), ((3307, 3332), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (3330, 3332), True, 'import paddle.fluid as fluid\n'), ((3357, 3619), 'models.language_model.lm_model.lm_model', 'lm_model.lm_model', (['config.hidden_size', 'config.vocab_size', 'config.batch_size'], {'num_layers': 'config.num_layers', 'num_steps': 'config.num_steps', 'init_scale': 'config.init_scale', 'dropout': 'config.dropout', 'rnn_model': 'config.rnn_model', 'use_py_reader': 'args.use_py_reader'}), '(config.hidden_size, config.vocab_size, config.batch_size,\n num_layers=config.num_layers, num_steps=config.num_steps, init_scale=\n config.init_scale, dropout=config.dropout, rnn_model=config.rnn_model,\n use_py_reader=args.use_py_reader)\n', (3374, 3619), False, 'from models.language_model import lm_model\n'), ((4117, 4230), 'paddle.fluid.layers.create_global_var', 'fluid.layers.create_global_var', ([], {'name': '"""learning_rate"""', 'shape': 
'[1]', 'value': '(1.0)', 'dtype': '"""float32"""', 'persistable': '(True)'}), "(name='learning_rate', shape=[1], value=1.0,\n dtype='float32', persistable=True)\n", (4147, 4230), True, 'import paddle.fluid as fluid\n'), ((4333, 4381), 'paddle.fluid.optimizer.SGD', 'fluid.optimizer.SGD', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4352, 4381), True, 'import paddle.fluid as fluid\n'), ((4628, 4653), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (4651, 4653), True, 'import paddle.fluid as fluid\n'), ((4667, 4916), 'models.language_model.lm_model.lm_model', 'lm_model.lm_model', (['config.hidden_size', 'config.vocab_size', 'config.batch_size'], {'num_layers': 'config.num_layers', 'num_steps': 'config.num_steps', 'init_scale': 'config.init_scale', 'dropout': 'config.dropout', 'rnn_model': 'config.rnn_model', 'use_py_reader': '(False)'}), '(config.hidden_size, config.vocab_size, config.batch_size,\n num_layers=config.num_layers, num_steps=config.num_steps, init_scale=\n config.init_scale, dropout=config.dropout, rnn_model=config.rnn_model,\n use_py_reader=False)\n', (4684, 4916), False, 'from models.language_model import lm_model\n'), ((5405, 5424), 'paddle.fluid.cuda_places', 'fluid.cuda_places', ([], {}), '()\n', (5422, 5424), True, 'import paddle.fluid as fluid\n'), ((5460, 5478), 'paddle.fluid.cpu_places', 'fluid.cpu_places', ([], {}), '()\n', (5476, 5478), True, 'import paddle.fluid as fluid\n'), ((6853, 6891), 'numpy.ones', 'np.ones', (['device_count'], {'dtype': '"""float32"""'}), "(device_count, dtype='float32')\n", (6860, 6891), True, 'import numpy as np\n'), ((8299, 8322), 'numpy.array', 'np.array', (['fetch_outs[0]'], {}), '(fetch_outs[0])\n', (8307, 8322), True, 'import numpy as np\n'), ((8349, 8372), 'numpy.array', 'np.array', (['fetch_outs[1]'], {}), '(fetch_outs[1])\n', (8357, 8372), True, 'import numpy as np\n'), ((8397, 8420), 'numpy.array', 'np.array', (['fetch_outs[2]'], {}), 
'(fetch_outs[2])\n', (8405, 8420), True, 'import numpy as np\n'), ((9500, 9511), 'time.time', 'time.time', ([], {}), '()\n', (9509, 9511), False, 'import time\n'), ((10014, 10037), 'numpy.array', 'np.array', (['fetch_outs[0]'], {}), '(fetch_outs[0])\n', (10022, 10037), True, 'import numpy as np\n'), ((10055, 10078), 'numpy.array', 'np.array', (['fetch_outs[1]'], {}), '(fetch_outs[1])\n', (10063, 10078), True, 'import numpy as np\n'), ((10105, 10128), 'numpy.array', 'np.array', (['fetch_outs[2]'], {}), '(fetch_outs[2])\n', (10113, 10128), True, 'import numpy as np\n'), ((10153, 10176), 'numpy.array', 'np.array', (['fetch_outs[3]'], {}), '(fetch_outs[3])\n', (10161, 10176), True, 'import numpy as np\n'), ((13478, 13489), 'time.time', 'time.time', ([], {}), '()\n', (13487, 13489), False, 'import time\n'), ((16107, 16202), 'paddle.fluid.io.save_persistables', 'fluid.io.save_persistables', ([], {'executor': 'exe', 'dirname': 'save_model_dir', 'main_program': 'main_program'}), '(executor=exe, dirname=save_model_dir,\n main_program=main_program)\n', (16133, 16202), True, 'import paddle.fluid as fluid\n'), ((5849, 5893), 'paddle.fluid.compiler.CompiledProgram', 'fluid.compiler.CompiledProgram', (['main_program'], {}), '(main_program)\n', (5879, 5893), True, 'import paddle.fluid as fluid\n'), ((9914, 9925), 'time.time', 'time.time', ([], {}), '()\n', (9923, 9925), False, 'import time\n'), ((10337, 10363), 'numpy.exp', 'np.exp', (['(total_loss / iters)'], {}), '(total_loss / iters)\n', (10343, 10363), True, 'import numpy as np\n'), ((11928, 11951), 'numpy.array', 'np.array', (['fetch_outs[0]'], {}), '(fetch_outs[0])\n', (11936, 11951), True, 'import numpy as np\n'), ((11973, 11996), 'numpy.array', 'np.array', (['fetch_outs[1]'], {}), '(fetch_outs[1])\n', (11981, 11996), True, 'import numpy as np\n'), ((12027, 12050), 'numpy.array', 'np.array', (['fetch_outs[2]'], {}), '(fetch_outs[2])\n', (12035, 12050), True, 'import numpy as np\n'), ((12079, 12102), 'numpy.array', 
'np.array', (['fetch_outs[3]'], {}), '(fetch_outs[3])\n', (12087, 12102), True, 'import numpy as np\n'), ((12684, 12695), 'time.time', 'time.time', ([], {}), '()\n', (12693, 12695), False, 'import time\n'), ((12953, 13019), 'reader.get_data_iter', 'reader.get_data_iter', (['train_data', 'data_iter_size', 'config.num_steps'], {}), '(train_data, data_iter_size, config.num_steps)\n', (12973, 13019), False, 'import reader\n'), ((13710, 13721), 'time.time', 'time.time', ([], {}), '()\n', (13719, 13721), False, 'import time\n'), ((3998, 4065), 'paddle.fluid.clip.GradientClipByGlobalNorm', 'fluid.clip.GradientClipByGlobalNorm', ([], {'clip_norm': 'config.max_grad_norm'}), '(clip_norm=config.max_grad_norm)\n', (4033, 4065), True, 'import paddle.fluid as fluid\n'), ((11070, 11081), 'time.time', 'time.time', ([], {}), '()\n', (11079, 11081), False, 'import time\n'), ((11258, 11269), 'time.time', 'time.time', ([], {}), '()\n', (11267, 11269), False, 'import time\n'), ((12339, 12365), 'numpy.exp', 'np.exp', (['(total_loss / iters)'], {}), '(total_loss / iters)\n', (12345, 12365), True, 'import numpy as np\n'), ((11137, 11148), 'time.time', 'time.time', ([], {}), '()\n', (11146, 11148), False, 'import time\n'), ((1764, 1784), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (1782, 1784), True, 'import paddle.fluid as fluid\n'), ((2137, 2157), 'paddle.fluid.global_scope', 'fluid.global_scope', ([], {}), '()\n', (2155, 2157), True, 'import paddle.fluid as fluid\n')] |
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.imputation.bayes_mi import BayesGaussMI, MI
from numpy.testing import assert_allclose
def test_pat():
    """Missing-data patterns are detected and grouped by row."""
    # Rows 0 and 2 share one missingness pattern; rows 1 and 3 share another.
    nan = np.nan
    data = np.asarray([
        [1, nan, 3],
        [nan, 2, nan],
        [3, nan, 0],
        [nan, 1, nan],
        [3, 2, 1],
    ])
    imputer = BayesGaussMI(data)
    assert_allclose(imputer.patterns[0], np.asarray([0, 2]))
    assert_allclose(imputer.patterns[1], np.asarray([1, 3]))
def test_2x2():
    """Posterior mean/covariance recover the true moments of a 2x2 model."""
    # Build a correlated bivariate sample with known mean and covariance.
    np.random.seed(3434)
    rho = 0.5
    data = np.random.normal(size=(1000, 2))
    data[:, 1] = rho * data[:, 0] + np.sqrt(1 - rho ** 2) * data[:, 1]
    data[:, 0] = data[:, 0] * 2 + 1
    data[:, 1] = data[:, 1] * 3 - 2

    # Knock out some values at random in each column.
    miss = np.random.normal(size=data.shape[0])
    data[miss > 1, 0] = np.nan
    miss = np.random.normal(size=data.shape[0])
    data[miss > 1, 1] = np.nan

    imputer = BayesGaussMI(data)

    # Burn-in.
    for _ in range(500):
        imputer.update()

    # Accumulate posterior draws and completed-data statistics.
    ndraw = 500
    mean_acc = 0
    cov_acc = 0
    dmean_acc = 0
    dcov_acc = 0
    for _ in range(ndraw):
        imputer.update()
        mean_acc += imputer.mean
        cov_acc += imputer.cov
        dmean_acc += imputer.data.mean(0)
        dcov_acc += np.cov(imputer.data.T)

    target_mean = np.asarray([1, -2])
    target_cov = np.asarray([[4, 6 * rho], [6 * rho, 9]])
    assert_allclose(mean_acc / ndraw, target_mean, 0.1)
    assert_allclose(dmean_acc / ndraw, target_mean, 0.1)
    assert_allclose(cov_acc / ndraw, target_cov, 0.1)
    assert_allclose(dcov_acc / ndraw, target_cov, 0.1)
def test_MI():
    """Smoke/regression test for MI with ndarray and pandas input."""
    np.random.seed(414)
    data = np.random.normal(size=(200, 4))
    # Sprinkle missing values into every column.
    for col, rows in enumerate(([1, 3, 9], [1, 4, 3], [2, 11, 21], [11, 22, 99])):
        data[rows, col] = np.nan

    def model_args(arr):
        # Return endog, exog: regress the first column on the other three.
        return (arr[:, 0], arr[:, 1:])

    expected_params = np.asarray([-0.05347919, -0.02479701, 0.10075517])
    expected_cov = np.asarray([[0.00418232, 0.00029746, -0.00035057],
                              [0.00029746, 0.00407264, 0.00019496],
                              [-0.00035057, 0.00019496, 0.00509413]])

    for _ in range(2):
        np.random.seed(2342)
        imputer = BayesGaussMI(data.copy())
        result = MI(imputer, sm.OLS, model_args, burn=0).fit()
        result.summary()  # smoke test
        # TODO: why does the test tolerance need to be so slack?
        # There is unexpected variation across versions on travis.
        assert_allclose(result.params, expected_params, 0.25, 0)
        assert_allclose(result.cov_params(), expected_cov, 0.3, 0)
        # Second pass exercises the pandas code path.
        data = pd.DataFrame(data)
def test_MI_stat():
    """MI standard errors and FMI behave as theory predicts.

    The analysis model is x0 ~ x1 with standard error 1/sqrt(n) for the
    slope.  The nominal n is 1000 but half of the x1 values are missing.
    When x2 is independent of x1 the effective n is 500 (SE 1/sqrt(500),
    FMI 0.5); when x2 is an almost perfect proxy for x1 the effective n
    is restored to 1000 (SE 1/sqrt(1000), FMI 0).
    """
    np.random.seed(414)
    base = np.random.normal(size=(1000, 3))
    base[:, 0] += 0.5 * base[:, 1]

    expected_se = (1 / np.sqrt(500), 1 / np.sqrt(1000))
    expected_fmi = (0.5, 0)

    def model_args(arr):
        # Return endog, exog: regress the first column on the second.
        return (arr[:, 0], arr[:, 1])

    # Control the degree to which x2 proxies for x1.
    for j, proxy_r in enumerate((0, 0.9999)):
        data = base.copy()
        data[:, 2] = proxy_r * data[:, 1] + np.sqrt(1 - proxy_r ** 2) * data[:, 2]
        data[0:500, 1] = np.nan

        np.random.seed(2342)
        imputer = BayesGaussMI(data.copy())
        result = MI(imputer, sm.OLS, model_args, nrep=100, skip=10).fit()

        # Check the SE.
        assert np.abs(result.bse[0] - expected_se[j]) / expected_se[j] < 0.03
        # Check the FMI.
        assert np.abs(result.fmi[0] - expected_fmi[j]) < 0.05
| [
"numpy.random.normal",
"numpy.abs",
"numpy.sqrt",
"numpy.testing.assert_allclose",
"numpy.asarray",
"numpy.random.seed",
"statsmodels.imputation.bayes_mi.MI",
"pandas.DataFrame",
"numpy.cov",
"statsmodels.imputation.bayes_mi.BayesGaussMI"
] | [((198, 299), 'numpy.asarray', 'np.asarray', (['[[1, np.nan, 3], [np.nan, 2, np.nan], [3, np.nan, 0], [np.nan, 1, np.nan],\n [3, 2, 1]]'], {}), '([[1, np.nan, 3], [np.nan, 2, np.nan], [3, np.nan, 0], [np.nan, 1,\n np.nan], [3, 2, 1]])\n', (208, 299), True, 'import numpy as np\n'), ((325, 340), 'statsmodels.imputation.bayes_mi.BayesGaussMI', 'BayesGaussMI', (['x'], {}), '(x)\n', (337, 340), False, 'from statsmodels.imputation.bayes_mi import BayesGaussMI, MI\n'), ((345, 389), 'numpy.testing.assert_allclose', 'assert_allclose', (['bm.patterns[0]', 'np.r_[0, 2]'], {}), '(bm.patterns[0], np.r_[0, 2])\n', (360, 389), False, 'from numpy.testing import assert_allclose\n'), ((394, 438), 'numpy.testing.assert_allclose', 'assert_allclose', (['bm.patterns[1]', 'np.r_[1, 3]'], {}), '(bm.patterns[1], np.r_[1, 3])\n', (409, 438), False, 'from numpy.testing import assert_allclose\n'), ((516, 536), 'numpy.random.seed', 'np.random.seed', (['(3434)'], {}), '(3434)\n', (530, 536), True, 'import numpy as np\n'), ((545, 577), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000, 2)'}), '(size=(1000, 2))\n', (561, 577), True, 'import numpy as np\n'), ((753, 786), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape[0]'}), '(size=x.shape[0])\n', (769, 786), True, 'import numpy as np\n'), ((820, 853), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'x.shape[0]'}), '(size=x.shape[0])\n', (836, 853), True, 'import numpy as np\n'), ((889, 904), 'statsmodels.imputation.bayes_mi.BayesGaussMI', 'BayesGaussMI', (['x'], {}), '(x)\n', (901, 904), False, 'from statsmodels.imputation.bayes_mi import BayesGaussMI, MI\n'), ((1279, 1319), 'numpy.testing.assert_allclose', 'assert_allclose', (['mean', 'np.r_[1, -2]', '(0.1)'], {}), '(mean, np.r_[1, -2], 0.1)\n', (1294, 1319), False, 'from numpy.testing import assert_allclose\n'), ((1324, 1365), 'numpy.testing.assert_allclose', 'assert_allclose', (['dmean', 'np.r_[1, -2]', '(0.1)'], {}), '(dmean, np.r_[1, -2], 0.1)\n', 
(1339, 1365), False, 'from numpy.testing import assert_allclose\n'), ((1517, 1536), 'numpy.random.seed', 'np.random.seed', (['(414)'], {}), '(414)\n', (1531, 1536), True, 'import numpy as np\n'), ((1545, 1576), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(200, 4)'}), '(size=(200, 4))\n', (1561, 1576), True, 'import numpy as np\n'), ((3018, 3037), 'numpy.random.seed', 'np.random.seed', (['(414)'], {}), '(414)\n', (3032, 3037), True, 'import numpy as np\n'), ((3046, 3078), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000, 3)'}), '(size=(1000, 3))\n', (3062, 3078), True, 'import numpy as np\n'), ((1192, 1209), 'numpy.cov', 'np.cov', (['bm.data.T'], {}), '(bm.data.T)\n', (1198, 1209), True, 'import numpy as np\n'), ((1391, 1427), 'numpy.asarray', 'np.asarray', (['[[4, 6 * r], [6 * r, 9]]'], {}), '([[4, 6 * r], [6 * r, 9]])\n', (1401, 1427), True, 'import numpy as np\n'), ((1456, 1492), 'numpy.asarray', 'np.asarray', (['[[4, 6 * r], [6 * r, 9]]'], {}), '([[4, 6 * r], [6 * r, 9]])\n', (1466, 1492), True, 'import numpy as np\n'), ((1850, 1870), 'numpy.random.seed', 'np.random.seed', (['(2342)'], {}), '(2342)\n', (1864, 1870), True, 'import numpy as np\n'), ((1921, 1956), 'statsmodels.imputation.bayes_mi.MI', 'MI', (['imp', 'sm.OLS', 'model_args'], {'burn': '(0)'}), '(imp, sm.OLS, model_args, burn=0)\n', (1923, 1956), False, 'from statsmodels.imputation.bayes_mi import BayesGaussMI, MI\n'), ((2152, 2231), 'numpy.testing.assert_allclose', 'assert_allclose', (['r.params', 'np.r_[-0.05347919, -0.02479701, 0.10075517]', '(0.25)', '(0)'], {}), '(r.params, np.r_[-0.05347919, -0.02479701, 0.10075517], 0.25, 0)\n', (2167, 2231), False, 'from numpy.testing import assert_allclose\n'), ((2258, 2390), 'numpy.asarray', 'np.asarray', (['[[0.00418232, 0.00029746, -0.00035057], [0.00029746, 0.00407264, 0.00019496\n ], [-0.00035057, 0.00019496, 0.00509413]]'], {}), '([[0.00418232, 0.00029746, -0.00035057], [0.00029746, 0.00407264,\n 0.00019496], [-0.00035057, 
0.00019496, 0.00509413]])\n', (2268, 2390), True, 'import numpy as np\n'), ((2544, 2559), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (2556, 2559), True, 'import pandas as pd\n'), ((3508, 3528), 'numpy.random.seed', 'np.random.seed', (['(2342)'], {}), '(2342)\n', (3522, 3528), True, 'import numpy as np\n'), ((3579, 3625), 'statsmodels.imputation.bayes_mi.MI', 'MI', (['imp', 'sm.OLS', 'model_args'], {'nrep': '(100)', 'skip': '(10)'}), '(imp, sm.OLS, model_args, nrep=100, skip=10)\n', (3581, 3625), False, 'from statsmodels.imputation.bayes_mi import BayesGaussMI, MI\n'), ((3780, 3805), 'numpy.abs', 'np.abs', (['(r.fmi[0] - fmi[j])'], {}), '(r.fmi[0] - fmi[j])\n', (3786, 3805), True, 'import numpy as np\n'), ((616, 635), 'numpy.sqrt', 'np.sqrt', (['(1 - r ** 2)'], {}), '(1 - r ** 2)\n', (623, 635), True, 'import numpy as np\n'), ((3172, 3184), 'numpy.sqrt', 'np.sqrt', (['(500)'], {}), '(500)\n', (3179, 3184), True, 'import numpy as np\n'), ((3188, 3201), 'numpy.sqrt', 'np.sqrt', (['(1000)'], {}), '(1000)\n', (3195, 3201), True, 'import numpy as np\n'), ((3683, 3708), 'numpy.abs', 'np.abs', (['(r.bse[0] - exp[j])'], {}), '(r.bse[0] - exp[j])\n', (3689, 3708), True, 'import numpy as np\n'), ((3314, 3333), 'numpy.sqrt', 'np.sqrt', (['(1 - r ** 2)'], {}), '(1 - r ** 2)\n', (3321, 3333), True, 'import numpy as np\n')] |
import numpy as np
import h5py
import tensorflow as tf
# import keras
import os
import sys
import pickle
# Residual-network experiment: derive a per-experiment output directory
# from this script's own filename and make sure it exists.
expr_name = sys.argv[0][:-3]  # script filename without the trailing '.py'
expr_no = '1'  # experiment number; not referenced in the visible portion of the file
save_dir = os.path.abspath(os.path.join(os.path.expanduser('~/fluoro/code/jupyt/vox_fluoro'), expr_name))
print(save_dir)
os.makedirs(save_dir, exist_ok=True)
# -----------------------------------------------------------------
def cust_mean_squared_error_var(y_true, y_pred):
    """Mean squared error with each label component scaled by its variance.

    The per-component variance of the training labels is read from the
    ``'var'`` dataset in ``~/fluoro/data/compilation/labels_stats.h5py`` and
    divides the squared error, so every label dimension contributes on a
    comparable scale.  Used as both the loss and the metric in ``params``.

    Fixes over the original version:
    * the statistics file is read once and cached on the function object
      instead of being reopened on every invocation, and
    * the HDF5 handle is closed in a ``finally`` block so it cannot leak
      if the read raises.
    """
    var_v = getattr(cust_mean_squared_error_var, '_var_cache', None)
    if var_v is None:
        base_dir = os.path.expanduser('~/fluoro/data/compilation')
        stats_file = h5py.File(os.path.join(base_dir, 'labels_stats.h5py'), 'r')
        try:
            var_v = stats_file['var'][:]
        finally:
            stats_file.close()
        cust_mean_squared_error_var._var_cache = var_v
    return tf.keras.backend.mean(tf.keras.backend.square(y_pred - y_true) / var_v)
# -----------------------------------------------------------------
params = {
# ---
# 3D CONV
# ---
# Entry Layers
'v_conv_0_filters': 30,
'v_conv_0_kernel': 9,
'v_conv_0_strides_0': 2,
'v_conv_0_strides_1': 2,
'v_conv_0_strides_2': 2,
'v_conv_0_pad': 'same',
'v_spatial_drop_rate_0': 0.3,
'v_conv_1_filters': 30,
'v_conv_1_kernel': 5,
'v_conv_1_strides_0': 2,
'v_conv_1_strides_1': 2,
'v_conv_1_strides_2': 3,
'v_conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'v_pool_0_size': 2,
'v_pool_0_pad': 'same',
# ---
# Second Run of Entry Layers
'v_conv_2_filters': 30,
'v_conv_2_kernel': 5,
'v_conv_2_strides_0': 2,
'v_conv_2_strides_1': 2,
'v_conv_2_strides_2': 2,
'v_conv_2_pad': 'same',
# ---
# Run of Residual Layers
# 1
'v_conv_3_filters': 30,
'v_conv_3_kernel': 3,
'v_conv_3_strides_0': 1,
'v_conv_3_strides_1': 1,
'v_conv_3_strides_2': 1,
'v_conv_3_pad': 'same',
'v_spatial_drop_rate_2': 0.3,
'v_conv_4_filters': 30,
'v_conv_4_kernel': 3,
'v_conv_4_strides_0': 1,
'v_conv_4_strides_1': 1,
'v_conv_4_strides_2': 1,
'v_conv_4_pad': 'same',
# 2
'v_conv_5_filters': 30,
'v_conv_5_kernel': 3,
'v_conv_5_strides_0': 1,
'v_conv_5_strides_1': 1,
'v_conv_5_strides_2': 1,
'v_conv_5_pad': 'same',
'v_spatial_drop_rate_3': 0.3,
'v_conv_6_filters': 30,
'v_conv_6_kernel': 3,
'v_conv_6_strides_0': 1,
'v_conv_6_strides_1': 1,
'v_conv_6_strides_2': 1,
'v_conv_6_pad': 'same',
# 3
'v_conv_7_filters': 30,
'v_conv_7_kernel': 3,
'v_conv_7_strides_0': 1,
'v_conv_7_strides_1': 1,
'v_conv_7_strides_2': 1,
'v_conv_7_pad': 'same',
'v_spatial_drop_rate_4': 0.3,
'v_conv_8_filters': 30,
'v_conv_8_kernel': 3,
'v_conv_8_strides_0': 1,
'v_conv_8_strides_1': 1,
'v_conv_8_strides_2': 1,
'v_conv_8_pad': 'same',
# 4
'v_conv_9_filters': 40,
'v_conv_9_kernel': 3,
'v_conv_9_strides_0': 2,
'v_conv_9_strides_1': 2,
'v_conv_9_strides_2': 2,
'v_conv_9_pad': 'same',
'v_spatial_drop_rate_5': 0.3,
'v_conv_10_filters': 40,
'v_conv_10_kernel': 3,
'v_conv_10_strides_0': 1,
'v_conv_10_strides_1': 1,
'v_conv_10_strides_2': 1,
'v_conv_10_pad': 'same',
'v_conv_11_filters': 40,
'v_conv_11_kernel': 3,
'v_conv_11_strides_0': 2,
'v_conv_11_strides_1': 2,
'v_conv_11_strides_2': 2,
'v_conv_11_pad': 'same',
# 5
'v_conv_12_filters': 50,
'v_conv_12_kernel': 2,
'v_conv_12_strides_0': 2,
'v_conv_12_strides_1': 2,
'v_conv_12_strides_2': 2,
'v_conv_12_pad': 'same',
'v_spatial_drop_rate_6': 0.3,
'v_conv_13_filters': 50,
'v_conv_13_kernel': 2,
'v_conv_13_strides_0': 1,
'v_conv_13_strides_1': 1,
'v_conv_13_strides_2': 1,
'v_conv_13_pad': 'same',
'v_conv_14_filters': 50,
'v_conv_14_kernel': 1,
'v_conv_14_strides_0': 2,
'v_conv_14_strides_1': 2,
'v_conv_14_strides_2': 2,
'v_conv_14_pad': 'same',
# 6
'v_conv_15_filters': 50,
'v_conv_15_kernel': 2,
'v_conv_15_strides_0': 2,
'v_conv_15_strides_1': 2,
'v_conv_15_strides_2': 2,
'v_conv_15_pad': 'same',
'v_spatial_drop_rate_7': 0.3,
'v_conv_16_filters': 50,
'v_conv_16_kernel': 2,
'v_conv_16_strides_0': 1,
'v_conv_16_strides_1': 1,
'v_conv_16_strides_2': 1,
'v_conv_16_pad': 'same',
'v_conv_17_filters': 50,
'v_conv_17_kernel': 1,
'v_conv_17_strides_0': 2,
'v_conv_17_strides_1': 2,
'v_conv_17_strides_2': 2,
'v_conv_17_pad': 'same',
# ---
# Final Convs
'v_spatial_drop_rate_8': 0.5,
'v_conv_18_filters': 50,
'v_conv_18_kernel': 2,
'v_conv_18_strides_0': 1,
'v_conv_18_strides_1': 1,
'v_conv_18_strides_2': 1,
'v_conv_18_pad': 'valid',
'dense_1_v_units': 75,
'dense_2_v_units': 50,
# ---
# 2D CONV
# ---
# Entry Fluoro Layers
'conv_0_filters': 30,
'conv_0_kernel': 5,
'conv_0_strides': 2,
'conv_0_pad': 'same',
'spatial_drop_rate_0': 0.3,
'conv_1_filters': 30,
'conv_1_kernel': 5,
'conv_1_strides': 2,
'conv_1_pad': 'same',
# ---
# Pool After Initial Layers
'pool_0_size': 2,
'pool_0_pad': 'same',
# ---
# Run Of Residual Layers
# 1
'conv_2_filters': 30,
'conv_2_kernel': 3,
'conv_2_strides': 1,
'conv_2_pad': 'same',
'spatial_drop_rate_1': 0.3,
'conv_3_filters': 30,
'conv_3_kernel': 3,
'conv_3_strides': 1,
'conv_3_pad': 'same',
# 2
'conv_4_filters': 30,
'conv_4_kernel': 3,
'conv_4_strides': 1,
'conv_4_pad': 'same',
'spatial_drop_rate_2': 0.3,
'conv_5_filters': 30,
'conv_5_kernel': 3,
'conv_5_strides': 1,
'conv_5_pad': 'same',
# 3
'conv_6_filters': 30,
'conv_6_kernel': 3,
'conv_6_strides': 1,
'conv_6_pad': 'same',
'spatial_drop_rate_3': 0.3,
'conv_7_filters': 30,
'conv_7_kernel': 3,
'conv_7_strides': 1,
'conv_7_pad': 'same',
# 4
'conv_8_filters': 30,
'conv_8_kernel': 3,
'conv_8_strides': 1,
'conv_8_pad': 'same',
'spatial_drop_rate_4': 0.3,
'conv_9_filters': 30,
'conv_9_kernel': 3,
'conv_9_strides': 1,
'conv_9_pad': 'same',
# 5
'conv_10_filters': 40,
'conv_10_kernel': 3,
'conv_10_strides': 2,
'conv_10_pad': 'same',
'spatial_drop_rate_5': 0.3,
'conv_11_filters': 40,
'conv_11_kernel': 3,
'conv_11_strides': 1,
'conv_11_pad': 'same',
'conv_12_filters': 40,
'conv_12_kernel': 1,
'conv_12_strides': 2,
'conv_12_pad': 'same',
# 6
'conv_13_filters': 40,
'conv_13_kernel': 3,
'conv_13_strides': 2,
'conv_13_pad': 'same',
'spatial_drop_rate_6': 0.3,
'conv_14_filters': 40,
'conv_14_kernel': 3,
'conv_14_strides': 1,
'conv_14_pad': 'same',
'conv_15_filters': 40,
'conv_15_kernel': 1,
'conv_15_strides': 2,
'conv_15_pad': 'same',
# 7
'conv_16_filters': 40,
'conv_16_kernel': 3,
'conv_16_strides': 2,
'conv_16_pad': 'same',
'spatial_drop_rate_7': 0.3,
'conv_17_filters': 40,
'conv_17_kernel': 3,
'conv_17_strides': 1,
'conv_17_pad': 'same',
'conv_18_filters': 40,
'conv_18_kernel': 1,
'conv_18_strides': 2,
'conv_18_pad': 'same',
# ---
# Final Conv Layers
'spatial_drop_rate_8': 0.3,
'conv_19_filters': 50,
'conv_19_kernel': 2,
'conv_19_strides': 1,
'conv_19_pad': 'valid',
# ---
# Dense Layers
'dense_0_f_units': 50,
'dense_1_f_units': 50,
'dense_comb_1_units': 50,
'dense_comb_2_units': 50,
# Calibration Dense Layers
'dense_1_cali_units': 20,
'dense_2_cali_units': 6,
'dense_comb_v_1_units': 20,
'dense_comb_v_2_units': 6,
# Top Level Dense Units
'dense_1_co_units': 250,
'drop_1_comb_rate': 0.2,
'dense_2_co_units': 150,
'dense_3_co_units': 100,
'drop_2_comb_rate': 0.2,
'dense_4_co_units': 20,
# Main Output
'main_output_units': 6,
'main_output_act': 'linear',
# General Housekeeping
'v_conv_regularizer': None,
'conv_regularizer': None,
'dense_regularizer_1': None,
'dense_regularizer_2': None,
'activation_fn': 'elu',
'v_intra_act_fn': None,
'v_res_act_fn': 'elu',
'c_intra_act_fn': None,
'c_res_act_fn': 'elu',
'res_act_fn': 'elu',
'kern_init': 'glorot_uniform',
'model_opt': tf.keras.optimizers.Adam,
'learning_rate': 0.001,
'model_epochs': 50,
'model_batchsize': 5,
'model_loss': cust_mean_squared_error_var,
'model_metric': cust_mean_squared_error_var
}
# -----------------------------------------------------------------
channel_order = 'channels_last'
img_input_shape = (128, 128, 1)
vox_input_shape = (199, 164, 566, 1)
cali_input_shape = (6,)
# Input Layers
input_vox = tf.keras.Input(shape=vox_input_shape, name='input_vox', dtype='float32')
input_fluoro_1 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_1', dtype='float32')
input_fluoro_2 = tf.keras.Input(shape=img_input_shape, name='input_fluoro_2', dtype='float32')
input_cali = tf.keras.Input(shape=cali_input_shape, name='input_cali', dtype='float32')
# -----------------------------------------------------------------
# ---
# Entry Layers
v_conv_0 = tf.keras.layers.Conv3D(filters=params['v_conv_0_filters'], kernel_size=params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=params['v_conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(input_vox)
bn_0 = tf.keras.layers.BatchNormalization()(v_conv_0)
v_spat_0 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_0'])(bn_0)
v_conv_1 = tf.keras.layers.Conv3D(filters=params['v_conv_1_filters'], kernel_size=params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=params['v_conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_0)
# ---
# Pool After Initial Layers
v_pool_0 = tf.keras.layers.MaxPooling3D(pool_size=params['v_pool_0_size'], padding=params['v_pool_0_pad'], data_format=channel_order)(v_conv_1)
# ---
# Second Run of Entry Layers
bn_1 = tf.keras.layers.BatchNormalization()(v_pool_0)
v_conv_2 = tf.keras.layers.Conv3D(filters=params['v_conv_2_filters'], kernel_size=params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=params['v_conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(v_conv_2)
# 1
v_conv_3 = tf.keras.layers.Conv3D(filters=params['v_conv_3_filters'], kernel_size=params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'], params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=params['v_conv_3_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(v_conv_3)
v_spat_2 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_2'])(bn_3)
v_conv_4 = tf.keras.layers.Conv3D(filters=params['v_conv_4_filters'], kernel_size=params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=params['v_conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_2)
bn_4 = tf.keras.layers.BatchNormalization()(v_conv_4)
v_add_0 = tf.keras.layers.Add()([bn_4, bn_2])
v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_0)
# 2
v_conv_5 = tf.keras.layers.Conv3D(filters=params['v_conv_5_filters'], kernel_size=params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=params['v_conv_5_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_0)
bn_5 = tf.keras.layers.BatchNormalization()(v_conv_5)
v_spat_3 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_3'])(bn_5)
v_conv_6 = tf.keras.layers.Conv3D(filters=params['v_conv_6_filters'], kernel_size=params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=params['v_conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_3)
bn_6 = tf.keras.layers.BatchNormalization()(v_conv_6)
v_add_1 = tf.keras.layers.Add()([bn_6, v_act_0])
v_act_1 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_1)
# 3
v_conv_7 = tf.keras.layers.Conv3D(filters=params['v_conv_7_filters'], kernel_size=params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=params['v_conv_7_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_1)
bn_7 = tf.keras.layers.BatchNormalization()(v_conv_7)
v_spat_4 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_4'])(bn_7)
v_conv_8 = tf.keras.layers.Conv3D(filters=params['v_conv_8_filters'], kernel_size=params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=params['v_conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_4)
bn_8 = tf.keras.layers.BatchNormalization()(v_conv_8)
v_add_2 = tf.keras.layers.Add()([bn_8, v_act_1])
v_act_2 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_2)
# 4
v_conv_9 = tf.keras.layers.Conv3D(filters=params['v_conv_9_filters'], kernel_size=params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=params['v_conv_9_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_2)
bn_9 = tf.keras.layers.BatchNormalization()(v_conv_9)
v_spat_5 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_5'])(bn_9)
v_conv_10 = tf.keras.layers.Conv3D(filters=params['v_conv_10_filters'], kernel_size=params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=params['v_conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_5)
bn_10 = tf.keras.layers.BatchNormalization()(v_conv_10)
v_conv_11 = tf.keras.layers.Conv3D(filters=params['v_conv_11_filters'], kernel_size=params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=params['v_conv_11_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_2)
bn_11 = tf.keras.layers.BatchNormalization()(v_conv_11)
v_add_3 = tf.keras.layers.Add()([bn_10, bn_11])
v_act_3 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_3)
# 5
v_conv_12 = tf.keras.layers.Conv3D(filters=params['v_conv_12_filters'], kernel_size=params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=params['v_conv_12_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_3)
bn_12 = tf.keras.layers.BatchNormalization()(v_conv_12)
v_spat_6 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_6'])(bn_12)
v_conv_13 = tf.keras.layers.Conv3D(filters=params['v_conv_13_filters'], kernel_size=params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=params['v_conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_6)
bn_13 = tf.keras.layers.BatchNormalization()(v_conv_13)
v_conv_14 = tf.keras.layers.Conv3D(filters=params['v_conv_14_filters'], kernel_size=params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=params['v_conv_14_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_3)
bn_14 = tf.keras.layers.BatchNormalization()(v_conv_14)
v_add_4 = tf.keras.layers.Add()([bn_13, bn_14])
v_act_4 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_4)
# 6
# Residual unit 6 of the voxel branch (projection shortcut, like units 4-5).
# BUG FIX: this unit previously re-used unit 5's hyperparameter keys
# ('v_conv_12_*', 'v_conv_13_*', 'v_conv_14_*', 'v_spatial_drop_rate_6'),
# leaving the dedicated 'v_conv_15_*'..'v_conv_17_*' and
# 'v_spatial_drop_rate_7' entries in `params` unread.  Those dedicated keys
# currently hold identical values, so the generated graph is unchanged.
v_conv_15 = tf.keras.layers.Conv3D(filters=params['v_conv_15_filters'], kernel_size=params['v_conv_15_kernel'], strides=(params['v_conv_15_strides_0'], params['v_conv_15_strides_1'], params['v_conv_15_strides_2']), padding=params['v_conv_15_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_act_4)
bn_15 = tf.keras.layers.BatchNormalization()(v_conv_15)
v_spat_7 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_7'])(bn_15)
# NOTE(review): 'v_intra_act_fn' (an activation setting, None today) is passed
# as activity_regularizer here, matching units 4-5 -- confirm this is intended.
v_conv_16 = tf.keras.layers.Conv3D(filters=params['v_conv_16_filters'], kernel_size=params['v_conv_16_kernel'], strides=(params['v_conv_16_strides_0'], params['v_conv_16_strides_1'], params['v_conv_16_strides_2']), padding=params['v_conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_spat_7)
bn_16 = tf.keras.layers.BatchNormalization()(v_conv_16)
# 1x1x1 strided projection of the unit input so the shortcut shape matches.
v_conv_17 = tf.keras.layers.Conv3D(filters=params['v_conv_17_filters'], kernel_size=params['v_conv_17_kernel'], strides=(params['v_conv_17_strides_0'], params['v_conv_17_strides_1'], params['v_conv_17_strides_2']), padding=params['v_conv_17_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_intra_act_fn'])(v_act_4)
bn_17 = tf.keras.layers.BatchNormalization()(v_conv_17)
v_add_5 = tf.keras.layers.Add()([bn_16, bn_17])
v_act_5 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(v_add_5)
# ---
# Final Conv Layers
bn_18 = tf.keras.layers.BatchNormalization()(v_act_5)
v_spat_8 = tf.keras.layers.SpatialDropout3D(rate=params['v_spatial_drop_rate_8'])(bn_18)
v_conv_18 = tf.keras.layers.Conv3D(filters=params['v_conv_18_filters'], kernel_size=params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=params['v_conv_18_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['v_conv_regularizer'])(v_spat_8)
# ---
# Dense Layers
v_flatten_0 = tf.keras.layers.Flatten()(v_conv_18)
bn_15 = tf.keras.layers.BatchNormalization()(v_flatten_0)
dense_1_v = tf.keras.layers.Dense(units=params['dense_1_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_15)
bn_16 = tf.keras.layers.BatchNormalization()(dense_1_v)
dense_2_v = tf.keras.layers.Dense(units=params['dense_2_v_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_16)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_1 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_1)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_1)
conv_0_1 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_1)
spat_0_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_1 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_1)
# ---
# Pool After Initial Layers
pool_0_1 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_1)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_1)
# 1
conv_2_1 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_1)
spat_1_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_1 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_1)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_1)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2
# Residual unit 2 of the fluoro branch (identity shortcut, like units 1/3/4).
# BUG FIX: the second conv and the dropout previously re-used unit 1's keys
# ('conv_3_*', 'spatial_drop_rate_1') instead of their own 'conv_5_*' /
# 'spatial_drop_rate_2' entries, which were defined in `params` but never
# read.  The dedicated keys hold identical values, so the generated graph
# is unchanged.
conv_4_1 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_1)
spat_2_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_1 = tf.keras.layers.Conv2D(filters=params['conv_5_filters'], kernel_size=params['conv_5_kernel'], strides=params['conv_5_strides'], padding=params['conv_5_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_1)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_1)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_1 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_1)
spat_3_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_1 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_1)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_1)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_1 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_1)
spat_4_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_1 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_1)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_1)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
conv_10_1 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_1)
spat_5_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_1 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_1)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_1)
conv_12_1 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_1)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_1 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_1)
spat_6_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_1 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_1)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_1)
conv_15_1 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_1)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
conv_16_1 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_1)
spat_7_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_1 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_1)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_1)
conv_18_1 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_1)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_1 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_1 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_1)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_1)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_1 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_1)
dense_1_f_1 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
# -----------------------------------------------------------------
# ---
# Entry Fluoro Layers
per_image_stand_2 = tf.keras.layers.Lambda(lambda frame: tf.image.per_image_standardization(frame))(input_fluoro_2)
bn_0 = tf.keras.layers.BatchNormalization()(per_image_stand_2)
conv_0_2 = tf.keras.layers.Conv2D(filters=params['conv_0_filters'], kernel_size=params['conv_0_kernel'], strides=params['conv_0_strides'], padding=params['conv_0_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(conv_0_2)
spat_0_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_0'])(bn_1)
conv_1_2 = tf.keras.layers.Conv2D(filters=params['conv_1_filters'], kernel_size=params['conv_1_kernel'], strides=params['conv_1_strides'], padding=params['conv_1_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_0_2)
# ---
# Pool After Initial Layers
pool_0_2 = tf.keras.layers.AveragePooling2D(pool_size=params['pool_0_size'], padding=params['pool_0_pad'])(conv_1_2)
# ---
# Run of Residual Layers
bn_2 = tf.keras.layers.BatchNormalization()(pool_0_2)
# 1
conv_2_2 = tf.keras.layers.Conv2D(filters=params['conv_2_filters'], kernel_size=params['conv_2_kernel'], strides=params['conv_2_strides'], padding=params['conv_2_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(bn_2)
bn_3 = tf.keras.layers.BatchNormalization()(conv_2_2)
spat_1_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_1'])(bn_3)
conv_3_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_1_2)
bn_4 = tf.keras.layers.BatchNormalization()(conv_3_2)
add_0 = tf.keras.layers.Add()([bn_4, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_0)
# 2
conv_4_2 = tf.keras.layers.Conv2D(filters=params['conv_4_filters'], kernel_size=params['conv_4_kernel'], strides=params['conv_4_strides'], padding=params['conv_4_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_0)
bn_5 = tf.keras.layers.BatchNormalization()(conv_4_2)
spat_2_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_2'])(bn_5)
conv_5_2 = tf.keras.layers.Conv2D(filters=params['conv_3_filters'], kernel_size=params['conv_3_kernel'], strides=params['conv_3_strides'], padding=params['conv_3_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_2_2)
bn_6 = tf.keras.layers.BatchNormalization()(conv_5_2)
add_1 = tf.keras.layers.Add()([act_0, bn_6])
act_1 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_1)
# 3
conv_6_2 = tf.keras.layers.Conv2D(filters=params['conv_6_filters'], kernel_size=params['conv_6_kernel'], strides=params['conv_6_strides'], padding=params['conv_6_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_1)
bn_7 = tf.keras.layers.BatchNormalization()(conv_6_2)
spat_3_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_3'])(bn_7)
conv_7_2 = tf.keras.layers.Conv2D(filters=params['conv_7_filters'], kernel_size=params['conv_7_kernel'], strides=params['conv_7_strides'], padding=params['conv_7_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_3_2)
bn_8 = tf.keras.layers.BatchNormalization()(conv_7_2)
add_2 = tf.keras.layers.Add()([act_1, bn_8])
act_2 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_2)
# 4
conv_8_2 = tf.keras.layers.Conv2D(filters=params['conv_8_filters'], kernel_size=params['conv_8_kernel'], strides=params['conv_8_strides'], padding=params['conv_8_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_2)
bn_9 = tf.keras.layers.BatchNormalization()(conv_8_2)
spat_4_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_4'])(bn_9)
conv_9_2 = tf.keras.layers.Conv2D(filters=params['conv_9_filters'], kernel_size=params['conv_9_kernel'], strides=params['conv_9_strides'], padding=params['conv_9_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_4_2)
bn_10 = tf.keras.layers.BatchNormalization()(conv_9_2)
add_3 = tf.keras.layers.Add()([act_2, bn_10])
act_3 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_3)
# 5
conv_10_2 = tf.keras.layers.Conv2D(filters=params['conv_10_filters'], kernel_size=params['conv_10_kernel'], strides=params['conv_10_strides'], padding=params['conv_10_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_11 = tf.keras.layers.BatchNormalization()(conv_10_2)
spat_5_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_5'])(bn_11)
conv_11_2 = tf.keras.layers.Conv2D(filters=params['conv_11_filters'], kernel_size=params['conv_11_kernel'], strides=params['conv_11_strides'], padding=params['conv_11_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_5_2)
bn_12 = tf.keras.layers.BatchNormalization()(conv_11_2)
conv_12_2 = tf.keras.layers.Conv2D(filters=params['conv_12_filters'], kernel_size=params['conv_12_kernel'], strides=params['conv_12_strides'], padding=params['conv_12_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_3)
bn_13 = tf.keras.layers.BatchNormalization()(conv_12_2)
add_4 = tf.keras.layers.Add()([bn_12, bn_13])
act_4 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_4)
# 6
conv_13_2 = tf.keras.layers.Conv2D(filters=params['conv_13_filters'], kernel_size=params['conv_13_kernel'], strides=params['conv_13_strides'], padding=params['conv_13_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_14 = tf.keras.layers.BatchNormalization()(conv_13_2)
spat_6_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_6'])(bn_14)
conv_14_2 = tf.keras.layers.Conv2D(filters=params['conv_14_filters'], kernel_size=params['conv_14_kernel'], strides=params['conv_14_strides'], padding=params['conv_14_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_6_2)
bn_15 = tf.keras.layers.BatchNormalization()(conv_14_2)
conv_15_2 = tf.keras.layers.Conv2D(filters=params['conv_15_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_15_strides'], padding=params['conv_15_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_4)
bn_16 = tf.keras.layers.BatchNormalization()(conv_15_2)
add_5 = tf.keras.layers.Add()([bn_15, bn_16])
act_5 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_5)
# 7
conv_16_2 = tf.keras.layers.Conv2D(filters=params['conv_16_filters'], kernel_size=params['conv_15_kernel'], strides=params['conv_16_strides'], padding=params['conv_16_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_17 = tf.keras.layers.BatchNormalization()(conv_16_2)
spat_7_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_7'])(bn_17)
conv_17_2 = tf.keras.layers.Conv2D(filters=params['conv_17_filters'], kernel_size=params['conv_17_kernel'], strides=params['conv_17_strides'], padding=params['conv_17_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_7_2)
bn_18 = tf.keras.layers.BatchNormalization()(conv_17_2)
conv_18_2 = tf.keras.layers.Conv2D(filters=params['conv_18_filters'], kernel_size=params['conv_18_kernel'], strides=params['conv_18_strides'], padding=params['conv_18_pad'], data_format=channel_order, activation=params['c_intra_act_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(act_5)
bn_19 = tf.keras.layers.BatchNormalization()(conv_18_2)
add_6 = tf.keras.layers.Add()([bn_18, bn_19])
act_6 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(add_6)
# ---
# Final Conv Layers
bn_20 = tf.keras.layers.BatchNormalization()(act_6)
spat_8_2 = tf.keras.layers.SpatialDropout2D(rate=params['spatial_drop_rate_8'])(bn_20)
conv_19_2 = tf.keras.layers.Conv2D(filters=params['conv_19_filters'], kernel_size=params['conv_19_kernel'], strides=params['conv_19_strides'], padding=params['conv_19_pad'], data_format=channel_order, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['conv_regularizer'])(spat_8_2)
# ---
# Dense Layers
flatten_0 = tf.keras.layers.Flatten()(conv_19_2)
bn_21 = tf.keras.layers.BatchNormalization()(flatten_0)
dense_0_f_2 = tf.keras.layers.Dense(units=params['dense_0_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_21)
bn_22 = tf.keras.layers.BatchNormalization()(dense_0_f_2)
dense_1_f_2 = tf.keras.layers.Dense(units=params['dense_1_f_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_22)
# -----------------------------------------------------------------
bn_0 = tf.keras.layers.BatchNormalization()(input_cali)
dense_1_cali = tf.keras.layers.Dense(units=params['dense_1_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_1_cali)
dense_2_cali = tf.keras.layers.Dense(units=params['dense_2_cali_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
bn_2 = tf.keras.layers.BatchNormalization()(dense_2_cali)
# -----------------------------------------------------------------
# ---
# Combine the fluoro inputs together
dense_comb_f_0 = tf.keras.layers.Add()([dense_1_f_1, dense_1_f_2])
dense_comb_act_0 = tf.keras.layers.Activation(activation=params['c_res_act_fn'])(dense_comb_f_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_act_0)
dense_comb_f_1 = tf.keras.layers.Dense(units=params['dense_comb_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_f_1)
dense_comb_f_2 = tf.keras.layers.Dense(units=params['dense_comb_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
# -----------------------------------------------------------------
# ---
# Combine the fluoro with the vox
dense_comb_v_0 = tf.keras.layers.Add()([dense_comb_f_2, dense_2_v])
dense_comb_v_act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(dense_comb_v_0)
bn_0 = tf.keras.layers.BatchNormalization()(dense_comb_v_act_0)
dense_comb_v_1 = tf.keras.layers.Dense(units=params['dense_comb_v_1_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_0)
bn_1 = tf.keras.layers.BatchNormalization()(dense_comb_v_1)
dense_comb_v_2 = tf.keras.layers.Dense(units=params['dense_comb_v_2_units'], activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(bn_1)
# -----------------------------------------------------------------
top_comb = tf.keras.layers.Add()([dense_comb_v_2, bn_2])
top_comb_act = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(top_comb)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_comb_act)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, bn_2])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
top_dense_1 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(act_0)
bn_0 = tf.keras.layers.BatchNormalization()(top_dense_1)
top_dense_2 = tf.keras.layers.Dense(units=6, activation=params['activation_fn'], kernel_initializer=params['kern_init'], activity_regularizer=params['dense_regularizer_1'])(top_dense_1)
add_0 = tf.keras.layers.Add()([top_dense_2, act_0])
act_0 = tf.keras.layers.Activation(activation=params['v_res_act_fn'])(add_0)
# -----------------------------------------------------------------
# -----------------------------------------------------------------
# Main Output
main_output = tf.keras.layers.Dense(units=params['main_output_units'], activation=params['main_output_act'], kernel_initializer=params['kern_init'], name='main_output')(act_0)
# -----------------------------------------------------------------
# Model Housekeeping
model = tf.keras.Model(inputs=[input_vox, input_fluoro_1, input_fluoro_2, input_cali], outputs=main_output)
model.compile(optimizer=params['model_opt'](lr=params['learning_rate']), loss=params['model_loss'], metrics=[params['model_metric']])
tf.keras.utils.plot_model(model, os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.png')), show_shapes=True)
model.summary()
# -----------------------------------------------------------------
vox_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/voxels_pad.h5py'), 'r')
vox_init = vox_file['vox_dset']
image_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/images.h5py'), 'r')
image_init = image_file['image_dset']
label_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/labels.h5py'), 'r')
label_init = label_file['labels_dset']
cali_file = h5py.File(os.path.expanduser('~/fluoro/data/compilation/calibration.h5py'), 'r')
cali_init = cali_file['cali_len3_rot']
def split_train_test(shape, num_of_samples=None, ratio=0.2):
    """Randomly partition indices ``0..shape-1`` into test and train arrays.

    Args:
        shape: Total number of samples to draw indices from.
        num_of_samples: If given, only this many indices are sampled
            (without replacement) before splitting; otherwise all ``shape``
            indices are shuffled and used.
        ratio: Fraction of the sampled indices assigned to the test split.

    Returns:
        Tuple ``(test_indx, train_indx)`` of disjoint numpy index arrays
        whose concatenation covers every sampled index exactly once.
    """
    if num_of_samples is None:
        shuffled_indices = np.random.choice(shape, size=shape, replace=False)
    else:
        shuffled_indices = np.random.choice(shape, size=num_of_samples, replace=False)
    # Bug fix: the split size previously hard-coded 0.2 and silently
    # ignored the `ratio` argument.
    test_set_size = int(len(shuffled_indices) * ratio)
    test_indx = shuffled_indices[:test_set_size]
    train_indx = shuffled_indices[test_set_size:]
    return test_indx, train_indx
# None => use every labelled sample when building the splits.
num_of_samples = None
# First split off the held-out test set, then split the remainder into
# validation and training.  The second call returns positions INTO
# train_sup_indxs, which are mapped back to absolute indices below.
test_indxs, train_sup_indxs = split_train_test(len(label_init), num_of_samples=num_of_samples)
val_indxs, train_indxs = split_train_test(len(train_sup_indxs))
val_indxs = train_sup_indxs[val_indxs]
train_indxs = train_sup_indxs[train_indxs]
# h5py fancy indexing requires sorted, list-like indices.
test_indxs = sorted(list(test_indxs))
val_indxs = sorted(list(val_indxs))
train_indxs = sorted(list(train_indxs))
# Pickle sink for the split indices and training history.
hist_file = open(os.path.join(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl'), 'wb')
var_dict = {}
var_dict['test_indxs'] = test_indxs
var_dict['val_indxs'] = val_indxs
var_dict['train_indxs'] = train_indxs
# Materialize each HDF5 dataset fully into memory, slice out the val/train
# subsets, then close the file.  NOTE(review): the test split is saved via
# var_dict but never materialized here — presumably evaluated elsewhere.
vox_mat_train = vox_init[:]
vox_mat_val = vox_mat_train[val_indxs]
vox_mat_train = vox_mat_train[train_indxs]
vox_file.close()
image_mat_train = image_init[:]
image_mat_val = image_mat_train[val_indxs]
image_mat_train = image_mat_train[train_indxs]
image_file.close()
cali_mat_train = cali_init[:]
cali_mat_val = cali_mat_train[val_indxs]
cali_mat_train = cali_mat_train[train_indxs]
cali_file.close()
label_mat_train = label_init[:]
label_mat_val = label_mat_train[val_indxs]
label_mat_train = label_mat_train[train_indxs]
label_file.close()
# -----------------------------------------------------------------
print('\n\ncompletely loaded...\n\n')
# Train: each fluoro image pair is split into two single-channel inputs
# (axis 1 indexes the two views); channel dims are added with expand_dims.
result = model.fit(x={'input_vox': np.expand_dims(vox_mat_train, axis=-1), 'input_fluoro_1': np.expand_dims(image_mat_train[:, 0, :, :], axis=-1), 'input_fluoro_2': np.expand_dims(image_mat_train[:, 1, :, :], axis=-1), 'input_cali': cali_mat_train}, y=label_mat_train, validation_data=([np.expand_dims(vox_mat_val, axis=-1), np.expand_dims(image_mat_val[:, 0, :, :], axis=-1), np.expand_dims(image_mat_val[:, 1, :, :], axis=-1), cali_mat_val], label_mat_val), epochs=params['model_epochs'], batch_size=params['model_batchsize'], shuffle=True, verbose=2)
# Persist the trained model and the split/history bookkeeping.
model.save(os.path.abspath(os.path.join(save_dir, expr_name + '_' + expr_no + '.h5')))
var_dict['result'] = result.history
pickle.dump(var_dict, hist_file)
hist_file.close()
| [
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.layers.SpatialDropout3D",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.backend.square",
"os.path.expanduser",
"tensorf... | [((325, 361), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (336, 361), False, 'import os\n'), ((8976, 9048), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'vox_input_shape', 'name': '"""input_vox"""', 'dtype': '"""float32"""'}), "(shape=vox_input_shape, name='input_vox', dtype='float32')\n", (8990, 9048), True, 'import tensorflow as tf\n'), ((9066, 9143), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'img_input_shape', 'name': '"""input_fluoro_1"""', 'dtype': '"""float32"""'}), "(shape=img_input_shape, name='input_fluoro_1', dtype='float32')\n", (9080, 9143), True, 'import tensorflow as tf\n'), ((9161, 9238), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'img_input_shape', 'name': '"""input_fluoro_2"""', 'dtype': '"""float32"""'}), "(shape=img_input_shape, name='input_fluoro_2', dtype='float32')\n", (9175, 9238), True, 'import tensorflow as tf\n'), ((9252, 9326), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'cali_input_shape', 'name': '"""input_cali"""', 'dtype': '"""float32"""'}), "(shape=cali_input_shape, name='input_cali', dtype='float32')\n", (9266, 9326), True, 'import tensorflow as tf\n'), ((45627, 45730), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_vox, input_fluoro_1, input_fluoro_2, input_cali]', 'outputs': 'main_output'}), '(inputs=[input_vox, input_fluoro_1, input_fluoro_2,\n input_cali], outputs=main_output)\n', (45641, 45730), True, 'import tensorflow as tf\n'), ((48991, 49023), 'pickle.dump', 'pickle.dump', (['var_dict', 'hist_file'], {}), '(var_dict, hist_file)\n', (49002, 49023), False, 'import pickle\n'), ((497, 544), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation"""'], {}), "('~/fluoro/data/compilation')\n", (515, 544), False, 'import os\n'), ((9430, 9836), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_0_filters']", 
'kernel_size': "params['v_conv_0_kernel']", 'strides': "(params['v_conv_0_strides_0'], params['v_conv_0_strides_1'], params[\n 'v_conv_0_strides_2'])", 'padding': "params['v_conv_0_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_0_filters'], kernel_size=\n params['v_conv_0_kernel'], strides=(params['v_conv_0_strides_0'],\n params['v_conv_0_strides_1'], params['v_conv_0_strides_2']), padding=\n params['v_conv_0_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (9452, 9836), True, 'import tensorflow as tf\n'), ((9832, 9868), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (9866, 9868), True, 'import tensorflow as tf\n'), ((9892, 9962), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_0']"}), "(rate=params['v_spatial_drop_rate_0'])\n", (9924, 9962), True, 'import tensorflow as tf\n'), ((9980, 10386), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_1_filters']", 'kernel_size': "params['v_conv_1_kernel']", 'strides': "(params['v_conv_1_strides_0'], params['v_conv_1_strides_1'], params[\n 'v_conv_1_strides_2'])", 'padding': "params['v_conv_1_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_1_filters'], kernel_size=\n params['v_conv_1_kernel'], strides=(params['v_conv_1_strides_0'],\n params['v_conv_1_strides_1'], params['v_conv_1_strides_2']), padding=\n params['v_conv_1_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], 
kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (10002, 10386), True, 'import tensorflow as tf\n'), ((10420, 10547), 'tensorflow.keras.layers.MaxPooling3D', 'tf.keras.layers.MaxPooling3D', ([], {'pool_size': "params['v_pool_0_size']", 'padding': "params['v_pool_0_pad']", 'data_format': 'channel_order'}), "(pool_size=params['v_pool_0_size'], padding=\n params['v_pool_0_pad'], data_format=channel_order)\n", (10448, 10547), True, 'import tensorflow as tf\n'), ((10596, 10632), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (10630, 10632), True, 'import tensorflow as tf\n'), ((10654, 11060), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_2_filters']", 'kernel_size': "params['v_conv_2_kernel']", 'strides': "(params['v_conv_2_strides_0'], params['v_conv_2_strides_1'], params[\n 'v_conv_2_strides_2'])", 'padding': "params['v_conv_2_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_2_filters'], kernel_size=\n params['v_conv_2_kernel'], strides=(params['v_conv_2_strides_0'],\n params['v_conv_2_strides_1'], params['v_conv_2_strides_2']), padding=\n params['v_conv_2_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (10676, 11060), True, 'import tensorflow as tf\n'), ((11086, 11122), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (11120, 11122), True, 'import tensorflow as tf\n'), ((11149, 11555), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_3_filters']", 'kernel_size': "params['v_conv_3_kernel']", 'strides': "(params['v_conv_3_strides_0'], 
params['v_conv_3_strides_1'], params[\n 'v_conv_3_strides_2'])", 'padding': "params['v_conv_3_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_3_filters'], kernel_size=\n params['v_conv_3_kernel'], strides=(params['v_conv_3_strides_0'],\n params['v_conv_3_strides_1'], params['v_conv_3_strides_2']), padding=\n params['v_conv_3_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (11171, 11555), True, 'import tensorflow as tf\n'), ((11546, 11582), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (11580, 11582), True, 'import tensorflow as tf\n'), ((11604, 11674), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_2']"}), "(rate=params['v_spatial_drop_rate_2'])\n", (11636, 11674), True, 'import tensorflow as tf\n'), ((11692, 12094), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_4_filters']", 'kernel_size': "params['v_conv_4_kernel']", 'strides': "(params['v_conv_4_strides_0'], params['v_conv_4_strides_1'], params[\n 'v_conv_4_strides_2'])", 'padding': "params['v_conv_4_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_4_filters'], kernel_size=\n params['v_conv_4_kernel'], strides=(params['v_conv_4_strides_0'],\n params['v_conv_4_strides_1'], params['v_conv_4_strides_2']), padding=\n params['v_conv_4_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (11714, 
12094), True, 'import tensorflow as tf\n'), ((12089, 12125), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12123, 12125), True, 'import tensorflow as tf\n'), ((12146, 12167), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (12165, 12167), True, 'import tensorflow as tf\n'), ((12192, 12253), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (12218, 12253), True, 'import tensorflow as tf\n'), ((12279, 12685), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_5_filters']", 'kernel_size': "params['v_conv_5_kernel']", 'strides': "(params['v_conv_5_strides_0'], params['v_conv_5_strides_1'], params[\n 'v_conv_5_strides_2'])", 'padding': "params['v_conv_5_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_5_filters'], kernel_size=\n params['v_conv_5_kernel'], strides=(params['v_conv_5_strides_0'],\n params['v_conv_5_strides_1'], params['v_conv_5_strides_2']), padding=\n params['v_conv_5_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (12301, 12685), True, 'import tensorflow as tf\n'), ((12679, 12715), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (12713, 12715), True, 'import tensorflow as tf\n'), ((12737, 12807), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_3']"}), "(rate=params['v_spatial_drop_rate_3'])\n", (12769, 12807), True, 'import tensorflow as tf\n'), ((12825, 13227), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], 
{'filters': "params['v_conv_6_filters']", 'kernel_size': "params['v_conv_6_kernel']", 'strides': "(params['v_conv_6_strides_0'], params['v_conv_6_strides_1'], params[\n 'v_conv_6_strides_2'])", 'padding': "params['v_conv_6_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_6_filters'], kernel_size=\n params['v_conv_6_kernel'], strides=(params['v_conv_6_strides_0'],\n params['v_conv_6_strides_1'], params['v_conv_6_strides_2']), padding=\n params['v_conv_6_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (12847, 13227), True, 'import tensorflow as tf\n'), ((13222, 13258), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (13256, 13258), True, 'import tensorflow as tf\n'), ((13279, 13300), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (13298, 13300), True, 'import tensorflow as tf\n'), ((13328, 13389), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (13354, 13389), True, 'import tensorflow as tf\n'), ((13415, 13821), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_7_filters']", 'kernel_size': "params['v_conv_7_kernel']", 'strides': "(params['v_conv_7_strides_0'], params['v_conv_7_strides_1'], params[\n 'v_conv_7_strides_2'])", 'padding': "params['v_conv_7_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_7_filters'], kernel_size=\n params['v_conv_7_kernel'], strides=(params['v_conv_7_strides_0'],\n 
params['v_conv_7_strides_1'], params['v_conv_7_strides_2']), padding=\n params['v_conv_7_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (13437, 13821), True, 'import tensorflow as tf\n'), ((13815, 13851), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (13849, 13851), True, 'import tensorflow as tf\n'), ((13873, 13943), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_4']"}), "(rate=params['v_spatial_drop_rate_4'])\n", (13905, 13943), True, 'import tensorflow as tf\n'), ((13961, 14363), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_8_filters']", 'kernel_size': "params['v_conv_8_kernel']", 'strides': "(params['v_conv_8_strides_0'], params['v_conv_8_strides_1'], params[\n 'v_conv_8_strides_2'])", 'padding': "params['v_conv_8_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_8_filters'], kernel_size=\n params['v_conv_8_kernel'], strides=(params['v_conv_8_strides_0'],\n params['v_conv_8_strides_1'], params['v_conv_8_strides_2']), padding=\n params['v_conv_8_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (13983, 14363), True, 'import tensorflow as tf\n'), ((14358, 14394), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (14392, 14394), True, 'import tensorflow as tf\n'), ((14415, 14436), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (14434, 14436), True, 'import tensorflow as tf\n'), ((14464, 14525), 
'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (14490, 14525), True, 'import tensorflow as tf\n'), ((14551, 14957), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_9_filters']", 'kernel_size': "params['v_conv_9_kernel']", 'strides': "(params['v_conv_9_strides_0'], params['v_conv_9_strides_1'], params[\n 'v_conv_9_strides_2'])", 'padding': "params['v_conv_9_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_9_filters'], kernel_size=\n params['v_conv_9_kernel'], strides=(params['v_conv_9_strides_0'],\n params['v_conv_9_strides_1'], params['v_conv_9_strides_2']), padding=\n params['v_conv_9_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (14573, 14957), True, 'import tensorflow as tf\n'), ((14951, 14987), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (14985, 14987), True, 'import tensorflow as tf\n'), ((15009, 15079), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_5']"}), "(rate=params['v_spatial_drop_rate_5'])\n", (15041, 15079), True, 'import tensorflow as tf\n'), ((15098, 15506), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_10_filters']", 'kernel_size': "params['v_conv_10_kernel']", 'strides': "(params['v_conv_10_strides_0'], params['v_conv_10_strides_1'], params[\n 'v_conv_10_strides_2'])", 'padding': "params['v_conv_10_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 
'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_10_filters'], kernel_size=\n params['v_conv_10_kernel'], strides=(params['v_conv_10_strides_0'],\n params['v_conv_10_strides_1'], params['v_conv_10_strides_2']), padding=\n params['v_conv_10_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (15120, 15506), True, 'import tensorflow as tf\n'), ((15502, 15538), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (15536, 15538), True, 'import tensorflow as tf\n'), ((15562, 15970), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_11_filters']", 'kernel_size': "params['v_conv_11_kernel']", 'strides': "(params['v_conv_11_strides_0'], params['v_conv_11_strides_1'], params[\n 'v_conv_11_strides_2'])", 'padding': "params['v_conv_11_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_11_filters'], kernel_size=\n params['v_conv_11_kernel'], strides=(params['v_conv_11_strides_0'],\n params['v_conv_11_strides_1'], params['v_conv_11_strides_2']), padding=\n params['v_conv_11_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (15584, 15970), True, 'import tensorflow as tf\n'), ((15965, 16001), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (15999, 16001), True, 'import tensorflow as tf\n'), ((16023, 16044), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (16042, 16044), True, 'import tensorflow as tf\n'), ((16071, 16132), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], 
{'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (16097, 16132), True, 'import tensorflow as tf\n'), ((16159, 16571), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_12_filters']", 'kernel_size': "params['v_conv_12_kernel']", 'strides': "(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params[\n 'v_conv_12_strides_2'])", 'padding': "params['v_conv_12_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_12_filters'], kernel_size=\n params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'],\n params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=\n params['v_conv_12_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (16181, 16571), True, 'import tensorflow as tf\n'), ((16566, 16602), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (16600, 16602), True, 'import tensorflow as tf\n'), ((16625, 16695), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_6']"}), "(rate=params['v_spatial_drop_rate_6'])\n", (16657, 16695), True, 'import tensorflow as tf\n'), ((16715, 17123), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_13_filters']", 'kernel_size': "params['v_conv_13_kernel']", 'strides': "(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params[\n 'v_conv_13_strides_2'])", 'padding': "params['v_conv_13_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), 
"(filters=params['v_conv_13_filters'], kernel_size=\n params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'],\n params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=\n params['v_conv_13_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (16737, 17123), True, 'import tensorflow as tf\n'), ((17119, 17155), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (17153, 17155), True, 'import tensorflow as tf\n'), ((17179, 17587), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_14_filters']", 'kernel_size': "params['v_conv_14_kernel']", 'strides': "(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params[\n 'v_conv_14_strides_2'])", 'padding': "params['v_conv_14_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_14_filters'], kernel_size=\n params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'],\n params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=\n params['v_conv_14_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (17201, 17587), True, 'import tensorflow as tf\n'), ((17582, 17618), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (17616, 17618), True, 'import tensorflow as tf\n'), ((17640, 17661), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (17659, 17661), True, 'import tensorflow as tf\n'), ((17688, 17749), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), 
"(activation=params['v_res_act_fn'])\n", (17714, 17749), True, 'import tensorflow as tf\n'), ((17776, 18188), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_12_filters']", 'kernel_size': "params['v_conv_12_kernel']", 'strides': "(params['v_conv_12_strides_0'], params['v_conv_12_strides_1'], params[\n 'v_conv_12_strides_2'])", 'padding': "params['v_conv_12_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_12_filters'], kernel_size=\n params['v_conv_12_kernel'], strides=(params['v_conv_12_strides_0'],\n params['v_conv_12_strides_1'], params['v_conv_12_strides_2']), padding=\n params['v_conv_12_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (17798, 18188), True, 'import tensorflow as tf\n'), ((18183, 18219), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (18217, 18219), True, 'import tensorflow as tf\n'), ((18242, 18312), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_6']"}), "(rate=params['v_spatial_drop_rate_6'])\n", (18274, 18312), True, 'import tensorflow as tf\n'), ((18332, 18740), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_13_filters']", 'kernel_size': "params['v_conv_13_kernel']", 'strides': "(params['v_conv_13_strides_0'], params['v_conv_13_strides_1'], params[\n 'v_conv_13_strides_2'])", 'padding': "params['v_conv_13_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_13_filters'], kernel_size=\n 
params['v_conv_13_kernel'], strides=(params['v_conv_13_strides_0'],\n params['v_conv_13_strides_1'], params['v_conv_13_strides_2']), padding=\n params['v_conv_13_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (18354, 18740), True, 'import tensorflow as tf\n'), ((18736, 18772), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (18770, 18772), True, 'import tensorflow as tf\n'), ((18796, 19204), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_14_filters']", 'kernel_size': "params['v_conv_14_kernel']", 'strides': "(params['v_conv_14_strides_0'], params['v_conv_14_strides_1'], params[\n 'v_conv_14_strides_2'])", 'padding': "params['v_conv_14_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_intra_act_fn']"}), "(filters=params['v_conv_14_filters'], kernel_size=\n params['v_conv_14_kernel'], strides=(params['v_conv_14_strides_0'],\n params['v_conv_14_strides_1'], params['v_conv_14_strides_2']), padding=\n params['v_conv_14_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_intra_act_fn'])\n", (18818, 19204), True, 'import tensorflow as tf\n'), ((19199, 19235), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (19233, 19235), True, 'import tensorflow as tf\n'), ((19257, 19278), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (19276, 19278), True, 'import tensorflow as tf\n'), ((19305, 19366), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (19331, 19366), True, 'import 
tensorflow as tf\n'), ((19412, 19448), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (19446, 19448), True, 'import tensorflow as tf\n'), ((19469, 19539), 'tensorflow.keras.layers.SpatialDropout3D', 'tf.keras.layers.SpatialDropout3D', ([], {'rate': "params['v_spatial_drop_rate_8']"}), "(rate=params['v_spatial_drop_rate_8'])\n", (19501, 19539), True, 'import tensorflow as tf\n'), ((19559, 19971), 'tensorflow.keras.layers.Conv3D', 'tf.keras.layers.Conv3D', ([], {'filters': "params['v_conv_18_filters']", 'kernel_size': "params['v_conv_18_kernel']", 'strides': "(params['v_conv_18_strides_0'], params['v_conv_18_strides_1'], params[\n 'v_conv_18_strides_2'])", 'padding': "params['v_conv_18_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['v_conv_regularizer']"}), "(filters=params['v_conv_18_filters'], kernel_size=\n params['v_conv_18_kernel'], strides=(params['v_conv_18_strides_0'],\n params['v_conv_18_strides_1'], params['v_conv_18_strides_2']), padding=\n params['v_conv_18_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['v_conv_regularizer'])\n", (19581, 19971), True, 'import tensorflow as tf\n'), ((19995, 20020), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (20018, 20020), True, 'import tensorflow as tf\n'), ((20041, 20077), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (20075, 20077), True, 'import tensorflow as tf\n'), ((20103, 20294), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_1_v_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_1_v_units'], 
activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (20124, 20294), True, 'import tensorflow as tf\n'), ((20302, 20338), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (20336, 20338), True, 'import tensorflow as tf\n'), ((20362, 20553), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_2_v_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_2_v_units'], activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (20383, 20553), True, 'import tensorflow as tf\n'), ((20775, 20811), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (20809, 20811), True, 'import tensorflow as tf\n'), ((20842, 21170), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_0_filters']", 'kernel_size': "params['conv_0_kernel']", 'strides': "params['conv_0_strides']", 'padding': "params['conv_0_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_0_filters'], kernel_size=params\n ['conv_0_kernel'], strides=params['conv_0_strides'], padding=params[\n 'conv_0_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (20864, 21170), True, 'import tensorflow as tf\n'), ((21166, 21202), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (21200, 21202), True, 'import tensorflow as tf\n'), ((21224, 21292), 
'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_0']"}), "(rate=params['spatial_drop_rate_0'])\n", (21256, 21292), True, 'import tensorflow as tf\n'), ((21310, 21638), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_1_filters']", 'kernel_size': "params['conv_1_kernel']", 'strides': "params['conv_1_strides']", 'padding': "params['conv_1_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_1_filters'], kernel_size=params\n ['conv_1_kernel'], strides=params['conv_1_strides'], padding=params[\n 'conv_1_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (21332, 21638), True, 'import tensorflow as tf\n'), ((21676, 21776), 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', ([], {'pool_size': "params['pool_0_size']", 'padding': "params['pool_0_pad']"}), "(pool_size=params['pool_0_size'], padding=\n params['pool_0_pad'])\n", (21708, 21776), True, 'import tensorflow as tf\n'), ((21822, 21858), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (21856, 21858), True, 'import tensorflow as tf\n'), ((21885, 22213), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_2_filters']", 'kernel_size': "params['conv_2_kernel']", 'strides': "params['conv_2_strides']", 'padding': "params['conv_2_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_2_filters'], kernel_size=params\n ['conv_2_kernel'], strides=params['conv_2_strides'], 
padding=params[\n 'conv_2_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (21907, 22213), True, 'import tensorflow as tf\n'), ((22208, 22244), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (22242, 22244), True, 'import tensorflow as tf\n'), ((22266, 22334), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_1']"}), "(rate=params['spatial_drop_rate_1'])\n", (22298, 22334), True, 'import tensorflow as tf\n'), ((22352, 22681), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_3_filters']", 'kernel_size': "params['conv_3_kernel']", 'strides': "params['conv_3_strides']", 'padding': "params['conv_3_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_3_filters'], kernel_size=params\n ['conv_3_kernel'], strides=params['conv_3_strides'], padding=params[\n 'conv_3_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (22374, 22681), True, 'import tensorflow as tf\n'), ((22680, 22716), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (22714, 22716), True, 'import tensorflow as tf\n'), ((22735, 22756), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (22754, 22756), True, 'import tensorflow as tf\n'), ((22779, 22840), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (22805, 22840), True, 'import tensorflow as tf\n'), ((22864, 23192), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_4_filters']", 'kernel_size': "params['conv_4_kernel']", 'strides': "params['conv_4_strides']", 'padding': "params['conv_4_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_4_filters'], kernel_size=params\n ['conv_4_kernel'], strides=params['conv_4_strides'], padding=params[\n 'conv_4_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (22886, 23192), True, 'import tensorflow as tf\n'), ((23188, 23224), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (23222, 23224), True, 'import tensorflow as tf\n'), ((23246, 23314), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_1']"}), "(rate=params['spatial_drop_rate_1'])\n", (23278, 23314), True, 'import tensorflow as tf\n'), ((23332, 23661), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_3_filters']", 'kernel_size': "params['conv_3_kernel']", 'strides': "params['conv_3_strides']", 'padding': "params['conv_3_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_3_filters'], kernel_size=params\n ['conv_3_kernel'], strides=params['conv_3_strides'], padding=params[\n 'conv_3_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (23354, 23661), True, 'import tensorflow as tf\n'), ((23660, 23696), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (23694, 23696), True, 'import tensorflow as tf\n'), ((23715, 23736), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (23734, 23736), True, 'import tensorflow as tf\n'), ((23760, 23821), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (23786, 23821), True, 'import tensorflow as tf\n'), ((23845, 24173), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_6_filters']", 'kernel_size': "params['conv_6_kernel']", 'strides': "params['conv_6_strides']", 'padding': "params['conv_6_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_6_filters'], kernel_size=params\n ['conv_6_kernel'], strides=params['conv_6_strides'], padding=params[\n 'conv_6_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (23867, 24173), True, 'import tensorflow as tf\n'), ((24169, 24205), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (24203, 24205), True, 'import tensorflow as tf\n'), ((24227, 24295), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_3']"}), "(rate=params['spatial_drop_rate_3'])\n", (24259, 24295), True, 'import tensorflow as tf\n'), ((24313, 24642), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_7_filters']", 'kernel_size': "params['conv_7_kernel']", 'strides': "params['conv_7_strides']", 'padding': "params['conv_7_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': 
"params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_7_filters'], kernel_size=params\n ['conv_7_kernel'], strides=params['conv_7_strides'], padding=params[\n 'conv_7_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (24335, 24642), True, 'import tensorflow as tf\n'), ((24641, 24677), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (24675, 24677), True, 'import tensorflow as tf\n'), ((24696, 24717), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (24715, 24717), True, 'import tensorflow as tf\n'), ((24741, 24802), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (24767, 24802), True, 'import tensorflow as tf\n'), ((24826, 25154), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_8_filters']", 'kernel_size': "params['conv_8_kernel']", 'strides': "params['conv_8_strides']", 'padding': "params['conv_8_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_8_filters'], kernel_size=params\n ['conv_8_kernel'], strides=params['conv_8_strides'], padding=params[\n 'conv_8_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (24848, 25154), True, 'import tensorflow as tf\n'), ((25150, 25186), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (25184, 25186), True, 'import tensorflow as tf\n'), ((25208, 25276), 'tensorflow.keras.layers.SpatialDropout2D', 
'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_4']"}), "(rate=params['spatial_drop_rate_4'])\n", (25240, 25276), True, 'import tensorflow as tf\n'), ((25294, 25623), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_9_filters']", 'kernel_size': "params['conv_9_kernel']", 'strides': "params['conv_9_strides']", 'padding': "params['conv_9_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_9_filters'], kernel_size=params\n ['conv_9_kernel'], strides=params['conv_9_strides'], padding=params[\n 'conv_9_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (25316, 25623), True, 'import tensorflow as tf\n'), ((25623, 25659), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (25657, 25659), True, 'import tensorflow as tf\n'), ((25678, 25699), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (25697, 25699), True, 'import tensorflow as tf\n'), ((25724, 25785), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (25750, 25785), True, 'import tensorflow as tf\n'), ((25810, 26142), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_10_filters']", 'kernel_size': "params['conv_10_kernel']", 'strides': "params['conv_10_strides']", 'padding': "params['conv_10_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_10_filters'], kernel_size=\n params['conv_10_kernel'], 
strides=params['conv_10_strides'], padding=\n params['conv_10_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (25832, 26142), True, 'import tensorflow as tf\n'), ((26139, 26175), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (26173, 26175), True, 'import tensorflow as tf\n'), ((26198, 26266), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_5']"}), "(rate=params['spatial_drop_rate_5'])\n", (26230, 26266), True, 'import tensorflow as tf\n'), ((26286, 26619), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_11_filters']", 'kernel_size': "params['conv_11_kernel']", 'strides': "params['conv_11_strides']", 'padding': "params['conv_11_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_11_filters'], kernel_size=\n params['conv_11_kernel'], strides=params['conv_11_strides'], padding=\n params['conv_11_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (26308, 26619), True, 'import tensorflow as tf\n'), ((26619, 26655), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (26653, 26655), True, 'import tensorflow as tf\n'), ((26679, 27012), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_12_filters']", 'kernel_size': "params['conv_12_kernel']", 'strides': "params['conv_12_strides']", 'padding': "params['conv_12_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': 
"params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_12_filters'], kernel_size=\n params['conv_12_kernel'], strides=params['conv_12_strides'], padding=\n params['conv_12_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (26701, 27012), True, 'import tensorflow as tf\n'), ((27009, 27045), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (27043, 27045), True, 'import tensorflow as tf\n'), ((27065, 27086), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (27084, 27086), True, 'import tensorflow as tf\n'), ((27111, 27172), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (27137, 27172), True, 'import tensorflow as tf\n'), ((27197, 27529), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_13_filters']", 'kernel_size': "params['conv_13_kernel']", 'strides': "params['conv_13_strides']", 'padding': "params['conv_13_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_13_filters'], kernel_size=\n params['conv_13_kernel'], strides=params['conv_13_strides'], padding=\n params['conv_13_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (27219, 27529), True, 'import tensorflow as tf\n'), ((27526, 27562), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (27560, 27562), True, 'import tensorflow as tf\n'), ((27585, 27653), 'tensorflow.keras.layers.SpatialDropout2D', 
'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_6']"}), "(rate=params['spatial_drop_rate_6'])\n", (27617, 27653), True, 'import tensorflow as tf\n'), ((27673, 28006), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_14_filters']", 'kernel_size': "params['conv_14_kernel']", 'strides': "params['conv_14_strides']", 'padding': "params['conv_14_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_14_filters'], kernel_size=\n params['conv_14_kernel'], strides=params['conv_14_strides'], padding=\n params['conv_14_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (27695, 28006), True, 'import tensorflow as tf\n'), ((28006, 28042), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (28040, 28042), True, 'import tensorflow as tf\n'), ((28066, 28399), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_15_filters']", 'kernel_size': "params['conv_15_kernel']", 'strides': "params['conv_15_strides']", 'padding': "params['conv_15_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_15_filters'], kernel_size=\n params['conv_15_kernel'], strides=params['conv_15_strides'], padding=\n params['conv_15_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (28088, 28399), True, 'import tensorflow as tf\n'), ((28396, 28432), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (28430, 28432), True, 'import tensorflow as tf\n'), ((28452, 28473), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (28471, 28473), True, 'import tensorflow as tf\n'), ((28498, 28559), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (28524, 28559), True, 'import tensorflow as tf\n'), ((28584, 28916), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_16_filters']", 'kernel_size': "params['conv_15_kernel']", 'strides': "params['conv_16_strides']", 'padding': "params['conv_16_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_16_filters'], kernel_size=\n params['conv_15_kernel'], strides=params['conv_16_strides'], padding=\n params['conv_16_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (28606, 28916), True, 'import tensorflow as tf\n'), ((28913, 28949), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (28947, 28949), True, 'import tensorflow as tf\n'), ((28972, 29040), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_7']"}), "(rate=params['spatial_drop_rate_7'])\n", (29004, 29040), True, 'import tensorflow as tf\n'), ((29060, 29393), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_17_filters']", 'kernel_size': "params['conv_17_kernel']", 'strides': "params['conv_17_strides']", 'padding': "params['conv_17_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': 
"params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_17_filters'], kernel_size=\n params['conv_17_kernel'], strides=params['conv_17_strides'], padding=\n params['conv_17_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (29082, 29393), True, 'import tensorflow as tf\n'), ((29393, 29429), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (29427, 29429), True, 'import tensorflow as tf\n'), ((29453, 29786), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_18_filters']", 'kernel_size': "params['conv_18_kernel']", 'strides': "params['conv_18_strides']", 'padding': "params['conv_18_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_18_filters'], kernel_size=\n params['conv_18_kernel'], strides=params['conv_18_strides'], padding=\n params['conv_18_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (29475, 29786), True, 'import tensorflow as tf\n'), ((29783, 29819), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (29817, 29819), True, 'import tensorflow as tf\n'), ((29839, 29860), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (29858, 29860), True, 'import tensorflow as tf\n'), ((29885, 29946), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (29911, 29946), True, 'import tensorflow as tf\n'), ((29993, 30029), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (30027, 30029), True, 'import tensorflow as tf\n'), ((30048, 30116), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_8']"}), "(rate=params['spatial_drop_rate_8'])\n", (30080, 30116), True, 'import tensorflow as tf\n'), ((30136, 30468), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_19_filters']", 'kernel_size': "params['conv_19_kernel']", 'strides': "params['conv_19_strides']", 'padding': "params['conv_19_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_19_filters'], kernel_size=\n params['conv_19_kernel'], strides=params['conv_19_strides'], padding=\n params['conv_19_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (30158, 30468), True, 'import tensorflow as tf\n'), ((30494, 30519), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (30517, 30519), True, 'import tensorflow as tf\n'), ((30540, 30576), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (30574, 30576), True, 'import tensorflow as tf\n'), ((30602, 30793), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_0_f_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_0_f_units'], activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (30623, 30793), True, 'import tensorflow as tf\n'), ((30801, 30837), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (30835, 30837), True, 'import tensorflow as tf\n'), ((30865, 31056), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_1_f_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_1_f_units'], activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (30886, 31056), True, 'import tensorflow as tf\n'), ((31278, 31314), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (31312, 31314), True, 'import tensorflow as tf\n'), ((31345, 31673), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_0_filters']", 'kernel_size': "params['conv_0_kernel']", 'strides': "params['conv_0_strides']", 'padding': "params['conv_0_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_0_filters'], kernel_size=params\n ['conv_0_kernel'], strides=params['conv_0_strides'], padding=params[\n 'conv_0_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (31367, 31673), True, 'import tensorflow as tf\n'), ((31669, 31705), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (31703, 31705), True, 'import tensorflow as tf\n'), ((31727, 31795), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_0']"}), "(rate=params['spatial_drop_rate_0'])\n", (31759, 31795), True, 'import tensorflow as tf\n'), ((31813, 32141), 'tensorflow.keras.layers.Conv2D', 
'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_1_filters']", 'kernel_size': "params['conv_1_kernel']", 'strides': "params['conv_1_strides']", 'padding': "params['conv_1_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_1_filters'], kernel_size=params\n ['conv_1_kernel'], strides=params['conv_1_strides'], padding=params[\n 'conv_1_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (31835, 32141), True, 'import tensorflow as tf\n'), ((32179, 32279), 'tensorflow.keras.layers.AveragePooling2D', 'tf.keras.layers.AveragePooling2D', ([], {'pool_size': "params['pool_0_size']", 'padding': "params['pool_0_pad']"}), "(pool_size=params['pool_0_size'], padding=\n params['pool_0_pad'])\n", (32211, 32279), True, 'import tensorflow as tf\n'), ((32325, 32361), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (32359, 32361), True, 'import tensorflow as tf\n'), ((32388, 32716), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_2_filters']", 'kernel_size': "params['conv_2_kernel']", 'strides': "params['conv_2_strides']", 'padding': "params['conv_2_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_2_filters'], kernel_size=params\n ['conv_2_kernel'], strides=params['conv_2_strides'], padding=params[\n 'conv_2_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (32410, 32716), True, 'import tensorflow as tf\n'), ((32711, 32747), 
'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (32745, 32747), True, 'import tensorflow as tf\n'), ((32769, 32837), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_1']"}), "(rate=params['spatial_drop_rate_1'])\n", (32801, 32837), True, 'import tensorflow as tf\n'), ((32855, 33184), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_3_filters']", 'kernel_size': "params['conv_3_kernel']", 'strides': "params['conv_3_strides']", 'padding': "params['conv_3_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_3_filters'], kernel_size=params\n ['conv_3_kernel'], strides=params['conv_3_strides'], padding=params[\n 'conv_3_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (32877, 33184), True, 'import tensorflow as tf\n'), ((33183, 33219), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (33217, 33219), True, 'import tensorflow as tf\n'), ((33238, 33259), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (33257, 33259), True, 'import tensorflow as tf\n'), ((33282, 33343), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (33308, 33343), True, 'import tensorflow as tf\n'), ((33367, 33695), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_4_filters']", 'kernel_size': "params['conv_4_kernel']", 'strides': "params['conv_4_strides']", 'padding': "params['conv_4_pad']", 'data_format': 'channel_order', 'activation': 
"params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_4_filters'], kernel_size=params\n ['conv_4_kernel'], strides=params['conv_4_strides'], padding=params[\n 'conv_4_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (33389, 33695), True, 'import tensorflow as tf\n'), ((33691, 33727), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (33725, 33727), True, 'import tensorflow as tf\n'), ((33749, 33817), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_2']"}), "(rate=params['spatial_drop_rate_2'])\n", (33781, 33817), True, 'import tensorflow as tf\n'), ((33835, 34164), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_3_filters']", 'kernel_size': "params['conv_3_kernel']", 'strides': "params['conv_3_strides']", 'padding': "params['conv_3_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_3_filters'], kernel_size=params\n ['conv_3_kernel'], strides=params['conv_3_strides'], padding=params[\n 'conv_3_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (33857, 34164), True, 'import tensorflow as tf\n'), ((34163, 34199), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (34197, 34199), True, 'import tensorflow as tf\n'), ((34218, 34239), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (34237, 34239), True, 'import tensorflow as tf\n'), ((34263, 34324), 
'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (34289, 34324), True, 'import tensorflow as tf\n'), ((34348, 34676), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_6_filters']", 'kernel_size': "params['conv_6_kernel']", 'strides': "params['conv_6_strides']", 'padding': "params['conv_6_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_6_filters'], kernel_size=params\n ['conv_6_kernel'], strides=params['conv_6_strides'], padding=params[\n 'conv_6_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (34370, 34676), True, 'import tensorflow as tf\n'), ((34672, 34708), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (34706, 34708), True, 'import tensorflow as tf\n'), ((34730, 34798), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_3']"}), "(rate=params['spatial_drop_rate_3'])\n", (34762, 34798), True, 'import tensorflow as tf\n'), ((34816, 35145), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_7_filters']", 'kernel_size': "params['conv_7_kernel']", 'strides': "params['conv_7_strides']", 'padding': "params['conv_7_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_7_filters'], kernel_size=params\n ['conv_7_kernel'], strides=params['conv_7_strides'], padding=params[\n 'conv_7_pad'], data_format=channel_order, activation=params[\n 
'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (34838, 35145), True, 'import tensorflow as tf\n'), ((35144, 35180), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (35178, 35180), True, 'import tensorflow as tf\n'), ((35199, 35220), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (35218, 35220), True, 'import tensorflow as tf\n'), ((35244, 35305), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (35270, 35305), True, 'import tensorflow as tf\n'), ((35329, 35657), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_8_filters']", 'kernel_size': "params['conv_8_kernel']", 'strides': "params['conv_8_strides']", 'padding': "params['conv_8_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_8_filters'], kernel_size=params\n ['conv_8_kernel'], strides=params['conv_8_strides'], padding=params[\n 'conv_8_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (35351, 35657), True, 'import tensorflow as tf\n'), ((35653, 35689), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (35687, 35689), True, 'import tensorflow as tf\n'), ((35711, 35779), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_4']"}), "(rate=params['spatial_drop_rate_4'])\n", (35743, 35779), True, 'import tensorflow as tf\n'), ((35797, 36126), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': 
"params['conv_9_filters']", 'kernel_size': "params['conv_9_kernel']", 'strides': "params['conv_9_strides']", 'padding': "params['conv_9_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_9_filters'], kernel_size=params\n ['conv_9_kernel'], strides=params['conv_9_strides'], padding=params[\n 'conv_9_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (35819, 36126), True, 'import tensorflow as tf\n'), ((36126, 36162), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (36160, 36162), True, 'import tensorflow as tf\n'), ((36181, 36202), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (36200, 36202), True, 'import tensorflow as tf\n'), ((36227, 36288), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (36253, 36288), True, 'import tensorflow as tf\n'), ((36313, 36645), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_10_filters']", 'kernel_size': "params['conv_10_kernel']", 'strides': "params['conv_10_strides']", 'padding': "params['conv_10_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_10_filters'], kernel_size=\n params['conv_10_kernel'], strides=params['conv_10_strides'], padding=\n params['conv_10_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (36335, 36645), True, 'import tensorflow as tf\n'), ((36642, 
36678), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (36676, 36678), True, 'import tensorflow as tf\n'), ((36701, 36769), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_5']"}), "(rate=params['spatial_drop_rate_5'])\n", (36733, 36769), True, 'import tensorflow as tf\n'), ((36789, 37122), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_11_filters']", 'kernel_size': "params['conv_11_kernel']", 'strides': "params['conv_11_strides']", 'padding': "params['conv_11_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_11_filters'], kernel_size=\n params['conv_11_kernel'], strides=params['conv_11_strides'], padding=\n params['conv_11_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (36811, 37122), True, 'import tensorflow as tf\n'), ((37122, 37158), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (37156, 37158), True, 'import tensorflow as tf\n'), ((37182, 37515), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_12_filters']", 'kernel_size': "params['conv_12_kernel']", 'strides': "params['conv_12_strides']", 'padding': "params['conv_12_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_12_filters'], kernel_size=\n params['conv_12_kernel'], strides=params['conv_12_strides'], padding=\n params['conv_12_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], 
kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (37204, 37515), True, 'import tensorflow as tf\n'), ((37512, 37548), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (37546, 37548), True, 'import tensorflow as tf\n'), ((37568, 37589), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (37587, 37589), True, 'import tensorflow as tf\n'), ((37614, 37675), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (37640, 37675), True, 'import tensorflow as tf\n'), ((37700, 38032), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_13_filters']", 'kernel_size': "params['conv_13_kernel']", 'strides': "params['conv_13_strides']", 'padding': "params['conv_13_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_13_filters'], kernel_size=\n params['conv_13_kernel'], strides=params['conv_13_strides'], padding=\n params['conv_13_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (37722, 38032), True, 'import tensorflow as tf\n'), ((38029, 38065), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (38063, 38065), True, 'import tensorflow as tf\n'), ((38088, 38156), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_6']"}), "(rate=params['spatial_drop_rate_6'])\n", (38120, 38156), True, 'import tensorflow as tf\n'), ((38176, 38509), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_14_filters']", 
'kernel_size': "params['conv_14_kernel']", 'strides': "params['conv_14_strides']", 'padding': "params['conv_14_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_14_filters'], kernel_size=\n params['conv_14_kernel'], strides=params['conv_14_strides'], padding=\n params['conv_14_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (38198, 38509), True, 'import tensorflow as tf\n'), ((38509, 38545), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (38543, 38545), True, 'import tensorflow as tf\n'), ((38569, 38902), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_15_filters']", 'kernel_size': "params['conv_15_kernel']", 'strides': "params['conv_15_strides']", 'padding': "params['conv_15_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_15_filters'], kernel_size=\n params['conv_15_kernel'], strides=params['conv_15_strides'], padding=\n params['conv_15_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (38591, 38902), True, 'import tensorflow as tf\n'), ((38899, 38935), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (38933, 38935), True, 'import tensorflow as tf\n'), ((38955, 38976), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (38974, 38976), True, 'import tensorflow as tf\n'), ((39001, 39062), 'tensorflow.keras.layers.Activation', 
'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (39027, 39062), True, 'import tensorflow as tf\n'), ((39087, 39419), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_16_filters']", 'kernel_size': "params['conv_15_kernel']", 'strides': "params['conv_16_strides']", 'padding': "params['conv_16_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_16_filters'], kernel_size=\n params['conv_15_kernel'], strides=params['conv_16_strides'], padding=\n params['conv_16_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (39109, 39419), True, 'import tensorflow as tf\n'), ((39416, 39452), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (39450, 39452), True, 'import tensorflow as tf\n'), ((39475, 39543), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_7']"}), "(rate=params['spatial_drop_rate_7'])\n", (39507, 39543), True, 'import tensorflow as tf\n'), ((39563, 39896), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_17_filters']", 'kernel_size': "params['conv_17_kernel']", 'strides': "params['conv_17_strides']", 'padding': "params['conv_17_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_17_filters'], kernel_size=\n params['conv_17_kernel'], strides=params['conv_17_strides'], padding=\n params['conv_17_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], 
kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (39585, 39896), True, 'import tensorflow as tf\n'), ((39896, 39932), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (39930, 39932), True, 'import tensorflow as tf\n'), ((39956, 40289), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_18_filters']", 'kernel_size': "params['conv_18_kernel']", 'strides': "params['conv_18_strides']", 'padding': "params['conv_18_pad']", 'data_format': 'channel_order', 'activation': "params['c_intra_act_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_18_filters'], kernel_size=\n params['conv_18_kernel'], strides=params['conv_18_strides'], padding=\n params['conv_18_pad'], data_format=channel_order, activation=params[\n 'c_intra_act_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (39978, 40289), True, 'import tensorflow as tf\n'), ((40286, 40322), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (40320, 40322), True, 'import tensorflow as tf\n'), ((40342, 40363), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (40361, 40363), True, 'import tensorflow as tf\n'), ((40388, 40449), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (40414, 40449), True, 'import tensorflow as tf\n'), ((40494, 40530), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (40528, 40530), True, 'import tensorflow as tf\n'), ((40549, 40617), 'tensorflow.keras.layers.SpatialDropout2D', 'tf.keras.layers.SpatialDropout2D', ([], {'rate': "params['spatial_drop_rate_8']"}), "(rate=params['spatial_drop_rate_8'])\n", 
(40581, 40617), True, 'import tensorflow as tf\n'), ((40637, 40969), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'filters': "params['conv_19_filters']", 'kernel_size': "params['conv_19_kernel']", 'strides': "params['conv_19_strides']", 'padding': "params['conv_19_pad']", 'data_format': 'channel_order', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['conv_regularizer']"}), "(filters=params['conv_19_filters'], kernel_size=\n params['conv_19_kernel'], strides=params['conv_19_strides'], padding=\n params['conv_19_pad'], data_format=channel_order, activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['conv_regularizer'])\n", (40659, 40969), True, 'import tensorflow as tf\n'), ((40995, 41020), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (41018, 41020), True, 'import tensorflow as tf\n'), ((41041, 41077), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (41075, 41077), True, 'import tensorflow as tf\n'), ((41103, 41294), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_0_f_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_0_f_units'], activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (41124, 41294), True, 'import tensorflow as tf\n'), ((41302, 41338), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (41336, 41338), True, 'import tensorflow as tf\n'), ((41366, 41557), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_1_f_units']", 'activation': "params['activation_fn']", 'kernel_initializer': 
"params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_1_f_units'], activation=params[\n 'activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (41387, 41557), True, 'import tensorflow as tf\n'), ((41634, 41670), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (41668, 41670), True, 'import tensorflow as tf\n'), ((41698, 41892), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_1_cali_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_1_cali_units'], activation=params\n ['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (41719, 41892), True, 'import tensorflow as tf\n'), ((41897, 41933), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (41931, 41933), True, 'import tensorflow as tf\n'), ((41963, 42157), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_2_cali_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_2_cali_units'], activation=params\n ['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (41984, 42157), True, 'import tensorflow as tf\n'), ((42162, 42198), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (42196, 42198), True, 'import tensorflow as tf\n'), ((42345, 42366), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (42364, 42366), True, 'import tensorflow as tf\n'), ((42414, 42475), 
'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['c_res_act_fn']"}), "(activation=params['c_res_act_fn'])\n", (42440, 42475), True, 'import tensorflow as tf\n'), ((42500, 42536), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (42534, 42536), True, 'import tensorflow as tf\n'), ((42573, 42767), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_comb_1_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_comb_1_units'], activation=params\n ['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (42594, 42767), True, 'import tensorflow as tf\n'), ((42773, 42809), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (42807, 42809), True, 'import tensorflow as tf\n'), ((42843, 43037), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_comb_2_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_comb_2_units'], activation=params\n ['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (42864, 43037), True, 'import tensorflow as tf\n'), ((43164, 43185), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (43183, 43185), True, 'import tensorflow as tf\n'), ((43236, 43297), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (43262, 43297), True, 'import tensorflow as tf\n'), ((43321, 43357), 'tensorflow.keras.layers.BatchNormalization', 
'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (43355, 43357), True, 'import tensorflow as tf\n'), ((43397, 43593), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_comb_v_1_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_comb_v_1_units'], activation=\n params['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (43418, 43593), True, 'import tensorflow as tf\n'), ((43599, 43635), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (43633, 43635), True, 'import tensorflow as tf\n'), ((43671, 43867), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['dense_comb_v_2_units']", 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=params['dense_comb_v_2_units'], activation=\n params['activation_fn'], kernel_initializer=params['kern_init'],\n activity_regularizer=params['dense_regularizer_1'])\n", (43692, 43867), True, 'import tensorflow as tf\n'), ((43946, 43967), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (43965, 43967), True, 'import tensorflow as tf\n'), ((44007, 44068), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (44033, 44068), True, 'import tensorflow as tf\n'), ((44094, 44261), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(6)', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=6, activation=params['activation_fn'],\n kernel_initializer=params['kern_init'], 
activity_regularizer=params[\n 'dense_regularizer_1'])\n", (44115, 44261), True, 'import tensorflow as tf\n'), ((44275, 44311), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (44309, 44311), True, 'import tensorflow as tf\n'), ((44339, 44506), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(6)', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=6, activation=params['activation_fn'],\n kernel_initializer=params['kern_init'], activity_regularizer=params[\n 'dense_regularizer_1'])\n", (44360, 44506), True, 'import tensorflow as tf\n'), ((44520, 44541), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (44539, 44541), True, 'import tensorflow as tf\n'), ((44571, 44632), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (44597, 44632), True, 'import tensorflow as tf\n'), ((44655, 44822), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(6)', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=6, activation=params['activation_fn'],\n kernel_initializer=params['kern_init'], activity_regularizer=params[\n 'dense_regularizer_1'])\n", (44676, 44822), True, 'import tensorflow as tf\n'), ((44829, 44865), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (44863, 44865), True, 'import tensorflow as tf\n'), ((44893, 45060), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(6)', 'activation': "params['activation_fn']", 'kernel_initializer': "params['kern_init']", 'activity_regularizer': "params['dense_regularizer_1']"}), "(units=6, activation=params['activation_fn'],\n 
kernel_initializer=params['kern_init'], activity_regularizer=params[\n 'dense_regularizer_1'])\n", (44914, 45060), True, 'import tensorflow as tf\n'), ((45074, 45095), 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), '()\n', (45093, 45095), True, 'import tensorflow as tf\n'), ((45126, 45187), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': "params['v_res_act_fn']"}), "(activation=params['v_res_act_fn'])\n", (45152, 45187), True, 'import tensorflow as tf\n'), ((45366, 45530), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': "params['main_output_units']", 'activation': "params['main_output_act']", 'kernel_initializer': "params['kern_init']", 'name': '"""main_output"""'}), "(units=params['main_output_units'], activation=params[\n 'main_output_act'], kernel_initializer=params['kern_init'], name=\n 'main_output')\n", (45387, 45530), True, 'import tensorflow as tf\n'), ((46098, 46161), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/voxels_pad.h5py"""'], {}), "('~/fluoro/data/compilation/voxels_pad.h5py')\n", (46116, 46161), False, 'import os\n'), ((46224, 46283), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/images.h5py"""'], {}), "('~/fluoro/data/compilation/images.h5py')\n", (46242, 46283), False, 'import os\n'), ((46352, 46411), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/labels.h5py"""'], {}), "('~/fluoro/data/compilation/labels.h5py')\n", (46370, 46411), False, 'import os\n'), ((46480, 46544), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/data/compilation/calibration.h5py"""'], {}), "('~/fluoro/data/compilation/calibration.h5py')\n", (46498, 46544), False, 'import os\n'), ((47450, 47521), 'os.path.join', 'os.path.join', (['save_dir', "(expr_name + '_hist_objects_' + expr_no + '.pkl')"], {}), "(save_dir, expr_name + '_hist_objects_' + expr_no + '.pkl')\n", (47462, 47521), False, 'import 
os\n'), ((243, 295), 'os.path.expanduser', 'os.path.expanduser', (['"""~/fluoro/code/jupyt/vox_fluoro"""'], {}), "('~/fluoro/code/jupyt/vox_fluoro')\n", (261, 295), False, 'import os\n'), ((572, 615), 'os.path.join', 'os.path.join', (['base_dir', '"""labels_stats.h5py"""'], {}), "(base_dir, 'labels_stats.h5py')\n", (584, 615), False, 'import os\n'), ((45911, 45969), 'os.path.join', 'os.path.join', (['save_dir', "(expr_name + '_' + expr_no + '.png')"], {}), "(save_dir, expr_name + '_' + expr_no + '.png')\n", (45923, 45969), False, 'import os\n'), ((46712, 46762), 'numpy.random.choice', 'np.random.choice', (['shape'], {'size': 'shape', 'replace': '(False)'}), '(shape, size=shape, replace=False)\n', (46728, 46762), True, 'import numpy as np\n'), ((46800, 46859), 'numpy.random.choice', 'np.random.choice', (['shape'], {'size': 'num_of_samples', 'replace': '(False)'}), '(shape, size=num_of_samples, replace=False)\n', (46816, 46859), True, 'import numpy as np\n'), ((48893, 48950), 'os.path.join', 'os.path.join', (['save_dir', "(expr_name + '_' + expr_no + '.h5')"], {}), "(save_dir, expr_name + '_' + expr_no + '.h5')\n", (48905, 48950), False, 'import os\n'), ((864, 904), 'tensorflow.keras.backend.square', 'tf.keras.backend.square', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (887, 904), True, 'import tensorflow as tf\n'), ((20708, 20749), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['frame'], {}), '(frame)\n', (20742, 20749), True, 'import tensorflow as tf\n'), ((31211, 31252), 'tensorflow.image.per_image_standardization', 'tf.image.per_image_standardization', (['frame'], {}), '(frame)\n', (31245, 31252), True, 'import tensorflow as tf\n'), ((48346, 48384), 'numpy.expand_dims', 'np.expand_dims', (['vox_mat_train'], {'axis': '(-1)'}), '(vox_mat_train, axis=-1)\n', (48360, 48384), True, 'import numpy as np\n'), ((48404, 48456), 'numpy.expand_dims', 'np.expand_dims', (['image_mat_train[:, 0, :, :]'], {'axis': '(-1)'}), 
'(image_mat_train[:, 0, :, :], axis=-1)\n', (48418, 48456), True, 'import numpy as np\n'), ((48476, 48528), 'numpy.expand_dims', 'np.expand_dims', (['image_mat_train[:, 1, :, :]'], {'axis': '(-1)'}), '(image_mat_train[:, 1, :, :], axis=-1)\n', (48490, 48528), True, 'import numpy as np\n'), ((48598, 48634), 'numpy.expand_dims', 'np.expand_dims', (['vox_mat_val'], {'axis': '(-1)'}), '(vox_mat_val, axis=-1)\n', (48612, 48634), True, 'import numpy as np\n'), ((48636, 48686), 'numpy.expand_dims', 'np.expand_dims', (['image_mat_val[:, 0, :, :]'], {'axis': '(-1)'}), '(image_mat_val[:, 0, :, :], axis=-1)\n', (48650, 48686), True, 'import numpy as np\n'), ((48688, 48738), 'numpy.expand_dims', 'np.expand_dims', (['image_mat_val[:, 1, :, :]'], {'axis': '(-1)'}), '(image_mat_val[:, 1, :, :], axis=-1)\n', (48702, 48738), True, 'import numpy as np\n')] |
# Copyright 2016 <NAME>, alexggmatthews
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from . import likelihoods
from ._settings import settings
from .mean_functions import Zero
from .model import GPModel
from .param import Param, DataHolder
# Short alias for the float dtype configured in GPflow's settings
# (used repeatedly below, e.g. tf.eye(..., dtype=float_type)).
float_type = settings.dtypes.float_type
class SGPR(GPModel):
    """
    Sparse Variational GP regression. The key reference is

    ::

      @inproceedings{titsias2009variational,
        title={Variational learning of inducing variables in
               sparse Gaussian processes},
        author={Titsias, Michalis K.},
        booktitle={International Conference on
                   Artificial Intelligence and Statistics},
        pages={567--574},
        year={2009}
      }
    """

    def __init__(self, X, Y, kern, Z, mean_function=Zero()):
        """
        Build the collapsed-bound sparse GP regression model.

        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate GPflow objects

        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X, on_shape_change='pass')
        Y = DataHolder(Y, on_shape_change='pass')
        # The collapsed (Titsias) bound is only tractable for a Gaussian
        # likelihood, so it is fixed here rather than passed in.
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.Z = Param(Z)  # inducing inputs; optimized alongside the kernel params
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]

    def build_likelihood(self):
        """
        Construct a tensorflow function to compute the bound on the marginal
        likelihood. For a derivation of the terms in here, see the associated
        SGPR notebook.
        """
        num_inducing = tf.shape(self.Z)[0]
        num_data = tf.cast(tf.shape(self.Y)[0], settings.dtypes.float_type)
        output_dim = tf.cast(tf.shape(self.Y)[1], settings.dtypes.float_type)

        err = self.Y - self.mean_function(self.X)  # residuals after subtracting the mean function
        Kdiag = self.kern.Kdiag(self.X)
        Kuf = self.kern.Kzx(self.Z, self.X)
        # Diagonal jitter keeps the Cholesky of Kuu numerically stable.
        Kuu = self.kern.Kzz(self.Z) + tf.eye(num_inducing, dtype=float_type) * settings.numerics.jitter_level
        L = tf.cholesky(Kuu)
        sigma = tf.sqrt(self.likelihood.variance)

        # Compute intermediate matrices
        # A = L^{-1} Kuf / sigma, so AAT = L^{-1} Kuf Kfu L^{-T} / sigma^2.
        A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
        AAT = tf.matmul(A, A, transpose_b=True)
        B = AAT + tf.eye(num_inducing, dtype=float_type)
        LB = tf.cholesky(B)
        Aerr = tf.matmul(A, err)
        c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma

        # compute log marginal bound
        bound = -0.5 * num_data * output_dim * np.log(2 * np.pi)
        # sum(log(diag(LB))) = 0.5 * log det(B); one copy per output dimension.
        bound += - output_dim * tf.reduce_sum(tf.log(tf.diag_part(LB)))
        bound -= 0.5 * num_data * output_dim * tf.log(self.likelihood.variance)
        bound += -0.5 * tf.reduce_sum(tf.square(err)) / self.likelihood.variance
        bound += 0.5 * tf.reduce_sum(tf.square(c))
        # Trace terms: -0.5/sigma^2 * (tr(Kff) - tr(Qff)) per output dimension.
        bound += -0.5 * output_dim * tf.reduce_sum(Kdiag) / self.likelihood.variance
        bound += 0.5 * output_dim * tf.reduce_sum(tf.diag_part(AAT))

        return bound

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew. For a derivation of the terms in here, see the associated SGPR
        notebook.
        """
        num_inducing = tf.shape(self.Z)[0]
        err = self.Y - self.mean_function(self.X)
        Kuf = self.kern.Kzx(self.Z, self.X)
        # Same jittered Kuu and B = I + A A^T factorization as in build_likelihood.
        Kuu = self.kern.Kzz(self.Z) + tf.eye(num_inducing, dtype=float_type) * settings.numerics.jitter_level
        Kus = self.kern.Kzx(self.Z, Xnew)
        sigma = tf.sqrt(self.likelihood.variance)
        L = tf.cholesky(Kuu)
        A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
        B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=float_type)
        LB = tf.cholesky(B)
        Aerr = tf.matmul(A, err)
        c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
        tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
        tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
        mean = tf.matmul(tmp2, c, transpose_a=True)
        if full_cov:
            # Full predictive covariance, tiled once per output dimension.
            var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
                - tf.matmul(tmp1, tmp1, transpose_a=True)
            shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 2), shape)
        else:
            # Marginal (diagonal) predictive variance only.
            var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
                - tf.reduce_sum(tf.square(tmp1), 0)
            shape = tf.stack([1, tf.shape(self.Y)[1]])
            var = tf.tile(tf.expand_dims(var, 1), shape)
        return mean + self.mean_function(Xnew), var
class GPRFITC(GPModel):
    def __init__(self, X, Y, kern, Z, mean_function=Zero()):
        """
        This implements GP regression with the FITC approximation.
        The key reference is

        @inproceedings{Snelson06sparsegaussian,
          author = {Edward Snelson and Zoubin Ghahramani},
          title = {Sparse Gaussian Processes using Pseudo-inputs},
          booktitle = {Advances In Neural Information Processing Systems },
          year = {2006},
          pages = {1257--1264},
          publisher = {MIT press}
        }

        Implementation loosely based on code from GPML matlab library although
        obviously gradients are automatic in GPflow.

        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate GPflow objects

        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X, on_shape_change='pass')
        Y = DataHolder(Y, on_shape_change='pass')
        # FITC's closed-form expressions below require a Gaussian likelihood.
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.Z = Param(Z)  # inducing inputs; optimized alongside the kernel params
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]

    def build_common_terms(self):
        # Factorizations shared by build_likelihood and build_predict.
        num_inducing = tf.shape(self.Z)[0]
        err = self.Y - self.mean_function(self.X)  # size N x R
        Kdiag = self.kern.Kdiag(self.X)
        Kuf = self.kern.Kzx(self.Z, self.X)
        # Diagonal jitter keeps the Cholesky of Kuu numerically stable.
        Kuu = self.kern.Kzz(self.Z) + tf.eye(num_inducing, dtype=float_type) * settings.numerics.jitter_level

        Luu = tf.cholesky(Kuu)  # => Luu Luu^T = Kuu
        V = tf.matrix_triangular_solve(Luu, Kuf)  # => V^T V = Qff = Kuf^T Kuu^-1 Kuf

        diagQff = tf.reduce_sum(tf.square(V), 0)
        # FITC heteroscedastic noise: nu_i = Kff_ii - Qff_ii + sigma^2, size N.
        nu = Kdiag - diagQff + self.likelihood.variance

        B = tf.eye(num_inducing, dtype=float_type) + tf.matmul(V / nu, V, transpose_b=True)
        L = tf.cholesky(B)
        beta = err / tf.expand_dims(nu, 1)  # size N x R
        alpha = tf.matmul(V, beta)  # size M x R
        gamma = tf.matrix_triangular_solve(L, alpha, lower=True)  # size M x R

        return err, nu, Luu, L, alpha, beta, gamma

    def build_likelihood(self):
        """
        Construct a tensorflow function to compute the bound on the marginal
        likelihood.
        """
        # FITC approximation to the log marginal likelihood is
        # log ( normal( y | mean, K_fitc ) )
        # where K_fitc = Qff + diag( \nu )
        # where Qff = Kfu Kuu^{-1} Kuf
        # with \nu_i = Kff_{i,i} - Qff_{i,i} + \sigma^2

        # We need to compute the Mahalanobis term -0.5* err^T K_fitc^{-1} err
        # (summed over functions).

        # We need to deal with the matrix inverse term.
        # K_fitc^{-1} = ( Qff + \diag( \nu ) )^{-1}
        #             = ( V^T V + \diag( \nu ) )^{-1}
        # Applying the Woodbury identity we obtain
        #             = \diag( \nu^{-1} ) - \diag( \nu^{-1} ) V^T ( I + V \diag( \nu^{-1} ) V^T )^{-1) V \diag(\nu^{-1} )
        # Let \beta = \diag( \nu^{-1} ) err
        # and let \alpha = V \beta
        # then Mahalanobis term = -0.5* ( \beta^T err - \alpha^T Solve( I + V \diag( \nu^{-1} ) V^T, alpha ) )
        err, nu, Luu, L, alpha, beta, gamma = self.build_common_terms()

        mahalanobisTerm = -0.5 * tf.reduce_sum(tf.square(err) / tf.expand_dims(nu, 1)) \
                          + 0.5 * tf.reduce_sum(tf.square(gamma))

        # We need to compute the log normalizing term -N/2 \log 2 pi - 0.5 \log \det( K_fitc )

        # We need to deal with the log determinant term.
        # \log \det( K_fitc ) = \log \det( Qff + \diag( \nu ) )
        #                     = \log \det( V^T V + \diag( \nu ) )
        # Applying the determinant lemma we obtain
        #                     = \log [ \det \diag( \nu ) \det( I + V \diag( \nu^{-1} ) V^T ) ]
        #                     = \log [ \det \diag( \nu ) ] + \log [ \det( I + V \diag( \nu^{-1} ) V^T ) ]
        constantTerm = -0.5 * self.num_data * tf.log(tf.constant(2. * np.pi, settings.dtypes.float_type))
        logDeterminantTerm = -0.5 * tf.reduce_sum(tf.log(nu)) - tf.reduce_sum(tf.log(tf.diag_part(L)))
        logNormalizingTerm = constantTerm + logDeterminantTerm

        # The bound is per latent function; scale the normalizer accordingly.
        return mahalanobisTerm + logNormalizingTerm * self.num_latent

    def build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points
        Xnew.
        """
        _, _, Luu, L, _, _, gamma = self.build_common_terms()
        Kus = self.kern.Kzx(self.Z, Xnew)  # size M x Xnew

        w = tf.matrix_triangular_solve(Luu, Kus, lower=True)  # size M x Xnew

        # Predictive mean: w^T B^{-1} alpha, via the two triangular solves of L.
        tmp = tf.matrix_triangular_solve(tf.transpose(L), gamma, lower=False)
        mean = tf.matmul(w, tmp, transpose_a=True) + self.mean_function(Xnew)
        intermediateA = tf.matrix_triangular_solve(L, w, lower=True)

        if full_cov:
            # Full predictive covariance, tiled once per output dimension.
            var = self.kern.K(Xnew) - tf.matmul(w, w, transpose_a=True) \
                + tf.matmul(intermediateA, intermediateA, transpose_a=True)
            var = tf.tile(tf.expand_dims(var, 2), tf.stack([1, 1, tf.shape(self.Y)[1]]))
        else:
            # Marginal (diagonal) predictive variance only.
            var = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(w), 0) \
                + tf.reduce_sum(tf.square(intermediateA), 0)  # size Xnew,
            var = tf.tile(tf.expand_dims(var, 1), tf.stack([1, tf.shape(self.Y)[1]]))

        return mean, var
| [
"tensorflow.eye",
"tensorflow.shape",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.sqrt",
"tensorflow.constant",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.expand_dims",
"tensorflow.cholesky",
"tensorflow.diag_part",
"tensorflow.log",
"tensorflow.mat... | [((2654, 2670), 'tensorflow.cholesky', 'tf.cholesky', (['Kuu'], {}), '(Kuu)\n', (2665, 2670), True, 'import tensorflow as tf\n'), ((2687, 2720), 'tensorflow.sqrt', 'tf.sqrt', (['self.likelihood.variance'], {}), '(self.likelihood.variance)\n', (2694, 2720), True, 'import tensorflow as tf\n'), ((2843, 2876), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'transpose_b': '(True)'}), '(A, A, transpose_b=True)\n', (2852, 2876), True, 'import tensorflow as tf\n'), ((2947, 2961), 'tensorflow.cholesky', 'tf.cholesky', (['B'], {}), '(B)\n', (2958, 2961), True, 'import tensorflow as tf\n'), ((2977, 2994), 'tensorflow.matmul', 'tf.matmul', (['A', 'err'], {}), '(A, err)\n', (2986, 2994), True, 'import tensorflow as tf\n'), ((4183, 4216), 'tensorflow.sqrt', 'tf.sqrt', (['self.likelihood.variance'], {}), '(self.likelihood.variance)\n', (4190, 4216), True, 'import tensorflow as tf\n'), ((4229, 4245), 'tensorflow.cholesky', 'tf.cholesky', (['Kuu'], {}), '(Kuu)\n', (4240, 4245), True, 'import tensorflow as tf\n'), ((4413, 4427), 'tensorflow.cholesky', 'tf.cholesky', (['B'], {}), '(B)\n', (4424, 4427), True, 'import tensorflow as tf\n'), ((4443, 4460), 'tensorflow.matmul', 'tf.matmul', (['A', 'err'], {}), '(A, err)\n', (4452, 4460), True, 'import tensorflow as tf\n'), ((4545, 4591), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['L', 'Kus'], {'lower': '(True)'}), '(L, Kus, lower=True)\n', (4571, 4591), True, 'import tensorflow as tf\n'), ((4607, 4655), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['LB', 'tmp1'], {'lower': '(True)'}), '(LB, tmp1, lower=True)\n', (4633, 4655), True, 'import tensorflow as tf\n'), ((4671, 4707), 'tensorflow.matmul', 'tf.matmul', (['tmp2', 'c'], {'transpose_a': '(True)'}), '(tmp2, c, transpose_a=True)\n', (4680, 4707), True, 'import tensorflow as tf\n'), ((6868, 6884), 'tensorflow.cholesky', 'tf.cholesky', (['Kuu'], {}), '(Kuu)\n', (6879, 6884), True, 'import tensorflow as tf\n'), 
((6919, 6955), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu', 'Kuf'], {}), '(Luu, Kuf)\n', (6945, 6955), True, 'import tensorflow as tf\n'), ((7204, 7218), 'tensorflow.cholesky', 'tf.cholesky', (['B'], {}), '(B)\n', (7215, 7218), True, 'import tensorflow as tf\n'), ((7292, 7310), 'tensorflow.matmul', 'tf.matmul', (['V', 'beta'], {}), '(V, beta)\n', (7301, 7310), True, 'import tensorflow as tf\n'), ((7342, 7390), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['L', 'alpha'], {'lower': '(True)'}), '(L, alpha, lower=True)\n', (7368, 7390), True, 'import tensorflow as tf\n'), ((9908, 9956), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['Luu', 'Kus'], {'lower': '(True)'}), '(Luu, Kus, lower=True)\n', (9934, 9956), True, 'import tensorflow as tf\n'), ((10155, 10199), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['L', 'w'], {'lower': '(True)'}), '(L, w, lower=True)\n', (10181, 10199), True, 'import tensorflow as tf\n'), ((2223, 2239), 'tensorflow.shape', 'tf.shape', (['self.Z'], {}), '(self.Z)\n', (2231, 2239), True, 'import tensorflow as tf\n'), ((2774, 2820), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['L', 'Kuf'], {'lower': '(True)'}), '(L, Kuf, lower=True)\n', (2800, 2820), True, 'import tensorflow as tf\n'), ((2895, 2933), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (2901, 2933), True, 'import tensorflow as tf\n'), ((3007, 3055), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['LB', 'Aerr'], {'lower': '(True)'}), '(LB, Aerr, lower=True)\n', (3033, 3055), True, 'import tensorflow as tf\n'), ((3149, 3166), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3155, 3166), True, 'import numpy as np\n'), ((3286, 3318), 'tensorflow.log', 'tf.log', (['self.likelihood.variance'], {}), '(self.likelihood.variance)\n', (3292, 3318), True, 'import 
tensorflow as tf\n'), ((3901, 3917), 'tensorflow.shape', 'tf.shape', (['self.Z'], {}), '(self.Z)\n', (3909, 3917), True, 'import tensorflow as tf\n'), ((4258, 4304), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['L', 'Kuf'], {'lower': '(True)'}), '(L, Kuf, lower=True)\n', (4284, 4304), True, 'import tensorflow as tf\n'), ((4325, 4358), 'tensorflow.matmul', 'tf.matmul', (['A', 'A'], {'transpose_b': '(True)'}), '(A, A, transpose_b=True)\n', (4334, 4358), True, 'import tensorflow as tf\n'), ((4361, 4399), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (4367, 4399), True, 'import tensorflow as tf\n'), ((4473, 4521), 'tensorflow.matrix_triangular_solve', 'tf.matrix_triangular_solve', (['LB', 'Aerr'], {'lower': '(True)'}), '(LB, Aerr, lower=True)\n', (4499, 4521), True, 'import tensorflow as tf\n'), ((6575, 6591), 'tensorflow.shape', 'tf.shape', (['self.Z'], {}), '(self.Z)\n', (6583, 6591), True, 'import tensorflow as tf\n'), ((7026, 7038), 'tensorflow.square', 'tf.square', (['V'], {}), '(V)\n', (7035, 7038), True, 'import tensorflow as tf\n'), ((7112, 7150), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (7118, 7150), True, 'import tensorflow as tf\n'), ((7153, 7191), 'tensorflow.matmul', 'tf.matmul', (['(V / nu)', 'V'], {'transpose_b': '(True)'}), '(V / nu, V, transpose_b=True)\n', (7162, 7191), True, 'import tensorflow as tf\n'), ((7240, 7261), 'tensorflow.expand_dims', 'tf.expand_dims', (['nu', '(1)'], {}), '(nu, 1)\n', (7254, 7261), True, 'import tensorflow as tf\n'), ((10016, 10031), 'tensorflow.transpose', 'tf.transpose', (['L'], {}), '(L)\n', (10028, 10031), True, 'import tensorflow as tf\n'), ((10068, 10103), 'tensorflow.matmul', 'tf.matmul', (['w', 'tmp'], {'transpose_a': '(True)'}), '(w, tmp, transpose_a=True)\n', (10077, 10103), True, 'import tensorflow as tf\n'), ((2270, 2286), 'tensorflow.shape', 'tf.shape', 
(['self.Y'], {}), '(self.Y)\n', (2278, 2286), True, 'import tensorflow as tf\n'), ((2348, 2364), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (2356, 2364), True, 'import tensorflow as tf\n'), ((2570, 2608), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (2576, 2608), True, 'import tensorflow as tf\n'), ((3437, 3449), 'tensorflow.square', 'tf.square', (['c'], {}), '(c)\n', (3446, 3449), True, 'import tensorflow as tf\n'), ((3488, 3508), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['Kdiag'], {}), '(Kdiag)\n', (3501, 3508), True, 'import tensorflow as tf\n'), ((3586, 3603), 'tensorflow.diag_part', 'tf.diag_part', (['AAT'], {}), '(AAT)\n', (3598, 3603), True, 'import tensorflow as tf\n'), ((4053, 4091), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (4059, 4091), True, 'import tensorflow as tf\n'), ((4829, 4868), 'tensorflow.matmul', 'tf.matmul', (['tmp1', 'tmp1'], {'transpose_a': '(True)'}), '(tmp1, tmp1, transpose_a=True)\n', (4838, 4868), True, 'import tensorflow as tf\n'), ((4953, 4975), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(2)'], {}), '(var, 2)\n', (4967, 4975), True, 'import tensorflow as tf\n'), ((5211, 5233), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(1)'], {}), '(var, 1)\n', (5225, 5233), True, 'import tensorflow as tf\n'), ((6781, 6819), 'tensorflow.eye', 'tf.eye', (['num_inducing'], {'dtype': 'float_type'}), '(num_inducing, dtype=float_type)\n', (6787, 6819), True, 'import tensorflow as tf\n'), ((9313, 9365), 'tensorflow.constant', 'tf.constant', (['(2.0 * np.pi)', 'settings.dtypes.float_type'], {}), '(2.0 * np.pi, settings.dtypes.float_type)\n', (9324, 9365), True, 'import tensorflow as tf\n'), ((10316, 10373), 'tensorflow.matmul', 'tf.matmul', (['intermediateA', 'intermediateA'], {'transpose_a': '(True)'}), '(intermediateA, intermediateA, transpose_a=True)\n', (10325, 10373), True, 
'import tensorflow as tf\n'), ((10400, 10422), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(2)'], {}), '(var, 2)\n', (10414, 10422), True, 'import tensorflow as tf\n'), ((10655, 10677), 'tensorflow.expand_dims', 'tf.expand_dims', (['var', '(1)'], {}), '(var, 1)\n', (10669, 10677), True, 'import tensorflow as tf\n'), ((3220, 3236), 'tensorflow.diag_part', 'tf.diag_part', (['LB'], {}), '(LB)\n', (3232, 3236), True, 'import tensorflow as tf\n'), ((3357, 3371), 'tensorflow.square', 'tf.square', (['err'], {}), '(err)\n', (3366, 3371), True, 'import tensorflow as tf\n'), ((4767, 4806), 'tensorflow.matmul', 'tf.matmul', (['tmp2', 'tmp2'], {'transpose_a': '(True)'}), '(tmp2, tmp2, transpose_a=True)\n', (4776, 4806), True, 'import tensorflow as tf\n'), ((5110, 5125), 'tensorflow.square', 'tf.square', (['tmp1'], {}), '(tmp1)\n', (5119, 5125), True, 'import tensorflow as tf\n'), ((8708, 8724), 'tensorflow.square', 'tf.square', (['gamma'], {}), '(gamma)\n', (8717, 8724), True, 'import tensorflow as tf\n'), ((9416, 9426), 'tensorflow.log', 'tf.log', (['nu'], {}), '(nu)\n', (9422, 9426), True, 'import tensorflow as tf\n'), ((9451, 9466), 'tensorflow.diag_part', 'tf.diag_part', (['L'], {}), '(L)\n', (9463, 9466), True, 'import tensorflow as tf\n'), ((10260, 10293), 'tensorflow.matmul', 'tf.matmul', (['w', 'w'], {'transpose_a': '(True)'}), '(w, w, transpose_a=True)\n', (10269, 10293), True, 'import tensorflow as tf\n'), ((10586, 10610), 'tensorflow.square', 'tf.square', (['intermediateA'], {}), '(intermediateA)\n', (10595, 10610), True, 'import tensorflow as tf\n'), ((4905, 4921), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (4913, 4921), True, 'import tensorflow as tf\n'), ((5054, 5069), 'tensorflow.square', 'tf.square', (['tmp2'], {}), '(tmp2)\n', (5063, 5069), True, 'import tensorflow as tf\n'), ((5163, 5179), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (5171, 5179), True, 'import tensorflow as tf\n'), ((8618, 8632), 
'tensorflow.square', 'tf.square', (['err'], {}), '(err)\n', (8627, 8632), True, 'import tensorflow as tf\n'), ((8635, 8656), 'tensorflow.expand_dims', 'tf.expand_dims', (['nu', '(1)'], {}), '(nu, 1)\n', (8649, 8656), True, 'import tensorflow as tf\n'), ((10533, 10545), 'tensorflow.square', 'tf.square', (['w'], {}), '(w)\n', (10542, 10545), True, 'import tensorflow as tf\n'), ((10440, 10456), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (10448, 10456), True, 'import tensorflow as tf\n'), ((10692, 10708), 'tensorflow.shape', 'tf.shape', (['self.Y'], {}), '(self.Y)\n', (10700, 10708), True, 'import tensorflow as tf\n')] |
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
import json
import pickle
import numpy as np
import time
def get_data():
    """Load training samples from data/train.json.

    The file maps class labels (string keys) to lists of feature vectors.
    Returns a flat (X, Y) pair where Y holds the integer label of the
    corresponding sample in X.
    """
    with open('data/train.json', 'r') as handle:
        payload = json.load(handle)
        print('Loaded Data')
    # Flatten the label -> samples mapping; both comprehensions walk the
    # dict in the same (insertion) order, so X and Y stay aligned.
    X = [sample for samples in payload.values() for sample in samples]
    Y = [int(label) for label, samples in payload.items() for _ in samples]
    return X, Y
def main():
    """Grid-search a linear SVC over the training data and pickle the winner."""
    X, Y = get_data()
    # Candidate hyper-parameters; the RBF variants are deliberately disabled.
    param_grid = [
        {'C': [1, 10, 100, 1000],
         'kernel': ['linear']},
        # {'C': [1, 10, 100, 1000],
        #  'gamma': [0.001, 0.0001],
        #  'kernel': ['rbf']}
    ]
    print(len(X[0]))
    started = time.time()
    search = GridSearchCV(SVC(C=1, probability=True), param_grid, cv=5)
    search.fit(X, Y)
    finished = time.time()
    print('Training Time: ', finished - started)
    # Sanity check: score one training sample and show the gold label.
    probs = search.predict_proba([X[20]])[0]
    best = np.argmax(probs)
    print(probs[best], best)
    print(Y[20])
    with open('data/classifier.pkl', 'wb') as out:
        pickle.dump(search, out)


if __name__ == '__main__':
    main()
"pickle.dump",
"numpy.argmax",
"json.load",
"time.time",
"sklearn.svm.SVC"
] | [((703, 714), 'time.time', 'time.time', ([], {}), '()\n', (712, 714), False, 'import time\n'), ((814, 825), 'time.time', 'time.time', ([], {}), '()\n', (823, 825), False, 'import time\n'), ((926, 948), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (935, 948), True, 'import numpy as np\n'), ((238, 250), 'json.load', 'json.load', (['f'], {}), '(f)\n', (247, 250), False, 'import json\n'), ((739, 765), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1)', 'probability': '(True)'}), '(C=1, probability=True)\n', (742, 765), False, 'from sklearn.svm import SVC\n'), ((1061, 1080), 'pickle.dump', 'pickle.dump', (['clf', 'f'], {}), '(clf, f)\n', (1072, 1080), False, 'import pickle\n')] |
# PhonopyImporter/CASTEP.py
# ----------------
# Module Docstring
# ----------------
""" Contains routines for working with the CASTEP code. """
# -------
# Imports
# -------
import numpy as np
# ---------
# Functions
# ---------
def ReadPhonon(file_path):
    """
    Parse the CASTEP .phonon file at file_path.

    Arguments:
        file_path -- path to a CASTEP .phonon output file.

    Return value:
        A (params, structure_data, q_point_data) tuple containing the data read in from the .phonon file.
            * params: { num_atoms : int, num_bands : int, num_qpts : int, 'freq_units' : string, 'ir_units' : string, 'raman_units' : string}
            * structure_data: (v_latt, atom_pos, atom_types, atom_mass)
            * q_point_data: [(q_pt, q_wt, freqs, ir_ints, eigenvectors)]

    Notes:
        The expected file layout is validated with assert statements, so a
        malformed file raises AssertionError rather than a parse error.
    """
    # Parameters to capture.
    params, structure_data, q_point_data = None, None, None
    # Read and parse input file.
    with open(file_path, 'r') as input_reader:
        # Read header.
        assert next(input_reader).strip() == "BEGIN header"
        # Read calculation parameters.
        params = { }
        # (line prefix, params key, conversion function or None to keep the raw string).
        capture = [
            ("Number of ions"       , 'num_atoms'  , int ),
            ("Number of branches"   , 'num_bands'  , int ),
            ("Number of wavevectors", 'num_qpts'   , int ),
            ("Frequencies in"       , 'freq_units' , None),
            ("IR intensities in"    , 'ir_units'   , None),
            ("Raman activities in"  , 'raman_units', None)
            ]
        for starts_with, params_key, conv_func in capture:
            line = next(input_reader).strip()
            assert line.startswith(starts_with)
            param = line.replace(starts_with, '').strip()
            if conv_func is not None:
                param = conv_func(param)
            params[params_key] = param
        assert next(input_reader).strip() == "Unit cell vectors (A)"
        # Read lattice vectors.
        v_latt = [
            [float(item) for item in next(input_reader).strip().split()[:3]]
                for i in range(0, 3)
            ]
        assert next(input_reader).strip() == "Fractional Co-ordinates"
        # Read atom data: one (position, type, mass) record per atom.
        atom_data = []
        for i in range(0, params['num_atoms']):
            elements = next(input_reader).strip().split()
            assert len(elements) >= 5 and int(elements[0]) == i + 1
            atom_pos = [float(item) for item in elements[1:4]]
            atom_type = str(elements[4])
            atom_mass = float(elements[5])
            atom_data.append(
                (atom_pos, atom_type, atom_mass)
                )
        assert next(input_reader).strip() == "END header"
        # Read frequencies/eigenvectors for each calculated wavevector.
        q_point_data = []
        for i in range(0, params['num_qpts']):
            # Read wavevector coordinates and weight.
            elements = next(input_reader).strip().split()
            assert len(elements) >= 6 and elements[0] == "q-pt=" and int(elements[1]) == i + 1
            q = [float(item) for item in elements[2:5]]
            w = float(elements[5])
            # Read frequencies and spectroscopic activities.
            freqs, ir_ints = [], []
            for j in range(0, params['num_bands']):
                elements = next(input_reader).strip().split()
                assert len(elements) >= 3 and int(elements[0]) == j + 1
                freqs.append(
                    float(elements[1])
                    )
                ir_ints.append(
                    float(elements[2])
                    )
            # Read eigenvectors.
            assert next(input_reader).strip() == "Phonon Eigenvectors"
            headers = next(input_reader).strip().split()
            expected_headers = ["Mode", "Ion", "X", "Y", "Z"]
            for j, expected_header in enumerate(expected_headers):
                assert headers[j] == expected_header
            eigenvectors = []
            for j in range(0, params['num_bands']):
                eigenvector = []
                for k in range(0, params['num_atoms']):
                    elements = next(input_reader).strip().split()
                    assert len(elements) >= 8 and int(elements[0]) == j + 1 and int(elements[1]) == k + 1
                    # Columns 2-7 hold three (Re, Im) pairs; the comprehension
                    # variable i is scoped to the comprehension (Python 3) and
                    # does not clobber the outer q-point loop index.
                    eigenvector.append(
                        [float(elements[i]) + 1.0j * float(elements[i + 1]) for i in range(2, 8, 2)]
                        )
                eigenvectors.append(eigenvector)
            q_point_data.append(
                (q, w, freqs, ir_ints, eigenvectors)
                )
    # Reformat data into NumPy arrays.
    v_latt = [np.array(v, dtype = np.float64) for v in v_latt]
    atom_pos = [
        np.array(pos, dtype = np.float64)
            for pos, _, _ in atom_data
        ]
    atom_types = [atom_type for _, atom_type, _ in atom_data]
    atom_mass = [atom_mass for _, _, atom_mass in atom_data]
    structure_data = (v_latt, atom_pos, atom_types, atom_mass)
    for i, (q, w, freqs, ir_ints, eigenvectors) in enumerate(q_point_data):
        q = np.array(q, dtype = np.float64)
        for j, eigenvector in enumerate(eigenvectors):
            eigenvectors[j] = np.array(eigenvector, dtype = np.complex128)
        q_point_data[i] = (q, w, freqs, ir_ints, eigenvectors)
    # Return results.
    return (params, structure_data, q_point_data)
| [
"numpy.array"
] | [((4617, 4646), 'numpy.array', 'np.array', (['v'], {'dtype': 'np.float64'}), '(v, dtype=np.float64)\n', (4625, 4646), True, 'import numpy as np\n'), ((4700, 4731), 'numpy.array', 'np.array', (['pos'], {'dtype': 'np.float64'}), '(pos, dtype=np.float64)\n', (4708, 4731), True, 'import numpy as np\n'), ((5088, 5117), 'numpy.array', 'np.array', (['q'], {'dtype': 'np.float64'}), '(q, dtype=np.float64)\n', (5096, 5117), True, 'import numpy as np\n'), ((5214, 5256), 'numpy.array', 'np.array', (['eigenvector'], {'dtype': 'np.complex128'}), '(eigenvector, dtype=np.complex128)\n', (5222, 5256), True, 'import numpy as np\n')] |
#!/usr/bin/python
"""Plot occupancy curves of each staple type."""
import argparse
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import gridspec
import numpy as np
from matplotlibstyles import styles
from origamipy import plot
from origamipy import utility
def main():
    """Entry point: assemble the staple-occupancy figure and write it to disk."""
    args = parse_args()
    fig = setup_figure()
    grid = gridspec.GridSpec(1, 2, fig, width_ratios=[10, 1], height_ratios=[1])
    ax = fig.add_subplot(grid[0])
    mappable = plot_figure(fig, ax, vars(args))
    setup_axis(ax)
    set_labels(fig, ax, mappable)
    out_filebase = f'{args.plot_dir}/{args.filebase}_staplestates-means'
    save_figure(fig, out_filebase)
def setup_figure():
    """Create a thin-styled 14 x 10 cm figure at 300 dpi with constrained layout."""
    styles.set_thin_style()
    width, height = styles.cm_to_inches(14), styles.cm_to_inches(10)
    return plt.figure(figsize=(width, height), dpi=300, constrained_layout=True)
def plot_figure(f, ax, args):
    """Plot mean staple-state occupancy curves coloured by melting point.

    Args:
        f: Matplotlib figure (kept in the signature for API symmetry).
        ax: Axes the curves are drawn on.
        args: Dict of parsed command-line options (main passes vars(args)).

    Returns:
        The ScalarMappable used to colour the curves, for the colourbar.
    """
    input_dir = args['input_dir']
    filebase = args['filebase']
    stapletypes = args['stapletypes']
    rtag = args['rtag']
    rvalue = args['rvalue']
    xtag = args['xtag']
    contin = args['continuous']
    inp_filebase = f'{input_dir}/{filebase}'
    tagbase = 'staplestates'
    tags = [f'{tagbase}{i}' for i in range(1, stapletypes + 1)]
    aves, stds = plot.read_expectations(inp_filebase)
    temps = aves['temp']
    melting_points = utility.estimate_staple_melting_points(stapletypes, aves, temps)
    # Colour scale spans the range of per-staple melting temperatures.
    min_t = np.min(melting_points)
    max_t = np.max(melting_points)
    cmap = cm.get_cmap('viridis')
    mappable = styles.create_linear_mappable(cmap, min_t, max_t)

    if rtag:
        # BUG FIX: args is a plain dict here, so the previous attribute
        # access (args.rtag) raised AttributeError whenever --rtag was
        # given; use the already-extracted local instead.
        aves = aves[aves[rtag] == rvalue]
        stds = stds[stds[rtag] == rvalue]

    for i, tag in enumerate(tags):
        xvars = aves[xtag]
        color = mappable.to_rgba(melting_points[i])
        darkcolor = styles.darken_color(color[:3], 0.4)
        if contin:
            # Continuous style: shade +/- one standard deviation.
            ax.fill_between(
                xvars, aves[tag] + stds[tag], aves[tag] - stds[tag],
                color='0.8')
        else:
            ax.errorbar(xvars, aves[tag], yerr=stds[tag], color=darkcolor,
                        linestyle='None', marker='o')

        ax.plot(xvars, aves[tag], color=color, marker='None')

    return mappable
def setup_axis(ax):
    """Label the axes: temperature in kelvin against staple state."""
    ax.set(xlabel='$T$ / K', ylabel='Staple state')
def set_labels(f, ax, mappable):
    # Attach a vertical colourbar for the melting-temperature colour scale.
    # ax is unused but kept so all the figure helpers share one signature.
    f.colorbar(mappable, orientation='vertical')
def save_figure(f, plot_filebase):
    """Write the figure as transparent PDF and PNG (pgf output stays disabled)."""
    # f.savefig(plot_filebase + '.pgf', transparent=True)
    for extension in ('.pdf', '.png'):
        f.savefig(plot_filebase + extension, transparent=True)
def parse_args():
    """Build and evaluate this script's command-line interface."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Required positional arguments, in display order.
    parser.add_argument('input_dir', type=str, help='Directory of inputs')
    parser.add_argument('plot_dir', type=str, help='Plot directory')
    parser.add_argument('filebase', type=str, help='Filebase')
    parser.add_argument('stapletypes', type=int, help='Number of staple types')
    # Optional slicing and styling flags.
    parser.add_argument('--xtag', default='temp', type=str,
                        help='Dependent variable tag')
    parser.add_argument('--rtag', type=str, help='Tag to slice on')
    parser.add_argument('--rvalue', type=float, help='Slice value')
    # NOTE(review): type=bool means any non-empty value (even "False")
    # parses as True; an action='store_true' flag would be safer, but that
    # would change the CLI, so the behaviour is preserved here.
    parser.add_argument('--continuous', default=False, type=bool,
                        help='Plot curves as continuous')

    return parser.parse_args()


if __name__ == '__main__':
    main()
| [
"origamipy.plot.read_expectations",
"argparse.ArgumentParser",
"numpy.max",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"matplotlibstyles.styles.set_thin_style",
"numpy.min",
"matplotlibstyles.styles.darken_color",
"matplotlibstyles.styles.cm_to_inches",
"matplotlibstyles.styles.cr... | [((359, 426), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(1)', '(2)', 'f'], {'width_ratios': '[10, 1]', 'height_ratios': '[1]'}), '(1, 2, f, width_ratios=[10, 1], height_ratios=[1])\n', (376, 426), False, 'from matplotlib import gridspec\n'), ((697, 720), 'matplotlibstyles.styles.set_thin_style', 'styles.set_thin_style', ([], {}), '()\n', (718, 720), False, 'from matplotlibstyles import styles\n'), ((798, 859), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(300)', 'constrained_layout': '(True)'}), '(figsize=figsize, dpi=300, constrained_layout=True)\n', (808, 859), True, 'import matplotlib.pyplot as plt\n'), ((1260, 1296), 'origamipy.plot.read_expectations', 'plot.read_expectations', (['inp_filebase'], {}), '(inp_filebase)\n', (1282, 1296), False, 'from origamipy import plot\n'), ((1343, 1407), 'origamipy.utility.estimate_staple_melting_points', 'utility.estimate_staple_melting_points', (['stapletypes', 'aves', 'temps'], {}), '(stapletypes, aves, temps)\n', (1381, 1407), False, 'from origamipy import utility\n'), ((1420, 1442), 'numpy.min', 'np.min', (['melting_points'], {}), '(melting_points)\n', (1426, 1442), True, 'import numpy as np\n'), ((1455, 1477), 'numpy.max', 'np.max', (['melting_points'], {}), '(melting_points)\n', (1461, 1477), True, 'import numpy as np\n'), ((1489, 1511), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1500, 1511), False, 'from matplotlib import cm\n'), ((1527, 1576), 'matplotlibstyles.styles.create_linear_mappable', 'styles.create_linear_mappable', (['cmap', 'min_t', 'max_t'], {}), '(cmap, min_t, max_t)\n', (1556, 1576), False, 'from matplotlibstyles import styles\n'), ((2637, 2740), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (2660, 
2740), False, 'import argparse\n'), ((736, 759), 'matplotlibstyles.styles.cm_to_inches', 'styles.cm_to_inches', (['(14)'], {}), '(14)\n', (755, 759), False, 'from matplotlibstyles import styles\n'), ((761, 784), 'matplotlibstyles.styles.cm_to_inches', 'styles.cm_to_inches', (['(10)'], {}), '(10)\n', (780, 784), False, 'from matplotlibstyles import styles\n'), ((1819, 1854), 'matplotlibstyles.styles.darken_color', 'styles.darken_color', (['color[:3]', '(0.4)'], {}), '(color[:3], 0.4)\n', (1838, 1854), False, 'from matplotlibstyles import styles\n')] |
#! /usr/bin/env python
"""Extract and plot channel long profiles.
Plotting functions to extract and plot channel long profiles.
Call all three functions in sequence from the main code.
The functions will return the long profile nodes, return distances upstream of
those nodes, and plot the long profiles, respectively. The former two are, by
necessity, ragged lists of arrays - the number of channels, then the nodes in
that channel.
This module selects channels by taking the largest possible drainages crossing
the grid boundaries. You can specify how many different channels it handles
using the number_of_channels parameter in the channel_nodes function (default
is 1). This may lead to strange outputs if the drainage structure of the output
changes mid-run (e.g., channel piracy). This may be modified in the future.
"""
# DEJH, March 2014.
from six.moves import range
import numpy
try:
import matplotlib.pyplot as plt
except ImportError:
import warnings
warnings.warn('matplotlib not found', ImportWarning)
def channel_nodes(grid, steepest_nodes, drainage_area, flow_receiver, number_of_channels=1, threshold=None):
    """Trace the largest channels that cross the grid boundary.

    Parameters
    ----------
    grid : model grid
        Must expose ``boundary_nodes`` and ``area_of_cell``.
    steepest_nodes : unused
        Retained for backwards compatibility with the original signature.
    drainage_area : array of float
        Drainage area at each node.
    flow_receiver : array of int
        Downstream receiver node of each node.
    number_of_channels : int, optional
        How many channels (largest terminal drainage areas) to trace.
    threshold : float, optional
        Minimum drainage area for a node to extend a channel; defaults to
        twice the smallest cell area.

    Returns
    -------
    list of numpy arrays
        One array of node IDs per channel, ordered outlet-to-head.
    """
    if threshold is None:
        # Default: stop tracing once drainage falls below ~two cells' worth.
        threshold = 2. * numpy.amin(grid.area_of_cell)
    boundary_nodes = grid.boundary_nodes
    # Seed from the boundary nodes with the largest drainage areas.
    starting_nodes = boundary_nodes[numpy.argsort(
        drainage_area[boundary_nodes])[-number_of_channels:]]

    profile_IDs = []
    for i in starting_nodes:
        j = i
        data_store = []
        while True:
            data_store.append(j)
            # All nodes draining directly into j (the seed node excluded).
            supplying_nodes = numpy.where(flow_receiver == j)[0]
            supplying_nodes = supplying_nodes[
                numpy.where(supplying_nodes != i)]
            if supplying_nodes.size == 0:
                # Dead end: nothing drains into this node. The previous
                # code crashed here (argmax of an empty array).
                break
            max_drainage = numpy.argmax(drainage_area[supplying_nodes])
            if drainage_area[supplying_nodes[max_drainage]] < threshold:
                break
            else:
                j = supplying_nodes[max_drainage]
        profile_IDs.append(numpy.array(data_store))
    return profile_IDs
return profile_IDs
def get_distances_upstream(grid, len_node_arrays, profile_IDs,
                           links_to_flow_receiver):
    """Cumulative along-channel distance of each profile node from its outlet.

    len_node_arrays is unused but kept for signature compatibility.
    Returns one numpy array of distances per profile in profile_IDs, with
    the outlet node at distance zero.
    """
    link_lengths = grid._length_of_link_with_diagonals
    distances_upstream = []
    for profile in profile_IDs:
        running = [0.]
        for node in profile[1:]:
            # Add the length of the link along which this node drains.
            running.append(
                running[-1] + link_lengths[links_to_flow_receiver[node]])
        distances_upstream.append(numpy.array(running))
    return distances_upstream
def plot_profiles(distances_upstream, profile_IDs, elevations):
    """Draw one long-profile line (distance vs. elevation) per channel."""
    for channel, node_ids in enumerate(profile_IDs):
        plt.plot(distances_upstream[channel], elevations[node_ids])
def analyze_channel_network_and_plot(grid, elevations='topographic__elevation',
                                     drainage_area='drainage_area',
                                     flow_receiver='flow__receiver_node',
                                     links_to_flow_receiver='flow__link_to_receiver_node',
                                     number_of_channels=1,
                                     starting_nodes=None,
                                     threshold=None):
    """Extract channel long profiles and plot them in one call.

    Wraps channel_nodes, get_distances_upstream, and plot_profiles. As
    typical elsewhere, the inputs can be field names or nnode-long arrays.

    starting_nodes (optional) is a sequence of node IDs marking the start
    of each profile; when omitted, the profiles with the largest terminal
    drainage areas are used instead.

    Returns a tuple containing:
        - the list of arrays profile_IDs;
        - the list of arrays dists_upstr, the distances from the final,
          lowest node in each network.
    Both lists are number_of_channels long.
    """
    # Resolve each input to an array (inputs may be at-node field names).
    internal_list = [0, 0, 0, 0]
    inputs = (elevations, drainage_area, flow_receiver, links_to_flow_receiver)
    for i in range(4):
        j = inputs[i]
        if isinstance(j, str):
            internal_list[i] = grid.at_node[j]
        else:
            assert j.size == grid.number_of_nodes, \
                "Inputs must be field names or nnode-long numpy arrays!"
            internal_list[i] = j

    if starting_nodes is None:
        # "is None", not "== None": the latter broadcasts element-wise when
        # starting_nodes is a numpy array and then fails the truth test.
        profile_IDs = channel_nodes(grid, None, internal_list[1],
                                    internal_list[2], number_of_channels,
                                    threshold)
    else:
        assert len(starting_nodes) == number_of_channels, \
            "Length of starting_nodes must equal the number_of_channels!"
        if threshold is None:
            threshold = 2. * numpy.amin(grid.area_of_cell)
        profile_IDs = []
        for i in starting_nodes:
            j = i
            data_store = []
            while True:
                data_store.append(j)
                # BUG FIX: the trace must use the resolved receiver array
                # (internal_list[2]); the raw flow_receiver argument may
                # still be a field-name string here.
                supplying_nodes = numpy.where(internal_list[2] == j)[0]
                supplying_nodes = supplying_nodes[
                    numpy.where(supplying_nodes != i)]
                if supplying_nodes.size == 0:
                    # Dead end: nothing drains into this node.
                    break
                max_drainage = numpy.argmax(internal_list[1][supplying_nodes])
                if internal_list[1][supplying_nodes[max_drainage]] < threshold:
                    break
                else:
                    j = supplying_nodes[max_drainage]
            profile_IDs.append(numpy.array(data_store))

    dists_upstr = get_distances_upstream(
        grid, internal_list[1].size, profile_IDs, internal_list[3])
    plot_profiles(dists_upstr, profile_IDs, internal_list[0])
    return (profile_IDs, dists_upstr)
| [
"six.moves.range",
"numpy.amin",
"numpy.where",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.argsort",
"numpy.array",
"warnings.warn"
] | [((4790, 4798), 'six.moves.range', 'range', (['(4)'], {}), '(4)\n', (4795, 4798), False, 'from six.moves import range\n'), ((977, 1029), 'warnings.warn', 'warnings.warn', (['"""matplotlib not found"""', 'ImportWarning'], {}), "('matplotlib not found', ImportWarning)\n", (990, 1029), False, 'import warnings\n'), ((2865, 2919), 'matplotlib.pyplot.plot', 'plt.plot', (['distances_upstream[i]', 'elevations[the_nodes]'], {}), '(distances_upstream[i], elevations[the_nodes])\n', (2873, 2919), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1221), 'numpy.amin', 'numpy.amin', (['grid.area_of_cell'], {}), '(grid.area_of_cell)\n', (1202, 1221), False, 'import numpy\n'), ((1438, 1482), 'numpy.argsort', 'numpy.argsort', (['drainage_area[boundary_nodes]'], {}), '(drainage_area[boundary_nodes])\n', (1451, 1482), False, 'import numpy\n'), ((1844, 1888), 'numpy.argmax', 'numpy.argmax', (['drainage_area[supplying_nodes]'], {}), '(drainage_area[supplying_nodes])\n', (1856, 1888), False, 'import numpy\n'), ((2079, 2102), 'numpy.array', 'numpy.array', (['data_store'], {}), '(data_store)\n', (2090, 2102), False, 'import numpy\n'), ((2663, 2686), 'numpy.array', 'numpy.array', (['data_store'], {}), '(data_store)\n', (2674, 2686), False, 'import numpy\n'), ((1684, 1715), 'numpy.where', 'numpy.where', (['(flow_receiver == j)'], {}), '(flow_receiver == j)\n', (1695, 1715), False, 'import numpy\n'), ((1782, 1815), 'numpy.where', 'numpy.where', (['(supplying_nodes != i)'], {}), '(supplying_nodes != i)\n', (1793, 1815), False, 'import numpy\n'), ((5437, 5466), 'numpy.amin', 'numpy.amin', (['grid.area_of_cell'], {}), '(grid.area_of_cell)\n', (5447, 5466), False, 'import numpy\n'), ((5835, 5882), 'numpy.argmax', 'numpy.argmax', (['internal_list[1][supplying_nodes]'], {}), '(internal_list[1][supplying_nodes])\n', (5847, 5882), False, 'import numpy\n'), ((6096, 6119), 'numpy.array', 'numpy.array', (['data_store'], {}), '(data_store)\n', (6107, 6119), False, 'import numpy\n'), ((5663, 5694), 
'numpy.where', 'numpy.where', (['(flow_receiver == j)'], {}), '(flow_receiver == j)\n', (5674, 5694), False, 'import numpy\n'), ((5769, 5802), 'numpy.where', 'numpy.where', (['(supplying_nodes != i)'], {}), '(supplying_nodes != i)\n', (5780, 5802), False, 'import numpy\n')] |
import re
import time
import torch
from datetime import timedelta
import numpy as np
from numpy.core.arrayprint import printoptions
import pandas as pd
from config import logger, opt
from transformers import BertTokenizer
from torch.utils.data import Dataset
from pprint import pprint
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
def get_time_dif(start_time):
    """Return the elapsed time since start_time as a whole-second timedelta."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
def parse_data_dialogue(df_data, test=False):
    """Parse an (id, q1, id_sub, q2[, label]) dataframe into per-dialogue lists.

    URLs are replaced by a placeholder token and whitespace runs are
    collapsed; each query/reply pair is jointly truncated so its combined
    length fits within opt.max_length.

    Args:
        df_data: pandas DataFrame with columns id, q1, id_sub, q2[, label].
        test: when True the label column is absent and a dummy 0 is used.

    Returns:
        dict mapping query id -> list of
        {'query': str, 'reply': str, 'label': int} records.
    """
    all_data = dict()
    df_data = df_data.groupby('id', as_index=False)
    for _id, group in df_data:
        for idx, line in group.iterrows():
            query_id = line[0]
            query = line[1].strip()
            query = re.sub(pattern, '链接', query)
            query = re.sub(r'\s+', ' ', query)
            query_id_sub = line[2]
            reply = line[3].strip()
            reply = re.sub(pattern, '链接', reply)
            reply = re.sub(r'\s+', ' ', reply)
            if len(query) == 0 or len(reply) == 0:
                logger.info("query or reply is empty!")
                exit()
            # Test data carries no gold labels; use a dummy 0.
            label = 0 if test else line[4]
            # Jointly truncate the pair: shorten whichever side is longer
            # first, halving both only as a last resort.
            while len(query) + len(reply) > opt.max_length:
                if len(query) <= len(reply) and len(query) <= (opt.max_length // 2):
                    reply = reply[: opt.max_length - len(query)]
                elif len(query) > len(reply) and len(reply) <= (opt.max_length // 2):
                    query = query[: opt.max_length - len(reply)]
                else:
                    query = query[: opt.max_length // 2]
                    reply = reply[: opt.max_length // 2]
            data = {'query': query, 'reply': reply, 'label': label}
            # setdefault collapses the previous duplicated if/else branches.
            all_data.setdefault(query_id, []).append(data)
    return all_data
def parse_data(df_data, test=False):
    """Parse an (id, q1, id_sub, q2[, label]) dataframe into a flat record list.

    Cleans each query/reply (URL placeholder substitution, whitespace
    collapsing), jointly truncates pairs to opt.max_length, and returns a
    list of {'query_id', 'query', 'reply', 'label'} dicts.  Behaviour is
    further gated by opt.drop_duplicates, opt.PET and opt.datareverse.
    """
    # Deduplicate replies per query in the training set.
    if not test and opt.drop_duplicates and not opt.dialogue:
        df_data = df_data.groupby('q1', as_index=False).apply(lambda df: df.drop_duplicates('q2', keep='first'))
        # df_data = df_data.groupby('id', as_index=False).apply(lambda df: df.drop_duplicates('q2', keep=False))
    all_data = []
    # Columns: id q1 id_sub q2 label
    for idx, line in df_data.iterrows():
        try:
            query_id = line[0]
            query = line[1].strip()
            # Replace URLs with a placeholder and collapse whitespace runs.
            query = re.sub(pattern, '链接', query)
            # query = query.replace(' ', '')
            query = re.sub(r'\s+', ' ', query)
            query_id_sub = line[2]
            reply = line[3].strip()
            reply = re.sub(pattern, '链接', reply)
            # reply = reply.replace(' ', '')
            reply = re.sub(r'\s+', ' ', reply)
            # Emoji stripping (disabled).
            # if not test:
            #     query = re.sub(u'[\U00010000-\U0010ffff]', '', query)
            #     reply = re.sub(u'[\U00010000-\U0010ffff]', '', reply)
            # # Drop samples with no CJK/word characters (disabled).
            # # if not re.search('[\w\u4E00-\u9FA5]+', query) or not re.search('[\w\u4E00-\u9FA5]+', reply):
            # #     continue
            # if len(query) == 0 or len(reply) == 0:
            #     continue
            if len(query) == 0 or len(reply) == 0:
                logger.info("query or reply is empty!")
                exit()
            if test:    # Test split carries no labels; use a dummy 0.
                label = 0
            else:
                label = line[4]
            # Jointly truncate the pair to fit within opt.max_length.
            while len(query) + len(reply) > opt.max_length:
                if len(query) <= len(reply) and len(query) <= (opt.max_length // 2):
                    reply = reply[: opt.max_length - len(query)]
                elif len(query) > len(reply) and len(reply) <= (opt.max_length // 2):
                    query = query[: opt.max_length - len(reply)]
                else:
                    query = query[: opt.max_length // 2]
                    reply = reply[: opt.max_length // 2]
            if opt.PET:
                # PET-style prompting: prefix a MASK-based template.
                query = '[MASK]接回答问题:' + query
        except:
            logger.info('{}'.format(line))
            exit()
        data = {'query_id': query_id, 'query': query, 'reply': reply, 'label': label}
        all_data.append(data)
        # Data augmentation: add the query-reply pair in reversed order.
        if not test and opt.datareverse:
            data_reverse = {'query_id': query_id, 'query': reply, 'reply': query, 'label': label}
            all_data.append(data_reverse)
    logger.info('样本数: {}'.format(len(all_data)))
    return all_data
def case_data(df_data):
    """Clean the dataframe and return its columns as five parallel lists.

    Same cleaning/truncation as parse_data, but the output is
    (query_ids, queries, query_id_subs, replies, labels) for case
    inspection rather than a list of record dicts.
    """
    if opt.drop_duplicates and not opt.dialogue:
        df_data = df_data.groupby('q1', as_index=False).apply(lambda df: df.drop_duplicates('q2', keep='first'))
        # df_data = df_data.groupby('id', as_index=False).apply(lambda df: df.drop_duplicates('q2', keep=False))
    query_id_list, query_list, query_id_sub_list, reply_list, label_list = [], [], [], [], []
    # Local URL pattern (shadows the module-level one; same regex).
    pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    # Columns: id q1 id_sub q2 label
    for idx, line in df_data.iterrows():
        try:
            query_id = line[0]
            query = line[1].strip()
            query = re.sub(pattern, '链接', query)
            query = re.sub(r'\s+', ' ', query)
            query_id_sub = line[2]
            reply = line[3].strip()
            reply = re.sub(pattern, '链接', reply)
            reply = re.sub(r'\s+', ' ', reply)
            label = line[4]
            # Emoji stripping (disabled).
            # query = re.sub(u'[\U00010000-\U0010ffff]', '', query)
            # reply = re.sub(u'[\U00010000-\U0010ffff]', '', reply)
            # if len(query) == 0 or len(reply) == 0:
            #     continue
            # Jointly truncate the pair to fit within opt.max_length.
            while len(query) + len(reply) > opt.max_length:
                if len(query) <= len(reply) and len(query) <= (opt.max_length // 2):
                    reply = reply[: opt.max_length - len(query)]
                elif len(query) > len(reply) and len(reply) <= (opt.max_length // 2):
                    query = query[: opt.max_length - len(reply)]
                else:
                    query = query[: opt.max_length // 2]
                    reply = reply[: opt.max_length // 2]
        except:
            logger.info('{}'.format(line))
            exit()
        query_id_list.append(query_id)
        query_list.append(query)
        query_id_sub_list.append(query_id_sub)
        reply_list.append(reply)
        label_list.append(label)
    return query_id_list, query_list, query_id_sub_list, reply_list, label_list
class Tokenizer4Bert(object):
    """Thin wrapper around HuggingFace's BertTokenizer that maps raw text to
    fixed-length id arrays."""

    def __init__(self, max_length, pretrained_bert_name):
        self.tokenizer = BertTokenizer.from_pretrained(pretrained_bert_name)
        self.max_length = max_length

    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Tokenize text and return a padded/truncated array of token ids."""
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        if len(ids) == 0:
            ids = [0]
        if reverse:
            ids = ids[::-1]
        return Tokenizer4Bert.pad_sequence(ids, pad_id=0, maxlen=self.max_length,
                                           padding=padding, truncating=truncating)

    @staticmethod
    def pad_sequence(sequence, pad_id, maxlen, dtype='int64', padding='post', truncating='post'):
        """Clip sequence to maxlen and pad the remainder with pad_id.

        truncating='pre' keeps the tail of an over-long sequence, otherwise
        the head; padding='post' puts the pad ids at the end, otherwise at
        the front.
        """
        padded = numpy.full(maxlen, pad_id, dtype=dtype)
        clipped = sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen]
        clipped = numpy.asarray(clipped, dtype=dtype)
        if padding == 'post':
            padded[:len(clipped)] = clipped
        else:
            padded[-len(clipped):] = clipped
        return padded

    @staticmethod
    def split_text(text):
        """Surround common punctuation and contraction marks with spaces."""
        for ch in ["\'s", "\'ve", "n\'t", "\'re", "\'m", "\'d", "\'ll", ",", ".", "!", "*", "/", "?", "(", ")", "\"", "-", ":"]:
            text = text.replace(ch, " "+ch+" ")
        return text
class BertSentenceDataset(Dataset):
''' PyTorch standard dataset class '''
def __init__(self, df_data, tokenizer, test=False):
data = list()
if opt.dialogue:
parse = parse_data_dialogue
for key, value in parse(df_data, test).items():
dialogue_data = []
dialogue_id = key
for obj in value:
dialogue_pair_indices = tokenizer.text_to_sequence("[CLS] " + obj['query'] + " [SEP] " + obj['reply'] + " [SEP]")
query_indices = tokenizer.text_to_sequence(obj['query'])
reply_indices = tokenizer.text_to_sequence(obj['reply'])
bert_segments_ids = np.asarray([0] * (np.sum(query_indices != 0) + 2) + [1] * (np.sum(reply_indices != 0) + 1))
bert_segments_ids = tokenizer.pad_sequence(bert_segments_ids, 0, tokenizer.max_length)
attention_mask = np.asarray([1] * np.sum(dialogue_pair_indices != 0) + [0] * (opt.max_length - np.sum(dialogue_pair_indices != 0)))
label = obj['label']
dialogue_data.append(
{
'dialogue_pair_indices': dialogue_pair_indices,
'bert_segments_ids': bert_segments_ids,
'attention_mask': attention_mask,
'label': label,
}
)
data.append(dialogue_data)
else:
parse = parse_data
for obj in parse(df_data, test):
dialogue_pair_indices = tokenizer.text_to_sequence("[CLS] " + obj['query'] + " [SEP] " + obj['reply'] + " [SEP]")
dialogue_pair_indices_reverse = tokenizer.text_to_sequence("[CLS] " + obj['reply'] + " [SEP] " + obj['query'] + " [SEP]")
query_indices = tokenizer.text_to_sequence(obj['query'])
reply_indices = tokenizer.text_to_sequence(obj['reply'])
bert_segments_ids = np.asarray([0] * (np.sum(query_indices != 0) + 2) + [1] * (np.sum(reply_indices != 0) + 1))
bert_segments_ids = tokenizer.pad_sequence(bert_segments_ids, 0, tokenizer.max_length)
bert_segments_ids_reverse = np.asarray([0] * (np.sum(reply_indices != 0) + 2) + [1] * (np.sum(query_indices != 0) + 1))
bert_segments_ids_reverse = tokenizer.pad_sequence(bert_segments_ids_reverse, 0, tokenizer.max_length)
attention_mask = np.asarray([1] * np.sum(dialogue_pair_indices != 0) + [0] * (opt.max_length - np.sum(dialogue_pair_indices != 0)))
attention_mask_reverse = np.asarray([1] * np.sum(dialogue_pair_indices_reverse != 0) + [0] * (opt.max_length - np.sum(dialogue_pair_indices_reverse != 0)))
attention_mask_query = np.asarray([1] * np.sum(query_indices != 0) + [0] * (opt.max_length - np.sum(query_indices != 0)))
attention_mask_reply = np.asarray([1] * np.sum(reply_indices != 0) + [0] * (opt.max_length - np.sum(reply_indices != 0)))
label = obj['label']
sub_data = {
'dialogue_pair_indices': dialogue_pair_indices,
'dialogue_pair_indices_reverse': dialogue_pair_indices_reverse,
'bert_segments_ids': bert_segments_ids,
'bert_segments_ids_reverse': bert_segments_ids_reverse,
'attention_mask': attention_mask,
'attention_mask_reverse': attention_mask_reverse,
'query_indices': query_indices,
'reply_indices': reply_indices,
'attention_mask_query': attention_mask_query,
'attention_mask_reply': attention_mask_reply,
'label': label,
}
data.append(sub_data)
self._data = data
def __getitem__(self, index):
return self._data[index]
def __len__(self):
return len(self._data)
def collate_wrapper(batch):
dialogue_pair_indices = torch.LongTensor([item['dialogue_pair_indices'] for item in batch[0]]).detach()
bert_segments_ids = torch.LongTensor([item['bert_segments_ids'] for item in batch[0]]).detach()
attention_mask = torch.LongTensor([item['attention_mask'] for item in batch[0]]).detach()
label = torch.LongTensor([item['label'] for item in batch[0]]).detach()
data = {
'dialogue_pair_indices': dialogue_pair_indices,
'bert_segments_ids': bert_segments_ids,
'attention_mask': attention_mask,
'label': label,
}
return data
if __name__ == '__main__':
# query_path = './data/train/train.query.tsv'
# reply_path = './data/train/train.reply.tsv'
# df_query = pd.read_csv(query_path, sep='\t', header=None, encoding='utf-8', engine='python')
# df_query.columns = ['id', 'q1']
# df_reply = pd.read_csv(reply_path, sep='\t', header=None, encoding='utf-8', engine='python')
# df_reply.columns = ['id', 'id_sub', 'q2', 'label']
# df_reply['q2'] = df_reply['q2'].fillna('好的')
# df_data = df_query.merge(df_reply, how='left')
# df_data = df_data[['id', 'q1', 'id_sub', 'q2', 'label']]
# # X = np.array(df_data.index)
# # y = df_data.loc[:, 'label'].to_numpy()
# # data = parse_data(df_data)
# data = parse_data_dialogue(df_data)
# pprint(data)
pet = "[MASK]接回答问题:"
text = "[CLS]" + pet + "旁边有什么学校吗" + "[SEP]" + "北京邮电大学" + "[SEP]"
token = Tokenizer4Bert(opt.max_length, opt.pretrained_bert_name)
print(token.tokenizer.tokenize(text)) | [
"numpy.ones",
"re.compile",
"torch.LongTensor",
"transformers.BertTokenizer.from_pretrained",
"numpy.asarray",
"config.logger.info",
"numpy.sum",
"re.sub",
"time.time"
] | [((296, 400), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (306, 400), False, 'import re\n'), ((454, 465), 'time.time', 'time.time', ([], {}), '()\n', (463, 465), False, 'import time\n'), ((5313, 5417), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\(\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (5323, 5417), False, 'import re\n'), ((7074, 7125), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['pretrained_bert_name'], {}), '(pretrained_bert_name)\n', (7103, 7125), False, 'from transformers import BertTokenizer\n'), ((8049, 8079), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (8059, 8079), True, 'import numpy as np\n'), ((894, 922), 're.sub', 're.sub', (['pattern', '"""链接"""', 'query'], {}), "(pattern, '链接', query)\n", (900, 922), False, 'import re\n'), ((943, 969), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'query'], {}), "('\\\\s+', ' ', query)\n", (949, 969), False, 'import re\n'), ((1061, 1089), 're.sub', 're.sub', (['pattern', '"""链接"""', 'reply'], {}), "(pattern, '链接', reply)\n", (1067, 1089), False, 'import re\n'), ((1110, 1136), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'reply'], {}), "('\\\\s+', ' ', reply)\n", (1116, 1136), False, 'import re\n'), ((2776, 2804), 're.sub', 're.sub', (['pattern', '"""链接"""', 'query'], {}), "(pattern, '链接', query)\n", (2782, 2804), False, 'import re\n'), ((2870, 2896), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'query'], {}), "('\\\\s+', ' ', query)\n", (2876, 2896), False, 'import re\n'), ((2988, 3016), 're.sub', 're.sub', (['pattern', '"""链接"""', 'reply'], {}), "(pattern, '链接', reply)\n", (2994, 3016), False, 
'import re\n'), ((3082, 3108), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'reply'], {}), "('\\\\s+', ' ', reply)\n", (3088, 3108), False, 'import re\n'), ((5580, 5608), 're.sub', 're.sub', (['pattern', '"""链接"""', 'query'], {}), "(pattern, '链接', query)\n", (5586, 5608), False, 'import re\n'), ((5629, 5655), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'query'], {}), "('\\\\s+', ' ', query)\n", (5635, 5655), False, 'import re\n'), ((5747, 5775), 're.sub', 're.sub', (['pattern', '"""链接"""', 'reply'], {}), "(pattern, '链接', reply)\n", (5753, 5775), False, 'import re\n'), ((5796, 5822), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'reply'], {}), "('\\\\s+', ' ', reply)\n", (5802, 5822), False, 'import re\n'), ((12670, 12740), 'torch.LongTensor', 'torch.LongTensor', (["[item['dialogue_pair_indices'] for item in batch[0]]"], {}), "([item['dialogue_pair_indices'] for item in batch[0]])\n", (12686, 12740), False, 'import torch\n'), ((12774, 12840), 'torch.LongTensor', 'torch.LongTensor', (["[item['bert_segments_ids'] for item in batch[0]]"], {}), "([item['bert_segments_ids'] for item in batch[0]])\n", (12790, 12840), False, 'import torch\n'), ((12871, 12934), 'torch.LongTensor', 'torch.LongTensor', (["[item['attention_mask'] for item in batch[0]]"], {}), "([item['attention_mask'] for item in batch[0]])\n", (12887, 12934), False, 'import torch\n'), ((12956, 13010), 'torch.LongTensor', 'torch.LongTensor', (["[item['label'] for item in batch[0]]"], {}), "([item['label'] for item in batch[0]])\n", (12972, 13010), False, 'import torch\n'), ((1205, 1244), 'config.logger.info', 'logger.info', (['"""query or reply is empty!"""'], {}), "('query or reply is empty!')\n", (1216, 1244), False, 'from config import logger, opt\n'), ((3647, 3686), 'config.logger.info', 'logger.info', (['"""query or reply is empty!"""'], {}), "('query or reply is empty!')\n", (3658, 3686), False, 'from config import logger, opt\n'), ((7839, 7854), 'numpy.ones', 'np.ones', (['maxlen'], {}), 
'(maxlen)\n', (7846, 7854), True, 'import numpy as np\n'), ((11053, 11087), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices != 0)'], {}), '(dialogue_pair_indices != 0)\n', (11059, 11087), True, 'import numpy as np\n'), ((11209, 11251), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices_reverse != 0)'], {}), '(dialogue_pair_indices_reverse != 0)\n', (11215, 11251), True, 'import numpy as np\n'), ((11379, 11405), 'numpy.sum', 'np.sum', (['(query_indices != 0)'], {}), '(query_indices != 0)\n', (11385, 11405), True, 'import numpy as np\n'), ((11517, 11543), 'numpy.sum', 'np.sum', (['(reply_indices != 0)'], {}), '(reply_indices != 0)\n', (11523, 11543), True, 'import numpy as np\n'), ((9463, 9497), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices != 0)'], {}), '(dialogue_pair_indices != 0)\n', (9469, 9497), True, 'import numpy as np\n'), ((10571, 10597), 'numpy.sum', 'np.sum', (['(query_indices != 0)'], {}), '(query_indices != 0)\n', (10577, 10597), True, 'import numpy as np\n'), ((10612, 10638), 'numpy.sum', 'np.sum', (['(reply_indices != 0)'], {}), '(reply_indices != 0)\n', (10618, 10638), True, 'import numpy as np\n'), ((10810, 10836), 'numpy.sum', 'np.sum', (['(reply_indices != 0)'], {}), '(reply_indices != 0)\n', (10816, 10836), True, 'import numpy as np\n'), ((10851, 10877), 'numpy.sum', 'np.sum', (['(query_indices != 0)'], {}), '(query_indices != 0)\n', (10857, 10877), True, 'import numpy as np\n'), ((11114, 11148), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices != 0)'], {}), '(dialogue_pair_indices != 0)\n', (11120, 11148), True, 'import numpy as np\n'), ((11278, 11320), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices_reverse != 0)'], {}), '(dialogue_pair_indices_reverse != 0)\n', (11284, 11320), True, 'import numpy as np\n'), ((11432, 11458), 'numpy.sum', 'np.sum', (['(query_indices != 0)'], {}), '(query_indices != 0)\n', (11438, 11458), True, 'import numpy as np\n'), ((11570, 11596), 'numpy.sum', 'np.sum', (['(reply_indices != 0)'], {}), '(reply_indices != 
0)\n', (11576, 11596), True, 'import numpy as np\n'), ((9228, 9254), 'numpy.sum', 'np.sum', (['(query_indices != 0)'], {}), '(query_indices != 0)\n', (9234, 9254), True, 'import numpy as np\n'), ((9269, 9295), 'numpy.sum', 'np.sum', (['(reply_indices != 0)'], {}), '(reply_indices != 0)\n', (9275, 9295), True, 'import numpy as np\n'), ((9524, 9558), 'numpy.sum', 'np.sum', (['(dialogue_pair_indices != 0)'], {}), '(dialogue_pair_indices != 0)\n', (9530, 9558), True, 'import numpy as np\n')] |
import pytest
from keras.preprocessing import image
from PIL import Image
import numpy as np
import os
import shutil
import tempfile
class TestImage:
def setup_class(cls):
img_w = img_h = 20
rgb_images = []
gray_images = []
for n in range(8):
bias = np.random.rand(img_w, img_h, 1) * 64
variance = np.random.rand(img_w, img_h, 1) * (255 - 64)
imarray = np.random.rand(img_w, img_h, 3) * variance + bias
im = Image.fromarray(imarray.astype('uint8')).convert('RGB')
rgb_images.append(im)
imarray = np.random.rand(img_w, img_h, 1) * variance + bias
im = Image.fromarray(imarray.astype('uint8').squeeze()).convert('L')
gray_images.append(im)
cls.all_test_images = [rgb_images, gray_images]
def teardown_class(cls):
del cls.all_test_images
def test_image_data_generator(self):
for test_images in self.all_test_images:
img_list = []
for im in test_images:
img_list.append(image.img_to_array(im)[None, ...])
images = np.vstack(img_list)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.5,
zoom_range=0.2,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.5,
horizontal_flip=True,
vertical_flip=True)
generator.fit(images, augment=True)
tmp_folder = tempfile.mkdtemp(prefix='test_images')
for x, y in generator.flow(images, np.arange(images.shape[0]),
shuffle=True, save_to_dir=tmp_folder):
assert x.shape[1:] == images.shape[1:]
break
shutil.rmtree(tmp_folder)
def test_image_data_generator_invalid_data(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='tf')
# Test fit with invalid data
with pytest.raises(ValueError):
x = np.random.random((3, 10, 10))
generator.fit(x)
with pytest.raises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.fit(x)
# Test flow with invalid data
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10, 5))
generator.flow(np.arange(x.shape[0]))
with pytest.raises(ValueError):
x = np.random.random((32, 10, 10))
generator.flow(np.arange(x.shape[0]))
with pytest.raises(ValueError):
x = np.random.random((32, 3, 10, 10))
generator.flow(np.arange(x.shape[0]))
def test_image_data_generator_fit(self):
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='tf')
# Test grayscale
x = np.random.random((32, 10, 10, 1))
generator.fit(x)
# Test RBG
x = np.random.random((32, 10, 10, 3))
generator.fit(x)
generator = image.ImageDataGenerator(
featurewise_center=True,
samplewise_center=True,
featurewise_std_normalization=True,
samplewise_std_normalization=True,
zca_whitening=True,
dim_ordering='th')
# Test grayscale
x = np.random.random((32, 1, 10, 10))
generator.fit(x)
# Test RBG
x = np.random.random((32, 3, 10, 10))
generator.fit(x)
def test_directory_iterator(self):
num_classes = 2
tmp_folder = tempfile.mkdtemp(prefix='test_images')
# create folders and subfolders
paths = []
for cl in range(num_classes):
class_directory = 'class-{}'.format(cl)
classpaths = [
class_directory,
os.path.join(class_directory, 'subfolder-1'),
os.path.join(class_directory, 'subfolder-2'),
os.path.join(class_directory, 'subfolder-1', 'sub-subfolder')
]
for path in classpaths:
os.mkdir(os.path.join(tmp_folder, path))
paths.append(classpaths)
# save the images in the paths
count = 0
filenames = []
for test_images in self.all_test_images:
for im in test_images:
# rotate image class
im_class = count % num_classes
# rotate subfolders
classpaths = paths[im_class]
filename = os.path.join(classpaths[count % len(classpaths)], 'image-{}.jpg'.format(count))
filenames.append(filename)
im.save(os.path.join(tmp_folder, filename))
count += 1
# create iterator
generator = image.ImageDataGenerator()
dir_iterator = generator.flow_from_directory(tmp_folder)
# check number of classes and images
assert(len(dir_iterator.class_indices) == num_classes)
assert(len(dir_iterator.classes) == count)
assert(sorted(dir_iterator.filenames) == sorted(filenames))
shutil.rmtree(tmp_folder)
def test_img_utils(self):
height, width = 10, 8
# Test th dim ordering
x = np.random.random((3, height, width))
img = image.array_to_img(x, dim_ordering='th')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='th')
assert x.shape == (3, height, width)
# Test 2D
x = np.random.random((1, height, width))
img = image.array_to_img(x, dim_ordering='th')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='th')
assert x.shape == (1, height, width)
# Test tf dim ordering
x = np.random.random((height, width, 3))
img = image.array_to_img(x, dim_ordering='tf')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='tf')
assert x.shape == (height, width, 3)
# Test 2D
x = np.random.random((height, width, 1))
img = image.array_to_img(x, dim_ordering='tf')
assert img.size == (width, height)
x = image.img_to_array(img, dim_ordering='tf')
assert x.shape == (height, width, 1)
if __name__ == '__main__':
pytest.main([__file__])
| [
"keras.preprocessing.image.img_to_array",
"numpy.random.rand",
"numpy.random.random",
"os.path.join",
"keras.preprocessing.image.ImageDataGenerator",
"pytest.main",
"tempfile.mkdtemp",
"numpy.vstack",
"pytest.raises",
"shutil.rmtree",
"keras.preprocessing.image.array_to_img",
"numpy.arange"
] | [((7334, 7357), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (7345, 7357), False, 'import pytest\n'), ((2301, 2492), 'keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {'featurewise_center': '(True)', 'samplewise_center': '(True)', 'featurewise_std_normalization': '(True)', 'samplewise_std_normalization': '(True)', 'zca_whitening': '(True)', 'dim_ordering': '"""tf"""'}), "(featurewise_center=True, samplewise_center=True,\n featurewise_std_normalization=True, samplewise_std_normalization=True,\n zca_whitening=True, dim_ordering='tf')\n", (2325, 2492), False, 'from keras.preprocessing import image\n'), ((3498, 3689), 'keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {'featurewise_center': '(True)', 'samplewise_center': '(True)', 'featurewise_std_normalization': '(True)', 'samplewise_std_normalization': '(True)', 'zca_whitening': '(True)', 'dim_ordering': '"""tf"""'}), "(featurewise_center=True, samplewise_center=True,\n featurewise_std_normalization=True, samplewise_std_normalization=True,\n zca_whitening=True, dim_ordering='tf')\n", (3522, 3689), False, 'from keras.preprocessing import image\n'), ((3800, 3833), 'numpy.random.random', 'np.random.random', (['(32, 10, 10, 1)'], {}), '((32, 10, 10, 1))\n', (3816, 3833), True, 'import numpy as np\n'), ((3893, 3926), 'numpy.random.random', 'np.random.random', (['(32, 10, 10, 3)'], {}), '((32, 10, 10, 3))\n', (3909, 3926), True, 'import numpy as np\n'), ((3974, 4165), 'keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {'featurewise_center': '(True)', 'samplewise_center': '(True)', 'featurewise_std_normalization': '(True)', 'samplewise_std_normalization': '(True)', 'zca_whitening': '(True)', 'dim_ordering': '"""th"""'}), "(featurewise_center=True, samplewise_center=True,\n featurewise_std_normalization=True, samplewise_std_normalization=True,\n zca_whitening=True, dim_ordering='th')\n", (3998, 4165), 
False, 'from keras.preprocessing import image\n'), ((4276, 4309), 'numpy.random.random', 'np.random.random', (['(32, 1, 10, 10)'], {}), '((32, 1, 10, 10))\n', (4292, 4309), True, 'import numpy as np\n'), ((4369, 4402), 'numpy.random.random', 'np.random.random', (['(32, 3, 10, 10)'], {}), '((32, 3, 10, 10))\n', (4385, 4402), True, 'import numpy as np\n'), ((4518, 4556), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""test_images"""'}), "(prefix='test_images')\n", (4534, 4556), False, 'import tempfile\n'), ((5758, 5784), 'keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {}), '()\n', (5782, 5784), False, 'from keras.preprocessing import image\n'), ((6093, 6118), 'shutil.rmtree', 'shutil.rmtree', (['tmp_folder'], {}), '(tmp_folder)\n', (6106, 6118), False, 'import shutil\n'), ((6230, 6266), 'numpy.random.random', 'np.random.random', (['(3, height, width)'], {}), '((3, height, width))\n', (6246, 6266), True, 'import numpy as np\n'), ((6282, 6322), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['x'], {'dim_ordering': '"""th"""'}), "(x, dim_ordering='th')\n", (6300, 6322), False, 'from keras.preprocessing import image\n'), ((6380, 6422), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {'dim_ordering': '"""th"""'}), "(img, dim_ordering='th')\n", (6398, 6422), False, 'from keras.preprocessing import image\n'), ((6501, 6537), 'numpy.random.random', 'np.random.random', (['(1, height, width)'], {}), '((1, height, width))\n', (6517, 6537), True, 'import numpy as np\n'), ((6553, 6593), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['x'], {'dim_ordering': '"""th"""'}), "(x, dim_ordering='th')\n", (6571, 6593), False, 'from keras.preprocessing import image\n'), ((6651, 6693), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {'dim_ordering': '"""th"""'}), "(img, dim_ordering='th')\n", (6669, 6693), False, 'from keras.preprocessing import image\n'), 
((6787, 6823), 'numpy.random.random', 'np.random.random', (['(height, width, 3)'], {}), '((height, width, 3))\n', (6803, 6823), True, 'import numpy as np\n'), ((6839, 6879), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['x'], {'dim_ordering': '"""tf"""'}), "(x, dim_ordering='tf')\n", (6857, 6879), False, 'from keras.preprocessing import image\n'), ((6937, 6979), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {'dim_ordering': '"""tf"""'}), "(img, dim_ordering='tf')\n", (6955, 6979), False, 'from keras.preprocessing import image\n'), ((7058, 7094), 'numpy.random.random', 'np.random.random', (['(height, width, 1)'], {}), '((height, width, 1))\n', (7074, 7094), True, 'import numpy as np\n'), ((7110, 7150), 'keras.preprocessing.image.array_to_img', 'image.array_to_img', (['x'], {'dim_ordering': '"""tf"""'}), "(x, dim_ordering='tf')\n", (7128, 7150), False, 'from keras.preprocessing import image\n'), ((7208, 7250), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {'dim_ordering': '"""tf"""'}), "(img, dim_ordering='tf')\n", (7226, 7250), False, 'from keras.preprocessing import image\n'), ((1171, 1190), 'numpy.vstack', 'np.vstack', (['img_list'], {}), '(img_list)\n', (1180, 1190), True, 'import numpy as np\n'), ((1216, 1600), 'keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {'featurewise_center': '(True)', 'samplewise_center': '(True)', 'featurewise_std_normalization': '(True)', 'samplewise_std_normalization': '(True)', 'zca_whitening': '(True)', 'rotation_range': '(90.0)', 'width_shift_range': '(0.1)', 'height_shift_range': '(0.1)', 'shear_range': '(0.5)', 'zoom_range': '(0.2)', 'channel_shift_range': '(0.0)', 'fill_mode': '"""nearest"""', 'cval': '(0.5)', 'horizontal_flip': '(True)', 'vertical_flip': '(True)'}), "(featurewise_center=True, samplewise_center=True,\n featurewise_std_normalization=True, samplewise_std_normalization=True,\n zca_whitening=True, 
rotation_range=90.0, width_shift_range=0.1,\n height_shift_range=0.1, shear_range=0.5, zoom_range=0.2,\n channel_shift_range=0.0, fill_mode='nearest', cval=0.5, horizontal_flip\n =True, vertical_flip=True)\n", (1240, 1600), False, 'from keras.preprocessing import image\n'), ((1911, 1949), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""test_images"""'}), "(prefix='test_images')\n", (1927, 1949), False, 'import tempfile\n'), ((2197, 2222), 'shutil.rmtree', 'shutil.rmtree', (['tmp_folder'], {}), '(tmp_folder)\n', (2210, 2222), False, 'import shutil\n'), ((2616, 2641), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2629, 2641), False, 'import pytest\n'), ((2660, 2689), 'numpy.random.random', 'np.random.random', (['(3, 10, 10)'], {}), '((3, 10, 10))\n', (2676, 2689), True, 'import numpy as np\n'), ((2734, 2759), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2747, 2759), False, 'import pytest\n'), ((2778, 2811), 'numpy.random.random', 'np.random.random', (['(32, 3, 10, 10)'], {}), '((32, 3, 10, 10))\n', (2794, 2811), True, 'import numpy as np\n'), ((2856, 2881), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2869, 2881), False, 'import pytest\n'), ((2900, 2933), 'numpy.random.random', 'np.random.random', (['(32, 10, 10, 5)'], {}), '((32, 10, 10, 5))\n', (2916, 2933), True, 'import numpy as np\n'), ((3017, 3042), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3030, 3042), False, 'import pytest\n'), ((3061, 3094), 'numpy.random.random', 'np.random.random', (['(32, 10, 10, 5)'], {}), '((32, 10, 10, 5))\n', (3077, 3094), True, 'import numpy as np\n'), ((3160, 3185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3173, 3185), False, 'import pytest\n'), ((3204, 3234), 'numpy.random.random', 'np.random.random', (['(32, 10, 10)'], {}), '((32, 10, 10))\n', (3220, 3234), True, 'import numpy as np\n'), ((3300, 3325), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3313, 3325), False, 'import pytest\n'), ((3344, 3377), 'numpy.random.random', 'np.random.random', (['(32, 3, 10, 10)'], {}), '((32, 3, 10, 10))\n', (3360, 3377), True, 'import numpy as np\n'), ((317, 348), 'numpy.random.rand', 'np.random.rand', (['img_w', 'img_h', '(1)'], {}), '(img_w, img_h, 1)\n', (331, 348), True, 'import numpy as np\n'), ((378, 409), 'numpy.random.rand', 'np.random.rand', (['img_w', 'img_h', '(1)'], {}), '(img_w, img_h, 1)\n', (392, 409), True, 'import numpy as np\n'), ((1998, 2024), 'numpy.arange', 'np.arange', (['images.shape[0]'], {}), '(images.shape[0])\n', (2007, 2024), True, 'import numpy as np\n'), ((3123, 3144), 'numpy.arange', 'np.arange', (['x.shape[0]'], {}), '(x.shape[0])\n', (3132, 3144), True, 'import numpy as np\n'), ((3263, 3284), 'numpy.arange', 'np.arange', (['x.shape[0]'], {}), '(x.shape[0])\n', (3272, 3284), True, 'import numpy as np\n'), ((3406, 3427), 'numpy.arange', 'np.arange', (['x.shape[0]'], {}), '(x.shape[0])\n', (3415, 3427), True, 'import numpy as np\n'), ((4791, 4835), 'os.path.join', 'os.path.join', (['class_directory', '"""subfolder-1"""'], {}), "(class_directory, 'subfolder-1')\n", (4803, 4835), False, 'import os\n'), ((4854, 4898), 'os.path.join', 'os.path.join', (['class_directory', '"""subfolder-2"""'], {}), "(class_directory, 'subfolder-2')\n", (4866, 4898), False, 'import os\n'), ((4917, 4978), 'os.path.join', 'os.path.join', (['class_directory', '"""subfolder-1"""', '"""sub-subfolder"""'], {}), "(class_directory, 'subfolder-1', 'sub-subfolder')\n", (4929, 4978), False, 'import os\n'), ((446, 477), 'numpy.random.rand', 'np.random.rand', (['img_w', 'img_h', '(3)'], {}), '(img_w, img_h, 3)\n', (460, 477), True, 'import numpy as np\n'), ((630, 661), 'numpy.random.rand', 'np.random.rand', (['img_w', 'img_h', '(1)'], {}), '(img_w, img_h, 1)\n', (644, 661), True, 'import numpy as np\n'), ((5057, 5087), 'os.path.join', 'os.path.join', 
(['tmp_folder', 'path'], {}), '(tmp_folder, path)\n', (5069, 5087), False, 'import os\n'), ((5644, 5678), 'os.path.join', 'os.path.join', (['tmp_folder', 'filename'], {}), '(tmp_folder, filename)\n', (5656, 5678), False, 'import os\n'), ((1112, 1134), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['im'], {}), '(im)\n', (1130, 1134), False, 'from keras.preprocessing import image\n')] |
import cv2
import numpy as np
#Example -2 (bright) -11(dark)
exposure=-5
#Example -130 (dark) +130(bright)
brightness=0
#Example -130 (dark) +130(bright)
contrast=0
#Example 0 - 500
focus=0
#0 to N (camera index, 0 is the default OS main camera)
camera_id=0
live_feed=False
vid = cv2.VideoCapture(camera_id)
if not vid.isOpened():
raise ValueError('Unable to open video source')
blank_image = np.zeros((200,200,3), np.uint8)
print("Press the following key (lowercase or caps-lock) to change the setting:")
print("1,2,3: Switch to another webcam")
print("c/C : decrease/increase Constrast")
print("b/B : decrease/increase Brightness")
print("f/F : decrease/increase Focus")
print("e/E : decrease/increase Exposure")
print("l/L : hide/show live stream")
print(" s : open DirectShow settings")
print(" q : exit the application")
while(True):
if live_feed:
_, frame = vid.read()
if frame is not None:
cv2.imshow('image',frame)
else:
cv2.imshow('image',blank_image)
frame = None
key = cv2.waitKey(10)
if key == ord('q') or key == ord('Q'):
break
if key == ord('s') or key == ord('S'):
print("Open DirectShow settings")
vid.release()
vid2 = cv2.VideoCapture(camera_id + cv2.CAP_DSHOW)
vid2.set(cv2.CAP_PROP_SETTINGS, 1)
vid2.release()
vid = cv2.VideoCapture(camera_id)
if key == ord('l'):
print(f'hide live video camera')
vid.release()
vid = cv2.VideoCapture(camera_id)
live_feed=False
if key == ord('L'):
print(f'show live video camera (blocked for other processes)')
if vid.isOpened():
vid.release()
vid = cv2.VideoCapture(camera_id)
live_feed=True
if key == ord('E'):
exposure+=0.5
r=vid.set(cv2.CAP_PROP_EXPOSURE, exposure)
print(f'exposure: {exposure}')
if key == ord('e'):
exposure-=0.5
print(f'exposure: {exposure}')
r=vid.set(cv2.CAP_PROP_EXPOSURE, exposure)
if key == ord('B'):
brightness+=10
print(f'brightness: {brightness}')
r=vid.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
if key == ord('b'):
brightness-=10
print(f'brightness: {brightness}')
vid.set(cv2.CAP_PROP_BRIGHTNESS, brightness)
if key == ord('C'):
contrast+=10
print(f'contrast: {contrast}')
vid.set(cv2.CAP_PROP_CONTRAST, contrast)
if key == ord('c'):
contrast-=10
print(f'contrast: {contrast}')
vid.set(cv2.CAP_PROP_CONTRAST, contrast)
if key == ord('F'):
focus+=5
print(f'focus: {focus}')
vid.set(cv2.CAP_PROP_AUTOFOCUS, 0)
vid.set(cv2.CAP_PROP_FOCUS, focus)
if key == ord('f'):
focus-=5
print(f'focus: {focus}')
vid.set(cv2.CAP_PROP_AUTOFOCUS, 0)
vid.set(cv2.CAP_PROP_FOCUS, focus)
if key >= ord('0') and key <= ord('3'):
vid.release()
camera_id=key-ord('0')
vid = cv2.VideoCapture(camera_id)
if not vid.isOpened():
raise ValueError('Unable to open video source')
if vid.isOpened():
vid.release()
cv2.destroyAllWindows()
| [
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey"
] | [((288, 315), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (304, 315), False, 'import cv2\n'), ((402, 435), 'numpy.zeros', 'np.zeros', (['(200, 200, 3)', 'np.uint8'], {}), '((200, 200, 3), np.uint8)\n', (410, 435), True, 'import numpy as np\n'), ((2817, 2840), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2838, 2840), False, 'import cv2\n'), ((1015, 1030), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1026, 1030), False, 'import cv2\n'), ((958, 990), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'blank_image'], {}), "('image', blank_image)\n", (968, 990), False, 'import cv2\n'), ((1180, 1223), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(camera_id + cv2.CAP_DSHOW)'], {}), '(camera_id + cv2.CAP_DSHOW)\n', (1196, 1223), False, 'import cv2\n'), ((1286, 1313), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (1302, 1313), False, 'import cv2\n'), ((1394, 1421), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (1410, 1421), False, 'import cv2\n'), ((1572, 1599), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (1588, 1599), False, 'import cv2\n'), ((2673, 2700), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (2689, 2700), False, 'import cv2\n'), ((923, 949), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'frame'], {}), "('image', frame)\n", (933, 949), False, 'import cv2\n')] |
# coding: utf-8
import os, sys, time, concurrent.futures
import pandas as pd
import numpy as np
import online_node2vec.evaluation.ndcg_computer as ndcgc
import online_node2vec.data.tennis_handler as th
import online_node2vec.data.n2v_embedding_handler as n2veh
output_folder = "../results/"
delta_time = 3600*6
# updater parameters
data_id = "rg17"
def evaluate_embeddings(param_item):
    """Evaluate NDCG for the pre-computed node2vec embeddings of one sample.

    param_item: (sample_id, root_dir_prefix) pair; root_dir_prefix is
        currently unused but kept for interface compatibility with callers.
    Relies on module-level globals (output_folder, data_id, delta_time,
    parameters, total_days, player_labels, eval_window, gen_id_to_account)
    set by the __main__ block.
    Returns the mean NDCG over all evaluated snapshots.
    """
    sample_id, root_dir_prefix = param_item
    ndcg_eval_dir = output_folder + "%s/eval_%i/delta_%i/" % (data_id, sample_id, delta_time)
    # BUGFIX: the file path used to be prefixed with output_folder twice
    # ("../results/../results/..."); ndcg_eval_dir already contains it.
    ndcg_eval_file = ndcg_eval_dir + "%s.csv" % parameters
    features_dir = output_folder + "%s/features_%i/delta_%i/%s" % (data_id, sample_id, delta_time, parameters)
    # load n2v embeddings
    print("\nLoading embeddings...")
    data = n2veh.load_n2v_features(features_dir, delta_time, total_days, player_labels, eval_window, sep=",")
    print(len(data[0]), len(data))
    res_dot = ndcgc.parallel_eval_ndcg(data, gen_id_to_account, "-dot", n_threads=4)
    all_df = pd.concat(res_dot)
    # exist_ok avoids the check-then-create race when samples run in parallel
    os.makedirs(ndcg_eval_dir, exist_ok=True)
    all_df.to_csv(ndcg_eval_file)
    mean_ndcg = all_df["ndcg"].mean()
    print(sample_id, mean_ndcg)
    return mean_ndcg
if __name__ == "__main__":
if len(sys.argv) >= 4:
parameters = sys.argv[1]
num_samples = int(sys.argv[2])
num_threads = int(sys.argv[3])
if len(sys.argv) >= 5:
max_days = int(sys.argv[4])
else:
max_days = None
print(num_samples, num_threads, max_days)
samples = range(num_samples)
START = time.time()
# data
if data_id == "rg17":
total_days = 15 if max_days == None else max_days
elif data_id == "uo17":
total_days = 14 if max_days == None else min(14,max_days)
else:
raise RuntimeError("Invalid dataset!")
root_dirs = ["%s/%s/features_%s/delta_%i" % (output_folder, data_id, sample_id, delta_time) for sample_id in range(num_samples)]
eval_window = delta_time
# load data
gen_id_to_account, player_labels = th.get_data_info("../data/%s_preprocessed" % data_id)
param_items = list(zip(samples, root_dirs))
executor = concurrent.futures.ProcessPoolExecutor(num_threads)
metrics = list(executor.map(evaluate_embeddings, param_items))
print()
print(parameters)
print("### ELAPSED TIME ###")
print("%.2f minutes" % ((time.time()-START) / 60))
print("### METRICS ###")
print(metrics)
print("### PERFORMANCE STATS ###")
print(list(zip(['mean','std','min','max'],[np.mean(metrics), np.std(metrics), np.min(metrics), np.max(metrics)])))
else:
print("Usage: <num_samples> <max_threads> <max_days?>") | [
"os.path.exists",
"numpy.mean",
"os.makedirs",
"numpy.std",
"online_node2vec.evaluation.ndcg_computer.parallel_eval_ndcg",
"numpy.min",
"numpy.max",
"online_node2vec.data.tennis_handler.get_data_info",
"time.time",
"pandas.concat",
"online_node2vec.data.n2v_embedding_handler.load_n2v_features"
] | [((797, 899), 'online_node2vec.data.n2v_embedding_handler.load_n2v_features', 'n2veh.load_n2v_features', (['features_dir', 'delta_time', 'total_days', 'player_labels', 'eval_window'], {'sep': '""","""'}), "(features_dir, delta_time, total_days, player_labels,\n eval_window, sep=',')\n", (820, 899), True, 'import online_node2vec.data.n2v_embedding_handler as n2veh\n'), ((950, 1020), 'online_node2vec.evaluation.ndcg_computer.parallel_eval_ndcg', 'ndcgc.parallel_eval_ndcg', (['data', 'gen_id_to_account', '"""-dot"""'], {'n_threads': '(4)'}), "(data, gen_id_to_account, '-dot', n_threads=4)\n", (974, 1020), True, 'import online_node2vec.evaluation.ndcg_computer as ndcgc\n'), ((1039, 1057), 'pandas.concat', 'pd.concat', (['res_dot'], {}), '(res_dot)\n', (1048, 1057), True, 'import pandas as pd\n'), ((1069, 1098), 'os.path.exists', 'os.path.exists', (['ndcg_eval_dir'], {}), '(ndcg_eval_dir)\n', (1083, 1098), False, 'import os, sys, time, concurrent.futures\n'), ((1108, 1134), 'os.makedirs', 'os.makedirs', (['ndcg_eval_dir'], {}), '(ndcg_eval_dir)\n', (1119, 1134), False, 'import os, sys, time, concurrent.futures\n'), ((1642, 1653), 'time.time', 'time.time', ([], {}), '()\n', (1651, 1653), False, 'import os, sys, time, concurrent.futures\n'), ((2163, 2216), 'online_node2vec.data.tennis_handler.get_data_info', 'th.get_data_info', (["('../data/%s_preprocessed' % data_id)"], {}), "('../data/%s_preprocessed' % data_id)\n", (2179, 2216), True, 'import online_node2vec.data.tennis_handler as th\n'), ((2526, 2537), 'time.time', 'time.time', ([], {}), '()\n', (2535, 2537), False, 'import os, sys, time, concurrent.futures\n'), ((2702, 2718), 'numpy.mean', 'np.mean', (['metrics'], {}), '(metrics)\n', (2709, 2718), True, 'import numpy as np\n'), ((2720, 2735), 'numpy.std', 'np.std', (['metrics'], {}), '(metrics)\n', (2726, 2735), True, 'import numpy as np\n'), ((2737, 2752), 'numpy.min', 'np.min', (['metrics'], {}), '(metrics)\n', (2743, 2752), True, 'import numpy as np\n'), 
((2754, 2769), 'numpy.max', 'np.max', (['metrics'], {}), '(metrics)\n', (2760, 2769), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import gensim.downloader as api
import re
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
class process_txt:
    """Text preprocessing helper: cleaning, word2vec embedding, label encoding."""

    def __init__(self):
        # Downloads/loads the pre-trained Google News vectors (large; I/O heavy).
        print("Loading pre-trained Word2Vec model...")
        self.model = api.load("word2vec-google-news-300")
        self.le = preprocessing.LabelEncoder()

    def clean_line(self, line):
        """Lower-case ``line`` and keep only ASCII letters separated by single spaces."""
        clean_line = ""
        line = line.replace("’", "")
        line = line.replace("'", "")
        line = line.replace("-", " ")  # replace hyphens with spaces
        line = line.replace("\t", " ")
        line = line.replace("\n", " ")
        line = line.lower()
        for char in line:
            if char in "qwertyuiopasdfghjklzxcvbnm ":
                clean_line += char
            else:
                clean_line += " "
        clean_line = re.sub(" +", " ", clean_line)  # delete extra spaces
        # BUGFIX: ``clean_line[0]`` raised IndexError on an empty string.
        # After the collapse above there is at most one leading space, so
        # lstrip is equivalent for non-empty input and safe for "".
        clean_line = clean_line.lstrip(" ")
        return clean_line

    def preprocess(self, txt):
        """Apply ``clean_line`` to every element of a pandas Series."""
        txt = txt.apply(lambda x: self.clean_line(x))
        return txt

    def filter_text(self, raw_text):
        """
        Excluding unknown words and get corresponding token
        """
        raw_text = raw_text.split()
        return list(filter(lambda x: x in self.model.vocab, raw_text))

    def transform_text(self, txt):
        """Average the word vectors of the in-vocabulary tokens of ``txt``.

        Returns a zero vector when no token is in the vocabulary.
        """
        tokens = self.filter_text(txt)
        if not tokens:
            return np.zeros(self.model.vector_size)
        text_vector = np.mean(self.model[tokens], axis=0)
        return np.array(text_vector)

    def label_encoder(self, y_train):
        """Fit the label encoder on ``y_train`` and return the encoded labels."""
        return self.le.fit_transform(y_train)
class KNNClassifier():
    """K-nearest-neighbour text classifier over averaged word2vec vectors."""

    def __init__(self):
        # Shared preprocessing pipeline (cleaning + embedding + label encoding).
        self.preprocess = process_txt()

    def fit(self, X_train, y_train):
        """Fit a KNN model; ``k`` is the size of the smallest class."""
        cleaned = self.preprocess.preprocess(X_train)
        vectors = cleaned.apply(lambda doc: self.preprocess.transform_text(doc)).values
        encoded = self.preprocess.label_encoder(y_train)
        _, class_counts = np.unique(encoded, return_counts=True)
        k = min(class_counts)
        model = KNeighborsClassifier(n_neighbors=k, p=2)
        model.fit(list(vectors), encoded)
        self.clf = model

    def predict(self, X_test):
        """Predict original (string) labels for the texts in ``X_test``."""
        cleaned = self.preprocess.preprocess(X_test)
        vectors = [self.preprocess.transform_text(doc) for doc in cleaned]
        encoded_pred = self.clf.predict(vectors)
        return self.preprocess.le.inverse_transform(encoded_pred)
| [
"numpy.mean",
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"sklearn.neighbors.KNeighborsClassifier",
"gensim.downloader.load",
"numpy.array",
"numpy.zeros",
"re.sub"
] | [((341, 377), 'gensim.downloader.load', 'api.load', (['"""word2vec-google-news-300"""'], {}), "('word2vec-google-news-300')\n", (349, 377), True, 'import gensim.downloader as api\n'), ((396, 424), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (422, 424), False, 'from sklearn import preprocessing\n'), ((923, 952), 're.sub', 're.sub', (['""" +"""', '""" """', 'clean_line'], {}), "(' +', ' ', clean_line)\n", (929, 952), False, 'import re\n'), ((1589, 1624), 'numpy.mean', 'np.mean', (['self.model[tokens]'], {'axis': '(0)'}), '(self.model[tokens], axis=0)\n', (1596, 1624), True, 'import numpy as np\n'), ((1641, 1662), 'numpy.array', 'np.array', (['text_vector'], {}), '(text_vector)\n', (1649, 1662), True, 'import numpy as np\n'), ((2100, 2138), 'numpy.unique', 'np.unique', (['y_train'], {'return_counts': '(True)'}), '(y_train, return_counts=True)\n', (2109, 2138), True, 'import numpy as np\n'), ((2187, 2237), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'sample_size', 'p': '(2)'}), '(n_neighbors=sample_size, p=2)\n', (2207, 2237), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1533, 1565), 'numpy.zeros', 'np.zeros', (['self.model.vector_size'], {}), '(self.model.vector_size)\n', (1541, 1565), True, 'import numpy as np\n')] |
import torch
import numpy as np
def fit(train_loader, val_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=None,
        start_epoch=0):
    """
    Loaders, model, loss function and metrics should work together for a given task,
    i.e. The model should be able to process data output of loaders,
    loss function should process target output of loaders and outputs from the model
    Examples: Classification: batch loader, classification model, NLL loss, accuracy metric
    Siamese network: Siamese loader, siamese model, contrastive loss
    Online triplet learning: batch loader, embedding model, online triplet loss

    metrics: optional list of metric objects, updated in place every epoch.
    """
    # BUGFIX: the previous default ``metrics=[]`` was a mutable default
    # argument shared across calls to ``fit``; use None as the sentinel.
    if metrics is None:
        metrics = []

    # Fast-forward the LR scheduler when resuming from a checkpoint.
    for epoch in range(0, start_epoch):
        scheduler.step()

    for epoch in range(start_epoch, n_epochs):
        # NOTE(review): stepping the scheduler before the optimizer matches
        # pre-1.1 PyTorch semantics; kept as-is to preserve behavior.
        scheduler.step()

        # Train stage
        # duplicates - train_loss is already in metric
        train_loss, metrics = train_epoch(
            train_loader, model, loss_fn, optimizer, cuda, log_interval, metrics)

        message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(
            epoch + 1, n_epochs, train_loss)
        for metric in metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())

        # duplicates - val_loss is already in metric
        val_loss, metrics = test_epoch(
            val_loader, model, loss_fn, cuda, metrics)
        # test_epoch returns the summed batch losses; average them here.
        val_loss /= len(val_loader)

        message += '\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, n_epochs,
                                                                                 val_loss)
        for metric in metrics:
            message += '\t{}: {}'.format(metric.name(), metric.value())

        print(message)
def train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metrics):
    """Run one training epoch.

    Returns (average loss over all batches, metrics). ``metrics`` objects are
    reset at the start and updated after every batch.
    """
    for metric in metrics:
        metric.reset()

    model.train()
    # ``losses`` holds only the losses since the last log line; ``total_loss``
    # accumulates over the whole epoch.
    losses = []
    total_loss = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        # An empty target sequence marks an unsupervised batch.
        target = target if len(target) > 0 else None
        # Normalize ``data`` to a tuple so models with multiple inputs work.
        if not type(data) in (tuple, list):
            data = (data,)
        if cuda:
            data = tuple(d.cuda() for d in data)
            if target is not None:
                target = target.cuda()

        optimizer.zero_grad()
        outputs = model(*data)

        if type(outputs) not in (tuple, list):
            outputs = (outputs,)

        # Loss receives the model outputs followed by the target (if any).
        loss_inputs = outputs
        if target is not None:
            target = (target,)
            loss_inputs += target

        loss_outputs = loss_fn(*loss_inputs)
        # is this case even possible?
        loss = loss_outputs[0] if type(loss_outputs) in (
            tuple, list) else loss_outputs
        losses.append(loss.item())
        total_loss += loss.item()
        loss.backward()
        optimizer.step()

        for metric in metrics:
            metric(outputs, target, loss_outputs)

        # check message edge case, batch_idx is 0 indexed
        if batch_idx % log_interval == 0:
            message = 'Train: [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                batch_idx * len(data[0]), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), np.mean(losses))
            for metric in metrics:
                message += '\t{}: {}'.format(metric.name(), metric.value())

            print(message)
            # Reset the window so each log line reports a fresh interval mean.
            losses = []

    # NOTE(review): assumes the loader yields at least one batch; an empty
    # loader would leave ``batch_idx`` undefined here.
    total_loss /= (batch_idx + 1)
    return total_loss, metrics
def test_epoch(val_loader, model, loss_fn, cuda, metrics):
with torch.no_grad():
for metric in metrics:
metric.reset()
model.eval()
val_loss = 0
for batch_idx, (data, target) in enumerate(val_loader):
target = target if len(target) > 0 else None
if not type(data) in (tuple, list):
data = (data,)
if cuda:
data = tuple(d.cuda() for d in data)
if target is not None:
target = target.cuda()
outputs = model(*data)
if type(outputs) not in (tuple, list):
outputs = (outputs,)
loss_inputs = outputs
if target is not None:
target = (target,)
loss_inputs += target
loss_outputs = loss_fn(*loss_inputs)
loss = loss_outputs[0] if type(loss_outputs) in (
tuple, list) else loss_outputs
val_loss += loss.item()
for metric in metrics:
metric(outputs, target, loss_outputs)
return val_loss, metrics
| [
"torch.no_grad",
"numpy.mean"
] | [((3527, 3542), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3540, 3542), False, 'import torch\n'), ((3210, 3225), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3217, 3225), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
from asdf.versioning import AsdfVersion
from astropy.modeling.bounding_box import ModelBoundingBox, CompoundBoundingBox
from astropy.modeling import mappings
from astropy.modeling import functional_models
from astropy.modeling.core import CompoundModel
from astropy.io.misc.asdf.types import AstropyAsdfType, AstropyType
from . import _parameter_to_value
__all__ = ['TransformType', 'IdentityType', 'ConstantType']
class TransformType(AstropyAsdfType):
    """Base ASDF (de)serializer for astropy transforms/models.

    Subclasses implement ``from_tree_transform``/``to_tree_transform``; this
    base class (de)serializes the members shared by every transform node:
    name, inputs/outputs, bounding boxes, inverse and parameter constraints.
    """
    version = '1.2.0'
    requires = ['astropy']

    @classmethod
    def _from_tree_base_transform_members(cls, model, node, ctx):
        # Restore the members common to all transforms from the tree node.
        if 'name' in node:
            model.name = node['name']
        if "inputs" in node:
            model.inputs = tuple(node["inputs"])
        if "outputs" in node:
            model.outputs = tuple(node["outputs"])
        if 'bounding_box' in node:
            model.bounding_box = node['bounding_box']
        elif 'selector_args' in node:
            # Compound bounding box: rebuild the mapping from selector-key
            # tuples to their per-key bounding boxes, then validate it.
            cbbox_keys = [tuple(key) for key in node['cbbox_keys']]
            bbox_dict = dict(zip(cbbox_keys, node['cbbox_values']))

            selector_args = node['selector_args']
            model.bounding_box = CompoundBoundingBox.validate(model, bbox_dict, selector_args)
        param_and_model_constraints = {}
        for constraint in ['fixed', 'bounds']:
            if constraint in node:
                param_and_model_constraints[constraint] = node[constraint]
        model._initialize_constraints(param_and_model_constraints)
        # Yield before assigning the inverse so a self-referencing inverse
        # node can be resolved by the caller first (generator-based hook).
        yield model

        if 'inverse' in node:
            model.inverse = node['inverse']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        # Subclass hook: build the concrete model from the tree node.
        raise NotImplementedError(
            "Must be implemented in TransformType subclasses")

    @classmethod
    def from_tree(cls, node, ctx):
        model = cls.from_tree_transform(node, ctx)
        return cls._from_tree_base_transform_members(model, node, ctx)

    @classmethod
    def _to_tree_base_transform_members(cls, model, node, ctx):
        # Serialize the members common to all transforms into ``node``.
        if getattr(model, '_user_inverse', None) is not None:
            node['inverse'] = model._user_inverse

        if model.name is not None:
            node['name'] = model.name

        node['inputs'] = list(model.inputs)
        node['outputs'] = list(model.outputs)

        try:
            bb = model.bounding_box
        except NotImplementedError:
            bb = None

        if isinstance(bb, ModelBoundingBox):
            bb = bb.bounding_box(order='C')

            if model.n_inputs == 1:
                bb = list(bb)
            else:
                bb = [list(item) for item in bb]
            node['bounding_box'] = bb
        elif isinstance(bb, CompoundBoundingBox):
            selector_args = [[sa.index, sa.ignore] for sa in bb.selector_args]
            node['selector_args'] = selector_args
            node['cbbox_keys'] = list(bb.bounding_boxes.keys())

            bounding_boxes = list(bb.bounding_boxes.values())
            if len(model.inputs) - len(selector_args) == 1:
                node['cbbox_values'] = [list(sbbox.bounding_box()) for sbbox in bounding_boxes]
            else:
                # Drop non-finite intervals, which cannot be serialized.
                node['cbbox_values'] = [[list(item) for item in sbbox.bounding_box()
                                         if np.isfinite(item[0])] for sbbox in bounding_boxes]

        # model / parameter constraints (CompoundModel delegates to members)
        if not isinstance(model, CompoundModel):
            fixed_nondefaults = {k: f for k, f in model.fixed.items() if f}
            if fixed_nondefaults:
                node['fixed'] = fixed_nondefaults

            bounds_nondefaults = {k: b for k, b in model.bounds.items() if any(b)}
            if bounds_nondefaults:
                node['bounds'] = bounds_nondefaults

        return node

    @classmethod
    def to_tree_transform(cls, model, ctx):
        # Subclass hook: build the tree node from the concrete model.
        raise NotImplementedError("Must be implemented in TransformType subclasses")

    @classmethod
    def to_tree(cls, model, ctx):
        node = cls.to_tree_transform(model, ctx)
        return cls._to_tree_base_transform_members(model, node, ctx)

    @classmethod
    def assert_equal(cls, a, b):
        # TODO: If models become comparable themselves, remove this.
        assert a.name == b.name
        # TODO: Assert inverses are the same
        # assert the bounding_boxes are the same
        assert a.get_bounding_box() == b.get_bounding_box()
        assert a.inputs == b.inputs
        assert a.outputs == b.outputs
class IdentityType(TransformType):
    """(De)serializes ``mappings.Identity`` transforms."""
    name = "transform/identity"
    types = ['astropy.modeling.mappings.Identity']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        # Dimensionality defaults to 1 when absent from the tree.
        n_dims = node.get('n_dims', 1)
        return mappings.Identity(n_dims)

    @classmethod
    def to_tree_transform(cls, data, ctx):
        # Only record the dimensionality when it differs from the default.
        if data.n_inputs == 1:
            return {}
        return {'n_dims': data.n_inputs}

    @classmethod
    def assert_equal(cls, a, b):
        # TODO: If models become comparable themselves, remove this.
        TransformType.assert_equal(a, b)
        assert isinstance(a, mappings.Identity)
        assert isinstance(b, mappings.Identity)
        assert a.n_inputs == b.n_inputs
class ConstantType(TransformType):
    """(De)serializes ``Const1D``/``Const2D`` constant models."""
    name = "transform/constant"
    version = '1.4.0'
    supported_versions = ['1.0.0', '1.1.0', '1.2.0', '1.3.0', '1.4.0']
    types = ['astropy.modeling.functional_models.Const1D',
             'astropy.modeling.functional_models.Const2D']

    @classmethod
    def from_tree_transform(cls, node, ctx):
        if cls.version < AsdfVersion('1.4.0'):
            # The 'dimensions' property was added in 1.4.0,
            # previously all values were 1D.
            return functional_models.Const1D(node['value'])
        elif node['dimensions'] == 1:
            return functional_models.Const1D(node['value'])
        elif node['dimensions'] == 2:
            return functional_models.Const2D(node['value'])
        else:
            # BUGFIX: previously fell through and returned None silently.
            raise ValueError(
                f"Invalid dimensions: {node['dimensions']} (expected 1 or 2)")

    @classmethod
    def to_tree_transform(cls, data, ctx):
        if cls.version < AsdfVersion('1.4.0'):
            if not isinstance(data, functional_models.Const1D):
                raise ValueError(
                    f'constant-{cls.version} does not support models with > 1 dimension')
            return {
                'value': _parameter_to_value(data.amplitude)
            }
        else:
            if isinstance(data, functional_models.Const1D):
                dimension = 1
            elif isinstance(data, functional_models.Const2D):
                dimension = 2
            else:
                # BUGFIX: 'dimension' was left unbound for other model types,
                # producing a confusing NameError; fail explicitly instead.
                raise TypeError(
                    f"Unsupported model type: {type(data).__name__}")
            return {
                'value': _parameter_to_value(data.amplitude),
                'dimensions': dimension
            }
class GenericModel(mappings.Mapping):
    """Pass-through placeholder model with arbitrary input/output arity."""

    def __init__(self, n_inputs, n_outputs):
        # Map every input straight through, unchanged.
        super().__init__(tuple(range(n_inputs)))
        self._n_outputs = n_outputs
        self._outputs = tuple('x' + str(i) for i in range(n_outputs))

    @property
    def inverse(self):
        # A generic placeholder model has no well-defined inverse.
        raise NotImplementedError()
class GenericType(TransformType):
    """(De)serializes :class:`GenericModel` instances."""
    name = "transform/generic"
    types = [GenericModel]

    @classmethod
    def from_tree_transform(cls, node, ctx):
        n_in = node['n_inputs']
        n_out = node['n_outputs']
        return GenericModel(n_in, n_out)

    @classmethod
    def to_tree_transform(cls, data, ctx):
        return {'n_inputs': data.n_inputs, 'n_outputs': data.n_outputs}
class UnitsMappingType(AstropyType):
    """(De)serializes ``mappings.UnitsMapping`` transforms.

    The tree stores one entry per input/output pair: the input's name, unit,
    dimensionless policy and equivalencies, plus the output's name and unit.
    """
    name = "transform/units_mapping"
    version = "1.0.0"
    types = [mappings.UnitsMapping]

    @classmethod
    def to_tree(cls, node, ctx):
        tree = {}

        if node.name is not None:
            tree["name"] = node.name

        inputs = []
        outputs = []
        # node.mapping pairs each input unit (m[0]) with its output unit (m[-1]).
        for i, o, m in zip(node.inputs, node.outputs, node.mapping):
            # NOTE: ``input`` shadows the builtin; kept for byte-compatibility.
            input = {
                "name": i,
                "allow_dimensionless": node.input_units_allow_dimensionless[i],
            }
            if m[0] is not None:
                input["unit"] = m[0]
            if node.input_units_equivalencies is not None and i in node.input_units_equivalencies:
                input["equivalencies"] = node.input_units_equivalencies[i]
            inputs.append(input)

            output = {
                "name": o,
            }
            if m[-1] is not None:
                output["unit"] = m[-1]
            outputs.append(output)

        tree["inputs"] = inputs
        tree["outputs"] = outputs

        return tree

    @classmethod
    def from_tree(cls, tree, ctx):
        # Rebuild the (input unit, output unit) mapping; absent units -> None.
        mapping = tuple((i.get("unit"), o.get("unit"))
                        for i, o in zip(tree["inputs"], tree["outputs"]))

        equivalencies = None
        for i in tree["inputs"]:
            if "equivalencies" in i:
                if equivalencies is None:
                    equivalencies = {}
                equivalencies[i["name"]] = i["equivalencies"]

        kwargs = {
            "input_units_equivalencies": equivalencies,
            "input_units_allow_dimensionless": {
                i["name"]: i.get("allow_dimensionless", False) for i in tree["inputs"]},
        }

        if "name" in tree:
            kwargs["name"] = tree["name"]

        return mappings.UnitsMapping(mapping, **kwargs)
| [
"astropy.modeling.functional_models.Const2D",
"astropy.modeling.functional_models.Const1D",
"numpy.isfinite",
"astropy.modeling.bounding_box.CompoundBoundingBox.validate",
"asdf.versioning.AsdfVersion",
"astropy.modeling.mappings.UnitsMapping"
] | [((9308, 9348), 'astropy.modeling.mappings.UnitsMapping', 'mappings.UnitsMapping', (['mapping'], {}), '(mapping, **kwargs)\n', (9329, 9348), False, 'from astropy.modeling import mappings\n'), ((5651, 5671), 'asdf.versioning.AsdfVersion', 'AsdfVersion', (['"""1.4.0"""'], {}), "('1.4.0')\n", (5662, 5671), False, 'from asdf.versioning import AsdfVersion\n'), ((5797, 5837), 'astropy.modeling.functional_models.Const1D', 'functional_models.Const1D', (["node['value']"], {}), "(node['value'])\n", (5822, 5837), False, 'from astropy.modeling import functional_models\n'), ((6120, 6140), 'asdf.versioning.AsdfVersion', 'AsdfVersion', (['"""1.4.0"""'], {}), "('1.4.0')\n", (6131, 6140), False, 'from asdf.versioning import AsdfVersion\n'), ((1274, 1335), 'astropy.modeling.bounding_box.CompoundBoundingBox.validate', 'CompoundBoundingBox.validate', (['model', 'bbox_dict', 'selector_args'], {}), '(model, bbox_dict, selector_args)\n', (1302, 1335), False, 'from astropy.modeling.bounding_box import ModelBoundingBox, CompoundBoundingBox\n'), ((5895, 5935), 'astropy.modeling.functional_models.Const1D', 'functional_models.Const1D', (["node['value']"], {}), "(node['value'])\n", (5920, 5935), False, 'from astropy.modeling import functional_models\n'), ((5993, 6033), 'astropy.modeling.functional_models.Const2D', 'functional_models.Const2D', (["node['value']"], {}), "(node['value'])\n", (6018, 6033), False, 'from astropy.modeling import functional_models\n'), ((3373, 3393), 'numpy.isfinite', 'np.isfinite', (['item[0]'], {}), '(item[0])\n', (3384, 3393), True, 'import numpy as np\n')] |
import numpy as np
# import cupy as np
# def softmax_cross_entropy(x, y):
# ''' 对输入先进行 softmax 操作后再使用交叉熵求损失 '''
# # softmax forward
# x = x - np.max(x)
# out = np.exp(x) / np.reshape(np.sum(np.exp(x), 1), (x.shape[0], 1))
# loss, dout = cross_entropy(out, y)
# diag = np.zeros((dout.shape[0],dout.shape[1],dout.shape[1]))
# for i in range(diag.shape[0]):
# diag[i, :, :] = np.diag(out[i])
# # 计算梯度 dout reshape to N x C x 1 * (diag - out reshape to N x C x 1 @ out reshape to N x 1 x C (N=1 时就相当于 <EMAIL>)) -> N x C x C (这个矩阵一行是 yi 对每个 x 的导数,一列是每个 y 对 xi 的导数) -> sum -> N x 1 x C = N x C
# dx = np.sum(dout.reshape(dout.shape[0], -1, 1) * (diag - out.reshape((out.shape[0], -1, 1)) @ out.reshape((out.shape[0], 1, -1))), 1)
# return loss, dx
def cross_entropy(pred, y):
    '''
    Cross-entropy loss.

    Args:
        pred: output of a softmax (this function does not apply softmax itself)
        y: correct labels as integer class indices (not one-hot)
    Return:
        loss: the loss value
        dpred: derivative of the loss w.r.t. pred
    '''
    # The loss is -log of the probability of the true class, averaged over
    # the batch; the gradient is -1/p at the true class and 0 elsewhere.
    # BUGFIX: ``np.int`` was removed in NumPy 1.24; builtin ``int`` is the
    # documented replacement and was what ``np.int`` aliased anyway.
    y = y.astype(int)
    # Clamp to avoid log(0) and 1/0 blow-ups on extreme probabilities.
    pred = np.clip(pred, 1e-10, 1)
    rows = np.arange(0, pred.shape[0])
    log_pred = -np.log(pred)
    loss = np.sum(log_pred[rows, y]) / pred.shape[0]
    dpred = np.zeros_like(pred)
    dpred[rows, y] = - pred[rows, y] ** (-1)
    dpred = dpred / pred.shape[0]
    return loss, dpred
def hinge_loss(scores, y):
    '''
    Multi-class hinge (SVM) loss.

    Args:
        scores: (N, C) raw scores from the final fully connected layer
        y: correct labels as integer class indices (not one-hot)
    Return:
        loss: the loss value
        dscores: derivative of the loss w.r.t. scores
    '''
    # BUGFIX: ``np.int`` was removed in NumPy 1.24; use the builtin ``int``.
    y = y.astype(int)
    rows = np.arange(y.shape[0])
    # Pick out s_yi for every sample.
    score_y = scores[rows, y]
    # Margin term s_j - s_yi + 1.
    score_plus_1_minus_y = scores + 1 - score_y.reshape((score_y.shape[0], 1))
    loss_array = np.maximum(0, score_plus_1_minus_y)
    loss_array[rows, y] = 0
    # Normalizing by N*C only rescales loss and gradient; it does not change
    # the shape of the loss surface.
    loss = np.sum(loss_array) / (scores.shape[0] * scores.shape[1])
    # Backprop of the final sum.
    dscores = np.ones_like(loss_array) / (scores.shape[0] * scores.shape[1])
    # Backprop of zeroing the label entries (loss_array[rows, y] = 0).
    dscores[rows, y] = 0
    # Backprop of the maximum(0, .) operation.
    dscores[score_plus_1_minus_y < 0] = 0
    # Backprop of s_j - s_yi + 1: non-label grads pass through unchanged,
    # the label entry accumulates minus the row sum.
    dscores[rows, y] = -np.sum(dscores, 1)
    return loss, dscores
# def hinge_loss2(scores, y):
# scores = np.array(
# [[1.0, 1, 1,1,1],
# [1,1,1,1,1],
# [1,1,1,1,1]]
# )
# y = np.array([0,1,2])
# print(scores)
# print(y)
# print("-------------------Me Trash-----------------")
# l,dl = hinge_loss_origin(scores, y)
# print(l)
# print(dl)
# import torch
# y = torch.tensor(y, dtype=torch.long)
# scores = torch.tensor(scores, requires_grad=True)
# loss_fn = torch.nn.MultiMarginLoss()
# loss = loss_fn(scores, y)
# loss.backward()
# print("--------------------Torch-------------------")
# print(loss.detach().numpy())
# print(scores.grad.numpy())
# return loss.detach().numpy(), scores.grad.numpy()
# def hinge_loss_l2(scores, y):
# ''' SVM 合页损失 '''
# y = y.astype(np.int)
# # 选出 yi
# score_y = scores[range(y.shape[0]), y]
# # si - yi + 1
# score_plus_1_minus_y = scores + 1 - score_y.reshape((score_y.shape[0], 1))
# loss_array = np.maximum(0, score_plus_1_minus_y)
# loss_array_l2 = loss_array ** 2
# loss_array_l2[range(y.shape[0]), y] = 0
# loss = np.sum(loss_array_l2) / len(y)
# # 最后一步求和的反向传播
# dscores = np.ones_like(loss_array_l2) / len(y)
# # loss_array[range(y.shape[0]), y] = 0 的反向传播
# dscores[range(y.shape[0]), y] = 0
# # 平方操作的反向传播
# dscores = dscores * 2 * loss_array
# # maximum 操作的反向传播
# dscores[score_plus_1_minus_y < 0] = 0
# # si - yi + 1 操作的反向传播(除 label 外节点上游 grad 传回来乘 1(不变),label 是一行 grad 的 sum 取反)
# dscores[range(y.shape[0]), y] = -np.sum(dscores, 1)
# return loss, dscores | [
"numpy.clip",
"numpy.ones_like",
"numpy.log",
"numpy.sum",
"numpy.maximum",
"numpy.zeros_like",
"numpy.arange"
] | [((1177, 1200), 'numpy.clip', 'np.clip', (['pred', '(1e-10)', '(1)'], {}), '(pred, 1e-10, 1)\n', (1184, 1200), True, 'import numpy as np\n'), ((1318, 1337), 'numpy.zeros_like', 'np.zeros_like', (['pred'], {}), '(pred)\n', (1331, 1337), True, 'import numpy as np\n'), ((1905, 1940), 'numpy.maximum', 'np.maximum', (['(0)', 'score_plus_1_minus_y'], {}), '(0, score_plus_1_minus_y)\n', (1915, 1940), True, 'import numpy as np\n'), ((1217, 1229), 'numpy.log', 'np.log', (['pred'], {}), '(pred)\n', (1223, 1229), True, 'import numpy as np\n'), ((2044, 2062), 'numpy.sum', 'np.sum', (['loss_array'], {}), '(loss_array)\n', (2050, 2062), True, 'import numpy as np\n'), ((2132, 2156), 'numpy.ones_like', 'np.ones_like', (['loss_array'], {}), '(loss_array)\n', (2144, 2156), True, 'import numpy as np\n'), ((2462, 2480), 'numpy.sum', 'np.sum', (['dscores', '(1)'], {}), '(dscores, 1)\n', (2468, 2480), True, 'import numpy as np\n'), ((1348, 1375), 'numpy.arange', 'np.arange', (['(0)', 'pred.shape[0]'], {}), '(0, pred.shape[0])\n', (1357, 1375), True, 'import numpy as np\n'), ((1257, 1284), 'numpy.arange', 'np.arange', (['(0)', 'pred.shape[0]'], {}), '(0, pred.shape[0])\n', (1266, 1284), True, 'import numpy as np\n'), ((1390, 1417), 'numpy.arange', 'np.arange', (['(0)', 'pred.shape[0]'], {}), '(0, pred.shape[0])\n', (1399, 1417), True, 'import numpy as np\n')] |
"""This file contains code used in "Think Bayes",
by <NAME>, available from greenteapress.com
Copyright 2012 <NAME>
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import matplotlib.pyplot as pyplot
import thinkplot
import numpy
import csv
import random
import shelve
import sys
import time
import thinkbayes2
import warnings
warnings.simplefilter('error', RuntimeWarning)
FORMATS = ['pdf', 'eps', 'png']
class Locker(object):
    """Encapsulates a shelf for storing key-value pairs."""

    def __init__(self, shelf_file):
        # shelf_file: path prefix of the underlying dbm database
        self.shelf = shelve.open(shelf_file)

    def Close(self):
        """Closes the shelf.
        """
        self.shelf.close()

    def Add(self, key, value):
        """Adds a key-value pair (the key is stringified)."""
        self.shelf[str(key)] = value

    def Lookup(self, key):
        """Looks up a key; returns None when absent."""
        return self.shelf.get(str(key))

    def Keys(self):
        """Returns an iterator of keys."""
        # BUGFIX: Shelf.iterkeys() is Python 2 only; iterating the shelf
        # itself yields its keys on both Python 2 and Python 3.
        return iter(self.shelf)

    def Read(self):
        """Returns the contents of the shelf as a map."""
        return dict(self.shelf)
class Subject(object):
"""Represents a subject from the belly button study."""
def __init__(self, code):
"""
code: string ID
species: sequence of (int count, string species) pairs
"""
self.code = code
self.species = []
self.suite = None
self.num_reads = None
self.num_species = None
self.total_reads = None
self.total_species = None
self.prev_unseen = None
self.pmf_n = None
self.pmf_q = None
self.pmf_l = None
def Add(self, species, count):
"""Add a species-count pair.
It is up to the caller to ensure that species names are unique.
species: string species/genus name
count: int number of individuals
"""
self.species.append((count, species))
def Done(self, reverse=False, clean_param=0):
"""Called when we are done adding species counts.
reverse: which order to sort in
"""
if clean_param:
self.Clean(clean_param)
self.species.sort(reverse=reverse)
counts = self.GetCounts()
self.num_species = len(counts)
self.num_reads = sum(counts)
def Clean(self, clean_param=50):
"""Identifies and removes bogus data.
clean_param: parameter that controls the number of legit species
"""
def prob_bogus(k, r):
"""Compute the probability that a species is bogus."""
q = clean_param / r
p = (1-q) ** k
return p
print(self.code, clean_param)
counts = self.GetCounts()
r = 1.0 * sum(counts)
species_seq = []
for k, species in sorted(self.species):
if random.random() < prob_bogus(k, r):
continue
species_seq.append((k, species))
self.species = species_seq
def GetM(self):
"""Gets number of observed species."""
return len(self.species)
def GetCounts(self):
"""Gets the list of species counts
Should be in increasing order, if Sort() has been invoked.
"""
return [count for count, _ in self.species]
def MakeCdf(self):
"""Makes a CDF of total prevalence vs rank."""
counts = self.GetCounts()
counts.sort(reverse=True)
cdf = thinkbayes2.Cdf(dict(enumerate(counts)))
return cdf
def GetNames(self):
"""Gets the names of the seen species."""
return [name for _, name in self.species]
def PrintCounts(self):
"""Prints the counts and species names."""
for count, name in reversed(self.species):
print(count, name)
def GetSpecies(self, index):
"""Gets the count and name of the indicated species.
Returns: count-species pair
"""
return self.species[index]
def GetCdf(self):
"""Returns cumulative prevalence vs number of species.
"""
counts = self.GetCounts()
items = enumerate(counts)
cdf = thinkbayes2.Cdf(items)
return cdf
def GetPrevalences(self):
"""Returns a sequence of prevalences (normalized counts).
"""
counts = self.GetCounts()
total = sum(counts)
prevalences = numpy.array(counts, dtype=numpy.float) / total
return prevalences
def Process(self, low=None, high=500, conc=1, iters=100):
"""Computes the posterior distribution of n and the prevalences.
Sets attribute: self.suite
low: minimum number of species
high: maximum number of species
conc: concentration parameter
iters: number of iterations to use in the estimator
"""
counts = self.GetCounts()
m = len(counts)
if low is None:
low = max(m, 2)
ns = range(low, high+1)
#start = time.time()
self.suite = Species5(ns, conc=conc, iters=iters)
self.suite.Update(counts)
#end = time.time()
#print 'Processing time' end-start
def MakePrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attributes
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
curves = self.RunSimulations(num_sims, add_reads)
self.pmf_l = self.MakePredictive(curves)
def MakeQuickPrediction(self, num_sims=100):
"""Make predictions for the given subject.
Precondition: Process has run
num_sims: how many simulations to run for predictions
Adds attribute:
pmf_l: predictive distribution of additional species
"""
add_reads = self.total_reads - self.num_reads
pmf = thinkbayes2.Pmf()
_, seen = self.GetSeenSpecies()
for _ in range(num_sims):
_, observations = self.GenerateObservations(add_reads)
all_seen = seen.union(observations)
l = len(all_seen) - len(seen)
pmf.Incr(l)
pmf.Normalize()
self.pmf_l = pmf
def DistL(self):
"""Returns the distribution of additional species, l.
"""
return self.pmf_l
def MakeFigures(self):
"""Makes figures showing distribution of n and the prevalences."""
self.PlotDistN()
self.PlotPrevalences()
def PlotDistN(self):
"""Plots distribution of n."""
pmf = self.suite.DistN()
print('90% CI for N:', pmf.CredibleInterval(90))
pmf.label = self.code
thinkplot.Clf()
thinkplot.PrePlot(num=1)
thinkplot.Pmf(pmf)
root = 'species-ndist-%s' % self.code
thinkplot.Save(root=root,
xlabel='Number of species',
ylabel='Prob',
formats=FORMATS,
)
def PlotPrevalences(self, num=5):
"""Plots dist of prevalence for several species.
num: how many species (starting with the highest prevalence)
"""
thinkplot.Clf()
thinkplot.PrePlot(num=5)
for rank in range(1, num+1):
self.PlotPrevalence(rank)
root = 'species-prev-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 1],
)
def PlotPrevalence(self, rank=1, cdf_flag=True):
"""Plots dist of prevalence for one species.
rank: rank order of the species to plot.
cdf_flag: whether to plot the CDF
"""
# convert rank to index
index = self.GetM() - rank
_, mix = self.suite.DistOfPrevalence(index)
count, _ = self.GetSpecies(index)
mix.label = '%d (%d)' % (rank, count)
print('90%% CI for prevalence of species %d:' % rank, end=' ')
print(mix.CredibleInterval(90))
if cdf_flag:
cdf = mix.MakeCdf()
thinkplot.Cdf(cdf)
else:
thinkplot.Pmf(mix)
def PlotMixture(self, rank=1):
"""Plots dist of prevalence for all n, and the mix.
rank: rank order of the species to plot
"""
# convert rank to index
index = self.GetM() - rank
print(self.GetSpecies(index))
print(self.GetCounts()[index])
metapmf, mix = self.suite.DistOfPrevalence(index)
thinkplot.Clf()
for pmf in metapmf.Values():
thinkplot.Pmf(pmf, color='blue', alpha=0.2, linewidth=0.5)
thinkplot.Pmf(mix, color='blue', alpha=0.9, linewidth=2)
root = 'species-mix-%s' % self.code
thinkplot.Save(root=root,
xlabel='Prevalence',
ylabel='Prob',
formats=FORMATS,
axis=[0, 0.3, 0, 0.3],
legend=False)
def GetSeenSpecies(self):
"""Makes a set of the names of seen species.
Returns: number of species, set of string species names
"""
names = self.GetNames()
m = len(names)
seen = set(SpeciesGenerator(names, m))
return m, seen
def GenerateObservations(self, num_reads):
"""Generates a series of random observations.
num_reads: number of reads to generate
Returns: number of species, sequence of string species names
"""
n, prevalences = self.suite.SamplePosterior()
names = self.GetNames()
name_iter = SpeciesGenerator(names, n)
items = zip(name_iter, prevalences)
cdf = thinkbayes2.Cdf(dict(items))
observations = cdf.Sample(num_reads)
#for ob in observations:
# print ob
return n, observations
def Resample(self, num_reads):
"""Choose a random subset of the data (without replacement).
num_reads: number of reads in the subset
"""
t = []
for count, species in self.species:
t.extend([species]*count)
random.shuffle(t)
reads = t[:num_reads]
subject = Subject(self.code)
hist = thinkbayes2.Hist(reads)
for species, count in hist.Items():
subject.Add(species, count)
subject.Done()
return subject
def Match(self, match):
"""Match up a rarefied subject with a complete subject.
match: complete Subject
Assigns attributes:
total_reads:
total_species:
prev_unseen:
"""
self.total_reads = match.num_reads
self.total_species = match.num_species
# compute the prevalence of unseen species (at least approximately,
# based on all species counts in match
_, seen = self.GetSeenSpecies()
seen_total = 0.0
unseen_total = 0.0
for count, species in match.species:
if species in seen:
seen_total += count
else:
unseen_total += count
self.prev_unseen = unseen_total / (seen_total + unseen_total)
def RunSimulation(self, num_reads, frac_flag=False, jitter=0.01):
"""Simulates additional observations and returns a rarefaction curve.
k is the number of additional observations
num_new is the number of new species seen
num_reads: how many new reads to simulate
frac_flag: whether to convert to fraction of species seen
jitter: size of jitter added if frac_flag is true
Returns: list of (k, num_new) pairs
"""
m, seen = self.GetSeenSpecies()
n, observations = self.GenerateObservations(num_reads)
curve = []
for i, obs in enumerate(observations):
seen.add(obs)
if frac_flag:
frac_seen = len(seen) / float(n)
frac_seen += random.uniform(-jitter, jitter)
curve.append((i+1, frac_seen))
else:
num_new = len(seen) - m
curve.append((i+1, num_new))
return curve
def RunSimulations(self, num_sims, num_reads, frac_flag=False):
"""Runs simulations and returns a list of curves.
Each curve is a sequence of (k, num_new) pairs.
num_sims: how many simulations to run
num_reads: how many samples to generate in each simulation
frac_flag: whether to convert num_new to fraction of total
"""
curves = [self.RunSimulation(num_reads, frac_flag)
for _ in range(num_sims)]
return curves
def MakePredictive(self, curves):
"""Makes a predictive distribution of additional species.
curves: list of (k, num_new) curves
Returns: Pmf of num_new
"""
pred = thinkbayes2.Pmf(label=self.code)
for curve in curves:
_, last_num_new = curve[-1]
pred.Incr(last_num_new)
pred.Normalize()
return pred
def MakeConditionals(curves, ks):
    """Makes Cdfs of the distribution of num_new conditioned on k.

    curves: list of (k, num_new) curves
    ks: list of values of k

    Returns: list of Cdfs
    """
    joint = MakeJointPredictive(curves)

    results = []
    for k in ks:
        conditional = joint.Conditional(1, 0, k)
        conditional.label = 'k=%d' % k
        cdf = conditional.MakeCdf()
        print('90%% credible interval for %d' % k, end=' ')
        print(cdf.CredibleInterval(90))
        results.append(cdf)
    return results
def MakeJointPredictive(curves):
    """Makes a joint distribution of k and num_new.

    curves: list of (k, num_new) curves

    Returns: joint Pmf of (k, num_new)
    """
    joint = thinkbayes2.Joint()
    for curve in curves:
        for pair in curve:
            joint.Incr(pair)
    joint.Normalize()
    return joint
def MakeFracCdfs(curves, ks):
    """Makes Cdfs of the fraction of species seen after k reads.

    curves: list of (k, frac) curves
    ks: which values of k to keep

    Returns: map from k to Cdf of fraction seen
    """
    samples = {}
    for curve in curves:
        for k, frac in curve:
            if k in ks:
                samples.setdefault(k, []).append(frac)

    return {k: thinkbayes2.Cdf(fracs) for k, fracs in samples.items()}
def SpeciesGenerator(names, num):
    """Generates a series of species names.

    Yields the given names first, then 'unseen-<serial>' placeholders
    until num names have been produced in total.

    names: list of strings
    num: total number of species names to generate

    Returns: string iterator
    """
    serial = 0
    for name in names:
        yield name
        serial += 1
    while serial < num:
        yield 'unseen-%d' % serial
        serial += 1
def ReadRarefactedData(filename='journal.pone.0047712.s001.csv',
                       clean_param=0):
    """Reads a data file and returns a map from code to Subject.

    Data from http://www.plosone.org/article/
    info%3Adoi%2F10.1371%2Fjournal.pone.0047712#s4

    filename: string filename to read
    clean_param: parameter passed to Clean

    Returns: map from code to Subject
    """
    fp = open(filename)
    reader = csv.reader(fp)
    _ = next(reader)  # skip the header row

    subject = Subject('')
    subject_map = {}

    serial = 0
    for row in reader:
        code = row[0]
        if code != subject.code:
            # start a new subject
            subject = Subject(code)
            subject_map[code] = subject

        # append a serial number so species names are unique
        species = '%s-%d' % (row[1], serial)
        serial += 1

        subject.Add(species, int(row[2]))

    for code, subject in subject_map.items():
        subject.Done(clean_param=clean_param)

    return subject_map
def ReadCompleteDataset(filename='BBB_data_from_Rob.csv', clean_param=0):
    """Reads a data file and returns the subjects and their union.

    Data from personal correspondence with <NAME>, received 2-7-13.
    Converted from xlsx to csv.

    filename: string filename to read
    clean_param: parameter passed to Clean

    Returns: (subject_map, uber_subject) where subject_map maps code to
    Subject and uber_subject pools the reads of all subjects
    """
    fp = open(filename)
    reader = csv.reader(fp)
    # the first two rows are headers; the second contains subject codes
    header = next(reader)
    header = next(reader)
    # first column is the OTU code, last column is the taxon string
    subject_codes = header[1:-1]
    subject_codes = ['B'+code for code in subject_codes]
    # create the subject map
    uber_subject = Subject('uber')
    subject_map = {}
    for code in subject_codes:
        subject_map[code] = Subject(code)
    # read lines
    i = 0
    for t in reader:
        otu_code = t[0]
        if otu_code == '':
            continue
        # pull out a species name and give it a number so names are unique
        otu_names = t[-1]
        taxons = otu_names.split(';')
        species = taxons[-1]
        species = '%s-%d' % (species, i)
        i += 1
        # one count per subject, in the same order as subject_codes
        counts = [int(x) for x in t[1:-1]]
        for code, count in zip(subject_codes, counts):
            if count > 0:
                subject_map[code].Add(species, count)
                uber_subject.Add(species, count)
    uber_subject.Done(clean_param=clean_param)
    for code, subject in subject_map.items():
        subject.Done(clean_param=clean_param)
    return subject_map, uber_subject
def JoinSubjects():
    """Reads both datasets and computes their inner join.

    Finds all subjects that appear in both datasets.

    For subjects in the rarefacted dataset, looks up the total
    number of reads and stores it as total_reads.  num_reads
    is normally 400.

    Returns: map from code to Subject
    """
    sampled_subjects = ReadRarefactedData()
    all_subjects, _ = ReadCompleteDataset()

    for code, subject in sampled_subjects.items():
        match = all_subjects.get(code)
        if match is not None:
            subject.Match(match)

    return sampled_subjects
def JitterCurve(curve, dx=0.2, dy=0.3):
    """Adds random noise to the pairs in a curve.

    dx and dy control the amplitude of the noise in each dimension.
    """
    jittered = []
    for x, y in curve:
        x += random.uniform(-dx, dx)
        y += random.uniform(-dy, dy)
        jittered.append((x, y))
    return jittered
def OffsetCurve(curve, i, n, dx=0.3, dy=0.3):
    """Adds a deterministic offset to the pairs in a curve.

    The offset depends on the curve's index i out of n curves, so a set
    of curves is spread out rather than overplotted.

    i: index of the curve
    n: number of curves
    dx and dy: amplitude of the offset in each dimension
    """
    xoff = -dx + 2 * dx * i / (n-1)
    yoff = -dy + 2 * dy * i / (n-1)
    return [(x + xoff, y + yoff) for x, y in curve]
def PlotCurves(curves, root='species-rare'):
    """Plots a set of rarefaction curves.

    curves: list of curves; each curve is a list of (x, y) pairs
    root: string filename root for the saved figure
    """
    thinkplot.Clf()
    color = '#225EA8'

    n = len(curves)
    for i, curve in enumerate(curves):
        offset = OffsetCurve(curve, i, n)
        xs, ys = zip(*offset)
        thinkplot.Plot(xs, ys, color=color, alpha=0.3, linewidth=0.5)

    thinkplot.Save(root=root,
                   xlabel='# samples',
                   ylabel='# species',
                   formats=FORMATS,
                   legend=False)
def PlotConditionals(cdfs, root='species-cond'):
    """Plots cdfs of num_new conditioned on k.

    cdfs: list of Cdf
    root: string filename root for the saved figure
    """
    thinkplot.Clf()
    thinkplot.PrePlot(num=len(cdfs))
    thinkplot.Cdfs(cdfs)
    thinkplot.Save(root=root,
                   xlabel='# new species',
                   ylabel='Prob',
                   formats=FORMATS)
def PlotFracCdfs(cdfs, root='species-frac'):
    """Plots complementary CDFs of the fraction of species seen.

    cdfs: map from k to CDF of fraction of species seen after k samples
    root: string filename root for the saved figure
    """
    thinkplot.Clf()
    color = '#225EA8'

    for k, cdf in cdfs.items():
        xs, ys = cdf.Render()
        # plot the complementary CDF
        complement = [1 - y for y in ys]
        thinkplot.Plot(xs, complement, color=color, linewidth=1)

        # label each curve with its value of k
        x = 0.9
        y = 1 - cdf.Prob(x)
        pyplot.text(x, y, str(k), fontsize=9, color=color,
                    horizontalalignment='center',
                    verticalalignment='center',
                    bbox=dict(facecolor='white', edgecolor='none'))

    thinkplot.Save(root=root,
                   xlabel='Fraction of species seen',
                   ylabel='Probability',
                   formats=FORMATS,
                   legend=False)
class Species(thinkbayes2.Suite):
    """Represents hypotheses about the number of species.

    Each hypothesis is a Dirichlet distribution with a different n;
    this Suite is the top level of a two-level hierarchy.
    """
    def __init__(self, ns, conc=1, iters=1000):
        """Initializer.

        ns: sequence of hypothetical numbers of species
        conc: concentration parameter for the Dirichlet hypotheses
        iters: number of samples used to estimate each likelihood
        """
        hypos = [thinkbayes2.Dirichlet(n, conc) for n in ns]
        thinkbayes2.Suite.__init__(self, hypos)
        self.iters = iters
    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observed frequencies
        """
        # call Update in the parent class, which calls Likelihood
        thinkbayes2.Suite.Update(self, data)
        # update the next level of the hierarchy (each Dirichlet hypo)
        for hypo in self.Values():
            hypo.Update(data)
    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under this hypothesis.

        hypo: Dirichlet object
        data: list of observed frequencies
        """
        dirichlet = hypo
        # draw sample Likelihoods from the hypothetical Dirichlet dist
        # and add them up (Monte Carlo estimate over self.iters samples)
        like = 0
        for _ in range(self.iters):
            like += dirichlet.Likelihood(data)
        # correct for the number of ways the observed species
        # might have been chosen from all species
        m = len(data)
        like *= thinkbayes2.BinomialCoef(dirichlet.n, m)
        return like
    def DistN(self):
        """Computes the posterior distribution of n.

        Returns: Pmf mapping n to probability
        """
        pmf = thinkbayes2.Pmf()
        for hypo, prob in self.Items():
            pmf.Set(hypo.n, prob)
        return pmf
class Species2(object):
    """Represents hypotheses about the number of species.

    Combines two layers of the hierarchy into one object.

    ns and probs represent the distribution of N
    params represents the parameters of the Dirichlet distributions
    """
    def __init__(self, ns, conc=1, iters=1000):
        """Initializer.

        ns: sequence of hypothetical values of n
        conc: concentration parameter of the Dirichlet prior
        iters: number of samples per likelihood estimate
        """
        self.ns = ns
        self.conc = conc
        # NOTE: dtype=float replaces numpy.float, a deprecated alias
        # removed in NumPy 1.24; the resulting dtype is identical
        self.probs = numpy.ones(len(ns), dtype=float)
        self.params = numpy.ones(self.ns[-1], dtype=float) * conc
        self.iters = iters
        self.num_reads = 0
        self.m = 0

    def Preload(self, data):
        """Change the initial parameters to fit the data better.

        Just an experiment.  Doesn't work.
        """
        m = len(data)
        singletons = data.count(1)
        num = m - singletons
        print(m, singletons, num)
        addend = numpy.ones(num, dtype=float) * 1
        print(len(addend))
        print(len(self.params[singletons:m]))
        self.params[singletons:m] += addend
        print('Preload', num)

    def Update(self, data):
        """Updates the distribution based on data.

        data: numpy array of counts
        """
        self.num_reads += sum(data)

        # Monte Carlo estimate of the likelihood for each n
        like = numpy.zeros(len(self.ns), dtype=float)
        for _ in range(self.iters):
            like += self.SampleLikelihood(data)

        self.probs *= like
        self.probs /= self.probs.sum()

        self.m = len(data)
        self.params[:self.m] += data

    def SampleLikelihood(self, data):
        """Computes the likelihood of the data for all values of n.

        Draws one sample from the distribution of prevalences.

        data: sequence of observed counts

        Returns: numpy array of m likelihoods
        """
        gammas = numpy.random.gamma(self.params)

        m = len(data)
        row = gammas[:m]
        col = numpy.cumsum(gammas)

        log_likes = []
        for n in self.ns:
            # normalized prevalences for the first m species, given n
            ps = row / col[n-1]
            terms = numpy.log(ps) * data
            log_like = terms.sum()
            log_likes.append(log_like)

        # scale into a reasonable range before exponentiating
        log_likes -= numpy.max(log_likes)
        likes = numpy.exp(log_likes)

        # correct for the number of ways the m observed species
        # could have been chosen from n
        coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
        likes *= coefs

        return likes

    def DistN(self):
        """Computes the distribution of n.

        Returns: new Pmf object
        """
        pmf = thinkbayes2.Pmf(dict(zip(self.ns, self.probs)))
        return pmf

    def RandomN(self):
        """Returns a random value of n."""
        return self.DistN().Random()

    def DistQ(self, iters=100):
        """Computes the distribution of q based on distribution of n.

        iters: number of samples to draw

        Returns: pmf of q
        """
        cdf_n = self.DistN().MakeCdf()
        sample_n = cdf_n.Sample(iters)

        pmf = thinkbayes2.Pmf()
        for n in sample_n:
            q = self.RandomQ(n)
            pmf.Incr(q)
        pmf.Normalize()
        return pmf

    def RandomQ(self, n):
        """Returns a random value of q.

        Based on n, self.num_reads and self.conc.

        n: number of species

        Returns: q
        """
        # generate random prevalences
        dirichlet = thinkbayes2.Dirichlet(n, conc=self.conc)
        prevalences = dirichlet.Random()

        # generate a simulated sample
        pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
        cdf = pmf.MakeCdf()
        sample = cdf.Sample(self.num_reads)
        seen = set(sample)

        # add up the prevalence of unseen species
        q = 0
        for species, prev in enumerate(prevalences):
            if species not in seen:
                q += prev
        return q

    def MarginalBeta(self, n, index):
        """Computes the conditional distribution of the indicated species.

        n: conditional number of species
        index: which species

        Returns: Beta object representing a distribution of prevalence.
        """
        alpha0 = self.params[:n].sum()
        alpha = self.params[index]
        return thinkbayes2.Beta(alpha, alpha0-alpha)

    def DistOfPrevalence(self, index):
        """Computes the distribution of prevalence for the indicated species.

        index: which species

        Returns: (metapmf, mix) where metapmf is a MetaPmf and mix is a Pmf
        """
        metapmf = thinkbayes2.Pmf()
        for n, prob in zip(self.ns, self.probs):
            beta = self.MarginalBeta(n, index)
            pmf = beta.MakePmf()
            metapmf.Set(pmf, prob)
        mix = thinkbayes2.MakeMixture(metapmf)
        return metapmf, mix

    def SamplePosterior(self):
        """Draws random n and prevalences.

        Returns: (n, prevalences)
        """
        n = self.RandomN()
        prevalences = self.SamplePrevalences(n)
        return n, prevalences

    def SamplePrevalences(self, n):
        """Draws a sample of prevalences given n.

        n: the number of species assumed in the conditional

        Returns: numpy array of n prevalences
        """
        if n == 1:
            return [1.0]
        # choose a target prevalence of unseen species, bounded away
        # from zero to avoid degenerate adjustments
        q_desired = self.RandomQ(n)
        q_desired = max(q_desired, 1e-6)
        params = self.Unbias(n, self.m, q_desired)

        gammas = numpy.random.gamma(params)
        gammas /= gammas.sum()
        return gammas

    def Unbias(self, n, m, q_desired):
        """Adjusts the parameters to achieve desired prev_unseen (q).

        n: number of species
        m: seen species
        q_desired: prevalence of unseen species
        """
        params = self.params[:n].copy()

        if n == m:
            return params

        x = sum(params[:m])
        y = sum(params[m:])
        a = x + y

        # scale seen (f) and unseen (g) params so that the unseen
        # fraction equals q_desired while the total stays a
        g = q_desired * a / y
        f = (a - g * y) / x
        params[:m] *= f
        params[m:] *= g

        return params
class Species3(Species2):
    """Represents hypotheses about the number of species.

    Like Species2, but evaluates all values of n with one vectorized
    sample per iteration.
    """
    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observations
        """
        # sample the likelihoods and add them up
        # NOTE: dtype=float replaces numpy.float, a deprecated alias
        # removed in NumPy 1.24; the resulting dtype is identical
        like = numpy.zeros(len(self.ns), dtype=float)
        for _ in range(self.iters):
            like += self.SampleLikelihood(data)

        self.probs *= like
        self.probs /= self.probs.sum()

        m = len(data)
        self.params[:m] += data

    def SampleLikelihood(self, data):
        """Computes the likelihood of the data under all hypotheses.

        data: list of observations
        """
        # get a random sample
        gammas = numpy.random.gamma(self.params)

        # row is just the first m elements of gammas
        m = len(data)
        row = gammas[:m]

        # col is the cumulative sum of gammas
        col = numpy.cumsum(gammas)[self.ns[0]-1:]

        # each row of the array is a set of ps, normalized
        # for each hypothetical value of n
        array = row / col[:, numpy.newaxis]

        # computing the multinomial PDF under a log transform
        # take the log of the ps and multiply by the data
        terms = numpy.log(array) * data

        # add up the rows
        log_likes = terms.sum(axis=1)

        # before exponentiating, scale into a reasonable range
        log_likes -= numpy.max(log_likes)
        likes = numpy.exp(log_likes)

        # correct for the number of ways we could see m species
        # out of a possible n
        coefs = [thinkbayes2.BinomialCoef(n, m) for n in self.ns]
        likes *= coefs

        return likes
class Species4(Species):
    """Represents hypotheses about the number of species.

    Like Species, but updates the observed species one at a time.
    """
    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observed frequencies
        """
        m = len(data)
        # loop through the species and update one at a time
        for i in range(m):
            # a one-species observation vector: zeros except data[i]
            one = numpy.zeros(i+1)
            one[i] = data[i]
            # call the parent class
            Species.Update(self, one)
    def Likelihood(self, data, hypo):
        """Computes the likelihood of the data under this hypothesis.

        Note: this only works correctly if we update one species at a time.

        hypo: Dirichlet object
        data: list of observed frequencies
        """
        dirichlet = hypo
        # Monte Carlo estimate over self.iters Dirichlet samples
        like = 0
        for _ in range(self.iters):
            like += dirichlet.Likelihood(data)
        # correct for the number of unseen species the new one
        # could have been
        m = len(data)
        num_unseen = dirichlet.n - m + 1
        like *= num_unseen
        return like
class Species5(Species2):
    """Represents hypotheses about the number of species.

    Combines two layers of the hierarchy into one object.

    ns and probs represent the distribution of N
    params represents the parameters of the Dirichlet distributions
    """
    def Update(self, data):
        """Updates the suite based on the data.

        data: list of observed frequencies in increasing order
        """
        # loop through the species and update one at a time
        m = len(data)
        for i in range(m):
            self.UpdateOne(i+1, data[i])
            self.params[i] += data[i]

    def UpdateOne(self, i, count):
        """Updates the suite based on one observed species.

        Evaluates the likelihood for all values of n.

        i: which species was observed (1..n)
        count: how many were observed
        """
        # how many species have we seen so far
        self.m = i

        # how many reads have we seen
        self.num_reads += count

        if self.iters == 0:
            return

        # sample the likelihoods and add them up
        # NOTE: dtype=float replaces numpy.float, a deprecated alias
        # removed in NumPy 1.24; the resulting dtype is identical
        likes = numpy.zeros(len(self.ns), dtype=float)
        for _ in range(self.iters):
            likes += self.SampleLikelihood(i, count)

        # correct for the number of unseen species the new one
        # could have been
        unseen_species = [n-i+1 for n in self.ns]
        likes *= unseen_species

        # multiply the priors by the likelihoods and renormalize
        self.probs *= likes
        self.probs /= self.probs.sum()

    def SampleLikelihood(self, i, count):
        """Computes the likelihood of the data under all hypotheses.

        i: which species was observed
        count: how many were observed
        """
        # get a random sample of p
        gammas = numpy.random.gamma(self.params)

        # sums is the cumulative sum of p, for each value of n
        sums = numpy.cumsum(gammas)[self.ns[0]-1:]

        # get p for the mth species, for each value of n
        ps = gammas[i-1] / sums
        log_likes = numpy.log(ps) * count

        # before exponentiating, scale into a reasonable range
        log_likes -= numpy.max(log_likes)
        likes = numpy.exp(log_likes)

        return likes
def MakePosterior(constructor, data, ns, conc=1, iters=1000):
    """Makes a suite, updates it and returns the posterior suite.

    Prints the elapsed processing time.

    constructor: suite class to instantiate
    data: observed species and their counts
    ns: sequence of hypothetical ns
    conc: concentration parameter
    iters: how many samples to draw

    Returns: posterior suite of the given type
    """
    suite = constructor(ns, conc=conc, iters=iters)

    t0 = time.time()
    suite.Update(data)
    elapsed = time.time() - t0
    print('Processing time', elapsed)

    return suite
def PlotAllVersions():
    """Makes a graph of posterior distributions of N for every estimator."""
    data = [1, 2, 3]
    ns = range(len(data), 20)

    for constructor in [Species, Species2, Species3, Species4, Species5]:
        suite = MakePosterior(constructor, data, ns)
        pmf = suite.DistN()
        pmf.label = '%s' % (constructor.__name__)
        thinkplot.Pmf(pmf)

    thinkplot.Save(root='species3',
                   xlabel='Number of species',
                   ylabel='Prob')
def PlotMedium():
    """Makes a graph of posterior distributions of N for a medium dataset."""
    data = [1, 1, 1, 1, 2, 3, 5, 9]
    ns = range(len(data), 20)

    for constructor in [Species, Species2, Species3, Species4, Species5]:
        suite = MakePosterior(constructor, data, ns)
        pmf = suite.DistN()
        pmf.label = '%s' % (constructor.__name__)
        thinkplot.Pmf(pmf)

    thinkplot.Show()
def SimpleDirichletExample():
    """Makes a plot showing posterior distributions for three species.

    This is the case where we know there are exactly three species.
    """
    thinkplot.Clf()
    thinkplot.PrePlot(3)

    names = ['lions', 'tigers', 'bears']
    data = [3, 2, 1]

    dirichlet = thinkbayes2.Dirichlet(3)
    # prior means
    for i, name in enumerate(names):
        beta = dirichlet.MarginalBeta(i)
        print('mean', name, beta.Mean())

    dirichlet.Update(data)
    # posterior means and marginal distributions
    for i, name in enumerate(names):
        beta = dirichlet.MarginalBeta(i)
        print('mean', name, beta.Mean())

        pmf = beta.MakePmf(label=name)
        thinkplot.Pmf(pmf)

    thinkplot.Save(root='species1',
                   xlabel='Prevalence',
                   ylabel='Prob',
                   formats=FORMATS,
                   )
def HierarchicalExample():
    """Shows the posterior distribution of n for lions, tigers and bears."""
    ns = range(3, 30)
    suite = Species(ns, iters=8000)

    suite.Update([3, 2, 1])

    thinkplot.Clf()
    thinkplot.PrePlot(num=1)

    posterior = suite.DistN()
    thinkplot.Pdf(posterior)
    thinkplot.Save(root='species2',
                   xlabel='Number of species',
                   ylabel='Prob',
                   formats=FORMATS,
                   )
def CompareHierarchicalExample():
    """Makes a graph comparing posterior distributions of N.

    Compares the brute-force Species estimator against Species5.
    """
    data = [3, 2, 1]
    m = len(data)
    n = 30
    ns = range(m, n)

    constructors = [Species, Species5]
    iterations = [1000, 100]

    for constructor, iters in zip(constructors, iterations):
        # BUG FIX: iters must be passed by keyword; positionally it
        # would land in MakePosterior's conc parameter
        suite = MakePosterior(constructor, data, ns, iters=iters)
        pmf = suite.DistN()
        pmf.label = '%s' % (constructor.__name__)
        thinkplot.Pmf(pmf)

    thinkplot.Show()
def ProcessSubjects(codes):
    """Processes subjects with the given codes and plots their posteriors.

    codes: sequence of string codes
    """
    thinkplot.Clf()
    thinkplot.PrePlot(len(codes))

    subjects = ReadRarefactedData()
    posteriors = []
    for code in codes:
        subject = subjects[code]
        subject.Process()

        posterior = subject.suite.DistN()
        posterior.label = subject.code
        posteriors.append(posterior)
        thinkplot.Pmf(posterior)

    first, second = posteriors[0], posteriors[1]
    print('ProbGreater', thinkbayes2.PmfProbGreater(first, second))
    print('ProbLess', thinkbayes2.PmfProbLess(first, second))

    thinkplot.Save(root='species4',
                   xlabel='Number of species',
                   ylabel='Prob',
                   formats=FORMATS,
                   )
def RunSubject(code, conc=1, high=500):
    """Runs the full analysis for the subject with the given code.

    code: string code
    conc: concentration parameter
    high: upper bound on the number of species
    """
    subjects = JoinSubjects()
    subject = subjects[code]

    # posterior distribution of n, plus a quick prediction of l
    subject.Process(conc=conc, high=high, iters=300)
    subject.MakeQuickPrediction()

    PrintSummary(subject)
    actual_l = subject.total_species - subject.num_species
    cdf_l = subject.DistL().MakeCdf()
    PrintPrediction(cdf_l, actual_l)

    subject.MakeFigures()

    # rarefaction curves
    curves = subject.RunSimulations(100, 400)
    PlotCurves(curves, root='species-rare-%s' % subject.code)

    # conditional distributions of l for several values of k
    curves = subject.RunSimulations(500, 800)
    cdfs = MakeConditionals(curves, [100, 200, 400, 800])
    PlotConditionals(cdfs, root='species-cond-%s' % subject.code)

    # fraction of species seen as a function of additional reads
    curves = subject.RunSimulations(500, 1000, frac_flag=True)
    cdfs = MakeFracCdfs(curves, [10, 100, 200, 400, 600, 800, 1000])
    PlotFracCdfs(cdfs, root='species-frac-%s' % subject.code)
def PrintSummary(subject):
    """Prints a summary of a subject.

    subject: Subject
    """
    print(subject.code)

    found = (subject.num_species, subject.num_reads)
    print('found %d species in %d reads' % found)

    total = (subject.total_species, subject.total_reads)
    print('total %d species in %d reads' % total)

    cdf = subject.suite.DistN().MakeCdf()
    print('n')
    PrintPrediction(cdf, 'unknown')
def PrintPrediction(cdf, actual):
    """Prints a summary of a prediction.

    cdf: predictive distribution
    actual: actual value
    """
    median = cdf.Percentile(50)
    low, high = cdf.CredibleInterval(75)

    print('predicted %0.2f (%0.2f %0.2f)' % (median, low, high))
    print('actual', actual)
def RandomSeed(x):
    """Seeds both the stdlib and numpy random generators.

    x: int seed
    """
    random.seed(x)
    numpy.random.seed(x)
def GenerateFakeSample(n, r, tr, conc=1):
    """Generates fake data with the given parameters.

    n: number of species
    r: number of reads in subsample
    tr: total number of reads
    conc: concentration parameter

    Returns: hist of all reads, hist of subsample, prev_unseen
    """
    # generate random prevalences
    dirichlet = thinkbayes2.Dirichlet(n, conc=conc)
    prevalences = dirichlet.Random()
    prevalences.sort()

    # generate a simulated sample
    pmf = thinkbayes2.Pmf(dict(enumerate(prevalences)))
    cdf = pmf.MakeCdf()
    sample = cdf.Sample(tr)

    # collect the species counts
    hist = thinkbayes2.Hist(sample)

    # extract a subset of the data
    if tr > r:
        random.shuffle(sample)
        subhist = thinkbayes2.Hist(sample[:r])
    else:
        subhist = hist

    # add up the prevalence of unseen species
    prev_unseen = sum(prev for species, prev in enumerate(prevalences)
                      if species not in subhist)

    return hist, subhist, prev_unseen
def PlotActualPrevalences():
    """Makes a plot comparing actual max prevalences with a model."""
    # read data
    subject_map, _ = ReadCompleteDataset()

    # for subjects with more than 50 species: PMF of actual max
    # prevalence and PMF of max prevalence generated by simulation
    observed_pmf = thinkbayes2.Pmf()
    model_pmf = thinkbayes2.Pmf()

    # concentration parameter used in the simulation
    conc = 0.06

    for code, subject in subject_map.items():
        prevalences = subject.GetPrevalences()
        m = len(prevalences)
        if m < 2:
            continue

        actual_max = max(prevalences)
        print(code, m, actual_max)

        # incr the PMFs
        if m > 50:
            observed_pmf.Incr(actual_max)
            model_pmf.Incr(SimulateMaxPrev(m, conc))

    # plot CDFs for the actual and simulated max prevalence
    cdf_actual = observed_pmf.MakeCdf(label='actual')
    cdf_sim = model_pmf.MakeCdf(label='sim')

    thinkplot.Cdfs([cdf_actual, cdf_sim])
    thinkplot.Show()
def ScatterPrevalences(ms, actual):
    """Makes a scatter plot of actual max prevalences and expected values.

    ms: sorted sequence of m (number of species)
    actual: sequence of actual max prevalence
    """
    concs = [1, 0.5, 0.2, 0.1]
    for conc in concs:
        expected = [ExpectedMaxPrev(m, conc) for m in ms]
        thinkplot.Plot(ms, expected)

    thinkplot.Scatter(ms, actual)
    thinkplot.Show(xscale='log')
def SimulateMaxPrev(m, conc=1):
    """Returns a random max prevalence drawn from a Dirichlet distribution.

    m: int number of species
    conc: concentration parameter of the Dirichlet distribution

    Returns: float max of m prevalences
    """
    prevalences = thinkbayes2.Dirichlet(m, conc).Random()
    return max(prevalences)
def ExpectedMaxPrev(m, conc=1, iters=100):
    """Estimates the expected max prevalence by Monte Carlo.

    m: number of species
    conc: concentration parameter
    iters: how many iterations to run

    Returns: expected max prevalence
    """
    dirichlet = thinkbayes2.Dirichlet(m, conc)

    maxes = [max(dirichlet.Random()) for _ in range(iters)]
    return numpy.mean(maxes)
class Calibrator(object):
"""Encapsulates the calibration process."""
def __init__(self, conc=0.1):
"""
"""
self.conc = conc
self.ps = range(10, 100, 10)
self.total_n = numpy.zeros(len(self.ps))
self.total_q = numpy.zeros(len(self.ps))
self.total_l = numpy.zeros(len(self.ps))
self.n_seq = []
self.q_seq = []
self.l_seq = []
def Calibrate(self, num_runs=100, n_low=30, n_high=400, r=400, tr=1200):
"""Runs calibrations.
num_runs: how many runs
"""
for seed in range(num_runs):
self.RunCalibration(seed, n_low, n_high, r, tr)
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def Validate(self, num_runs=100, clean_param=0):
"""Runs validations.
num_runs: how many runs
"""
subject_map, _ = ReadCompleteDataset(clean_param=clean_param)
i = 0
for match in subject_map.itervalues():
if match.num_reads < 400:
continue
num_reads = 100
print('Validate', match.code)
subject = match.Resample(num_reads)
subject.Match(match)
n_actual = None
q_actual = subject.prev_unseen
l_actual = subject.total_species - subject.num_species
self.RunSubject(subject, n_actual, q_actual, l_actual)
i += 1
if i == num_runs:
break
self.total_n *= 100.0 / num_runs
self.total_q *= 100.0 / num_runs
self.total_l *= 100.0 / num_runs
def PlotN(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
xs, ys = zip(*self.n_seq)
if None in xs:
return
high = max(xs+ys)
thinkplot.Plot([0, high], [0, high], color='gray')
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual n',
ylabel='Predicted')
def PlotQ(self, root='species-q'):
"""Makes a scatter plot of simulated vs actual prev_unseen (q).
"""
thinkplot.Plot([0, 0.2], [0, 0.2], color='gray')
xs, ys = zip(*self.q_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual q',
ylabel='Predicted')
def PlotL(self, root='species-n'):
"""Makes a scatter plot of simulated vs actual l.
"""
thinkplot.Plot([0, 20], [0, 20], color='gray')
xs, ys = zip(*self.l_seq)
thinkplot.Scatter(xs, ys)
thinkplot.Save(root=root,
xlabel='Actual l',
ylabel='Predicted')
def PlotCalibrationCurves(self, root='species5'):
"""Plots calibration curves"""
print(self.total_n)
print(self.total_q)
print(self.total_l)
thinkplot.Plot([0, 100], [0, 100], color='gray', alpha=0.2)
if self.total_n[0] >= 0:
thinkplot.Plot(self.ps, self.total_n, label='n')
thinkplot.Plot(self.ps, self.total_q, label='q')
thinkplot.Plot(self.ps, self.total_l, label='l')
thinkplot.Save(root=root,
axis=[0, 100, 0, 100],
xlabel='Ideal percentages',
ylabel='Predictive distributions',
formats=FORMATS,
)
def RunCalibration(self, seed, n_low, n_high, r, tr):
"""Runs a single calibration run.
Generates N and prevalences from a Dirichlet distribution,
then generates simulated data.
Runs analysis to get the posterior distributions.
Generates calibration curves for each posterior distribution.
seed: int random seed
"""
# generate a random number of species and their prevalences
# (from a Dirichlet distribution with alpha_i = conc for all i)
RandomSeed(seed)
n_actual = random.randrange(n_low, n_high+1)
hist, subhist, q_actual = GenerateFakeSample(
n_actual,
r,
tr,
self.conc)
l_actual = len(hist) - len(subhist)
print('Run low, high, conc', n_low, n_high, self.conc)
print('Run r, tr', r, tr)
print('Run n, q, l', n_actual, q_actual, l_actual)
# extract the data
data = [count for species, count in subhist.Items()]
data.sort()
print('data', data)
# make a Subject and process
subject = Subject('simulated')
subject.num_reads = r
subject.total_reads = tr
for species, count in subhist.Items():
subject.Add(species, count)
subject.Done()
self.RunSubject(subject, n_actual, q_actual, l_actual)
def RunSubject(self, subject, n_actual, q_actual, l_actual):
    """Runs the analysis for a subject.

    subject: Subject
    n_actual: number of species
    q_actual: prevalence of unseen species
    l_actual: number of new species
    """
    # process and make prediction
    subject.Process(conc=self.conc, iters=100)
    subject.MakeQuickPrediction()

    # extract the posterior suite
    suite = subject.suite

    def scored(label, pmf, actual, seq):
        # prints the label, then scores the predictive distribution
        print(label)
        return self.CheckDistribution(pmf, actual, seq)

    # accumulate the calibration score vectors for n, q and l
    self.total_n += scored('n', suite.DistN(), n_actual, self.n_seq)
    self.total_q += scored('q', suite.DistQ(), q_actual, self.q_seq)
    self.total_l += scored('l', subject.DistL(), l_actual, self.l_seq)
def CheckDistribution(self, pmf, actual, seq):
    """Checks a predictive distribution and returns a score vector.

    pmf: predictive distribution
    actual: actual value
    seq: list that the (actual, predicted mean) pair is appended onto

    Returns: numpy array of scores, one per percentage in self.ps
    """
    seq.append((actual, pmf.Mean()))
    cdf = pmf.MakeCdf()
    PrintPrediction(cdf, actual)
    return ScoreVector(cdf, self.ps, actual)
def ScoreVector(cdf, ps, actual):
    """Checks whether the actual value falls in each credible interval.

    cdf: predictive distribution
    ps: percentages to check (0-100)
    actual: actual value

    Returns: numpy array of 0, 0.5, or 1 (-1 flags a missing actual value)
    """
    intervals = (cdf.CredibleInterval(p) for p in ps)
    return numpy.array([Score(low, high, actual) for low, high in intervals])
def Score(low, high, n):
    """Scores whether the actual value falls in the credible interval.

    Hitting either endpoint counts as half credit; a missing value is
    flagged as invalid.

    low: low end of range
    high: high end of range
    n: actual value

    Returns: -1 (invalid), 0 (miss), 0.5 (endpoint), or 1 (inside)
    """
    if n is None:
        return -1
    if n in (low, high):
        return 0.5
    return 1 if low < n < high else 0
def FakeSubject(n=300, conc=0.1, num_reads=400, prevalences=None):
    """Makes a fake Subject.

    If prevalences is provided, n and conc are ignored.

    n: number of species
    conc: concentration parameter
    num_reads: number of reads
    prevalences: numpy array of prevalences (overrides n and conc)
    """
    # draw random prevalences unless the caller supplied them
    if prevalences is None:
        prevalences = thinkbayes2.Dirichlet(n, conc=conc).Random()
        prevalences.sort()

    # simulate num_reads draws from the prevalence distribution
    cdf = thinkbayes2.Pmf(dict(enumerate(prevalences))).MakeCdf()
    sample = cdf.Sample(num_reads)

    # tally reads per species
    hist = thinkbayes2.Hist(sample)

    # sorted per-species counts (unused here; kept for parity with original)
    data = sorted(count for species, count in hist.Items())

    # build and finalize the Subject
    subject = Subject('simulated')
    for species, count in hist.Items():
        subject.Add(species, count)
    subject.Done()
    return subject
def PlotSubjectCdf(code=None, clean_param=0):
    """Checks whether the Dirichlet model can replicate the data.

    Plots the observed rank-abundance CDF of one subject along with
    CDFs of fake subjects generated from the posterior prevalences.

    code: subject code string, 'uber' for the pooled subject, or None
          to pick a subject at random
    clean_param: parameter used for data cleaning
    """
    subject_map, uber_subject = ReadCompleteDataset(clean_param=clean_param)

    if code is None:
        # dict views are not sequences, so materialize before sampling;
        # random.choice(subject_map.values()) raises TypeError on Python 3
        subjects = list(subject_map.values())
        subject = random.choice(subjects)
        code = subject.code
    elif code == 'uber':
        subject = uber_subject
    else:
        subject = subject_map[code]

    print(subject.code)

    m = subject.GetM()

    subject.Process(high=m, conc=0.1, iters=0)
    print(subject.suite.params[:m])

    # plot the observed cdf
    options = dict(linewidth=3, color='blue', alpha=0.5)
    cdf = subject.MakeCdf()
    thinkplot.Cdf(cdf, **options)

    options = dict(linewidth=1, color='green', alpha=0.5)

    # generate fake subjects and plot their CDFs
    for _ in range(10):
        prevalences = subject.suite.SamplePrevalences(m)
        fake = FakeSubject(prevalences=prevalences)
        cdf = fake.MakeCdf()
        thinkplot.Cdf(cdf, **options)

    root = 'species-cdf-%s' % code
    thinkplot.Save(root=root,
                   xlabel='rank',
                   ylabel='CDF',
                   xscale='log',
                   formats=FORMATS,
                   )
def RunCalibration(flag='cal', num_runs=100, clean_param=50):
    """Runs either the calibration or validation process.

    flag: string 'cal' or 'val'
    num_runs: how many runs
    clean_param: parameter used for data cleaning (validation only)
    """
    cal = Calibrator(conc=0.1)

    if flag == 'val':
        cal.Validate(num_runs=num_runs, clean_param=clean_param)
    else:
        cal.Calibrate(num_runs=num_runs)

    # one figure per posterior quantity, plus the calibration curves
    suffix = '-%s' % flag
    for plot, stem in ((cal.PlotN, 'species-n'),
                       (cal.PlotQ, 'species-q'),
                       (cal.PlotL, 'species-l'),
                       (cal.PlotCalibrationCurves, 'species5')):
        plot(root=stem + suffix)
def RunTests():
    """Runs calibration code and generates some figures."""
    # validation first, then calibration, as in the original sequence
    for flag in ('val', 'cal'):
        RunCalibration(flag=flag)

    PlotSubjectCdf('B1558.G', clean_param=50)
    PlotSubjectCdf(None)
def main(script):
    """Reruns the main analyses, reseeding before each stage.

    script: sys.argv[0] (the script name); accepted but unused
    """
    # each stage is reseeded so it is independently reproducible
    stages = (
        lambda: RunSubject('B1242', conc=1, high=100),
        SimpleDirichletExample,
        HierarchicalExample,
    )
    for stage in stages:
        RandomSeed(17)
        stage()
if __name__ == '__main__':
    # forward the script name (and any extra args) to main
    main(*sys.argv)
| [
"thinkbayes2.Suite.Update",
"numpy.log",
"thinkbayes2.PmfProbLess",
"numpy.array",
"shelve.open",
"thinkbayes2.Beta",
"thinkplot.Plot",
"thinkplot.Clf",
"numpy.mean",
"thinkbayes2.MakeMixture",
"thinkbayes2.Dirichlet",
"thinkplot.Cdf",
"thinkbayes2.BinomialCoef",
"numpy.max",
"numpy.exp"... | [((383, 429), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""', 'RuntimeWarning'], {}), "('error', RuntimeWarning)\n", (404, 429), False, 'import warnings\n'), ((13979, 13998), 'thinkbayes2.Joint', 'thinkbayes2.Joint', ([], {}), '()\n', (13996, 13998), False, 'import thinkbayes2\n'), ((15402, 15416), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (15412, 15416), False, 'import csv\n'), ((16433, 16447), 'csv.reader', 'csv.reader', (['fp'], {}), '(fp)\n', (16443, 16447), False, 'import csv\n'), ((19024, 19039), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (19037, 19039), False, 'import thinkplot\n'), ((19267, 19368), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""# samples"""', 'ylabel': '"""# species"""', 'formats': 'FORMATS', 'legend': '(False)'}), "(root=root, xlabel='# samples', ylabel='# species', formats=\n FORMATS, legend=False)\n", (19281, 19368), False, 'import thinkplot\n'), ((19592, 19607), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (19605, 19607), False, 'import thinkplot\n'), ((19650, 19670), 'thinkplot.Cdfs', 'thinkplot.Cdfs', (['cdfs'], {}), '(cdfs)\n', (19664, 19670), False, 'import thinkplot\n'), ((19676, 19762), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""# new species"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS'}), "(root=root, xlabel='# new species', ylabel='Prob', formats=\n FORMATS)\n", (19690, 19762), False, 'import thinkplot\n'), ((19989, 20004), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (20002, 20004), False, 'import thinkplot\n'), ((20453, 20571), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Fraction of species seen"""', 'ylabel': '"""Probability"""', 'formats': 'FORMATS', 'legend': '(False)'}), "(root=root, xlabel='Fraction of species seen', ylabel=\n 'Probability', formats=FORMATS, legend=False)\n", (20467, 20571), False, 'import thinkplot\n'), ((33567, 33578), 'time.time', 'time.time', ([], 
{}), '()\n', (33576, 33578), False, 'import time\n'), ((33612, 33623), 'time.time', 'time.time', ([], {}), '()\n', (33621, 33623), False, 'import time\n'), ((34073, 34147), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': '"""species3"""', 'xlabel': '"""Number of species"""', 'ylabel': '"""Prob"""'}), "(root='species3', xlabel='Number of species', ylabel='Prob')\n", (34087, 34147), False, 'import thinkplot\n'), ((34581, 34597), 'thinkplot.Show', 'thinkplot.Show', ([], {}), '()\n', (34595, 34597), False, 'import thinkplot\n'), ((34782, 34797), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (34795, 34797), False, 'import thinkplot\n'), ((34802, 34822), 'thinkplot.PrePlot', 'thinkplot.PrePlot', (['(3)'], {}), '(3)\n', (34819, 34822), False, 'import thinkplot\n'), ((34904, 34928), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['(3)'], {}), '(3)\n', (34925, 34928), False, 'import thinkbayes2\n'), ((35251, 35340), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': '"""species1"""', 'xlabel': '"""Prevalence"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS'}), "(root='species1', xlabel='Prevalence', ylabel='Prob', formats\n =FORMATS)\n", (35265, 35340), False, 'import thinkplot\n'), ((35621, 35636), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (35634, 35636), False, 'import thinkplot\n'), ((35641, 35665), 'thinkplot.PrePlot', 'thinkplot.PrePlot', ([], {'num': '(1)'}), '(num=1)\n', (35658, 35665), False, 'import thinkplot\n'), ((35695, 35713), 'thinkplot.Pdf', 'thinkplot.Pdf', (['pmf'], {}), '(pmf)\n', (35708, 35713), False, 'import thinkplot\n'), ((35718, 35813), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': '"""species2"""', 'xlabel': '"""Number of species"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS'}), "(root='species2', xlabel='Number of species', ylabel='Prob',\n formats=FORMATS)\n", (35732, 35813), False, 'import thinkplot\n'), ((36331, 36347), 'thinkplot.Show', 'thinkplot.Show', ([], {}), '()\n', (36345, 36347), False, 'import thinkplot\n'), 
((36498, 36513), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (36511, 36513), False, 'import thinkplot\n'), ((36946, 37041), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': '"""species4"""', 'xlabel': '"""Number of species"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS'}), "(root='species4', xlabel='Number of species', ylabel='Prob',\n formats=FORMATS)\n", (36960, 37041), False, 'import thinkplot\n'), ((39093, 39107), 'random.seed', 'random.seed', (['x'], {}), '(x)\n', (39104, 39107), False, 'import random\n'), ((39112, 39132), 'numpy.random.seed', 'numpy.random.seed', (['x'], {}), '(x)\n', (39129, 39132), False, 'import numpy\n'), ((39479, 39514), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['n'], {'conc': 'conc'}), '(n, conc=conc)\n', (39500, 39514), False, 'import thinkbayes2\n'), ((39763, 39787), 'thinkbayes2.Hist', 'thinkbayes2.Hist', (['sample'], {}), '(sample)\n', (39779, 39787), False, 'import thinkbayes2\n'), ((40514, 40531), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (40529, 40531), False, 'import thinkbayes2\n'), ((40546, 40563), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (40561, 40563), False, 'import thinkbayes2\n'), ((41166, 41203), 'thinkplot.Cdfs', 'thinkplot.Cdfs', (['[cdf_actual, cdf_sim]'], {}), '([cdf_actual, cdf_sim])\n', (41180, 41203), False, 'import thinkplot\n'), ((41208, 41224), 'thinkplot.Show', 'thinkplot.Show', ([], {}), '()\n', (41222, 41224), False, 'import thinkplot\n'), ((41576, 41605), 'thinkplot.Scatter', 'thinkplot.Scatter', (['ms', 'actual'], {}), '(ms, actual)\n', (41593, 41605), False, 'import thinkplot\n'), ((41610, 41638), 'thinkplot.Show', 'thinkplot.Show', ([], {'xscale': '"""log"""'}), "(xscale='log')\n", (41624, 41638), False, 'import thinkplot\n'), ((41900, 41930), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['m', 'conc'], {}), '(m, conc)\n', (41921, 41930), False, 'import thinkbayes2\n'), ((42250, 42280), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['m', 
'conc'], {}), '(m, conc)\n', (42271, 42280), False, 'import thinkbayes2\n'), ((42408, 42421), 'numpy.mean', 'numpy.mean', (['t'], {}), '(t)\n', (42418, 42421), False, 'import numpy\n'), ((49211, 49230), 'numpy.array', 'numpy.array', (['scores'], {}), '(scores)\n', (49222, 49230), False, 'import numpy\n'), ((50340, 50364), 'thinkbayes2.Hist', 'thinkbayes2.Hist', (['sample'], {}), '(sample)\n', (50356, 50364), False, 'import thinkbayes2\n'), ((51319, 51348), 'thinkplot.Cdf', 'thinkplot.Cdf', (['cdf'], {}), '(cdf, **options)\n', (51332, 51348), False, 'import thinkplot\n'), ((51698, 51787), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""rank"""', 'ylabel': '"""CDF"""', 'xscale': '"""log"""', 'formats': 'FORMATS'}), "(root=root, xlabel='rank', ylabel='CDF', xscale='log',\n formats=FORMATS)\n", (51712, 51787), False, 'import thinkplot\n'), ((606, 629), 'shelve.open', 'shelve.open', (['shelf_file'], {}), '(shelf_file)\n', (617, 629), False, 'import shelve\n'), ((4209, 4231), 'thinkbayes2.Cdf', 'thinkbayes2.Cdf', (['items'], {}), '(items)\n', (4224, 4231), False, 'import thinkbayes2\n'), ((6045, 6062), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (6060, 6062), False, 'import thinkbayes2\n'), ((6844, 6859), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (6857, 6859), False, 'import thinkplot\n'), ((6868, 6892), 'thinkplot.PrePlot', 'thinkplot.PrePlot', ([], {'num': '(1)'}), '(num=1)\n', (6885, 6892), False, 'import thinkplot\n'), ((6902, 6920), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (6915, 6920), False, 'import thinkplot\n'), ((6976, 7065), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Number of species"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS'}), "(root=root, xlabel='Number of species', ylabel='Prob',\n formats=FORMATS)\n", (6990, 7065), False, 'import thinkplot\n'), ((7330, 7345), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (7343, 7345), False, 'import thinkplot\n'), 
((7354, 7378), 'thinkplot.PrePlot', 'thinkplot.PrePlot', ([], {'num': '(5)'}), '(num=5)\n', (7371, 7378), False, 'import thinkplot\n'), ((7509, 7613), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Prevalence"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS', 'axis': '[0, 0.3, 0, 1]'}), "(root=root, xlabel='Prevalence', ylabel='Prob', formats=\n FORMATS, axis=[0, 0.3, 0, 1])\n", (7523, 7613), False, 'import thinkplot\n'), ((8743, 8758), 'thinkplot.Clf', 'thinkplot.Clf', ([], {}), '()\n', (8756, 8758), False, 'import thinkplot\n'), ((8876, 8932), 'thinkplot.Pmf', 'thinkplot.Pmf', (['mix'], {'color': '"""blue"""', 'alpha': '(0.9)', 'linewidth': '(2)'}), "(mix, color='blue', alpha=0.9, linewidth=2)\n", (8889, 8932), False, 'import thinkplot\n'), ((8986, 9106), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Prevalence"""', 'ylabel': '"""Prob"""', 'formats': 'FORMATS', 'axis': '[0, 0.3, 0, 0.3]', 'legend': '(False)'}), "(root=root, xlabel='Prevalence', ylabel='Prob', formats=\n FORMATS, axis=[0, 0.3, 0, 0.3], legend=False)\n", (9000, 9106), False, 'import thinkplot\n'), ((10349, 10366), 'random.shuffle', 'random.shuffle', (['t'], {}), '(t)\n', (10363, 10366), False, 'import random\n'), ((10450, 10473), 'thinkbayes2.Hist', 'thinkbayes2.Hist', (['reads'], {}), '(reads)\n', (10466, 10473), False, 'import thinkbayes2\n'), ((13086, 13118), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {'label': 'self.code'}), '(label=self.code)\n', (13101, 13118), False, 'import thinkbayes2\n'), ((14492, 14514), 'thinkbayes2.Cdf', 'thinkbayes2.Cdf', (['fracs'], {}), '(fracs)\n', (14507, 14514), False, 'import thinkbayes2\n'), ((19200, 19261), 'thinkplot.Plot', 'thinkplot.Plot', (['xs', 'ys'], {'color': 'color', 'alpha': '(0.3)', 'linewidth': '(0.5)'}), '(xs, ys, color=color, alpha=0.3, linewidth=0.5)\n', (19214, 19261), False, 'import thinkplot\n'), ((20129, 20177), 'thinkplot.Plot', 'thinkplot.Plot', (['xs', 'ys'], {'color': 'color', 
'linewidth': '(1)'}), '(xs, ys, color=color, linewidth=1)\n', (20143, 20177), False, 'import thinkplot\n'), ((20850, 20889), 'thinkbayes2.Suite.__init__', 'thinkbayes2.Suite.__init__', (['self', 'hypos'], {}), '(self, hypos)\n', (20876, 20889), False, 'import thinkbayes2\n'), ((21124, 21160), 'thinkbayes2.Suite.Update', 'thinkbayes2.Suite.Update', (['self', 'data'], {}), '(self, data)\n', (21148, 21160), False, 'import thinkbayes2\n'), ((21846, 21886), 'thinkbayes2.BinomialCoef', 'thinkbayes2.BinomialCoef', (['dirichlet.n', 'm'], {}), '(dirichlet.n, m)\n', (21870, 21886), False, 'import thinkbayes2\n'), ((21990, 22007), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (22005, 22007), False, 'import thinkbayes2\n'), ((23939, 23970), 'numpy.random.gamma', 'numpy.random.gamma', (['self.params'], {}), '(self.params)\n', (23957, 23970), False, 'import numpy\n'), ((24033, 24053), 'numpy.cumsum', 'numpy.cumsum', (['gammas'], {}), '(gammas)\n', (24045, 24053), False, 'import numpy\n'), ((24273, 24293), 'numpy.max', 'numpy.max', (['log_likes'], {}), '(log_likes)\n', (24282, 24293), False, 'import numpy\n'), ((24310, 24330), 'numpy.exp', 'numpy.exp', (['log_likes'], {}), '(log_likes)\n', (24319, 24330), False, 'import numpy\n'), ((24973, 24990), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (24988, 24990), False, 'import thinkbayes2\n'), ((25356, 25396), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['n'], {'conc': 'self.conc'}), '(n, conc=self.conc)\n', (25377, 25396), False, 'import thinkbayes2\n'), ((26201, 26240), 'thinkbayes2.Beta', 'thinkbayes2.Beta', (['alpha', '(alpha0 - alpha)'], {}), '(alpha, alpha0 - alpha)\n', (26217, 26240), False, 'import thinkbayes2\n'), ((26494, 26511), 'thinkbayes2.Pmf', 'thinkbayes2.Pmf', ([], {}), '()\n', (26509, 26511), False, 'import thinkbayes2\n'), ((26692, 26724), 'thinkbayes2.MakeMixture', 'thinkbayes2.MakeMixture', (['metapmf'], {}), '(metapmf)\n', (26715, 26724), False, 'import thinkbayes2\n'), ((27446, 
27472), 'numpy.random.gamma', 'numpy.random.gamma', (['params'], {}), '(params)\n', (27464, 27472), False, 'import numpy\n'), ((28826, 28857), 'numpy.random.gamma', 'numpy.random.gamma', (['self.params'], {}), '(self.params)\n', (28844, 28857), False, 'import numpy\n'), ((29514, 29534), 'numpy.max', 'numpy.max', (['log_likes'], {}), '(log_likes)\n', (29523, 29534), False, 'import numpy\n'), ((29551, 29571), 'numpy.exp', 'numpy.exp', (['log_likes'], {}), '(log_likes)\n', (29560, 29571), False, 'import numpy\n'), ((32658, 32689), 'numpy.random.gamma', 'numpy.random.gamma', (['self.params'], {}), '(self.params)\n', (32676, 32689), False, 'import numpy\n'), ((33022, 33042), 'numpy.max', 'numpy.max', (['log_likes'], {}), '(log_likes)\n', (33031, 33042), False, 'import numpy\n'), ((33059, 33079), 'numpy.exp', 'numpy.exp', (['log_likes'], {}), '(log_likes)\n', (33068, 33079), False, 'import numpy\n'), ((34049, 34067), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (34062, 34067), False, 'import thinkplot\n'), ((34557, 34575), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (34570, 34575), False, 'import thinkplot\n'), ((35227, 35245), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (35240, 35245), False, 'import thinkplot\n'), ((36307, 36325), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (36320, 36325), False, 'import thinkplot\n'), ((36759, 36777), 'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {}), '(pmf)\n', (36772, 36777), False, 'import thinkplot\n'), ((36830, 36874), 'thinkbayes2.PmfProbGreater', 'thinkbayes2.PmfProbGreater', (['pmfs[0]', 'pmfs[1]'], {}), '(pmfs[0], pmfs[1])\n', (36856, 36874), False, 'import thinkbayes2\n'), ((36898, 36939), 'thinkbayes2.PmfProbLess', 'thinkbayes2.PmfProbLess', (['pmfs[0]', 'pmfs[1]'], {}), '(pmfs[0], pmfs[1])\n', (36921, 36939), False, 'import thinkbayes2\n'), ((39847, 39869), 'random.shuffle', 'random.shuffle', (['sample'], {}), '(sample)\n', (39861, 39869), False, 'import 
random\n'), ((39919, 39946), 'thinkbayes2.Hist', 'thinkbayes2.Hist', (['subsample'], {}), '(subsample)\n', (39935, 39946), False, 'import thinkbayes2\n'), ((41542, 41570), 'thinkplot.Plot', 'thinkplot.Plot', (['ms', 'expected'], {}), '(ms, expected)\n', (41556, 41570), False, 'import thinkplot\n'), ((44340, 44390), 'thinkplot.Plot', 'thinkplot.Plot', (['[0, high]', '[0, high]'], {'color': '"""gray"""'}), "([0, high], [0, high], color='gray')\n", (44354, 44390), False, 'import thinkplot\n'), ((44399, 44424), 'thinkplot.Scatter', 'thinkplot.Scatter', (['xs', 'ys'], {}), '(xs, ys)\n', (44416, 44424), False, 'import thinkplot\n'), ((44433, 44497), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Actual n"""', 'ylabel': '"""Predicted"""'}), "(root=root, xlabel='Actual n', ylabel='Predicted')\n", (44447, 44497), False, 'import thinkplot\n'), ((44676, 44724), 'thinkplot.Plot', 'thinkplot.Plot', (['[0, 0.2]', '[0, 0.2]'], {'color': '"""gray"""'}), "([0, 0.2], [0, 0.2], color='gray')\n", (44690, 44724), False, 'import thinkplot\n'), ((44767, 44792), 'thinkplot.Scatter', 'thinkplot.Scatter', (['xs', 'ys'], {}), '(xs, ys)\n', (44784, 44792), False, 'import thinkplot\n'), ((44801, 44865), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Actual q"""', 'ylabel': '"""Predicted"""'}), "(root=root, xlabel='Actual q', ylabel='Predicted')\n", (44815, 44865), False, 'import thinkplot\n'), ((45030, 45076), 'thinkplot.Plot', 'thinkplot.Plot', (['[0, 20]', '[0, 20]'], {'color': '"""gray"""'}), "([0, 20], [0, 20], color='gray')\n", (45044, 45076), False, 'import thinkplot\n'), ((45119, 45144), 'thinkplot.Scatter', 'thinkplot.Scatter', (['xs', 'ys'], {}), '(xs, ys)\n', (45136, 45144), False, 'import thinkplot\n'), ((45153, 45217), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'xlabel': '"""Actual l"""', 'ylabel': '"""Predicted"""'}), "(root=root, xlabel='Actual l', ylabel='Predicted')\n", (45167, 45217), False, 'import 
thinkplot\n'), ((45451, 45510), 'thinkplot.Plot', 'thinkplot.Plot', (['[0, 100]', '[0, 100]'], {'color': '"""gray"""', 'alpha': '(0.2)'}), "([0, 100], [0, 100], color='gray', alpha=0.2)\n", (45465, 45510), False, 'import thinkplot\n'), ((45615, 45663), 'thinkplot.Plot', 'thinkplot.Plot', (['self.ps', 'self.total_q'], {'label': '"""q"""'}), "(self.ps, self.total_q, label='q')\n", (45629, 45663), False, 'import thinkplot\n'), ((45672, 45720), 'thinkplot.Plot', 'thinkplot.Plot', (['self.ps', 'self.total_l'], {'label': '"""l"""'}), "(self.ps, self.total_l, label='l')\n", (45686, 45720), False, 'import thinkplot\n'), ((45730, 45862), 'thinkplot.Save', 'thinkplot.Save', ([], {'root': 'root', 'axis': '[0, 100, 0, 100]', 'xlabel': '"""Ideal percentages"""', 'ylabel': '"""Predictive distributions"""', 'formats': 'FORMATS'}), "(root=root, axis=[0, 100, 0, 100], xlabel='Ideal percentages',\n ylabel='Predictive distributions', formats=FORMATS)\n", (45744, 45862), False, 'import thinkplot\n'), ((46540, 46575), 'random.randrange', 'random.randrange', (['n_low', '(n_high + 1)'], {}), '(n_low, n_high + 1)\n', (46556, 46575), False, 'import random\n'), ((50041, 50076), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['n'], {'conc': 'conc'}), '(n, conc=conc)\n', (50062, 50076), False, 'import thinkbayes2\n'), ((50923, 50946), 'random.choice', 'random.choice', (['subjects'], {}), '(subjects)\n', (50936, 50946), False, 'import random\n'), ((51628, 51657), 'thinkplot.Cdf', 'thinkplot.Cdf', (['cdf'], {}), '(cdf, **options)\n', (51641, 51657), False, 'import thinkplot\n'), ((4444, 4482), 'numpy.array', 'numpy.array', (['counts'], {'dtype': 'numpy.float'}), '(counts, dtype=numpy.float)\n', (4455, 4482), False, 'import numpy\n'), ((8309, 8327), 'thinkplot.Cdf', 'thinkplot.Cdf', (['cdf'], {}), '(cdf)\n', (8322, 8327), False, 'import thinkplot\n'), ((8354, 8372), 'thinkplot.Pmf', 'thinkplot.Pmf', (['mix'], {}), '(mix)\n', (8367, 8372), False, 'import thinkplot\n'), ((8808, 8866), 
'thinkplot.Pmf', 'thinkplot.Pmf', (['pmf'], {'color': '"""blue"""', 'alpha': '(0.2)', 'linewidth': '(0.5)'}), "(pmf, color='blue', alpha=0.2, linewidth=0.5)\n", (8821, 8866), False, 'import thinkplot\n'), ((20798, 20828), 'thinkbayes2.Dirichlet', 'thinkbayes2.Dirichlet', (['n', 'conc'], {}), '(n, conc)\n', (20819, 20828), False, 'import thinkbayes2\n'), ((22560, 22602), 'numpy.ones', 'numpy.ones', (['self.ns[-1]'], {'dtype': 'numpy.float'}), '(self.ns[-1], dtype=numpy.float)\n', (22570, 22602), False, 'import numpy\n'), ((22971, 23005), 'numpy.ones', 'numpy.ones', (['num'], {'dtype': 'numpy.float'}), '(num, dtype=numpy.float)\n', (22981, 23005), False, 'import numpy\n'), ((24349, 24379), 'thinkbayes2.BinomialCoef', 'thinkbayes2.BinomialCoef', (['n', 'm'], {}), '(n, m)\n', (24373, 24379), False, 'import thinkbayes2\n'), ((29020, 29040), 'numpy.cumsum', 'numpy.cumsum', (['gammas'], {}), '(gammas)\n', (29032, 29040), False, 'import numpy\n'), ((29340, 29356), 'numpy.log', 'numpy.log', (['array'], {}), '(array)\n', (29349, 29356), False, 'import numpy\n'), ((29684, 29714), 'thinkbayes2.BinomialCoef', 'thinkbayes2.BinomialCoef', (['n', 'm'], {}), '(n, m)\n', (29708, 29714), False, 'import thinkbayes2\n'), ((30131, 30149), 'numpy.zeros', 'numpy.zeros', (['(i + 1)'], {}), '(i + 1)\n', (30142, 30149), False, 'import numpy\n'), ((32769, 32789), 'numpy.cumsum', 'numpy.cumsum', (['gammas'], {}), '(gammas)\n', (32781, 32789), False, 'import numpy\n'), ((32915, 32928), 'numpy.log', 'numpy.log', (['ps'], {}), '(ps)\n', (32924, 32928), False, 'import numpy\n'), ((45557, 45605), 'thinkplot.Plot', 'thinkplot.Plot', (['self.ps', 'self.total_n'], {'label': '"""n"""'}), "(self.ps, self.total_n, label='n')\n", (45571, 45605), False, 'import thinkplot\n'), ((2892, 2907), 'random.random', 'random.random', ([], {}), '()\n', (2905, 2907), False, 'import random\n'), ((12168, 12199), 'random.uniform', 'random.uniform', (['(-jitter)', 'jitter'], {}), '(-jitter, jitter)\n', (12182, 12199), 
False, 'import random\n'), ((18385, 18408), 'random.uniform', 'random.uniform', (['(-dx)', 'dx'], {}), '(-dx, dx)\n', (18399, 18408), False, 'import random\n'), ((18427, 18450), 'random.uniform', 'random.uniform', (['(-dy)', 'dy'], {}), '(-dy, dy)\n', (18441, 18450), False, 'import random\n'), ((24156, 24169), 'numpy.log', 'numpy.log', (['ps'], {}), '(ps)\n', (24165, 24169), False, 'import numpy\n')] |
#!/usr/bin/env python
import argparse
import os
from datetime import datetime
import subprocess
import numpy as np
import atmodat_checklib.utils.output_directory_util as output_directory
import atmodat_checklib.utils.summary_creation_util as summary_creation
from atmodat_checklib.utils.env_util import set_env_variables
def main():
    """Entry point: runs the AtMoDat/CF checks selected on the command line.

    Raises:
        RuntimeError: on an invalid output path, a missing input file/path,
            or when no netCDF files are found.
    """
    # Set environment variables required by the checkers
    udunits2_xml_path, atmodat_cvs = set_env_variables()
    os.environ['PYESSV_ARCHIVE_HOME'] = os.path.join(atmodat_cvs, 'pyessv-archive')
    os.environ['UDUNITS2_XML_PATH'] = udunits2_xml_path
    idiryml = os.path.join(atmodat_cvs, '')

    # record start time
    start_time = datetime.now()

    # read command line input
    args = command_line_parse()
    verbose = args.verbose
    ifile = args.file
    ipath = args.path
    ipath_norec = args.path_no_recursive
    opath_in = args.opath
    cfversion = args.cfversion
    whatchecks = args.whatchecks
    parsed_summary = args.summary

    # Define output path for checker output
    if opath_in:
        # user-defined opath
        if opath_in.strip() == '/':
            raise RuntimeError('Root directory should not be defined as output path!')
        opath = os.path.abspath(opath_in)
    else:
        # default path with subdirectory containing timestamp of check
        opath = os.path.join(os.getcwd(), 'atmodat_checker_output', '')

    # Define version of CF table against which the files shall be checked.
    # Default is auto --> CF table version parsed from global attribute 'Conventions'.
    # '1.3' is accepted to match the range advertised in the --help text.
    if cfversion != 'auto':
        if cfversion not in ('1.3', '1.4', '1.5', '1.6', '1.7', '1.8'):
            print('User-defined -cfv option invalid; using \'auto\' instead')
            cfversion = 'auto'

    # Let user choose if files shall be checked against (AT) the ATMODAT metadata
    # specifications (excluding the CF checker), (CF) the CF Conventions or
    # (both) against both. Default is both.
    if whatchecks != 'both':
        if whatchecks not in ('AT', 'CF'):
            print('User-defined -check option invalid; using \'both\' instead')
            whatchecks = 'both'

    # Check that either ifile or ipath exist
    if not ifile and not ipath and not ipath_norec:
        raise RuntimeError('No file and path given')

    check_types = ['atmodat', 'CF']
    if whatchecks == 'CF':
        check_types.remove('atmodat')
    elif whatchecks == 'AT':
        check_types.remove('CF')

    # Create directory for checker output
    opath_run = output_directory.create_directories(opath, check_types)

    # Run checks
    file_counter = 0
    if ifile and not (ipath or ipath_norec):
        if ifile.endswith(".nc"):
            if os.path.isfile(ifile):
                run_checks([ifile], verbose, check_types, cfversion, opath_run, idiryml)
                file_counter = 1
            else:
                raise RuntimeError('File: ' + ifile + ' does not exist')
        else:
            # message fixed: the closing quote around ".nc" was missing
            print('Skipping ' + ifile + ' as it does not end with ".nc"')
    elif (ipath or ipath_norec) and not ifile:
        if ipath:
            files_all = output_directory.return_files_in_directory_tree(ipath)
        else:
            files_all = output_directory.return_files_in_directory(ipath_norec)
        file_nc = [file for file in files_all if file.endswith(".nc")]
        if len(file_nc) == 0:
            # report whichever input path option was actually given
            # (ipath is None when -pnr was used; concatenating it crashed)
            raise RuntimeError('No netCDF files found in: ' + (ipath or ipath_norec))
        else:
            run_checks(file_nc, verbose, check_types, cfversion, opath_run, idiryml)
            file_counter = len(file_nc)

    # Create summary of results if specified
    if parsed_summary:
        summary_creation.create_output_summary(file_counter, opath_run, check_types)

    # Create/refresh a symbolic link to the latest checker output.
    # lexists (not isdir) so a dangling symlink is also removed; otherwise
    # os.symlink would fail with FileExistsError.
    latest_link = os.path.join(opath, 'latest')
    if os.path.lexists(latest_link):
        os.unlink(latest_link)
    os.symlink(opath_run, latest_link)

    # Calculate run time of this script
    print("--- %.4f seconds for checking %s files---"
          % ((datetime.now() - start_time).total_seconds(), file_counter))
def utf8len(string_in):
    """Returns the size of *string_in* in bytes when encoded as UTF-8."""
    encoded = string_in.encode('utf-8')
    return len(encoded)
def cmd_string_checker(io_in, idiryml_in):
    """Builds the compliance-checker command line for the AtMoDat checks.

    io_in: tuple of (input netCDF file paths, '-o <path>' output options)
    idiryml_in: directory (including trailing separator) containing
        the atmodat_standard_checks.yml file

    Returns: the command string
    """
    ifiles, ofiles = io_in
    return ('compliance-checker --y ' + idiryml_in + 'atmodat_standard_checks.yml'
            + ' -f json_new -f text ' + " ".join(ofiles)
            + ' --test atmodat_standard:3.0 ' + " ".join(ifiles))
def cmd_string_cf(ifiles_in, cf_version_in):
    """Builds the cfchecks command line.

    ifiles_in: iterable of netCDF file paths
    cf_version_in: CF table version string (e.g. '1.6' or 'auto')

    Returns: the command string
    """
    joined_files = " ".join(ifiles_in)
    return f'cfchecks -v {cf_version_in} {joined_files}'
def cmd_string_creation(check_in, ifiles_in, opath_file_in, filenames_base_in, idiryml_in, cf_version_in):
    """Builds the checker command line(s) for one check type.

    Long file lists are split over several commands so that no single
    command string exceeds max_string_len bytes (OS argument limits).

    check_in: 'atmodat' or 'CF'
    ifiles_in: list of input netCDF files
    opath_file_in: root output directory of this checker run
    filenames_base_in: basenames of the input files without '.nc'
    idiryml_in: directory containing atmodat_standard_checks.yml
    cf_version_in: CF table version string (used for the CF check only)

    Returns: list of command strings
    """
    # upper bound for one command line, in bytes
    max_string_len = 131072
    cmd_out = []
    if check_in == 'atmodat':
        # List of output files ('-o <path>' option, one per input file)
        ofiles_checker = ['-o ' + os.path.join(opath_file_in, check_in, filename_base + '_' + check_in + '_result.json')
                          for filename_base in filenames_base_in]
        # Output string creation
        tmp_cmd_out = cmd_string_checker((ifiles_in, ofiles_checker), idiryml_in)
        num_split = int(np.ceil(utf8len(tmp_cmd_out) / max_string_len))
        # never split into more chunks than there are files
        if num_split > len(ifiles_in):
            num_split = len(ifiles_in)
        # Split if size of string exceeds max_string_len
        if num_split < 2:
            cmd_out.append(tmp_cmd_out)
        else:
            # zip keeps each input chunk aligned with its '-o' options chunk
            for io_files in zip(np.array_split(ifiles_in, num_split), np.array_split(ofiles_checker, num_split)):
                cmd_out.append(cmd_string_checker(io_files, idiryml_in))
    elif check_in == 'CF':
        tmp_cmd_out = cmd_string_cf(ifiles_in, cf_version_in)
        num_split = int(np.ceil(utf8len(tmp_cmd_out) / max_string_len))
        if num_split > len(ifiles_in):
            num_split = len(ifiles_in)
        if num_split < 2:
            cmd_out.append(tmp_cmd_out)
        else:
            # cfchecks writes to stdout, so only the inputs need splitting
            for split_ifiles in np.array_split(ifiles_in, num_split):
                cmd_out.append(cmd_string_cf(split_ifiles, cf_version_in))
    return cmd_out
def run_checks(ifile_in, verbose_in, check_types_in, cfversion_in, opath_file, idiryml_in):
    """Runs all requested checks on the given files and writes the results.

    ifile_in: list of netCDF input file paths
    verbose_in: bool, also print the checker reports to stdout
    check_types_in: list containing 'atmodat' and/or 'CF'
    cfversion_in: CF table version string
    opath_file: root output directory of this checker run
    idiryml_in: directory containing atmodat_standard_checks.yml
    """
    def _strip_nc_suffix(name):
        # Removes one trailing '.nc' extension, if present. str.rstrip('.nc')
        # would strip ANY run of trailing '.', 'n', 'c' characters
        # (e.g. 'scan.nc' -> 'sca'), corrupting the output filenames.
        return name[:-3] if name.endswith('.nc') else name

    def _print_banner(title_line):
        # reproduces the original banner layout around a checker name
        print('')
        print('==============================================================================')
        print(title_line)
        print('==============================================================================')
        print('')

    # Get base filename and output path
    filenames_base = [_strip_nc_suffix(os.path.basename(os.path.realpath(f))) for f in ifile_in]

    for check in check_types_in:
        # Remove preexisting checker output for this check type
        opath_checks = os.path.join(opath_file, check, '')
        for old_file in os.listdir(opath_checks):
            os.remove(os.path.join(opath_checks, old_file))

        if check == 'atmodat':
            # compliance-checker writes its own json/text output (-o options)
            cmd_checker = cmd_string_creation(check, ifile_in, opath_file, filenames_base, idiryml_in, cfversion_in)
            for cmd in cmd_checker:
                subprocess.run(cmd, shell=True)
        elif check == 'CF':
            # cfchecks writes to stdout; capture it and split it per input file
            cmd_cf = cmd_string_creation(check, ifile_in, opath_file, filenames_base, idiryml_in, cfversion_in)
            output_string_all = []
            for cmd in cmd_cf:
                output_string_all.append(subprocess.run(cmd, shell=True, capture_output=True, text=True).stdout)
            output_string = ''.join(output_string_all)
            split_string = 'CHECKING NetCDF FILE'
            # drop element 0: the preamble before the first per-file header
            output_string_files = output_string.split(split_string)[1::]
            for ofile_data_cf in zip(filenames_base, output_string_files):
                ofile_cf = os.path.join(opath_file, 'CF', '') + ofile_data_cf[0] + '_' + check + '_result.txt'
                with open(ofile_cf, 'w', encoding='utf-8') as f_cf:
                    f_cf.write(split_string + ofile_data_cf[1])

    for filename_base in filenames_base:
        for check in check_types_in:
            file_verbose = os.path.join(opath_file, check, '') + filename_base + '_' + check + '_result.txt'
            if verbose_in:
                with open(file_verbose, 'r', encoding='utf-8') as f_verbose:
                    if check == 'CF':
                        _print_banner('===============================CF-Checker=====================================')
                        print(f_verbose.read())
                    elif check == 'atmodat':
                        _print_banner('===============================AtMoDat-Checks=================================')
                        print(f_verbose.read())
            # Clean-up: the atmodat text report is only an intermediate file
            if check == 'atmodat':
                if os.path.isfile(file_verbose):
                    os.remove(file_verbose)
    return
def command_line_parse():
    """Build the checker-suite argument parser and parse ``sys.argv``.

    Returns:
        argparse.Namespace: the parsed command line options.
    """
    arg_parser = argparse.ArgumentParser(description="Run the AtMoDat checks suits.")

    # General behaviour flags.
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", default=False,
        help="Print output of checkers (longer runtime due to double call of checkers)")
    arg_parser.add_argument(
        "-op", "--opath", default=False,
        help="Define custom path where checker output shall be written")
    arg_parser.add_argument(
        "-cfv", "--cfversion", default='auto',
        help="Define custom CF table version against which the file shall be "
             "checked. Valid are versions from 1.3 to 1.8. "
             "Example: \"-cfv 1.6\". Default is 'auto'")
    arg_parser.add_argument(
        "-check", "--whatchecks", default='both',
        help="Define if AtMoDat or CF check or both shall be executed. "
             "Valid options: AT, CF, both. Example: \"-check CF\". "
             "Default is 'both'")
    arg_parser.add_argument(
        "-s", "--summary", action="store_true", default=False,
        help="Create summary of checker output")

    # Exactly one input source may be given: a single file, a recursive
    # directory walk, or a flat (non-recursive) directory scan.
    input_group = arg_parser.add_mutually_exclusive_group()
    input_group.add_argument("-f", "--file", help="Processes the given file")
    input_group.add_argument(
        "-p", "--path",
        help="Processes all files in a given path and subdirectories "
             "(recursive file search)")
    input_group.add_argument(
        "-pnr", "--path_no_recursive",
        help="Processes all files in a given directory")

    return arg_parser.parse_args()
# Script entry point: run the checker workflow when executed directly
# (``main`` is defined earlier in this file).
if __name__ == "__main__":
    main()
| [
"os.listdir",
"atmodat_checklib.utils.output_directory_util.create_directories",
"argparse.ArgumentParser",
"atmodat_checklib.utils.output_directory_util.return_files_in_directory",
"subprocess.run",
"os.path.join",
"os.getcwd",
"os.path.isfile",
"datetime.datetime.now",
"atmodat_checklib.utils.su... | [((407, 426), 'atmodat_checklib.utils.env_util.set_env_variables', 'set_env_variables', ([], {}), '()\n', (424, 426), False, 'from atmodat_checklib.utils.env_util import set_env_variables\n'), ((467, 510), 'os.path.join', 'os.path.join', (['atmodat_cvs', '"""pyessv-archive"""'], {}), "(atmodat_cvs, 'pyessv-archive')\n", (479, 510), False, 'import os\n'), ((582, 611), 'os.path.join', 'os.path.join', (['atmodat_cvs', '""""""'], {}), "(atmodat_cvs, '')\n", (594, 611), False, 'import os\n'), ((654, 668), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (666, 668), False, 'from datetime import datetime\n'), ((1364, 1413), 'os.path.join', 'os.path.join', (['opath', '"""atmodat_checker_output"""', '""""""'], {}), "(opath, 'atmodat_checker_output', '')\n", (1376, 1413), False, 'import os\n'), ((2534, 2589), 'atmodat_checklib.utils.output_directory_util.create_directories', 'output_directory.create_directories', (['opath', 'check_types'], {}), '(opath, check_types)\n', (2569, 2589), True, 'import atmodat_checklib.utils.output_directory_util as output_directory\n'), ((3849, 3878), 'os.path.join', 'os.path.join', (['opath', '"""latest"""'], {}), "(opath, 'latest')\n", (3861, 3878), False, 'import os\n'), ((3886, 3912), 'os.path.isdir', 'os.path.isdir', (['latest_link'], {}), '(latest_link)\n', (3899, 3912), False, 'import os\n'), ((9358, 9426), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run the AtMoDat checks suits."""'}), "(description='Run the AtMoDat checks suits.')\n", (9381, 9426), False, 'import argparse\n'), ((1194, 1219), 'os.path.abspath', 'os.path.abspath', (['opath_in'], {}), '(opath_in)\n', (1209, 1219), False, 'import os\n'), ((1340, 1351), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1349, 1351), False, 'import os\n'), ((3699, 3775), 'atmodat_checklib.utils.summary_creation_util.create_output_summary', 'summary_creation.create_output_summary', (['file_counter', 'opath_run', 
'check_types'], {}), '(file_counter, opath_run, check_types)\n', (3737, 3775), True, 'import atmodat_checklib.utils.summary_creation_util as summary_creation\n'), ((3922, 3944), 'os.unlink', 'os.unlink', (['latest_link'], {}), '(latest_link)\n', (3931, 3944), False, 'import os\n'), ((3971, 4000), 'os.path.join', 'os.path.join', (['opath', '"""latest"""'], {}), "(opath, 'latest')\n", (3983, 4000), False, 'import os\n'), ((6610, 6645), 'os.path.join', 'os.path.join', (['opath_file', 'check', '""""""'], {}), "(opath_file, check, '')\n", (6622, 6645), False, 'import os\n'), ((6670, 6694), 'os.listdir', 'os.listdir', (['opath_checks'], {}), '(opath_checks)\n', (6680, 6694), False, 'import os\n'), ((2723, 2744), 'os.path.isfile', 'os.path.isfile', (['ifile'], {}), '(ifile)\n', (2737, 2744), False, 'import os\n'), ((3137, 3191), 'atmodat_checklib.utils.output_directory_util.return_files_in_directory_tree', 'output_directory.return_files_in_directory_tree', (['ipath'], {}), '(ipath)\n', (3184, 3191), True, 'import atmodat_checklib.utils.output_directory_util as output_directory\n'), ((3230, 3285), 'atmodat_checklib.utils.output_directory_util.return_files_in_directory', 'output_directory.return_files_in_directory', (['ipath_norec'], {}), '(ipath_norec)\n', (3272, 3285), True, 'import atmodat_checklib.utils.output_directory_util as output_directory\n'), ((5029, 5119), 'os.path.join', 'os.path.join', (['opath_file_in', 'check_in', "(filename_base + '_' + check_in + '_result.json')"], {}), "(opath_file_in, check_in, filename_base + '_' + check_in +\n '_result.json')\n", (5041, 5119), False, 'import os\n'), ((5618, 5654), 'numpy.array_split', 'np.array_split', (['ifiles_in', 'num_split'], {}), '(ifiles_in, num_split)\n', (5632, 5654), True, 'import numpy as np\n'), ((5656, 5697), 'numpy.array_split', 'np.array_split', (['ofiles_checker', 'num_split'], {}), '(ofiles_checker, num_split)\n', (5670, 5697), True, 'import numpy as np\n'), ((6125, 6161), 'numpy.array_split', 
'np.array_split', (['ifiles_in', 'num_split'], {}), '(ifiles_in, num_split)\n', (6139, 6161), True, 'import numpy as np\n'), ((6718, 6754), 'os.path.join', 'os.path.join', (['opath_checks', 'old_file'], {}), '(opath_checks, old_file)\n', (6730, 6754), False, 'import os\n'), ((6957, 6988), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (6971, 6988), False, 'import subprocess\n'), ((9159, 9187), 'os.path.isfile', 'os.path.isfile', (['file_verbose'], {}), '(file_verbose)\n', (9173, 9187), False, 'import os\n'), ((6455, 6474), 'os.path.realpath', 'os.path.realpath', (['f'], {}), '(f)\n', (6471, 6474), False, 'import os\n'), ((9209, 9232), 'os.remove', 'os.remove', (['file_verbose'], {}), '(file_verbose)\n', (9218, 9232), False, 'import os\n'), ((4101, 4115), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4113, 4115), False, 'from datetime import datetime\n'), ((7236, 7299), 'subprocess.run', 'subprocess.run', (['cmd'], {'shell': '(True)', 'capture_output': '(True)', 'text': '(True)'}), '(cmd, shell=True, capture_output=True, text=True)\n', (7250, 7299), False, 'import subprocess\n'), ((7909, 7944), 'os.path.join', 'os.path.join', (['opath_file', 'check', '""""""'], {}), "(opath_file, check, '')\n", (7921, 7944), False, 'import os\n'), ((7588, 7622), 'os.path.join', 'os.path.join', (['opath_file', '"""CF"""', '""""""'], {}), "(opath_file, 'CF', '')\n", (7600, 7622), False, 'import os\n')] |
import cma
import logging as log
import numpy as np
import os.path
import pickle
import humblerl as hrl
from humblerl import ChainInterpreter, Mind, Worker
from memory import build_rnn_model, MDNInterpreter
from vision import BasicInterpreter, build_vae_model
from common_utils import ReturnTracker, create_directory, get_model_path_if_exists
def compute_weight_decay(weight_decay, model_param_list):
    """Return the L2 penalty for each parameter vector in a population.

    Args:
        weight_decay (float): L2 decay coefficient.
        model_param_list: 2-D array-like, one row per population member.

    Returns:
        np.ndarray: per-row penalty, ``weight_decay * mean(params ** 2)``.
    """
    params = np.asarray(model_param_list)
    return weight_decay * np.mean(np.square(params), axis=1)
class CMAES:
    """Agent using CMA-ES algorithm.

    Args:
        n_params (int)       : Number of model parameters (NN weights).
        sigma_init (float)   : Initial standard deviation. (Default: 0.1)
        popsize (int)        : Population size. (Default: 100)
        weight_decay (float) : L2 weight decay rate. (Default: 0.01)
    """

    def __init__(self, n_params, sigma_init=0.1, popsize=100, weight_decay=0.01):
        self.weight_decay = weight_decay
        self.population = None
        self.es = cma.CMAEvolutionStrategy(n_params * [0], sigma_init, {'popsize': popsize})
        self.best_score = -np.inf

    def check_if_better(self, new_best_score):
        """If new score is better than current then update self best score"""
        improved = new_best_score > self.best_score
        if improved:
            self.best_score = new_best_score
        return improved

    def ask(self):
        """Returns a list of parameters for new population."""
        self.population = np.array(self.es.ask())
        return self.population

    def tell(self, returns):
        """Report episode returns for the last asked population."""
        rewards = np.array(returns)
        if self.weight_decay > 0:
            # Subtract the L2 penalty of each candidate from its reward.
            rewards = rewards - compute_weight_decay(self.weight_decay, self.population)

        # CMA-ES minimizes, so negate the rewards before handing them over.
        self.es.tell(self.population, (-1 * rewards).tolist())

    def current_param(self):
        return self.es.result[5]  # mean solution, presumably better with noise

    def best_param(self):
        return self.es.result[0]  # best evaluated solution

    def save_es_ckpt_and_mind_weights(self, ckpt_path, mind_path, score):
        """Pickle the CMA-ES state and the best/mean Mind weights to disk."""
        # Make sure both output directories exist.
        create_directory(os.path.dirname(ckpt_path))
        mind_dir = os.path.dirname(mind_path)
        create_directory(mind_dir)

        # Derive sibling paths for the best and mean weights checkpoints.
        mind_name = os.path.basename(mind_path).split('.')[0]
        best_path = os.path.join(mind_dir, mind_name + "_best.ckpt")
        mean_path = os.path.join(mind_dir, mind_name + "_mean.ckpt")

        with open(os.path.abspath(ckpt_path), 'wb') as ckpt_file:
            pickle.dump(self, ckpt_file)
        log.debug("Saved CMA-ES checkpoint in path: %s", ckpt_path)

        # The best weights are only written when the score improved.
        if self.check_if_better(score):
            log.info("New best score: %f", score)
            with open(os.path.abspath(best_path), 'wb') as best_file:
                pickle.dump(self.best_param(), best_file)
        with open(os.path.abspath(mean_path), 'wb') as mean_file:
            pickle.dump(self.current_param(), mean_file)
        log.debug("Saved Mind weights in path: %s", mind_path)

    @staticmethod
    def load_ckpt(path):
        """Unpickle and return a previously saved CMAES object."""
        with open(os.path.abspath(path), 'rb') as ckpt_file:
            return pickle.load(ckpt_file)
class Evaluator(Worker):
    """HumbleRL worker that evaluates controller weights in the environment.

    The gym environment and the vision/memory interpreters are built lazily
    in :meth:`initialize` rather than in the constructor.
    """

    def __init__(self, config, state_size, action_space, vae_path, mdn_path):
        self.config = config
        self.state_size = state_size
        self.action_space = action_space
        self.vae_path = vae_path
        self.mdn_path = mdn_path

        self._env = None
        self._basic_interpreter = None
        self._mdn_interpreter = None

    def initialize(self):
        """Create the environment and the interpreter pipeline."""
        self._env = hrl.create_gym(self.config.general['game_name'])
        self._basic_interpreter, self._mdn_interpreter = self._interpreter_factory()

    def mind_factory(self, weights):
        """Return a LinearModel Mind carrying the given flat weight vector."""
        mind = LinearModel(self.state_size, self.action_space)
        mind.set_weights(weights)
        return mind

    @property
    def callbacks(self):
        # Expose the return tracker and the MDN interpreter as callbacks.
        return [ReturnTracker(), self._mdn_interpreter]

    @property
    def interpreter(self):
        return ChainInterpreter(self._basic_interpreter, self._mdn_interpreter)

    def _interpreter_factory(self):
        """Load VAE and MDN-RNN checkpoints and wrap them in interpreters."""
        # Build VAE model and load checkpoint
        _, encoder, _ = build_vae_model(self.config.vae,
                                        self.config.general['state_shape'],
                                        self.vae_path)
        # Build MDN-RNN model and load checkpoint
        rnn = build_rnn_model(self.config.rnn,
                              self.config.vae['latent_space_dim'],
                              self.action_space,
                              self.mdn_path)

        basic = BasicInterpreter(
            state_shape=self.config.general['state_shape'],
            crop_range=self.config.general['crop_range'])
        mdn = MDNInterpreter(encoder, rnn.model, self.config.vae['latent_space_dim'])
        return basic, mdn
class LinearModel(Mind):
    """Simple linear regression agent."""

    def __init__(self, input_dim, action_space):
        self.in_dim = input_dim
        self.action_space = action_space
        self.out_dim = action_space.num
        self.is_discrete = isinstance(action_space, hrl.environments.Discrete)

        # One extra input row serves as the bias term.
        self.weights = np.zeros((self.in_dim + 1, self.out_dim))

    def plan(self, state, train_mode, debug_mode):
        # Append a constant 1 so the last weight row acts as a bias.
        action_vec = np.concatenate((state, [1.])) @ self.weights

        # Discrete: Treat action_vec as logits and pass them to humblerl
        # Continuous: Treat action_vec as action to perform and use tanh
        #             to bound its values to [-1, 1]
        if self.is_discrete:
            return action_vec
        return np.tanh(action_vec)

    def set_weights(self, weights):
        """Fill the weight matrix in place from a flat vector."""
        self.weights[:] = weights.reshape(self.in_dim + 1, self.out_dim)

    @property
    def n_weights(self):
        """Total number of trainable parameters (including the bias row)."""
        return (self.in_dim + 1) * self.out_dim

    @staticmethod
    def load_weights(path):
        """Unpickle and return a flat weight vector from ``path``."""
        with open(os.path.abspath(path), 'rb') as weights_file:
            return pickle.load(weights_file)
def build_mind(es_params, input_dim, action_space, model_path):
    """Builds linear regression controller model.

    Args:
        es_params (dict): CMA-ES training parameters from .json config.
        input_dim (int): Should be interpreter latent space dim. + memory hidden state size.
        action_space (hrl.environments.ActionSpace): Action space, discrete or continuous.
        model_path (str): Path to Mind weights.

    Returns:
        LinearModel: HumbleRL 'Mind' with weights loaded from file if available.
    """
    mind = LinearModel(input_dim, action_space)
    weights = LinearModel.load_weights(path=model_path)
    mind.set_weights(weights)
    log.info("Loaded Mind weights from: %s", model_path)
    return mind
def build_es_model(es_params, n_params, model_path=None):
    """Builds CMA-ES solver.

    Args:
        es_params (dict): CMA-ES training parameters from .json config.
        n_params (int): Number of parameters for CMA-ES.
        model_path (str): Path to CMA-ES ckpt. Taken from .json config if `None` (Default: None)

    Returns:
        CMAES: CMA-ES solver ready for training.
    """
    model_path = get_model_path_if_exists(
        path=model_path, default_path=es_params['ckpt_path'], model_name="CMA-ES")

    if model_path is None:
        # No checkpoint found: start a fresh solver from the config values.
        solver = CMAES(
            n_params=n_params, popsize=es_params['popsize'], weight_decay=es_params['l2_decay'])
        log.info("CMA-ES parameters in \"%s\" doesn't exist! "
                 "Created solver with pop. size: %d and l2 decay: %f.",
                 es_params['ckpt_path'], es_params['popsize'], es_params['l2_decay'])
    else:
        solver = CMAES.load_ckpt(model_path)
        log.info("Loaded CMA-ES parameters from: %s", model_path)
    return solver
| [
"logging.debug",
"memory.MDNInterpreter",
"humblerl.ChainInterpreter",
"numpy.array",
"cma.CMAEvolutionStrategy",
"logging.info",
"vision.BasicInterpreter",
"numpy.mean",
"numpy.tanh",
"numpy.concatenate",
"common_utils.create_directory",
"humblerl.create_gym",
"pickle.load",
"common_utils... | [((428, 454), 'numpy.array', 'np.array', (['model_param_list'], {}), '(model_param_list)\n', (436, 454), True, 'import numpy as np\n'), ((6852, 6904), 'logging.info', 'log.info', (['"""Loaded Mind weights from: %s"""', 'model_path'], {}), "('Loaded Mind weights from: %s', model_path)\n", (6860, 6904), True, 'import logging as log\n'), ((7337, 7441), 'common_utils.get_model_path_if_exists', 'get_model_path_if_exists', ([], {'path': 'model_path', 'default_path': "es_params['ckpt_path']", 'model_name': '"""CMA-ES"""'}), "(path=model_path, default_path=es_params[\n 'ckpt_path'], model_name='CMA-ES')\n", (7361, 7441), False, 'from common_utils import ReturnTracker, create_directory, get_model_path_if_exists\n'), ((481, 533), 'numpy.mean', 'np.mean', (['(model_param_grid * model_param_grid)'], {'axis': '(1)'}), '(model_param_grid * model_param_grid, axis=1)\n', (488, 533), True, 'import numpy as np\n'), ((1056, 1130), 'cma.CMAEvolutionStrategy', 'cma.CMAEvolutionStrategy', (['(n_params * [0])', 'sigma_init', "{'popsize': popsize}"], {}), "(n_params * [0], sigma_init, {'popsize': popsize})\n", (1080, 1130), False, 'import cma\n'), ((1654, 1671), 'numpy.array', 'np.array', (['returns'], {}), '(returns)\n', (1662, 1671), True, 'import numpy as np\n'), ((2431, 2457), 'common_utils.create_directory', 'create_directory', (['mind_dir'], {}), '(mind_dir)\n', (2447, 2457), False, 'from common_utils import ReturnTracker, create_directory, get_model_path_if_exists\n'), ((2825, 2884), 'logging.debug', 'log.debug', (['"""Saved CMA-ES checkpoint in path: %s"""', 'ckpt_path'], {}), "('Saved CMA-ES checkpoint in path: %s', ckpt_path)\n", (2834, 2884), True, 'import logging as log\n'), ((3828, 3876), 'humblerl.create_gym', 'hrl.create_gym', (["self.config.general['game_name']"], {}), "(self.config.general['game_name'])\n", (3842, 3876), True, 'import humblerl as hrl\n'), ((4270, 4334), 'humblerl.ChainInterpreter', 'ChainInterpreter', (['self._basic_interpreter', 
'self._mdn_interpreter'], {}), '(self._basic_interpreter, self._mdn_interpreter)\n', (4286, 4334), False, 'from humblerl import ChainInterpreter, Mind, Worker\n'), ((4442, 4530), 'vision.build_vae_model', 'build_vae_model', (['self.config.vae', "self.config.general['state_shape']", 'self.vae_path'], {}), "(self.config.vae, self.config.general['state_shape'], self.\n vae_path)\n", (4457, 4530), False, 'from vision import BasicInterpreter, build_vae_model\n'), ((4671, 4779), 'memory.build_rnn_model', 'build_rnn_model', (['self.config.rnn', "self.config.vae['latent_space_dim']", 'self.action_space', 'self.mdn_path'], {}), "(self.config.rnn, self.config.vae['latent_space_dim'], self.\n action_space, self.mdn_path)\n", (4686, 4779), False, 'from memory import build_rnn_model, MDNInterpreter\n'), ((5439, 5480), 'numpy.zeros', 'np.zeros', (['(self.in_dim + 1, self.out_dim)'], {}), '((self.in_dim + 1, self.out_dim))\n', (5447, 5480), True, 'import numpy as np\n'), ((7531, 7588), 'logging.info', 'log.info', (['"""Loaded CMA-ES parameters from: %s"""', 'model_path'], {}), "('Loaded CMA-ES parameters from: %s', model_path)\n", (7539, 7588), True, 'import logging as log\n'), ((7728, 7912), 'logging.info', 'log.info', (['"""CMA-ES parameters in "%s" doesn\'t exist! Created solver with pop. size: %d and l2 decay: %f."""', "es_params['ckpt_path']", "es_params['popsize']", "es_params['l2_decay']"], {}), '(\n \'CMA-ES parameters in "%s" doesn\\\'t exist! Created solver with pop. 
size: %d and l2 decay: %f.\'\n , es_params[\'ckpt_path\'], es_params[\'popsize\'], es_params[\'l2_decay\'])\n', (7736, 7912), True, 'import logging as log\n'), ((2796, 2816), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (2807, 2816), False, 'import pickle\n'), ((2938, 2975), 'logging.info', 'log.info', (['"""New best score: %f"""', 'score'], {}), "('New best score: %f', score)\n", (2946, 2975), True, 'import logging as log\n'), ((3215, 3269), 'logging.debug', 'log.debug', (['"""Saved Mind weights in path: %s"""', 'mind_path'], {}), "('Saved Mind weights in path: %s', mind_path)\n", (3224, 3269), True, 'import logging as log\n'), ((3386, 3400), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3397, 3400), False, 'import pickle\n'), ((4173, 4188), 'common_utils.ReturnTracker', 'ReturnTracker', ([], {}), '()\n', (4186, 4188), False, 'from common_utils import ReturnTracker, create_directory, get_model_path_if_exists\n'), ((4882, 4997), 'vision.BasicInterpreter', 'BasicInterpreter', ([], {'state_shape': "self.config.general['state_shape']", 'crop_range': "self.config.general['crop_range']"}), "(state_shape=self.config.general['state_shape'], crop_range\n =self.config.general['crop_range'])\n", (4898, 4997), False, 'from vision import BasicInterpreter, build_vae_model\n'), ((5031, 5102), 'memory.MDNInterpreter', 'MDNInterpreter', (['encoder', 'rnn.model', "self.config.vae['latent_space_dim']"], {}), "(encoder, rnn.model, self.config.vae['latent_space_dim'])\n", (5045, 5102), False, 'from memory import build_rnn_model, MDNInterpreter\n'), ((5554, 5584), 'numpy.concatenate', 'np.concatenate', (['(state, [1.0])'], {}), '((state, [1.0]))\n', (5568, 5584), True, 'import numpy as np\n'), ((5849, 5868), 'numpy.tanh', 'np.tanh', (['action_vec'], {}), '(action_vec)\n', (5856, 5868), True, 'import numpy as np\n'), ((6186, 6200), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6197, 6200), False, 'import pickle\n')] |
import numpy as np
from abc import ABC, abstractmethod
class AbstractGOL(ABC):
    """Base class for Conway Game of Life implementations."""

    def __init__(self, config, seed=None):
        """
        Abstract Conway Game of Life.

        :param config: configuration for this GOL instance (cell survival and generation settings)
        :param seed: optional seed for numpy's global RNG (note: a falsy seed such as 0 is ignored)
        """
        self.config = config
        if seed:
            np.random.seed(seed)

    @abstractmethod
    def update(self):
        """
        Update status of the grid
        """

    @abstractmethod
    def get_neighbours_count(self, index):
        """Return the number of live neighbours of the cell at ``index``."""

    def get_cell_newstate(self, cell_currstate, neighbours_count):
        """Apply the birth/survival rules from ``self.config`` to one cell."""
        born = self.config['neighbours_count_born']
        low = self.config['neighbours_mincount_survive']
        high = self.config['neighbours_maxcount_survive']

        if neighbours_count == born:
            return 1  # a new cell is born
        if low <= neighbours_count <= high:
            return cell_currstate  # cell survives unchanged
        return 0  # under- or over-population kills the cell
"numpy.random.seed"
] | [((354, 374), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (368, 374), True, 'import numpy as np\n')] |
# Copyright 2021 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constrained Quadratic Model class.
"""
import collections.abc as abc
import json
import re
import tempfile
import uuid
import warnings
import zipfile
from numbers import Number
from typing import Hashable, Optional, Union, BinaryIO, ByteString, Iterable, Collection, Dict
from typing import Callable, MutableMapping, Iterator, Tuple, Mapping, Any
import numpy as np
from dimod.core.bqm import BQM as BQMabc
from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm
from dimod.discrete.discrete_quadratic_model import DiscreteQuadraticModel
from dimod.quadratic import QuadraticModel
from dimod.sym import Comparison, Eq, Le, Ge, Sense
from dimod.serialization.fileview import SpooledTemporaryFile, _BytesIO
from dimod.serialization.fileview import load, read_header, write_header
from dimod.typing import Bias, Variable
from dimod.utilities import new_label
from dimod.variables import Variables, serialize_variable, deserialize_variable
from dimod.vartypes import Vartype, as_vartype, VartypeLike
# Names exported by ``from <module> import *``.
__all__ = ['ConstrainedQuadraticModel', 'CQM', 'cqm_to_bqm']

# Magic byte prefix identifying serialized constrained quadratic model data.
CQM_MAGIC_PREFIX = b'DIMODCQM'
class ConstrainedQuadraticModel:
r"""A constrained quadratic model.
Constrained quadratic models are problems of the form:
.. math::
\begin{align}
\text{Minimize an objective:} & \\
& \sum_{i} a_i x_i + \sum_{i<j} b_{ij} x_i x_j + c, \\
\text{Subject to constraints:} & \\
& \sum_i a_i^{(c)} x_i + \sum_{i<j} b_{ij}^{(c)} x_i x_j+ c^{(c)} \le 0,
\quad c=1, \dots, C_{\rm ineq.}, \\
& \sum_i a_i^{(d)} x_i + \sum_{i<j} b_{ij}^{(d)} x_i x_j + c^{(d)} = 0,
\quad d=1, \dots, C_{\rm eq.},
\end{align}
where :math:`\{ x_i\}_{i=1, \dots, N}` can be binary or integer
variables, :math:`a_{i}, b_{ij}, c` are real values and
:math:`C_{\rm ineq.}, C_{\rm eq,}` are the number of inequality and
equality constraints respectively.
The objective and constraints are encoded as either :class:`.QuadraticModel`
or :class:`.BinaryQuadraticModel` depending on the variable types used.
Example:
Solve a simple `bin packing problem <https://w.wiki/3jz4>`_. In this
problem we wish to pack a set of items of different weights into
the smallest number of bins possible.
See :func:`~dimod.generators.bin_packing` for a general function to
generate bin packing problems. We follow the same naming conventions
in this example.
Let's start with four object weights and assume that each bin has a
capacity of 1.
>>> weights = [.9, .7, .2, .1]
>>> capacity = 1
Let :math:`y_j` indicate that we used bin :math:`j`. We know that we
will use four or fewer total bins.
>>> y = [dimod.Binary(f'y_{j}') for j in range(len(weights))]
Let :math:`x_{i,j}` indicate that we put item :math:`i` in bin
:math:`j`.
>>> x = [[dimod.Binary(f'x_{i}_{j}') for j in range(len(weights))]
... for i in range(len(weights))]
Create an empty constrained quadratic model with no objective or
constraints.
>>> cqm = dimod.ConstrainedQuadraticModel()
We wish to minimize the number of bins used. Therefore our objective
is to minimize the value of :math:`\sum_j y_j`.
>>> cqm.set_objective(sum(y))
We also need to enforce the constraint that each item can only go
in one bin. We can express this constraint, for a given item :math:`i`,
with :math:`\sum_j x_{i, j} == 1`. Note that the label of each
constraint is returned so that we can access them in the future if
desired.
>>> for i in range(len(weights)):
... cqm.add_constraint(sum(x[i]) == 1, label=f'item_placing_{i}')
'item_placing_0'
'item_placing_1'
'item_placing_2'
'item_placing_3'
Finally, we need to enforce the limits on each bin. We can express
this constraint, for a given bin :math:`j`, with
:math:`\sum_i x_{i, j} * w_i <= c` where :math:`w_i` is the weight
of item :math:`i` and :math:`c` is the capacity.
>>> for j in range(len(weights)):
... cqm.add_constraint(
... sum(weights[i] * x[i][j] for i in range(len(weights))) - y[j] * capacity <= 0,
... label=f'capacity_bin_{j}')
'capacity_bin_0'
'capacity_bin_1'
'capacity_bin_2'
'capacity_bin_3'
"""
def __init__(self):
# discrete variable tracking, we probably can do this with less memory
# but for now let's keep it simple
self.discrete: Set[Hashable] = set() # collection of discrete constraints
self._discrete: Set[Variable] = set() # collection of all variables used in discrete
self._objective = QuadraticModel()
@property
def constraints(self) -> Dict[Hashable, Comparison]:
"""The constraints as a dictionary.
This dictionary and its contents should not be modified.
"""
try:
return self._constraints
except AttributeError:
pass
self._constraints: Dict[Hashable, Comparison] = {}
return self._constraints
    @property
    def objective(self) -> QuadraticModel:
        """The objective to be minimized.

        Created empty in ``__init__``; populated via ``set_objective``.
        """
        return self._objective
    @property
    def variables(self) -> Variables:
        """The variables in use over the objective and all constraints."""
        # Cached after first access; this is the objective's Variables view.
        try:
            return self._variables
        except AttributeError:
            pass

        self._variables = variables = self.objective.variables

        # to support backwards compatibility (0.10.0 - 0.10.5), we annotate
        # this object with some attributes. All of these will be removed in
        # 0.11.0
        def vartype(v):
            # Deprecated accessor kept for old callers; forwards to
            # ``self.vartype`` after emitting a DeprecationWarning.
            warnings.warn(
                "cqm.variables.vartype(v) is deprecated and will be removed in dimod 0.11.0, "
                "use cqm.vartype(v) instead.", DeprecationWarning, stacklevel=2)
            return self.vartype(v)

        variables.vartype = vartype  # method
        variables.vartypes = _Vartypes(self)
        variables.lower_bounds = _LowerBounds(self)
        variables.upper_bounds = _UpperBounds(self)

        return variables
def _add_variables_from(self, model: Union[BinaryQuadraticModel, QuadraticModel]):
# todo: singledispatchmethod in 3.8+
if isinstance(model, (BinaryQuadraticModel, BQMabc)):
vartype = model.vartype
for v in model.variables:
self.objective.add_variable(vartype, v)
elif isinstance(model, QuadraticModel):
for v in model.variables:
# for spin, binary variables the bounds are ignored anyway
self.objective.add_variable(model.vartype(v), v,
lower_bound=model.lower_bound(v),
upper_bound=model.upper_bound(v))
else:
raise TypeError("model should be a QuadraticModel or a BinaryQuadraticModel")
def add_constraint(self, data, *args, **kwargs) -> Hashable:
"""A convenience wrapper for other methods that add constraints.
Examples:
>>> from dimod import ConstrainedQuadraticModel, Integers
>>> i, j = Integers(['i', 'j'])
>>> cqm = ConstrainedQuadraticModel()
>>> cqm.add_constraint(i + j <= 3, label='Constrained i-j range')
'Constrained i-j range'
See also:
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_model`
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_comparison`
:meth:`~.ConstrainedQuadraticModel.add_constraint_from_iterable`
"""
# in python 3.8+ we can use singledispatchmethod
if isinstance(data, (BinaryQuadraticModel, QuadraticModel, BQMabc)):
return self.add_constraint_from_model(data, *args, **kwargs)
elif isinstance(data, Comparison):
return self.add_constraint_from_comparison(data, *args, **kwargs)
elif isinstance(data, Iterable):
return self.add_constraint_from_iterable(data, *args, **kwargs)
else:
raise TypeError("unexpected data format")
def add_constraint_from_model(self,
qm: Union[BinaryQuadraticModel, QuadraticModel],
sense: Union[Sense, str],
rhs: Bias = 0,
label: Optional[Hashable] = None,
copy: bool = True) -> Hashable:
"""Add a constraint from a quadratic model.
Args:
qm: A quadratic model or binary quadratic model.
sense: One of `<=', '>=', '=='.
rhs: The right hand side of the constraint.
label: A label for the constraint. Must be unique. If no label
is provided, then one is generated using :mod:`uuid`.
copy: If `True`, the BQM is copied. This can be set to `False` to
improve performance, but subsequently mutating the bqm can
cause issues.
Returns:
The label of the added constraint.
Examples:
>>> from dimod import ConstrainedQuadraticModel, Binary
>>> cqm = ConstrainedQuadraticModel()
>>> x = Binary('x')
>>> cqm.add_constraint_from_model(x, '>=', 0, 'Min x')
'Min x'
"""
variables = self.variables
# get sense as an enum
if isinstance(sense, str):
sense = Sense(sense)
if label is None:
# we support up to 100k constraints and :6 gives us 16777216
# possible so pretty safe
label = uuid.uuid4().hex[:6]
while label in self.constraints:
label = uuid.uuid4().hex[:6]
elif label in self.constraints:
raise ValueError("a constraint with that label already exists")
if isinstance(qm, BQMabc):
qm = as_bqm(qm) # handle legacy BQMs
self._add_variables_from(qm)
if copy:
qm = qm.copy()
if sense is Sense.Le:
self.constraints[label] = Le(qm, rhs)
elif sense is Sense.Ge:
self.constraints[label] = Ge(qm, rhs)
elif sense is Sense.Eq:
self.constraints[label] = Eq(qm, rhs)
else:
raise RuntimeError("unexpected sense")
return label
def add_constraint_from_comparison(self,
comp: Comparison,
label: Optional[Hashable] = None,
copy: bool = True) -> Hashable:
"""Add a constraint from a comparison.
Args:
comp: A comparison object.
label: A label for the constraint. Must be unique. If no label
is provided, one is generated using :mod:`uuid`.
copy: If `True`, the model is copied. You can set to `False` to
improve performance, but subsequently mutating the model can
cause issues.
Returns:
Label of the added constraint.
Examples:
>>> from dimod import ConstrainedQuadraticModel, Integer
>>> i = Integer('i')
>>> cqm = ConstrainedQuadraticModel()
>>> cqm.add_constraint_from_comparison(i <= 3, label='Max i')
'Max i'
"""
if not isinstance(comp.rhs, Number):
raise TypeError("comparison should have a numeric rhs")
if isinstance(comp.lhs, (BinaryQuadraticModel, QuadraticModel)):
return self.add_constraint_from_model(comp.lhs, comp.sense, rhs=comp.rhs,
label=label, copy=copy)
else:
raise ValueError("comparison should have a binary quadratic model "
"or quadratic model lhs.")
def add_constraint_from_iterable(self, iterable: Iterable,
                                 sense: Union[Sense, str],
                                 rhs: Bias = 0,
                                 label: Optional[Hashable] = None,
                                 ) -> Hashable:
    """Add a constraint given as an iterable of ``(*variables, bias)`` terms.

    Args:
        iterable: Terms as tuples: ``(bias,)`` for a constant,
            ``(v, bias)`` for a linear term and ``(u, v, bias)`` for a
            quadratic term. All variables must already exist in the model.
        sense: One of `<=', '>=', '=='.
        rhs: Right-hand side of the constraint.
        label: Unique label for the constraint; generated with
            :mod:`uuid` when omitted.

    Returns:
        The label of the added constraint.
    """
    model = QuadraticModel()

    def _register(v):
        # Mirror the variable's vartype (and, for non-spin/binary
        # variables, its bounds) from this CQM into the new model.
        vt = self.vartype(v)
        if vt is Vartype.SPIN or vt is Vartype.BINARY:
            model.add_variable(vt, v)
        else:
            model.add_variable(vt, v,
                               lower_bound=self.lower_bound(v),
                               upper_bound=self.upper_bound(v))

    for *variables, bias in iterable:
        arity = len(variables)
        if arity == 0:
            model.offset += bias
        elif arity == 1:
            v, = variables
            _register(v)
            model.add_linear(v, bias)
        elif arity == 2:
            u, v = variables
            _register(u)
            _register(v)
            model.add_quadratic(u, v, bias)
        else:
            raise ValueError("terms must be constant, linear or quadratic")

    # use quadratic model in the future
    return self.add_constraint_from_model(
        model, sense, rhs=rhs, label=label, copy=False)
def add_discrete(self, variables: Collection[Variable],
                 label: Optional[Hashable] = None) -> Hashable:
    """Add an iterable of binary variables as a disjoint one-hot constraint.

    Adds a special kind of one-hot constraint. These one-hot constraints
    must be disjoint, that is they must not have any overlapping variables.

    Args:
        variables: An iterable of variables.
        label: Label for the constraint. Must be unique. If no label
            is provided, then one is generated using :mod:`uuid`.

    Returns:
        Label of the added constraint.

    Raises:
        ValueError: If any of the given variables have already been added
            to the model with any vartype other than `BINARY`.
        ValueError: If any of the given variables are already used in
            another discrete variable.
    """
    if label is not None and label in self.constraints:
        raise ValueError("a constraint with that label already exists")
    # validate up front so the model is untouched on failure
    for v in variables:
        if v in self._discrete:
            # todo: language around discrete variables?
            raise ValueError(f"variable {v!r} is already used in a discrete variable")
        elif v in self.variables and self.vartype(v) != Vartype.BINARY:
            raise ValueError(f"variable {v!r} has already been added but is not BINARY")
    # we can! So add them
    bqm = BinaryQuadraticModel('BINARY', dtype=np.float32)
    bqm.add_variables_from((v, 1) for v in variables)
    # one-hot: exactly one of the binary variables is 1
    label = self.add_constraint(bqm == 1, label=label)
    # track the label and the variables so future add_discrete calls can
    # enforce disjointness
    self.discrete.add(label)
    self._discrete.update(variables)
    return label
def add_variable(self, v: Variable, vartype: VartypeLike,
                 *, lower_bound: int = 0, upper_bound: Optional[int] = None):
    """Add a variable to the model.

    Args:
        v: A variable label.
        vartype:
            Variable type. One of:

            * :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
            * :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
            * :class:`.Vartype.INTEGER`, ``'INTEGER'``

        lower_bound:
            A lower bound on the variable. Ignored when the variable is
            not :class:`Vartype.INTEGER`.
        upper_bound:
            An upper bound on the variable. Ignored when the variable is
            not :class:`Vartype.INTEGER`.

    Returns:
        The variable label.

    Raises:
        ValueError: If the variable already exists with a different vartype.

    Examples:
        >>> from dimod import ConstrainedQuadraticModel, Integer
        >>> cqm = ConstrainedQuadraticModel()
        >>> cqm.add_variable('i', 'INTEGER')  # doctest: +IGNORE_RESULT
    """
    if v in self.variables:
        # re-adding an existing variable is a no-op, but the vartype must
        # be consistent with the original addition
        if as_vartype(vartype, extended=True) != self.vartype(v):
            raise ValueError("given variable has already been added with a different vartype")
        # bug fix: previously this branch fell through and returned None,
        # inconsistent with the return value of the add path below
        return v
    # all variables (and their bounds) live on the objective
    return self.objective.add_variable(vartype, v, lower_bound=lower_bound, upper_bound=upper_bound)
@classmethod
def from_bqm(cls, bqm: BinaryQuadraticModel) -> 'ConstrainedQuadraticModel':
    """Construct a CQM with ``bqm`` as its objective.

    Alias for :meth:`from_quadratic_model`.
    """
    cqm = cls.from_quadratic_model(bqm)
    return cqm
@classmethod
def from_discrete_quadratic_model(cls, dqm: DiscreteQuadraticModel, *,
                                  relabel_func: Callable[[Variable, int], Variable] = lambda v, c: (v, c),
                                  ) -> 'ConstrainedQuadraticModel':
    """Construct a constrained quadratic model from a discrete quadratic model.

    Each discrete variable becomes a one-hot constraint over one binary
    variable per case; the DQM biases are copied onto those binaries.

    Args:
        dqm: a discrete quadratic model.
        relabel_func (optional): A function that takes two arguments, the
            variable label and the case label, and returns a new variable
            label to be used in the CQM. By default generates a 2-tuple
            `(variable, case)`.

    Returns:
        A constrained quadratic model.
    """
    cqm = cls()
    objective = BinaryQuadraticModel(Vartype.BINARY)
    # track which DQM variables we have already visited so each quadratic
    # interaction is only added once
    seen = set()
    for v in dqm.variables:
        seen.add(v)
        # convert v, case to a flat set of variables
        v_vars = list(relabel_func(v, case) for case in dqm.get_cases(v))
        # add the one-hot constraint
        cqm.add_discrete(v_vars, label=v)
        # add to the objective
        objective.add_linear_from(zip(v_vars, dqm.get_linear(v)))
        for u in dqm.adj[v]:
            if u in seen:  # only want upper-triangle
                continue
            u_vars = list(relabel_func(u, case) for case in dqm.get_cases(u))
            # case-interaction biases become quadratic biases between the
            # corresponding binary variables
            objective.add_quadratic_from(
                (u_vars[cu], v_vars[cv], bias)
                for (cu, cv), bias
                in dqm.get_quadratic(u, v).items()
            )
    objective.offset = dqm.offset
    cqm.set_objective(objective)
    return cqm
from_dqm = from_discrete_quadratic_model
@classmethod
def from_quadratic_model(cls, qm: Union[QuadraticModel, BinaryQuadraticModel]
                         ) -> 'ConstrainedQuadraticModel':
    """Construct a constrained quadratic model from a quadratic model or
    binary quadratic model.

    The given model becomes the objective of the new constrained quadratic
    model (CQM); constraints on feasible solutions can then be added.

    Args:
        qm: Binary quadratic model (BQM) or quadratic model (QM).

    Returns:
        A constrained quadratic model whose objective is ``qm``.
    """
    model = cls()
    model.set_objective(qm)
    return model
@classmethod
def from_qm(cls, qm: QuadraticModel) -> 'ConstrainedQuadraticModel':
    """Construct a CQM with ``qm`` as its objective.

    Alias for :meth:`from_quadratic_model`.
    """
    cqm = cls.from_quadratic_model(qm)
    return cqm
@classmethod
def from_file(cls, fp: Union[BinaryIO, ByteString]) -> "ConstrainedQuadraticModel":
    """Construct from a file-like object.

    The inverse of :meth:`~ConstrainedQuadraticModel.to_file`.

    Args:
        fp: A file-like object (or a byte string) produced by
            :meth:`to_file`.

    Returns:
        The deserialized constrained quadratic model.

    Raises:
        ValueError: If the serialization version is not supported.
    """
    if isinstance(fp, ByteString):
        file_like: BinaryIO = _BytesIO(fp)  # type: ignore[assignment]
    else:
        file_like = fp
    header_info = read_header(file_like, CQM_MAGIC_PREFIX)
    if header_info.version >= (2, 0):
        # bug fix: the message previously said "BQM" even though this is
        # the CQM deserializer
        raise ValueError("cannot load a CQM serialized with version "
                         f"{header_info.version!r}, try upgrading your "
                         "dimod version")
    # we don't actually need the data
    cqm = CQM()
    with zipfile.ZipFile(file_like, mode='r') as zf:
        cqm.set_objective(load(zf.read("objective")))
        # constraints live under constraints/<json label>/ in the archive
        constraint_labels = set()
        for arch in zf.namelist():
            # even on windows zip uses /
            match = re.match("constraints/([^/]+)/", arch)
            if match is not None:
                constraint_labels.add(match.group(1))
        for constraint in constraint_labels:
            lhs = load(zf.read(f"constraints/{constraint}/lhs"))
            rhs = np.frombuffer(zf.read(f"constraints/{constraint}/rhs"), np.float64)[0]
            sense = zf.read(f"constraints/{constraint}/sense").decode('ascii')
            # the discrete file holds a single truthy/falsy byte
            discrete = any(zf.read(f"constraints/{constraint}/discrete"))
            label = deserialize_variable(json.loads(constraint))
            cqm.add_constraint(lhs, rhs=rhs, sense=sense, label=label)
            if discrete:
                cqm.discrete.add(label)
    return cqm
def lower_bound(self, v: Variable) -> Bias:
    """Return the lower bound on the specified variable."""
    # bounds are tracked by the objective, which contains every variable
    return self.objective.lower_bound(v)
def num_biases(self) -> int:
    """Return the total number of biases across the objective and constraints."""
    count = len(self.objective.linear) + len(self.objective.quadratic)
    for comparison in self.constraints.values():
        count += len(comparison.lhs.linear) + len(comparison.lhs.quadratic)
    return count
def num_quadratic_variables(self) -> int:
    """Return the total number of variables with at least one quadratic
    interaction across all constraints."""
    total = 0
    for comparison in self.constraints.values():
        model = comparison.lhs
        # degree > 0 means the variable has at least one quadratic term
        total += sum(1 for v in model.variables if model.degree(v) > 0)
    return total
def set_objective(self, objective: Union[BinaryQuadraticModel, QuadraticModel]):
    """Set the objective of the constrained quadratic model.

    The previous objective's biases are discarded but its variables are
    kept, so variables referenced by constraints remain registered.

    Args:
        objective: Binary quadratic model (BQM) or quadratic model (QM).

    Examples:
        >>> from dimod import Integer, ConstrainedQuadraticModel
        >>> i = Integer('i')
        >>> j = Integer('j')
        >>> cqm = ConstrainedQuadraticModel()
        >>> cqm.set_objective(2*i - 0.5*i*j + 10)
    """
    # clear out current objective, keeping only the variables
    self.objective.quadratic.clear()  # there may be a more performant way...
    for v in self.objective.variables:
        self.objective.set_linear(v, 0)
    # offset is overwritten later
    # now add everything from the new objective
    self._add_variables_from(objective)
    for v in objective.variables:
        self.objective.set_linear(v, objective.get_linear(v))
    self.objective.add_quadratic_from(objective.iter_quadratic())
    self.objective.offset = objective.offset
def _substitute_self_loops_from_model(self, qm: Union[BinaryQuadraticModel, QuadraticModel],
                                      mapping: MutableMapping[Variable, Variable]):
    """Replace each self-loop ``u*u`` in ``qm`` with ``u*new`` for a fresh
    variable ``new``; record ``u -> new`` in ``mapping``.

    Mutates ``qm`` (and the objective) in place; the caller is responsible
    for adding the ``new == u`` constraints afterwards.
    """
    if isinstance(qm, BinaryQuadraticModel):
        # bqms never have self-loops
        return
    for u in qm.variables:
        vartype = qm.vartype(u)
        # spin and binary variables never have self-loops (x*x == x or 1)
        if vartype is Vartype.SPIN or vartype is Vartype.BINARY:
            continue
        try:
            bias = qm.get_quadratic(u, u)
        except ValueError:
            # no self-loop
            continue
        lb = qm.lower_bound(u)
        ub = qm.upper_bound(u)
        if u not in mapping:
            # we've never seen this integer before
            new: Variable = new_label()
            # on the off chance there are conflicts. Luckily self.variables
            # is global accross all constraints/objective so we don't need
            # to worry about accidentally picking something we'll regret
            while new in self.constraints or new in self.variables:
                new = new_label()
            mapping[u] = new
            # register the counterpart with matching vartype and bounds
            self.objective.add_variable(vartype, new, lower_bound=lb, upper_bound=ub)
            # we don't add the constraint yet because we don't want
            # to modify self.constraints
        else:
            new = mapping[u]
        # swap u*u for u*new in this model
        qm.add_variable(vartype, new, lower_bound=lb, upper_bound=ub)
        qm.add_quadratic(u, new, bias)
        qm.remove_interaction(u, u)
def substitute_self_loops(self) -> Dict[Variable, Variable]:
    """Replace any integer self-loops in the objective or constraints.

    Self-loop :math:`i^2` is removed by introducing a new variable
    :math:`j` with interaction :math:`i*j` and adding constraint
    :math:`j == i`.

    Acts on the objective and constraints in-place.

    Returns:
        Mapping from the integer variable labels to their introduced
        counterparts. The constraint enforcing :math:`j == i` uses
        the same label.

    Examples:
        >>> from dimod import Integer, ConstrainedQuadraticModel
        >>> i = Integer('i')
        >>> cqm = ConstrainedQuadraticModel()
        >>> cqm.add_constraint(i*i <=3, label='i squared')
        'i squared'
        >>> cqm.substitute_self_loops()  # doctest: +IGNORE_RESULT
        >>> cqm.constraints  # doctest: +IGNORE_RESULT
        {'i squared': QuadraticModel({'i': 0.0, 'cf651f3d-bdf8-4735-9139-eee0a32e217f': 0.0}, {('cf651f3d-bdf8-4735-9139-eee0a32e217f', 'i'): 1.0}, 0.0, {'i': 'INTEGER', 'cf651f3d-bdf8-4735-9139-eee0a32e217f': 'INTEGER'}, dtype='float64') <= 3,
        'cf651f3d-bdf8-4735-9139-eee0a32e217f': QuadraticModel({'i': 1.0, 'cf651f3d-bdf8-4735-9139-eee0a32e217f': -1.0}, {}, 0.0, {'i': 'INTEGER', 'cf651f3d-bdf8-4735-9139-eee0a32e217f': 'INTEGER'}, dtype='float64') == 0}
    """
    # a single mapping is shared so the same counterpart is reused when a
    # variable self-loops in several models
    mapping: Dict[Variable, Variable] = dict()
    self._substitute_self_loops_from_model(self.objective, mapping)
    for comparison in self.constraints.values():
        self._substitute_self_loops_from_model(comparison.lhs, mapping)
    # finally add the constraints for the variables
    for v, new in mapping.items():
        self.add_constraint([(v, 1), (new, -1)], rhs=0, sense='==', label=new)
    return mapping
def to_file(self, *, spool_size: int = int(1e9)) -> tempfile.SpooledTemporaryFile:
    """Serialize to a file-like object.

    Args:
        spool_size: Defines the `max_size` passed to the constructor of
            :class:`tempfile.SpooledTemporaryFile`. Determines whether
            the returned file-like's contents will be kept on disk or in
            memory.

    Format Specification (Version 1.1):

        This format is inspired by the `NPY format`_

        The first 8 bytes are a magic string: exactly "DIMODCQM".

        The next 1 byte is an unsigned byte: the major version of the file
        format.

        The next 1 byte is an unsigned byte: the minor version of the file
        format.

        The next 4 bytes form a little-endian unsigned int, the length of
        the header data HEADER_LEN.

        The next HEADER_LEN bytes form the header data. This is a
        json-serialized dictionary. The dictionary is exactly:

        .. code-block:: python

            dict(num_variables=len(cqm.variables),
                 num_constraints=len(cqm.constraints),
                 num_biases=cqm.num_biases(),
                 num_quadratic_variables=cqm.num_quadratic_variables(),
                 )

        it is terminated by a newline character and padded with spaces to
        make the entire length of the entire header divisible by 64.

        The constraint quadratic model data comes after the header. It is
        encoded as a zip file. The zip file will contain one file
        named `objective`, containing the objective as encoded as a file
        view. It will also contain a directory called `constraints`. The
        `constraints` directory will contain one subdirectory for each
        constraint, each containing `lhs`, `rhs` and `sense` encoding
        the `lhs` as a fileview, the `rhs` as a float and the sense
        as a string. Each directory will also contain a `discrete` file,
        encoding whether the constraint represents a discrete variable.

    Format Specification (Version 1.0):

        This format is the same as Version 1.1, except that the data dict
        does not have `num_quadratic_variables`.

    .. _NPY format: https://numpy.org/doc/stable/reference/generated/numpy.lib.format.html
    """
    file = SpooledTemporaryFile(max_size=spool_size)
    data = dict(num_variables=len(self.variables),
                num_constraints=len(self.constraints),
                num_biases=self.num_biases(),
                num_quadratic_variables=self.num_quadratic_variables(),
                )
    write_header(file, CQM_MAGIC_PREFIX, data, version=(1, 1))
    # write the values
    with zipfile.ZipFile(file, mode='a') as zf:
        try:
            zf.writestr(
                'objective', self.objective.to_file(spool_size=int(1e12))._file.getbuffer())
        except AttributeError:
            # no objective to write
            pass
        for label, constraint in self.constraints.items():
            # put everything in a constraints/label/ directory
            lstr = json.dumps(serialize_variable(label))
            lhs = constraint.lhs.to_file(spool_size=int(1e12))._file.getbuffer()
            zf.writestr(f'constraints/{lstr}/lhs', lhs)
            rhs = np.float64(constraint.rhs).tobytes()
            zf.writestr(f'constraints/{lstr}/rhs', rhs)
            sense = bytes(constraint.sense.value, 'ascii')
            zf.writestr(f'constraints/{lstr}/sense', sense)
            # a single truthy/falsy byte marks discrete (one-hot) constraints
            discrete = bytes((label in self.discrete,))
            zf.writestr(f'constraints/{lstr}/discrete', discrete)
    # rewind so the caller can read the serialization from the start
    file.seek(0)
    return file
def upper_bound(self, v: Variable) -> Bias:
    """Return the upper bound on the specified variable."""
    # bounds are tracked by the objective, which contains every variable
    return self.objective.upper_bound(v)
def vartype(self, v: Variable) -> Vartype:
    """Return the vartype of the given variable."""
    # vartypes are tracked by the objective, which contains every variable
    return self.objective.vartype(v)
CQM = ConstrainedQuadraticModel
class _Vartypes(abc.Sequence):
    """Support deprecated attribute on ``CQM.variables``"""

    def __init__(self, cqm: ConstrainedQuadraticModel):
        # keep a reference to the owning model so lookups stay live
        self.cqm: ConstrainedQuadraticModel = cqm

    def __getitem__(self, index: int) -> Vartype:
        # stacklevel=3 so the warning points past the Sequence machinery
        # at the user's call site
        warnings.warn(
            "cqm.variables.vartypes[i] is deprecated and will be removed in dimod 0.11.0, "
            "use cqm.vartype(cqm.variables[i]) instead.", DeprecationWarning, stacklevel=3)
        return self.cqm.vartype(self.cqm.variables[index])

    def __len__(self) -> int:
        warnings.warn(
            "cqm.variables.vartypes is deprecated and will be removed in dimod 0.11.0",
            DeprecationWarning, stacklevel=3)
        return len(self.cqm.variables)
class _LowerBounds(abc.Mapping):
    """Support deprecated attribute on ``CQM.variables``"""

    def __init__(self, cqm: ConstrainedQuadraticModel):
        # keep a reference to the owning model so lookups stay live
        self.cqm: ConstrainedQuadraticModel = cqm

    def __getitem__(self, key: Variable) -> float:
        # stacklevel=3 so the warning points past the Mapping machinery
        warnings.warn(
            "cqm.variables.lower_bounds[v] is deprecated and will be removed in dimod 0.11.0, "
            "use cqm.lower_bound(v) instead.", DeprecationWarning, stacklevel=3)
        return self.cqm.lower_bound(key)

    def __iter__(self) -> Iterator[Variable]:
        warnings.warn(
            "cqm.variables.lower_bounds is deprecated and will be removed in dimod 0.11.0",
            DeprecationWarning, stacklevel=3)
        yield from self.cqm.variables

    def __len__(self) -> int:
        warnings.warn(
            "cqm.variables.lower_bounds is deprecated and will be removed in dimod 0.11.0",
            DeprecationWarning, stacklevel=3)
        return len(self.cqm.variables)
class _UpperBounds(abc.Mapping):
    """Support deprecated attribute on ``CQM.variables``"""

    def __init__(self, cqm: ConstrainedQuadraticModel):
        # keep a reference to the owning model so lookups stay live
        self.cqm: ConstrainedQuadraticModel = cqm

    def __getitem__(self, key: Variable) -> float:
        # stacklevel=3 so the warning points past the Mapping machinery
        warnings.warn(
            "cqm.variables.upper_bounds[v] is deprecated and will be removed in dimod 0.11.0, "
            "use cqm.upper_bound(v) instead.", DeprecationWarning, stacklevel=3)
        return self.cqm.upper_bound(key)

    def __iter__(self) -> Iterator[Variable]:
        warnings.warn(
            "cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0",
            DeprecationWarning, stacklevel=3)
        yield from self.cqm.variables

    def __len__(self) -> int:
        warnings.warn(
            "cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0",
            DeprecationWarning, stacklevel=3)
        return len(self.cqm.variables)
def _qm_to_bqm(qm: QuadraticModel, integers: MutableMapping[Variable, BinaryQuadraticModel],
               ) -> BinaryQuadraticModel:
    """Convert ``qm`` to a BINARY bqm, substituting each integer variable
    with its binary encoding from ``integers``.

    ``integers`` maps an integer variable label to the BQM that encodes it.
    """
    # dev note: probably we'll want to make this function or something similar
    # public facing at some point, but right now the interface is pretty weird
    # and it only returns BINARY bqms
    if any(qm.vartype(v) is Vartype.SPIN for v in qm.variables):
        # the output bqm is BINARY so convert any spins first
        qm = qm.spin_to_binary(inplace=False)
    bqm = BinaryQuadraticModel(Vartype.BINARY)
    for v in qm.variables:
        if v in integers:
            # substitute the integer's binary encoding, scaled by its bias
            bqm += qm.get_linear(v) * integers[v]
        else:
            bqm.add_linear(v, qm.get_linear(v))
    for u, v, bias in qm.iter_quadratic():
        if u in integers:
            if v in integers:
                # integer*integer term: product of the two encodings
                bqm += integers[u] * integers[v] * bias
            else:
                bqm += Binary(v) * integers[u] * bias
        elif v in integers:
            bqm += Binary(u) * integers[v] * bias
        else:
            bqm.add_quadratic(u, v, bias)
    bqm.offset += qm.offset
    return bqm
class CQMToBQMInverter:
    """Invert a sample from a binary quadratic model constructed by :func:`cqm_to_bqm`."""

    __slots__ = ('_binary', '_integers')

    def __init__(self,
                 binary: Mapping[Variable, Vartype],
                 integers: Mapping[Variable, BinaryQuadraticModel]):
        # binary: original binary/spin variable -> its vartype
        # integers: original integer variable -> its binary-encoding BQM
        self._binary = binary
        self._integers = integers

    def __call__(self, sample: Mapping[Variable, int]) -> Mapping[Variable, int]:
        """Map a BQM sample back to a sample over the original CQM variables."""
        new = {}
        for v, vartype in self._binary.items():
            if vartype is Vartype.BINARY:
                new[v] = sample[v]
            elif vartype is Vartype.SPIN:
                # recover the spin value from its {0, 1} relabelling
                new[v] = 2*sample[v] - 1
            else:
                raise RuntimeError("unexpected vartype")
        for v, bqm in self._integers.items():
            # reconstruct the integer as the weighted sum of its encoding bits
            # NOTE(review): assumes each encoding variable label is a
            # (label, coefficient) tuple as produced by
            # dimod.generators.binary_encoding — confirm
            new[v] = 0
            for u in bqm.variables:
                new[v] += sample[u] * u[1]
        return new

    @classmethod
    def from_dict(cls, doc: Dict[str, Dict[Variable, Any]]) -> 'CQMToBQMInverter':
        """Construct an inverter from a serialized representation."""
        integers = {}
        for v, variables in doc['integers'].items():
            v = deserialize_variable(v)
            bqm = BinaryQuadraticModel(Vartype.BINARY)
            bqm.add_linear_from((deserialize_variable(u), u[1]) for u in variables)
            integers[v] = bqm
        return cls(
            dict((deserialize_variable(v), as_vartype(vartype))
                 for v, vartype in doc['binary'].items()),
            integers,
        )

    def to_dict(self) -> Dict[str, Dict[Variable, Any]]:
        """Return a json-serializable encoding of the inverter."""
        # todo: in 3.8 we can use TypedDict for the typing
        return dict(
            binary=dict((serialize_variable(v), vartype.name)
                        for v, vartype in self._binary.items()),
            integers=dict((serialize_variable(v), bqm.variables.to_serializable())
                          for v, bqm in self._integers.items()),
        )
# Developer note: This function is *super* ad hoc. In the future, we may want
# A BQM.from_cqm method or similar, but for now I think it makes sense to
# expose that functionality as a function for easier later deprecation.
def cqm_to_bqm(cqm: ConstrainedQuadraticModel, lagrange_multiplier: Optional[Bias] = None,
               ) -> Tuple[BinaryQuadraticModel, CQMToBQMInverter]:
    """Construct a binary quadratic model from a constrained quadratic model.

    Args:
        cqm: A constrained quadratic model. All constraints must be linear
            and all integer variables must have a lower bound of 0.

        lagrange_multiplier: The penalty strength used when converting
            constraints into penalty models. Defaults to 10x the largest
            bias in the objective.

    Returns:
        A 2-tuple containing:

            A binary quadratic model

            A function that converts samples over the binary quadratic model
            back into samples for the constrained quadratic model.

    Example:

        Start with a constrained quadratic model

        >>> num_widget_a = dimod.Integer('num_widget_a', upper_bound=7)
        >>> num_widget_b = dimod.Integer('num_widget_b', upper_bound=3)
        >>> cqm = dimod.ConstrainedQuadraticModel()
        >>> cqm.set_objective(-3 * num_widget_a - 4 * num_widget_b)
        >>> cqm.add_constraint(num_widget_a + num_widget_b <= 5, label='total widgets')
        'total widgets'

        Convert it to a binary quadratic model and solve it using
        :class:`dimod.ExactSolver`.

        >>> bqm, invert = dimod.cqm_to_bqm(cqm)
        >>> sampleset = dimod.ExactSolver().sample(bqm)

        Interpret the answer in the original variable classes

        >>> invert(sampleset.first.sample)
        {'num_widget_a': 2, 'num_widget_b': 3}

        Note that the inverter is also serializable.

        >>> import json
        >>> newinvert = dimod.constrained.CQMToBQMInverter.from_dict(
        ...     json.loads(json.dumps(invert.to_dict())))
        >>> newinvert(sampleset.first.sample)
        {'num_widget_a': 2, 'num_widget_b': 3}
    """
    from dimod.generators.integer import binary_encoding  # avoid circular import
    bqm = BinaryQuadraticModel(Vartype.BINARY)
    binary: Dict[Variable, Vartype] = {}
    integers: Dict[Variable, BinaryQuadraticModel] = {}
    # add the variables
    for v in cqm.variables:
        vartype = cqm.vartype(v)
        if vartype is Vartype.SPIN or vartype is Vartype.BINARY:
            binary[v] = vartype
        elif vartype is Vartype.INTEGER:
            if cqm.lower_bound(v) != 0:
                raise ValueError("integer variables must have a lower bound of 0, "
                                 f"variable {v} has a lower bound of {cqm.lower_bound(v)}")
            # encode the integer as a weighted sum of new binary variables
            v_bqm = integers[v] = binary_encoding(v, int(cqm.upper_bound(v)))
            if not v_bqm.variables.isdisjoint(bqm.variables):
                # this should be pretty unusual, so let's not bend over backwards
                # to accommodate it.
                raise ValueError("given CQM has conflicting variables with ones "
                                 "generated by dimod.generators.binary_encoding")
            bqm.add_variables_from((v, 0) for v in v_bqm.variables)
        else:
            raise RuntimeError("unexpected vartype")
    # objective, we know it's always a QM
    bqm += _qm_to_bqm(cqm.objective, integers)
    if lagrange_multiplier is None:
        if cqm.constraints and bqm.num_variables:
            # default penalty strength: 10x the largest objective bias
            max_bias = max(-bqm.linear.min(), bqm.linear.max())
            if not bqm.is_linear():
                max_bias = max(-bqm.quadratic.min(), bqm.quadratic.max(), max_bias)
            lagrange_multiplier = 10 * max_bias
        else:
            lagrange_multiplier = 0  # doesn't matter
    for constraint in cqm.constraints.values():
        lhs = constraint.lhs
        rhs = constraint.rhs
        sense = constraint.sense
        if isinstance(lhs, QuadraticModel):
            # substitute integer encodings, yielding a BINARY bqm
            lhs = _qm_to_bqm(lhs, integers)
        if not lhs.is_linear():
            raise ValueError("CQM must not have any quadratic constraints")
        if lhs.vartype is Vartype.SPIN:
            lhs = lhs.change_vartype(Vartype.BINARY, inplace=True)
        # at this point we know we have a BINARY bqm
        if sense is Sense.Eq:
            bqm.add_linear_equality_constraint(
                ((v, lhs.get_linear(v)) for v in lhs.variables),
                lagrange_multiplier,
                lhs.offset - rhs,
            )
        elif sense is Sense.Ge:
            # one-sided bound: the other side is effectively unbounded
            bqm.add_linear_inequality_constraint(
                ((v, lhs.get_linear(v)) for v in lhs.variables),
                lagrange_multiplier,
                new_label(),
                constant=lhs.offset,
                lb=rhs,
                ub=np.iinfo(np.int64).max,
            )
        elif sense is Sense.Le:
            bqm.add_linear_inequality_constraint(
                ((v, lhs.get_linear(v)) for v in lhs.variables),
                lagrange_multiplier,
                new_label(),
                constant=lhs.offset,
                lb=np.iinfo(np.int64).min,
                ub=rhs,
            )
        else:
            raise RuntimeError("unexpected sense")
    return bqm, CQMToBQMInverter(binary, integers)
# register fileview loader: lets dimod.serialization.fileview.load dispatch
# serialized CQM payloads (by magic prefix) to ConstrainedQuadraticModel.from_file
load.register(CQM_MAGIC_PREFIX, ConstrainedQuadraticModel.from_file)
| [
"dimod.variables.serialize_variable",
"zipfile.ZipFile",
"dimod.serialization.fileview.load.register",
"numpy.iinfo",
"dimod.utilities.new_label",
"dimod.sym.Eq",
"dimod.serialization.fileview._BytesIO",
"dimod.binary.binary_quadratic_model.as_bqm",
"dimod.serialization.fileview.write_header",
"nu... | [((44947, 45015), 'dimod.serialization.fileview.load.register', 'load.register', (['CQM_MAGIC_PREFIX', 'ConstrainedQuadraticModel.from_file'], {}), '(CQM_MAGIC_PREFIX, ConstrainedQuadraticModel.from_file)\n', (44960, 45015), False, 'from dimod.serialization.fileview import load, read_header, write_header\n'), ((36894, 36930), 'dimod.binary.binary_quadratic_model.BinaryQuadraticModel', 'BinaryQuadraticModel', (['Vartype.BINARY'], {}), '(Vartype.BINARY)\n', (36914, 36930), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((41786, 41822), 'dimod.binary.binary_quadratic_model.BinaryQuadraticModel', 'BinaryQuadraticModel', (['Vartype.BINARY'], {}), '(Vartype.BINARY)\n', (41806, 41822), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((5527, 5543), 'dimod.quadratic.QuadraticModel', 'QuadraticModel', ([], {}), '()\n', (5541, 5543), False, 'from dimod.quadratic import QuadraticModel\n'), ((14295, 14311), 'dimod.quadratic.QuadraticModel', 'QuadraticModel', ([], {}), '()\n', (14309, 14311), False, 'from dimod.quadratic import QuadraticModel\n'), ((16962, 17010), 'dimod.binary.binary_quadratic_model.BinaryQuadraticModel', 'BinaryQuadraticModel', (['"""BINARY"""'], {'dtype': 'np.float32'}), "('BINARY', dtype=np.float32)\n", (16982, 17010), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((19574, 19610), 'dimod.binary.binary_quadratic_model.BinaryQuadraticModel', 'BinaryQuadraticModel', (['Vartype.BINARY'], {}), '(Vartype.BINARY)\n', (19594, 19610), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((22550, 22590), 'dimod.serialization.fileview.read_header', 'read_header', (['file_like', 'CQM_MAGIC_PREFIX'], {}), '(file_like, CQM_MAGIC_PREFIX)\n', (22561, 22590), False, 'from dimod.serialization.fileview import load, read_header, write_header\n'), 
((31921, 31962), 'dimod.serialization.fileview.SpooledTemporaryFile', 'SpooledTemporaryFile', ([], {'max_size': 'spool_size'}), '(max_size=spool_size)\n', (31941, 31962), False, 'from dimod.serialization.fileview import SpooledTemporaryFile, _BytesIO\n'), ((32235, 32293), 'dimod.serialization.fileview.write_header', 'write_header', (['file', 'CQM_MAGIC_PREFIX', 'data'], {'version': '(1, 1)'}), '(file, CQM_MAGIC_PREFIX, data, version=(1, 1))\n', (32247, 32293), False, 'from dimod.serialization.fileview import load, read_header, write_header\n'), ((33960, 34140), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.vartypes[i] is deprecated and will be removed in dimod 0.11.0, use cqm.vartype(cqm.variables[i]) instead."""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.vartypes[i] is deprecated and will be removed in dimod 0.11.0, use cqm.vartype(cqm.variables[i]) instead.'\n , DeprecationWarning, stacklevel=3)\n", (33973, 34140), False, 'import warnings\n'), ((34257, 34389), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.vartypes is deprecated and will be removed in dimod 0.11.0"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.vartypes is deprecated and will be removed in dimod 0.11.0',\n DeprecationWarning, stacklevel=3)\n", (34270, 34389), False, 'import warnings\n'), ((34706, 34879), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.lower_bounds[v] is deprecated and will be removed in dimod 0.11.0, use cqm.lower_bound(v) instead."""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.lower_bounds[v] is deprecated and will be removed in dimod 0.11.0, use cqm.lower_bound(v) instead.'\n , DeprecationWarning, stacklevel=3)\n", (34719, 34879), False, 'import warnings\n'), ((34994, 35131), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.lower_bounds is deprecated and will be removed in dimod 0.11.0"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.lower_bounds is 
deprecated and will be removed in dimod 0.11.0'\n , DeprecationWarning, stacklevel=3)\n", (35007, 35131), False, 'import warnings\n'), ((35224, 35361), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.lower_bounds is deprecated and will be removed in dimod 0.11.0"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.lower_bounds is deprecated and will be removed in dimod 0.11.0'\n , DeprecationWarning, stacklevel=3)\n", (35237, 35361), False, 'import warnings\n'), ((35677, 35850), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.upper_bounds[v] is deprecated and will be removed in dimod 0.11.0, use cqm.upper_bound(v) instead."""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.upper_bounds[v] is deprecated and will be removed in dimod 0.11.0, use cqm.upper_bound(v) instead.'\n , DeprecationWarning, stacklevel=3)\n", (35690, 35850), False, 'import warnings\n'), ((35965, 36102), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0'\n , DeprecationWarning, stacklevel=3)\n", (35978, 36102), False, 'import warnings\n'), ((36195, 36332), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0"""', 'DeprecationWarning'], {'stacklevel': '(3)'}), "(\n 'cqm.variables.upper_bounds is deprecated and will be removed in dimod 0.11.0'\n , DeprecationWarning, stacklevel=3)\n", (36208, 36332), False, 'import warnings\n'), ((6557, 6721), 'warnings.warn', 'warnings.warn', (['"""cqm.variables.vartype(v) is deprecated and will be removed in dimod 0.11.0, use cqm.vartype(v) instead."""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "(\n 'cqm.variables.vartype(v) is deprecated and will be removed in dimod 0.11.0, use cqm.vartype(v) instead.'\n , DeprecationWarning, stacklevel=2)\n", 
(6570, 6721), False, 'import warnings\n'), ((10413, 10425), 'dimod.sym.Sense', 'Sense', (['sense'], {}), '(sense)\n', (10418, 10425), False, 'from dimod.sym import Comparison, Eq, Le, Ge, Sense\n'), ((10864, 10874), 'dimod.binary.binary_quadratic_model.as_bqm', 'as_bqm', (['qm'], {}), '(qm)\n', (10870, 10874), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((11049, 11060), 'dimod.sym.Le', 'Le', (['qm', 'rhs'], {}), '(qm, rhs)\n', (11051, 11060), False, 'from dimod.sym import Comparison, Eq, Le, Ge, Sense\n'), ((22445, 22457), 'dimod.serialization.fileview._BytesIO', '_BytesIO', (['fp'], {}), '(fp)\n', (22453, 22457), False, 'from dimod.serialization.fileview import SpooledTemporaryFile, _BytesIO\n'), ((22909, 22945), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_like'], {'mode': '"""r"""'}), "(file_like, mode='r')\n", (22924, 22945), False, 'import zipfile\n'), ((32335, 32366), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file'], {'mode': '"""a"""'}), "(file, mode='a')\n", (32350, 32366), False, 'import zipfile\n'), ((38687, 38710), 'dimod.variables.deserialize_variable', 'deserialize_variable', (['v'], {}), '(v)\n', (38707, 38710), False, 'from dimod.variables import Variables, serialize_variable, deserialize_variable\n'), ((38730, 38766), 'dimod.binary.binary_quadratic_model.BinaryQuadraticModel', 'BinaryQuadraticModel', (['Vartype.BINARY'], {}), '(Vartype.BINARY)\n', (38750, 38766), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((11131, 11142), 'dimod.sym.Ge', 'Ge', (['qm', 'rhs'], {}), '(qm, rhs)\n', (11133, 11142), False, 'from dimod.sym import Comparison, Eq, Le, Ge, Sense\n'), ((18295, 18329), 'dimod.vartypes.as_vartype', 'as_vartype', (['vartype'], {'extended': '(True)'}), '(vartype, extended=True)\n', (18305, 18329), False, 'from dimod.vartypes import Vartype, as_vartype, VartypeLike\n'), ((23158, 23196), 're.match', 're.match', 
(['"""constraints/([^/]+)/"""', 'arch'], {}), "('constraints/([^/]+)/', arch)\n", (23166, 23196), False, 'import re\n'), ((26743, 26754), 'dimod.utilities.new_label', 'new_label', ([], {}), '()\n', (26752, 26754), False, 'from dimod.utilities import new_label\n'), ((10584, 10596), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10594, 10596), False, 'import uuid\n'), ((11213, 11224), 'dimod.sym.Eq', 'Eq', (['qm', 'rhs'], {}), '(qm, rhs)\n', (11215, 11224), False, 'from dimod.sym import Comparison, Eq, Le, Ge, Sense\n'), ((23711, 23733), 'json.loads', 'json.loads', (['constraint'], {}), '(constraint)\n', (23721, 23733), False, 'import json\n'), ((27090, 27101), 'dimod.utilities.new_label', 'new_label', ([], {}), '()\n', (27099, 27101), False, 'from dimod.utilities import new_label\n'), ((32778, 32803), 'dimod.variables.serialize_variable', 'serialize_variable', (['label'], {}), '(label)\n', (32796, 32803), False, 'from dimod.variables import Variables, serialize_variable, deserialize_variable\n'), ((44331, 44342), 'dimod.utilities.new_label', 'new_label', ([], {}), '()\n', (44340, 44342), False, 'from dimod.utilities import new_label\n'), ((10674, 10686), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10684, 10686), False, 'import uuid\n'), ((32974, 33000), 'numpy.float64', 'np.float64', (['constraint.rhs'], {}), '(constraint.rhs)\n', (32984, 33000), True, 'import numpy as np\n'), ((37294, 37303), 'dimod.binary.binary_quadratic_model.Binary', 'Binary', (['v'], {}), '(v)\n', (37300, 37303), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((37372, 37381), 'dimod.binary.binary_quadratic_model.Binary', 'Binary', (['u'], {}), '(u)\n', (37378, 37381), False, 'from dimod.binary.binary_quadratic_model import BinaryQuadraticModel, Binary, Spin, as_bqm\n'), ((38800, 38823), 'dimod.variables.deserialize_variable', 'deserialize_variable', (['u'], {}), '(u)\n', (38820, 38823), False, 'from dimod.variables import Variables, 
serialize_variable, deserialize_variable\n'), ((38921, 38944), 'dimod.variables.deserialize_variable', 'deserialize_variable', (['v'], {}), '(v)\n', (38941, 38944), False, 'from dimod.variables import Variables, serialize_variable, deserialize_variable\n'), ((38946, 38965), 'dimod.vartypes.as_vartype', 'as_vartype', (['vartype'], {}), '(vartype)\n', (38956, 38965), False, 'from dimod.vartypes import Vartype, as_vartype, VartypeLike\n'), ((44666, 44677), 'dimod.utilities.new_label', 'new_label', ([], {}), '()\n', (44675, 44677), False, 'from dimod.utilities import new_label\n'), ((39293, 39314), 'dimod.variables.serialize_variable', 'serialize_variable', (['v'], {}), '(v)\n', (39311, 39314), False, 'from dimod.variables import Variables, serialize_variable, deserialize_variable\n'), ((39422, 39443), 'dimod.variables.serialize_variable', 'serialize_variable', (['v'], {}), '(v)\n', (39440, 39443), False, 'from dimod.variables import Variables, serialize_variable, deserialize_variable\n'), ((44424, 44442), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (44432, 44442), True, 'import numpy as np\n'), ((44735, 44753), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (44743, 44753), True, 'import numpy as np\n')] |
import numpy as np
from unittest import SkipTest, expectedFailure
from parameterized import parameterized
from holoviews import NdOverlay, Store
from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap
from holoviews.element.comparison import ComparisonTestCase
from ..util import is_dask
class TestChart2D(ComparisonTestCase):
    """Tests for hvplot's 2D chart kinds (points/paths) and heatmaps on pandas data."""

    def setUp(self):
        """Build small numeric, categorical and hourly-timeseries fixtures."""
        try:
            import pandas as pd
        except ImportError:
            # Catch only the import failure; a bare ``except`` would also
            # swallow unrelated errors (e.g. KeyboardInterrupt) as a skip.
            raise SkipTest('Pandas not available')
        import hvplot.pandas  # noqa
        self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])
        self.cat_df = pd.DataFrame([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']],
                                   columns=['x', 'y', 'category'])
        # Five days of hourly, tz-aware samples with a smooth 'temp' signal.
        self.time_df = pd.DataFrame({
            'time': pd.date_range('1/1/2000', periods=5*24, freq='1H', tz='UTC'),
            'temp': np.sin(np.linspace(0, 5*2*np.pi, 5*24)).cumsum()})

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_defaults(self, kind, element):
        plot = self.df.hvplot(kind=kind)
        self.assertEqual(plot, element(self.df, ['x', 'y']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_chart(self, kind, element):
        plot = self.df.hvplot(x='x', y='y', kind=kind)
        self.assertEqual(plot, element(self.df, ['x', 'y']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_index_and_c(self, kind, element):
        plot = self.df.hvplot(x='index', y='y', c='x', kind=kind)
        self.assertEqual(plot, element(self.df, ['index', 'y'], ['x']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_set_hover_cols_to_list(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', hover_cols=['category'], kind=kind)
        self.assertEqual(plot, element(self.cat_df, ['x', 'y'], ['category']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_set_hover_cols_including_index(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', hover_cols=['index'], kind=kind)
        # Path elements wrap their data in a list; unwrap for the column check.
        data = plot.data[0] if kind == 'paths' else plot.data
        assert 'index' in data.columns
        self.assertEqual(plot, element(self.cat_df.reset_index(), ['x', 'y'], ['index']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_set_hover_cols_to_all(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', hover_cols='all', kind=kind)
        data = plot.data[0] if kind == 'paths' else plot.data
        assert 'index' in data.columns
        self.assertEqual(plot, element(self.cat_df.reset_index(), ['x', 'y'], ['index', 'category']))

    @parameterized.expand([('points', Points), ('paths', Path)])
    def test_2d_set_hover_cols_to_all_with_use_index_as_false(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', hover_cols='all', use_index=False, kind=kind)
        self.assertEqual(plot, element(self.cat_df, ['x', 'y'], ['category']))

    def test_heatmap_2d_index_columns(self):
        plot = self.df.hvplot.heatmap()
        self.assertEqual(plot, HeatMap((['x', 'y'], [0, 1, 2], self.df.values),
                                       ['columns', 'index'], 'value'))

    def test_heatmap_2d_derived_x_and_y(self):
        plot = self.time_df.hvplot.heatmap(x='time.hour', y='time.day', C='temp')
        assert plot.kdims == ['time.hour', 'time.day']
        assert plot.vdims == ['temp']
class TestChart2DDask(TestChart2D):
    """Re-runs the 2D chart tests against dask-backed frames."""

    def setUp(self):
        """Convert the pandas fixtures built by the parent into dask frames."""
        super(TestChart2DDask, self).setUp()
        try:
            import dask.dataframe as dd
        except ImportError:
            # Catch only the import failure; a bare ``except`` would also
            # swallow unrelated errors as a skip.
            raise SkipTest('Dask not available')
        import hvplot.dask  # noqa
        self.df = dd.from_pandas(self.df, npartitions=2)
        self.cat_df = dd.from_pandas(self.cat_df, npartitions=3)

    @expectedFailure
    def test_heatmap_2d_index_columns(self):
        # Heatmap of a dask frame is known not to work; kept as expectedFailure.
        self.df.hvplot.heatmap()
class TestChart1D(ComparisonTestCase):
    """Tests for hvplot's 1D chart kinds (line/area/scatter/hist) on pandas data."""

    def setUp(self):
        """Build numeric, datetime-indexed, categorical and timeseries fixtures."""
        try:
            import pandas as pd
        except ImportError:
            # Catch only the import failure; a bare ``except`` would also
            # swallow unrelated errors (e.g. KeyboardInterrupt) as a skip.
            raise SkipTest('Pandas not available')
        import hvplot.pandas  # noqa
        self.df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])
        # 90 random values over the 90 days of 2019-01-01..2019-03-31.
        self.dt_df = pd.DataFrame(np.random.rand(90), index=pd.date_range('2019-01-01', '2019-03-31'))
        self.cat_df = pd.DataFrame([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']],
                                   columns=['x', 'y', 'category'])
        self.cat_only_df = pd.DataFrame([['A', 'a'], ['B', 'b'], ['C', 'c']],
                                        columns=['upper', 'lower'])
        self.time_df = pd.DataFrame({
            'time': pd.date_range('1/1/2000', periods=10, tz='UTC'),
            'A': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'B': list('abcdefghij')})

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_wide_chart(self, kind, element):
        plot = self.df.hvplot(kind=kind)
        obj = NdOverlay({'x': element(self.df, 'index', 'x').redim(x='value'),
                         'y': element(self.df, 'index', 'y').redim(y='value')}, 'Variable')
        self.assertEqual(plot, obj)

    def test_by_datetime_accessor(self):
        plot = self.dt_df.hvplot.line('index.dt.day', '0', by='index.dt.month')
        obj = NdOverlay({m: Curve((g.index.day, g[0]), 'index.dt.day', '0')
                         for m, g in self.dt_df.groupby(self.dt_df.index.month)}, 'index.dt.month')
        self.assertEqual(plot, obj)

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_wide_chart_labels(self, kind, element):
        plot = self.df.hvplot(kind=kind, value_label='Test', group_label='Category')
        obj = NdOverlay({'x': element(self.df, 'index', 'x').redim(x='Test'),
                         'y': element(self.df, 'index', 'y').redim(y='Test')}, 'Category')
        self.assertEqual(plot, obj)

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_wide_chart_legend_position(self, kind, element):
        plot = self.df.hvplot(kind=kind, value_label='Test', group_label='Category', legend='left')
        opts = Store.lookup_options('bokeh', plot, 'plot')
        self.assertEqual(opts.kwargs['legend_position'], 'left')

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart(self, kind, element):
        plot = self.df.hvplot(x='x', y='y', kind=kind)
        self.assertEqual(plot, element(self.df, 'x', 'y'))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_index(self, kind, element):
        plot = self.df.hvplot(x='index', y='y', kind=kind)
        self.assertEqual(plot, element(self.df, 'index', 'y'))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_index_by(self, kind, element):
        plot = self.df.hvplot(x='index', y='y', by='x', kind=kind)
        obj = NdOverlay({1: element(self.df[self.df.x==1], 'index', 'y'),
                         3: element(self.df[self.df.x==3], 'index', 'y'),
                         5: element(self.df[self.df.x==5], 'index', 'y')}, 'x')
        self.assertEqual(plot, obj)

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_index_by_legend_position(self, kind, element):
        plot = self.df.hvplot(x='index', y='y', by='x', kind=kind, legend='left')
        opts = Store.lookup_options('bokeh', plot, 'plot')
        self.assertEqual(opts.kwargs['legend_position'], 'left')

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_use_index_disabled_uses_first_cols(self, kind, element):
        plot = self.df.hvplot(use_index=False, kind=kind)
        self.assertEqual(plot.kdims, ['x'])
        self.assertEqual(plot.vdims, ['y'])

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_ranges(self, kind, element):
        plot = self.df.hvplot(x='x', y='y', kind=kind, xlim=(0, 3), ylim=(5, 10))
        opts = Store.lookup_options('bokeh', plot, 'plot').options
        self.assertEqual(opts['xlim'], (0, 3))
        self.assertEqual(opts['ylim'], (5, 10))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_wide_chart_ranges(self, kind, element):
        plot = self.df.hvplot(kind=kind, xlim=(0, 3), ylim=(5, 10))
        # Wide data produces an overlay; inspect the last layered element.
        opts = Store.lookup_options('bokeh', plot.last, 'plot').options
        self.assertEqual(opts['xlim'], (0, 3))
        self.assertEqual(opts['ylim'], (5, 10))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_with_hover_cols(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', kind=kind, hover_cols=['category'])
        self.assertEqual(plot, element(self.cat_df, 'x', ['y', 'category']))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_with_index_in_hover_cols(self, kind, element):
        plot = self.df.hvplot(x='x', y='y', kind=kind, hover_cols=['index'])
        altered_df = self.df.reset_index()
        self.assertEqual(plot, element(altered_df, 'x', ['y', 'index']))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_with_hover_cols_as_all(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', kind=kind, hover_cols='all')
        altered_df = self.cat_df.reset_index()
        self.assertEqual(plot, element(altered_df, 'x', ['y', 'index', 'category']))

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_tidy_chart_with_hover_cols_as_all_with_use_index_as_false(self, kind, element):
        plot = self.cat_df.hvplot(x='x', y='y', kind=kind, hover_cols='all', use_index=False)
        self.assertEqual(plot, element(self.cat_df, 'x', ['y', 'category']))

    def test_area_stacked(self):
        plot = self.df.hvplot.area(stacked=True)
        obj = NdOverlay({'x': Area(self.df, 'index', 'x').redim(x='value'),
                         'y': Area(self.df, 'index', 'y').redim(y='value')}, 'Variable')
        self.assertEqual(plot, Area.stack(obj))

    def test_scatter_color_set_to_series(self):
        # Materialize the column first when it is dask-backed (subclass reuse).
        if is_dask(self.df['y']):
            y = self.df['y'].compute()
        else:
            y = self.df['y']
        actual = self.df.hvplot.scatter('x', 'y', c=y)
        altered_df = self.df.assign(_color=y)
        expected = altered_df.hvplot.scatter('x', 'y', c='_color')
        self.assertEqual(actual, expected)

    def test_scatter_size_set_to_series(self):
        if is_dask(self.df['y']):
            y = self.df['y'].compute()
        else:
            y = self.df['y']
        plot = self.df.hvplot.scatter('x', 'y', s=y)
        opts = Store.lookup_options('bokeh', plot, 'style')
        assert '_size' in plot.data.columns
        self.assertEqual(opts.kwargs['size'], '_size')

    def test_scatter_color_by_legend_position(self):
        plot = self.cat_df.hvplot.scatter('x', 'y', c='category', legend='left')
        opts = Store.lookup_options('bokeh', plot, 'plot')
        self.assertEqual(opts.kwargs['legend_position'], 'left')

    def test_histogram_by_category_legend_position(self):
        plot = self.cat_df.hvplot.hist('y', by='category', legend='left')
        opts = Store.lookup_options('bokeh', plot, 'plot')
        self.assertEqual(opts.kwargs['legend_position'], 'left')

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_only_includes_num_chart(self, kind, element):
        plot = self.cat_df.hvplot(kind=kind)
        obj = NdOverlay({'x': element(self.cat_df, 'index', 'x').redim(x='value'),
                         'y': element(self.cat_df, 'index', 'y').redim(y='value'),
                         }, 'Variable')
        self.assertEqual(plot, obj)

    @parameterized.expand([('line', Curve), ('area', Area), ('scatter', Scatter)])
    def test_includes_str_if_no_num_chart(self, kind, element):
        plot = self.cat_only_df.hvplot(kind=kind)
        obj = NdOverlay({'upper': element(self.cat_only_df, 'index', 'upper').redim(upper='value'),
                         'lower': element(self.cat_only_df, 'index', 'lower').redim(lower='value'),
                         }, 'Variable')
        self.assertEqual(plot, obj)

    def test_time_df_sorts_on_plot(self):
        scrambled = self.time_df.sample(frac=1)
        plot = scrambled.hvplot(x='time')
        assert (plot.data == self.time_df).all().all()
        assert len(plot.data.time.unique()) == len(plot.data.time)

    def test_time_df_does_not_sort_on_plot_if_sort_date_off(self):
        scrambled = self.time_df.sample(frac=1)
        plot = scrambled.hvplot(x='time', sort_date=False)
        assert (plot.data == scrambled).all().all()
        assert len(plot.data.time.unique()) == len(plot.data.time)

    def test_time_df_sorts_on_plot_using_index_as_x(self):
        df = self.time_df.set_index('time')
        scrambled = df.sample(frac=1)
        plot = scrambled.hvplot()
        assert (plot.data['time'] == df.index).all()
        assert len(plot.data.time.unique()) == len(plot.data.time)

    def test_time_df_does_not_sort_on_plot_if_sort_date_off_using_index_as_x(self):
        df = self.time_df.set_index('time')
        scrambled = df.sample(frac=1)
        plot = scrambled.hvplot(sort_date=False)
        assert (plot.data.time == scrambled.index).all().all()
        assert len(plot.data.time.unique()) == len(plot.data.time)

    def test_time_df_with_groupby_as_derived_datetime(self):
        plot = self.time_df.hvplot(groupby='time.dayofweek', dynamic=False)
        assert list(plot.keys()) == [0, 1, 2, 3, 4, 5, 6]
        assert list(plot.dimensions()) == ['time.dayofweek', 'index', 'A']

    def test_time_df_with_by_as_derived_datetime(self):
        plot = self.time_df.hvplot(by='time.month', dynamic=False)
        assert list(plot.keys()) == [1]
        assert list(plot.dimensions()) == ['time.month', 'index', 'A']

    def test_time_df_with_x_as_derived_datetime(self):
        plot = self.time_df.hvplot.scatter(x='time.day', dynamic=False)
        assert list(plot.dimensions()) == ['time.day', 'A']

    def test_time_df_as_index_with_x_as_derived_datetime_using_name(self):
        indexed = self.time_df.set_index('time')
        plot = indexed.hvplot.scatter(x='time.day', dynamic=False)
        assert list(plot.dimensions()) == ['time.day', 'A']

    def test_time_df_as_index_with_x_as_derived_datetime_using_index(self):
        indexed = self.time_df.set_index('time')
        plot = indexed.hvplot.scatter(x='index.day', dynamic=False)
        assert list(plot.dimensions()) == ['index.day', 'A']

    def test_default_y_not_in_by(self):
        plot = self.cat_df.hvplot.scatter(by='x')
        assert plot.kdims == ['x']
        assert plot[1].kdims == ['index']
        assert plot[1].vdims == ['y']
class TestChart1DDask(TestChart1D):
    """Re-runs the 1D chart tests against dask-backed frames."""

    def setUp(self):
        """Convert the pandas fixtures built by the parent into dask frames."""
        super(TestChart1DDask, self).setUp()
        try:
            import dask.dataframe as dd
        except ImportError:
            # Catch only the import failure; a bare ``except`` would also
            # swallow unrelated errors as a skip.
            raise SkipTest('Dask not available')
        import hvplot.dask  # noqa
        self.df = dd.from_pandas(self.df, npartitions=2)
        self.dt_df = dd.from_pandas(self.dt_df, npartitions=3)
        self.cat_df = dd.from_pandas(self.cat_df, npartitions=3)
        self.cat_only_df = dd.from_pandas(self.cat_only_df, npartitions=1)

    def test_by_datetime_accessor(self):
        raise SkipTest("Can't expand dt accessor columns when using dask")
| [
"numpy.random.rand",
"parameterized.parameterized.expand",
"dask.dataframe.from_pandas",
"holoviews.element.HeatMap",
"numpy.linspace",
"unittest.SkipTest",
"pandas.DataFrame",
"holoviews.element.Area",
"holoviews.element.Curve",
"holoviews.Store.lookup_options",
"pandas.date_range",
"holoview... | [((939, 998), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (959, 998), False, 'from parameterized import parameterized\n'), ((1154, 1213), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (1174, 1213), False, 'from parameterized import parameterized\n'), ((1380, 1439), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (1400, 1439), False, 'from parameterized import parameterized\n'), ((1634, 1693), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (1654, 1693), False, 'from parameterized import parameterized\n'), ((1924, 1983), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (1944, 1983), False, 'from parameterized import parameterized\n'), ((2331, 2390), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (2351, 2390), False, 'from parameterized import parameterized\n'), ((2737, 2796), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('points', Points), ('paths', Path)]"], {}), "([('points', Points), ('paths', Path)])\n", (2757, 2796), False, 'from parameterized import parameterized\n'), ((4878, 4955), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (4898, 4955), False, 'from parameterized import parameterized\n'), ((5591, 5668), 
'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (5611, 5668), False, 'from parameterized import parameterized\n'), ((6018, 6095), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (6038, 6095), False, 'from parameterized import parameterized\n'), ((6388, 6465), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (6408, 6465), False, 'from parameterized import parameterized\n'), ((6632, 6709), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (6652, 6709), False, 'from parameterized import parameterized\n'), ((6890, 6967), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (6910, 6967), False, 'from parameterized import parameterized\n'), ((7360, 7437), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (7380, 7437), False, 'from parameterized import parameterized\n'), ((7721, 7798), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (7741, 7798), False, 'from parameterized import parameterized\n'), ((8021, 8098), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', 
Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (8041, 8098), False, 'from parameterized import parameterized\n'), ((8402, 8479), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (8422, 8479), False, 'from parameterized import parameterized\n'), ((8774, 8851), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (8794, 8851), False, 'from parameterized import parameterized\n'), ((9081, 9158), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (9101, 9158), False, 'from parameterized import parameterized\n'), ((9429, 9506), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (9449, 9506), False, 'from parameterized import parameterized\n'), ((9791, 9868), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (9811, 9868), False, 'from parameterized import parameterized\n'), ((11703, 11780), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), ('area', Area), ('scatter', Scatter)])\n", (11723, 11780), False, 'from parameterized import parameterized\n'), ((12132, 12209), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('line', Curve), ('area', Area), ('scatter', Scatter)]"], {}), "([('line', Curve), 
('area', Area), ('scatter', Scatter)])\n", (12152, 12209), False, 'from parameterized import parameterized\n'), ((540, 598), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 2], [3, 4], [5, 6]]'], {'columns': "['x', 'y']"}), "([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])\n", (552, 598), True, 'import pandas as pd\n'), ((621, 710), 'pandas.DataFrame', 'pd.DataFrame', (["[[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']]"], {'columns': "['x', 'y', 'category']"}), "([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']], columns=['x', 'y',\n 'category'])\n", (633, 710), True, 'import pandas as pd\n'), ((3791, 3829), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.df'], {'npartitions': '(2)'}), '(self.df, npartitions=2)\n', (3805, 3829), True, 'import dask.dataframe as dd\n'), ((3852, 3894), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.cat_df'], {'npartitions': '(3)'}), '(self.cat_df, npartitions=3)\n', (3866, 3894), True, 'import dask.dataframe as dd\n'), ((4226, 4284), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 2], [3, 4], [5, 6]]'], {'columns': "['x', 'y']"}), "([[1, 2], [3, 4], [5, 6]], columns=['x', 'y'])\n", (4238, 4284), True, 'import pandas as pd\n'), ((4410, 4499), 'pandas.DataFrame', 'pd.DataFrame', (["[[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']]"], {'columns': "['x', 'y', 'category']"}), "([[1, 2, 'A'], [3, 4, 'B'], [5, 6, 'C']], columns=['x', 'y',\n 'category'])\n", (4422, 4499), True, 'import pandas as pd\n'), ((4558, 4636), 'pandas.DataFrame', 'pd.DataFrame', (["[['A', 'a'], ['B', 'b'], ['C', 'c']]"], {'columns': "['upper', 'lower']"}), "([['A', 'a'], ['B', 'b'], ['C', 'c']], columns=['upper', 'lower'])\n", (4570, 4636), True, 'import pandas as pd\n'), ((6273, 6316), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', '"""plot"""'], {}), "('bokeh', plot, 'plot')\n", (6293, 6316), False, 'from holoviews import NdOverlay, Store\n'), ((7606, 7649), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', 
'"""plot"""'], {}), "('bokeh', plot, 'plot')\n", (7626, 7649), False, 'from holoviews import NdOverlay, Store\n'), ((11037, 11081), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', '"""style"""'], {}), "('bokeh', plot, 'style')\n", (11057, 11081), False, 'from holoviews import NdOverlay, Store\n'), ((11331, 11374), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', '"""plot"""'], {}), "('bokeh', plot, 'plot')\n", (11351, 11374), False, 'from holoviews import NdOverlay, Store\n'), ((11588, 11631), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', '"""plot"""'], {}), "('bokeh', plot, 'plot')\n", (11608, 11631), False, 'from holoviews import NdOverlay, Store\n'), ((15474, 15512), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.df'], {'npartitions': '(2)'}), '(self.df, npartitions=2)\n', (15488, 15512), True, 'import dask.dataframe as dd\n'), ((15534, 15575), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.dt_df'], {'npartitions': '(3)'}), '(self.dt_df, npartitions=3)\n', (15548, 15575), True, 'import dask.dataframe as dd\n'), ((15598, 15640), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.cat_df'], {'npartitions': '(3)'}), '(self.cat_df, npartitions=3)\n', (15612, 15640), True, 'import dask.dataframe as dd\n'), ((15668, 15715), 'dask.dataframe.from_pandas', 'dd.from_pandas', (['self.cat_only_df'], {'npartitions': '(1)'}), '(self.cat_only_df, npartitions=1)\n', (15682, 15715), True, 'import dask.dataframe as dd\n'), ((15772, 15832), 'unittest.SkipTest', 'SkipTest', (['"""Can\'t expand dt accessor columns when using dask"""'], {}), '("Can\'t expand dt accessor columns when using dask")\n', (15780, 15832), False, 'from unittest import SkipTest, expectedFailure\n'), ((3171, 3250), 'holoviews.element.HeatMap', 'HeatMap', (["(['x', 'y'], [0, 1, 2], self.df.values)", "['columns', 'index']", '"""value"""'], {}), "((['x', 'y'], [0, 1, 2], self.df.values), 
['columns', 'index'], 'value')\n", (3178, 3250), False, 'from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap\n'), ((4319, 4337), 'numpy.random.rand', 'np.random.rand', (['(90)'], {}), '(90)\n', (4333, 4337), True, 'import numpy as np\n'), ((8249, 8292), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot', '"""plot"""'], {}), "('bokeh', plot, 'plot')\n", (8269, 8292), False, 'from holoviews import NdOverlay, Store\n'), ((8616, 8664), 'holoviews.Store.lookup_options', 'Store.lookup_options', (['"""bokeh"""', 'plot.last', '"""plot"""'], {}), "('bokeh', plot.last, 'plot')\n", (8636, 8664), False, 'from holoviews import NdOverlay, Store\n'), ((10412, 10427), 'holoviews.element.Area.stack', 'Area.stack', (['obj'], {}), '(obj)\n', (10422, 10427), False, 'from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap\n'), ((451, 483), 'unittest.SkipTest', 'SkipTest', (['"""Pandas not available"""'], {}), "('Pandas not available')\n", (459, 483), False, 'from unittest import SkipTest, expectedFailure\n'), ((800, 862), 'pandas.date_range', 'pd.date_range', (['"""1/1/2000"""'], {'periods': '(5 * 24)', 'freq': '"""1H"""', 'tz': '"""UTC"""'}), "('1/1/2000', periods=5 * 24, freq='1H', tz='UTC')\n", (813, 862), True, 'import pandas as pd\n'), ((3706, 3736), 'unittest.SkipTest', 'SkipTest', (['"""Dask not available"""'], {}), "('Dask not available')\n", (3714, 3736), False, 'from unittest import SkipTest, expectedFailure\n'), ((4137, 4169), 'unittest.SkipTest', 'SkipTest', (['"""Pandas not available"""'], {}), "('Pandas not available')\n", (4145, 4169), False, 'from unittest import SkipTest, expectedFailure\n'), ((4345, 4386), 'pandas.date_range', 'pd.date_range', (['"""2019-01-01"""', '"""2019-03-31"""'], {}), "('2019-01-01', '2019-03-31')\n", (4358, 4386), True, 'import pandas as pd\n'), ((4735, 4782), 'pandas.date_range', 'pd.date_range', (['"""1/1/2000"""'], {'periods': '(10)', 'tz': '"""UTC"""'}), "('1/1/2000', 
periods=10, tz='UTC')\n", (4748, 4782), True, 'import pandas as pd\n'), ((5400, 5447), 'holoviews.element.Curve', 'Curve', (['(g.index.day, g[0])', '"""index.dt.day"""', '"""0"""'], {}), "((g.index.day, g[0]), 'index.dt.day', '0')\n", (5405, 5447), False, 'from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap\n'), ((15389, 15419), 'unittest.SkipTest', 'SkipTest', (['"""Dask not available"""'], {}), "('Dask not available')\n", (15397, 15419), False, 'from unittest import SkipTest, expectedFailure\n'), ((10246, 10273), 'holoviews.element.Area', 'Area', (['self.df', '"""index"""', '"""x"""'], {}), "(self.df, 'index', 'x')\n", (10250, 10273), False, 'from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap\n'), ((10322, 10349), 'holoviews.element.Area', 'Area', (['self.df', '"""index"""', '"""y"""'], {}), "(self.df, 'index', 'y')\n", (10326, 10349), False, 'from holoviews.element import Curve, Area, Scatter, Points, Path, HeatMap\n'), ((889, 926), 'numpy.linspace', 'np.linspace', (['(0)', '(5 * 2 * np.pi)', '(5 * 24)'], {}), '(0, 5 * 2 * np.pi, 5 * 24)\n', (900, 926), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import numpy as np
import sys
from utils import util
from prjuray.db import Database
def print_top(seed, f=sys.stdout):
    """Write a randomized Verilog clock-network specimen to *f*.

    The generated ``top`` module routes the 24 ``i`` inputs through 12
    randomly-configured BUFGCE_DIV and 12 BUFGCE global clock buffers, then
    scatters location-constrained FDCE flops across randomly chosen
    half-column SLICEs, clocking each from a random subset of the buffered
    clocks.

    Args:
        seed: integer seed for ``np.random``; the same seed reproduces the
            exact same design.
        f: writable text stream for the Verilog output (default stdout).
    """
    np.random.seed(seed)

    db = Database(util.get_db_root(), util.get_part())
    grid = db.grid()

    # Index the device grid: global clock buffer sites per tile, the
    # RCLK_INT_L row tiles, and one SLICE site per (x, y) location.
    bufgces_by_tile = {}
    rclk_int_l = []
    slices_by_tile = {}
    for tile in sorted(grid.tiles()):
        loc = grid.loc_of_tilename(tile)
        if tile.startswith("RCLK_INT_L"):
            rclk_int_l.append((loc.grid_x, loc.grid_y, tile))
        gridinfo = grid.gridinfo_at_loc(loc)
        for site, site_type in gridinfo.sites.items():
            if ("BUFGCE" in site or "BUFGCTRL" in site) and "HDIO" not in site:
                if tile not in bufgces_by_tile:
                    bufgces_by_tile[tile] = []
                bufgces_by_tile[tile].append((site, site_type))
            elif "SLICE_" in site:
                slices_by_tile[loc.grid_x, loc.grid_y] = site

    # For each RCLK row, gather SLICEs in the INT columns up to 30 tiles
    # above and below it; stop at the first non-INT tile in each direction.
    halfcolumn_slices_by_row = {}
    for x, y, rclk in rclk_int_l:
        hc_up = []
        hc_down = []
        if y not in halfcolumn_slices_by_row:
            halfcolumn_slices_by_row[y] = []
        for yplus in range(y + 1, y + 31):
            if (x, yplus) not in grid.loc:
                continue
            tile = grid.tilename_at_loc((x, yplus))
            if not tile.startswith("INT_"):
                break
            # Pick the SLICE on a random side of the INT column.
            slice_x = x + np.random.choice([+1, -1])
            if (slice_x, yplus) not in slices_by_tile:
                continue
            hc_up.append(slices_by_tile[slice_x, yplus])
        for yminus in range(y - 1, y - 31, -1):
            if (x, yminus) not in grid.loc:
                continue
            tile = grid.tilename_at_loc((x, yminus))
            if not tile.startswith("INT_"):
                break
            slice_x = x + np.random.choice([+1, -1])
            if (slice_x, yminus) not in slices_by_tile:
                continue
            hc_down.append(slices_by_tile[slice_x, yminus])
        halfcolumn_slices_by_row[y].append(hc_up)
        halfcolumn_slices_by_row[y].append(hc_down)

    # Select a random set of buffers per tile.  The resulting ``buffers``
    # list is not consumed below, but the loop is kept because it draws from
    # the global RNG and removing it would change every seeded design.
    buffers = []
    tiles = list(sorted(bufgces_by_tile.keys()))
    np.random.shuffle(tiles)
    for tile in tiles:
        shuffled_bufs = list(bufgces_by_tile[tile])
        np.random.shuffle(shuffled_bufs)
        target_type = np.random.choice(
            ["BUFGCE", "BUFGCE_DIV", "BUFGCTRL"]
            if len(buffers) > 0 else ["BUFGCE", "BUFGCE_DIV"])
        tile_buffers = np.random.randint(6)
        found_buffers = 0
        for buf, buftype in shuffled_bufs:
            if found_buffers >= tile_buffers:
                break
            if buftype != target_type:
                continue
            buffers.append((buf, buftype))
            found_buffers += 1

    def random_inversion(pins):
        # Random .IS_<pin>_INVERTED(0|1) parameter assignments.
        return ", ".join(
            [".IS_%s_INVERTED(%d)" % (p, np.random.randint(2)) for p in pins])

    def random_control(pins):
        # Wire each control pin to a random bit of the 10-bit aux input.
        return ", ".join(
            [".%s(aux[%d])" % (p, np.random.randint(10)) for p in pins])

    CCIO_CLKS = 24
    print(
        "module top(input [{CCIO_CLKS}-1:0] i, input [9:0] aux, input d, output o, q);"
        .format(CCIO_CLKS=CCIO_CLKS),
        file=f)
    print(" wire [71:0] r;", file=f)
    print(
        " assign r[{CCIO_CLKS}-1:0] = i;".format(CCIO_CLKS=CCIO_CLKS),
        file=f)

    # 12 BUFGCE_DIV instances driving r[24]..r[35].
    for i in range(12):
        print(" BUFGCE_DIV #(", file=f)
        print(" .BUFGCE_DIVIDE(%d)," % np.random.randint(1, 9), file=f)
        print(" %s" % random_inversion(["I", "CE", "CLR"]), file=f)
        print(" ) bufgce_div_%d (" % i, file=f)
        print(" .I(i[%d])," % i, file=f)
        print(" %s," % random_control(["CE", "CLR"]), file=f)
        print(" .O(r[%d])" % (CCIO_CLKS + i), file=f)
        print(" );", file=f)

    # 12 BUFGCE instances driving r[36]..r[47], each fed a random input clock.
    for i in range(12):
        print(" BUFGCE #(", file=f)
        print(
            " .CE_TYPE(\"%s\")," % np.random.choice(["SYNC", "ASYNC"]),
            file=f)
        print(" %s" % random_inversion(["I", "CE"]), file=f)
        print(" ) bufgce_%d (" % i, file=f)
        print(" .I(i[%d])," % np.random.randint(CCIO_CLKS), file=f)
        print(" %s," % random_control(["CE"]), file=f)
        print(" .O(r[%d])" % (i + 36), file=f)
        print(" );", file=f)

    # Chain FDCE flops through r2, placing each in a randomly selected
    # half-column SLICE and clocking it from that row's random clock pool.
    R2 = 0
    NS = 16
    ffs = ""
    for row, hcs in sorted(halfcolumn_slices_by_row.items()):
        row_clks = np.random.randint(16, 25)
        clks = [np.random.randint(72) for k in range(row_clks)]
        halfs = [hcs[np.random.randint(len(hcs))] for k in range(NS)]
        for h in halfs:
            half_clks = np.random.randint(8, 17)
            rclks = [np.random.choice(clks) for k in range(half_clks)]
            for sl in h:
                ffs += "(* LOC=\"%s\" *) FDCE ff_%d (.C(r[%d]), .CE(aux[%d]), .CLR(1'b0), .D(r2[%d]), .Q(r2[%d]));\n" % (
                    sl, R2, np.random.choice(rclks), np.random.randint(5), R2,
                    R2 + 1)
                R2 += 1
    print(" wire [%d:0] r2;" % R2, file=f)
    print(" assign r2[0] = d;", file=f)
    print(" assign q = r2[%d];" % R2, file=f)
    print(ffs, file=f)
    print("endmodule", file=f)
# Emit the companion Tcl hook alongside the Verilog: a lone opt_design step.
with open("top.tcl", "w") as tcl_file:
    tcl_file.write("opt_design\n")
| [
"numpy.random.choice",
"numpy.random.randint",
"utils.util.get_part",
"numpy.random.seed",
"utils.util.get_db_root",
"numpy.random.shuffle"
] | [((406, 426), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (420, 426), True, 'import numpy as np\n'), ((2425, 2449), 'numpy.random.shuffle', 'np.random.shuffle', (['tiles'], {}), '(tiles)\n', (2442, 2449), True, 'import numpy as np\n'), ((446, 464), 'utils.util.get_db_root', 'util.get_db_root', ([], {}), '()\n', (462, 464), False, 'from utils import util\n'), ((466, 481), 'utils.util.get_part', 'util.get_part', ([], {}), '()\n', (479, 481), False, 'from utils import util\n'), ((2534, 2566), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffled_bufs'], {}), '(shuffled_bufs)\n', (2551, 2566), True, 'import numpy as np\n'), ((2742, 2762), 'numpy.random.randint', 'np.random.randint', (['(6)'], {}), '(6)\n', (2759, 2762), True, 'import numpy as np\n'), ((5650, 5675), 'numpy.random.randint', 'np.random.randint', (['(16)', '(25)'], {}), '(16, 25)\n', (5667, 5675), True, 'import numpy as np\n'), ((5692, 5713), 'numpy.random.randint', 'np.random.randint', (['(72)'], {}), '(72)\n', (5709, 5713), True, 'import numpy as np\n'), ((5858, 5882), 'numpy.random.randint', 'np.random.randint', (['(8)', '(17)'], {}), '(8, 17)\n', (5875, 5882), True, 'import numpy as np\n'), ((1654, 1680), 'numpy.random.choice', 'np.random.choice', (['[+1, -1]'], {}), '([+1, -1])\n', (1670, 1680), True, 'import numpy as np\n'), ((2082, 2108), 'numpy.random.choice', 'np.random.choice', (['[+1, -1]'], {}), '([+1, -1])\n', (2098, 2108), True, 'import numpy as np\n'), ((4648, 4671), 'numpy.random.randint', 'np.random.randint', (['(1)', '(9)'], {}), '(1, 9)\n', (4665, 4671), True, 'import numpy as np\n'), ((5137, 5172), 'numpy.random.choice', 'np.random.choice', (["['SYNC', 'ASYNC']"], {}), "(['SYNC', 'ASYNC'])\n", (5153, 5172), True, 'import numpy as np\n'), ((5346, 5374), 'numpy.random.randint', 'np.random.randint', (['CCIO_CLKS'], {}), '(CCIO_CLKS)\n', (5363, 5374), True, 'import numpy as np\n'), ((5904, 5926), 'numpy.random.choice', 'np.random.choice', (['clks'], {}), 
'(clks)\n', (5920, 5926), True, 'import numpy as np\n'), ((3138, 3158), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (3155, 3158), True, 'import numpy as np\n'), ((3267, 3288), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (3284, 3288), True, 'import numpy as np\n'), ((6129, 6152), 'numpy.random.choice', 'np.random.choice', (['rclks'], {}), '(rclks)\n', (6145, 6152), True, 'import numpy as np\n'), ((6154, 6174), 'numpy.random.randint', 'np.random.randint', (['(5)'], {}), '(5)\n', (6171, 6174), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for emql.util."""
from language.emql import util
import numpy as np
import tensorflow.compat.v1 as tf
class UtilTest(tf.test.TestCase):
  """Unit tests for the metric and tensor helpers in language.emql.util."""

  def setUp(self):
    super(UtilTest, self).setUp()
    self.sess = tf.Session()
    self.logits = tf.constant([[2, 3, 1, -1], [4, 1, 9, 3]], dtype=tf.float32)
    self.labels = tf.constant([[0, 1, 0, 1], [1, 0, 0, 0]], dtype=tf.float32)

  def _run(self, tensor):
    # Evaluate a tensor in the session created in setUp.
    return tensor.eval(session=self.sess)

  def test_hits_at_k(self):
    for k, expected in ((1, [1, 0]), (2, [1, 1])):
      hits = util.compute_hits_at_k(self.logits, self.labels, k=k)
      self.assertAllEqual(self._run(hits), np.array(expected))

  def test_recall_at_k(self):
    for k, expected in ((1, [0.5, 0]), (2, [0.5, 1])):
      recall = util.compute_recall_at_k(self.logits, self.labels, k=k)
      self.assertAllEqual(self._run(recall), np.array(expected))

  def test_map_at_k(self):
    for k, expected in ((1, [1.0, 0.0]), (2, [1.0, 0.5])):
      avg_precision = util.compute_average_precision_at_k(
          self.logits, self.labels, k=k)
      self.assertAllEqual(self._run(avg_precision), np.array(expected))

  def test_get_nonzero_ids(self):
    for k, expected in ((1, [[1], [0]]), (2, [[1, 3], [0, -1]])):
      nonzero = util.get_nonzero_ids(self.labels, k=k)
      self.assertAllEqual(self._run(nonzero), np.array(expected))

  def test_embedding_lookup_with_padding(self):
    tokens = tf.constant([0, -1], dtype=tf.int32)
    embeddings_mat = tf.constant([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]])
    embs = util.embedding_lookup_with_padding(
        embeddings_mat, tokens, padding=-1)
    embs_np = self._run(embs)  # shape (2, 3)
    # Padding tokens must map to the zero vector.
    self.assertAllClose(embs_np[0, :], [0.1, 0.2, 0.3])
    self.assertAllClose(embs_np[1, :], [0, 0, 0])

  def test_x_in_set(self):
    x = tf.constant([[1, 2], [3, 4]], dtype=tf.int32)
    s = tf.constant([[1, 2, 5], [4, 7, 8]], dtype=tf.int32)
    _, x_in_s = util.compute_x_in_set(x, s)
    self.assertAllEqual(self._run(x_in_s), np.array([[1, 1], [0, 1]]))

  def test_bert_tokenizer(self):
    text = '<NAME>'
    bert_tokenizer = util.BertTokenizer()
    _, (token_ids, _, input_mask) = bert_tokenizer.tokenize(text)
    self.assertGreater(np.sum(input_mask), 2)
    self.assertAllEqual(token_ids != 0, input_mask)
    self.assertEqual(len(token_ids), bert_tokenizer.max_seq_length)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"language.emql.util.compute_recall_at_k",
"language.emql.util.get_nonzero_ids",
"language.emql.util.compute_x_in_set",
"language.emql.util.compute_average_precision_at_k",
"language.emql.util.compute_hits_at_k",
"language.emql.util.BertTokenizer",
"numpy.array",
"numpy.sum",
"tensorflow.compat.v1.co... | [((3544, 3558), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (3556, 3558), True, 'import tensorflow.compat.v1 as tf\n'), ((852, 864), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (862, 864), True, 'import tensorflow.compat.v1 as tf\n'), ((883, 943), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[2, 3, 1, -1], [4, 1, 9, 3]]'], {'dtype': 'tf.float32'}), '([[2, 3, 1, -1], [4, 1, 9, 3]], dtype=tf.float32)\n', (894, 943), True, 'import tensorflow.compat.v1 as tf\n'), ((962, 1021), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0, 1, 0, 1], [1, 0, 0, 0]]'], {'dtype': 'tf.float32'}), '([[0, 1, 0, 1], [1, 0, 0, 0]], dtype=tf.float32)\n', (973, 1021), True, 'import tensorflow.compat.v1 as tf\n'), ((1069, 1122), 'language.emql.util.compute_hits_at_k', 'util.compute_hits_at_k', (['self.logits', 'self.labels'], {'k': '(1)'}), '(self.logits, self.labels, k=1)\n', (1091, 1122), False, 'from language.emql import util\n'), ((1141, 1194), 'language.emql.util.compute_hits_at_k', 'util.compute_hits_at_k', (['self.logits', 'self.labels'], {'k': '(2)'}), '(self.logits, self.labels, k=2)\n', (1163, 1194), False, 'from language.emql import util\n'), ((1423, 1478), 'language.emql.util.compute_recall_at_k', 'util.compute_recall_at_k', (['self.logits', 'self.labels'], {'k': '(1)'}), '(self.logits, self.labels, k=1)\n', (1447, 1478), False, 'from language.emql import util\n'), ((1499, 1554), 'language.emql.util.compute_recall_at_k', 'util.compute_recall_at_k', (['self.logits', 'self.labels'], {'k': '(2)'}), '(self.logits, self.labels, k=2)\n', (1523, 1554), False, 'from language.emql import util\n'), ((1785, 1851), 'language.emql.util.compute_average_precision_at_k', 'util.compute_average_precision_at_k', (['self.logits', 'self.labels'], {'k': '(1)'}), '(self.logits, self.labels, k=1)\n', (1820, 1851), False, 'from language.emql import util\n'), ((1878, 1944), 
'language.emql.util.compute_average_precision_at_k', 'util.compute_average_precision_at_k', (['self.logits', 'self.labels'], {'k': '(2)'}), '(self.logits, self.labels, k=2)\n', (1913, 1944), False, 'from language.emql import util\n'), ((2193, 2231), 'language.emql.util.get_nonzero_ids', 'util.get_nonzero_ids', (['self.labels'], {'k': '(1)'}), '(self.labels, k=1)\n', (2213, 2231), False, 'from language.emql import util\n'), ((2253, 2291), 'language.emql.util.get_nonzero_ids', 'util.get_nonzero_ids', (['self.labels'], {'k': '(2)'}), '(self.labels, k=2)\n', (2273, 2291), False, 'from language.emql import util\n'), ((2552, 2588), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, -1]'], {'dtype': 'tf.int32'}), '([0, -1], dtype=tf.int32)\n', (2563, 2588), True, 'import tensorflow.compat.v1 as tf\n'), ((2610, 2657), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]]'], {}), '([[0.1, 0.2, 0.3], [0.2, 0.3, 0.4]])\n', (2621, 2657), True, 'import tensorflow.compat.v1 as tf\n'), ((2669, 2739), 'language.emql.util.embedding_lookup_with_padding', 'util.embedding_lookup_with_padding', (['embeddings_mat', 'tokens'], {'padding': '(-1)'}), '(embeddings_mat, tokens, padding=-1)\n', (2703, 2739), False, 'from language.emql import util\n'), ((2942, 2987), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1, 2], [3, 4]]'], {'dtype': 'tf.int32'}), '([[1, 2], [3, 4]], dtype=tf.int32)\n', (2953, 2987), True, 'import tensorflow.compat.v1 as tf\n'), ((2996, 3047), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1, 2, 5], [4, 7, 8]]'], {'dtype': 'tf.int32'}), '([[1, 2, 5], [4, 7, 8]], dtype=tf.int32)\n', (3007, 3047), True, 'import tensorflow.compat.v1 as tf\n'), ((3064, 3091), 'language.emql.util.compute_x_in_set', 'util.compute_x_in_set', (['x', 's'], {}), '(x, s)\n', (3085, 3091), False, 'from language.emql import util\n'), ((3260, 3280), 'language.emql.util.BertTokenizer', 'util.BertTokenizer', ([], {}), '()\n', (3278, 3280), False, 'from 
language.emql import util\n'), ((1266, 1282), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1274, 1282), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1362, 1370), True, 'import numpy as np\n'), ((1628, 1646), 'numpy.array', 'np.array', (['[0.5, 0]'], {}), '([0.5, 0])\n', (1636, 1646), True, 'import numpy as np\n'), ((1720, 1738), 'numpy.array', 'np.array', (['[0.5, 1]'], {}), '([0.5, 1])\n', (1728, 1738), True, 'import numpy as np\n'), ((2024, 2044), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (2032, 2044), True, 'import numpy as np\n'), ((2115, 2135), 'numpy.array', 'np.array', (['[1.0, 0.5]'], {}), '([1.0, 0.5])\n', (2123, 2135), True, 'import numpy as np\n'), ((2366, 2386), 'numpy.array', 'np.array', (['[[1], [0]]'], {}), '([[1], [0]])\n', (2374, 2386), True, 'import numpy as np\n'), ((2461, 2488), 'numpy.array', 'np.array', (['[[1, 3], [0, -1]]'], {}), '([[1, 3], [0, -1]])\n', (2469, 2488), True, 'import numpy as np\n'), ((3157, 3183), 'numpy.array', 'np.array', (['[[1, 1], [0, 1]]'], {}), '([[1, 1], [0, 1]])\n', (3165, 3183), True, 'import numpy as np\n'), ((3370, 3388), 'numpy.sum', 'np.sum', (['input_mask'], {}), '(input_mask)\n', (3376, 3388), True, 'import numpy as np\n')] |
import numpy as np
import torch
from torch.nn.parameter import Parameter
from HyperSphere.GP.modules.gp_modules import GPModule, log_lower_bnd, log_upper_bnd
from HyperSphere.feature_map.functionals import id_transform
class Kernel(GPModule):
    """Base class for GP covariance functions carrying a log-amplitude parameter.

    The amplitude is stored on the log scale so it stays positive; an optional
    input map (either a plain callable or a learnable GPModule) transforms the
    inputs before the kernel is applied.
    """

    def __init__(self, input_map=None):
        super(Kernel, self).__init__()
        self.log_amp = Parameter(torch.FloatTensor(1))
        self.input_map = input_map if input_map is not None else id_transform

    def _map_is_module(self):
        # The input map only participates in parameter handling when it is a
        # learnable GPModule (a plain function has no parameters).
        return isinstance(self.input_map, GPModule)

    def reset_parameters(self):
        self.log_amp.data.normal_()
        if self._map_is_module():
            self.input_map.reset_parameters()

    def init_parameters(self, amp):
        self.log_amp.data.fill_(np.log(amp))
        if self._map_is_module():
            self.input_map.init_parameters()

    def log_kernel_amp(self):
        return self.log_amp

    def out_of_bounds(self, vec=None):
        """Return True when the (proposed) parameters leave the log bounds."""
        if vec is None:
            amp_ok = (log_lower_bnd <= self.log_amp.data).all() and \
                (self.log_amp.data <= log_upper_bnd).all()
        else:
            amp_ok = log_lower_bnd <= vec[0] and vec[0] <= log_upper_bnd
        if not amp_ok:
            return True
        if self._map_is_module():
            if vec is None:
                return self.input_map.out_of_bounds()
            return self.input_map.out_of_bounds(vec[1:])
        return False

    def n_params(self):
        count = 1
        if self._map_is_module():
            count += sum(p.numel() for p in self.input_map.parameters())
        return count

    def param_to_vec(self):
        pieces = [self.log_amp.data.clone()]
        if self._map_is_module():
            pieces.append(self.input_map.param_to_vec())
        return torch.cat(pieces)

    def vec_to_param(self, vec):
        # First entry is the log amplitude; the rest belongs to the input map.
        self.log_amp.data = vec[:1]
        if self._map_is_module():
            self.input_map.vec_to_param(vec[1:])

    def prior(self, vec):
        if self._map_is_module():
            return self.input_map.prior(vec[1:])
        return 0

    def forward(self, input1, input2=None):
        raise NotImplementedError
| [
"numpy.log",
"torch.FloatTensor",
"torch.cat"
] | [((1881, 1907), 'torch.cat', 'torch.cat', (['flat_param_list'], {}), '(flat_param_list)\n', (1890, 1907), False, 'import torch\n'), ((359, 379), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (376, 379), False, 'import torch\n'), ((743, 754), 'numpy.log', 'np.log', (['amp'], {}), '(amp)\n', (749, 754), True, 'import numpy as np\n')] |
"""Jobs for performing electron phonon calculations in VASP."""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
from jobflow import Flow, Response, job
from pymatgen.core import Structure
from pymatgen.electronic_structure.bandstructure import BandStructure
from atomate2.vasp.jobs.base import BaseVaspMaker, vasp_job
from atomate2.vasp.jobs.core import TransmuterMaker
from atomate2.vasp.schemas.elph import ElectronPhononRenormalisationDoc
from atomate2.vasp.sets.core import ElectronPhononSetGenerator
# Public API of this module.
__all__ = [
    "DEFAULT_ELPH_TEMPERATURES",
    "DEFAULT_MIN_SUPERCELL_LENGTH",
    "SupercellElectronPhononDisplacedStructureMaker",
    "run_elph_displacements",
    "calculate_electron_phonon_renormalisation",
]

# Default temperatures (K) at which displaced structures are generated.
DEFAULT_ELPH_TEMPERATURES = (0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
# Default minimum supercell lattice-vector length (Å).
DEFAULT_MIN_SUPERCELL_LENGTH = 15

logger = logging.getLogger(__name__)
@dataclass
class SupercellElectronPhononDisplacedStructureMaker(TransmuterMaker):
    """
    Maker to run electron phonon VASP jobs to generate displaced structures.

    This job:

    1. Generates a close to cubic supercell with cell lengths > 15 Å.
    2. Performs an IBRION = 6 finite-displacement calculation to calculate the
       phonon eigenvalues and eigenvectors.
    3. Displaces the atoms to simulate a range of temperatures.

    .. warning::
        Electron phonon properties should be converged with respect to supercell
        size. Typically, cells with all lattice vectors greater than 15 Å should
        be a reasonable starting point.

    .. note::
        The input structure should be well relaxed to avoid imaginary modes. For
        example, using :obj:`TightRelaxMaker`.

    .. note::
        Requires VASP 6.0 and higher. See
        https://www.vasp.at/wiki/index.php/Electron-phonon_interactions_from_Monte-Carlo_sampling
        for more details.

    Parameters
    ----------
    name : str
        The job name.
    input_set_generator : .VaspInputSetGenerator
        A generator used to make the input set.
    transformations : tuple of str
        Names of the pymatgen transformations applied (supercell only).
    transformation_params : tuple of dict
        Parameters for the transformations; computed from
        ``min_supercell_length`` in :meth:`make` when not set explicitly.
    temperatures : tuple of float
        Temperatures at which displaced structures are generated.
    min_supercell_length : float
        Minimum length of each supercell lattice vector in Å.
    write_input_set_kwargs : dict
        Keyword arguments that will get passed to :obj:`.write_vasp_input_set`.
    copy_vasp_kwargs : dict
        Keyword arguments that will get passed to :obj:`.copy_vasp_outputs`.
    run_vasp_kwargs : dict
        Keyword arguments that will get passed to :obj:`.run_vasp`.
    task_document_kwargs : dict
        Keyword arguments that will get passed to :obj:`.TaskDocument.from_directory`.
    stop_children_kwargs : dict
        Keyword arguments that will get passed to :obj:`.should_stop_children`.
    write_additional_data : dict
        Additional data to write to the current directory. Given as a dict of
        {filename: data}. Note that if using FireWorks, dictionary keys cannot
        contain the "." character which is typically used to denote file
        extensions. To avoid this, use the ":" character, which will
        automatically be converted to ".". E.g.
        ``{"my_file:txt": "contents of the file"}``.
    """

    name: str = "supercell electron phonon displacements"
    input_set_generator: ElectronPhononSetGenerator = field(
        default_factory=ElectronPhononSetGenerator
    )
    transformations: tuple[str, ...] = ("SupercellTransformation",)
    transformation_params: tuple[dict, ...] = None
    temperatures: Tuple[float, ...] = DEFAULT_ELPH_TEMPERATURES
    min_supercell_length: float = DEFAULT_MIN_SUPERCELL_LENGTH

    @vasp_job
    def make(
        self,
        structure: Structure,
        prev_vasp_dir: str | Path | None = None,
    ):
        """
        Run a transmuter VASP job.

        Parameters
        ----------
        structure : Structure
            A pymatgen structure object.
        prev_vasp_dir : str or Path or None
            A previous VASP calculation directory to copy output files from.
        """
        # Integer supercell multipliers chosen so each lattice vector reaches
        # at least ``min_supercell_length``. NOTE(review): if an input lattice
        # vector is already more than twice the target, the ratio rounds to 0
        # — presumably inputs are primitive cells smaller than the target;
        # confirm upstream.
        dim = self.min_supercell_length / np.array(structure.lattice.abc)
        scaling_matrix = np.diag(dim.round().astype(int)).tolist()
        if self.transformation_params is None:
            # only overwrite transformation params if it is not set
            self.transformation_params = ({"scaling_matrix": scaling_matrix},)

        # update temperatures
        self.input_set_generator.temperatures = self.temperatures

        # ``make`` is wrapped as a jobflow job by ``@vasp_job``; call the
        # parent's undecorated implementation to avoid creating a nested job.
        return super().make.original(self, structure, prev_vasp_dir)
@job
def run_elph_displacements(
    temperatures: List[float],
    structures: List[Structure],
    vasp_maker: BaseVaspMaker,
    prev_vasp_dir: str | Path | None = None,
    original_structure: Structure = None,
    supercell_structure: Structure = None,
):
    """
    Spawn one VASP calculation per electron-phonon displaced structure.

    Note, this job will replace itself with N displacement calculations.

    Parameters
    ----------
    temperatures : list of float
        Temperatures at which electron phonon structures were generated.
    structures : list of Structure
        Electron phonon displaced structures for each temperature.
    vasp_maker : BaseVaspMaker
        A maker to generate VASP calculations on the displaced structures.
    prev_vasp_dir : str or Path or None
        A previous VASP directory to use for copying VASP outputs.
    original_structure : Structure
        The original structure before the supercell is made and before
        electron phonon displacements.
    supercell_structure : Structure
        The undisplaced supercell structure.
    """
    if len(temperatures) != len(structures):
        raise ValueError(
            f"Number of temperatures ({len(temperatures)}) does not equal number of "
            f"structures ({len(structures)})."
        )

    outputs: Dict[str, list] = {
        "temperatures": [],
        "band_structures": [],
        "structures": [],
        "uuids": [],
        "dirs": [],
    }
    displacement_jobs = []
    for temp, structure in zip(temperatures, structures):
        displacement_job = vasp_maker.make(structure, prev_vasp_dir=prev_vasp_dir)
        displacement_job.append_name(f" T={temp}")

        # Record the temperature and parent structures in an elph_info.json
        # file; it is picked up by the task document so the elph builder can
        # reconstruct the elph document. The ":" in the filename is converted
        # to "." automatically.
        info = {
            "temperature": temp,
            "original_structure": original_structure,
            "supercell_structure": supercell_structure,
        }
        displacement_job.update_maker_kwargs(
            {"_set": {"write_additional_data->elph_info:json": info}}, dict_mod=True
        )
        displacement_jobs.append(displacement_job)

        # Collect the outputs downstream jobs need.
        outputs["temperatures"].append(temp)
        outputs["band_structures"].append(
            displacement_job.output.vasp_objects["bandstructure"]
        )
        outputs["structures"].append(displacement_job.output.structure)
        outputs["dirs"].append(displacement_job.output.dir_name)
        outputs["uuids"].append(displacement_job.output.uuid)

    return Response(replace=Flow(displacement_jobs, outputs))
@job(output_schema=ElectronPhononRenormalisationDoc)
def calculate_electron_phonon_renormalisation(
    temperatures: List[float],
    displacement_band_structures: List[BandStructure],
    displacement_structures: List[Structure],
    displacement_uuids: List[str],
    displacement_dirs: List[str],
    bulk_band_structure: BandStructure,
    bulk_structure: Structure,
    bulk_uuid: str,
    bulk_dir: str,
    elph_uuid: str,
    elph_dir: str,
    original_structure: Structure,
):
    """
    Calculate the electron-phonon renormalisation of the band gap.

    Failed displacement calculations (those whose band structure is ``None``)
    are dropped before the renormalisation is computed.

    Parameters
    ----------
    temperatures : list of float
        The temperatures at which electron phonon properties were calculated.
    displacement_band_structures : list of BandStructure
        The electron-phonon displaced band structures.
    displacement_structures : list of Structure
        The electron-phonon displaced structures.
    displacement_uuids : list of str
        The UUIDs of the electron-phonon displaced band structure calculations.
    displacement_dirs : list of str
        The calculation directories of the electron-phonon displaced band
        structure calculations.
    bulk_band_structure : BandStructure
        The band structure of the bulk undisplaced supercell calculation.
    bulk_structure : Structure
        The structure of the bulk undisplaced supercell.
    bulk_uuid : str
        The UUID of the bulk undisplaced supercell band structure calculation.
    bulk_dir : str
        The directory of the bulk undisplaced supercell band structure
        calculation.
    elph_uuid : str
        The UUID of the electron-phonon calculation that generated the
        displaced structures.
    elph_dir : str
        The directory of the electron-phonon calculation that generated the
        displaced structures.
    original_structure : Structure
        The original primitive structure for which electron-phonon
        calculations were performed.
    """
    if bulk_structure is None:
        raise ValueError(
            "Bulk (undisplaced) supercell band structure calculation failed. Cannot "
            "calculate electron-phonon renormalisation."
        )

    # Indices of successful displacement calculations.
    keep = [
        i
        for i, band_structure in enumerate(displacement_band_structures)
        if band_structure is not None
    ]

    def _select(values):
        # Restrict a parallel list to the successful calculations.
        return [values[i] for i in keep]

    temperatures = _select(temperatures)
    displacement_band_structures = _select(displacement_band_structures)
    displacement_structures = _select(displacement_structures)
    displacement_uuids = _select(displacement_uuids)
    displacement_dirs = _select(displacement_dirs)

    logger.info("Calculating electron-phonon renormalisation")

    return ElectronPhononRenormalisationDoc.from_band_structures(
        temperatures,
        displacement_band_structures,
        displacement_structures,
        displacement_uuids,
        displacement_dirs,
        bulk_band_structure,
        bulk_structure,
        bulk_uuid,
        bulk_dir,
        elph_uuid,
        elph_dir,
        original_structure,
    )
| [
"logging.getLogger",
"jobflow.Response",
"jobflow.job",
"numpy.array",
"atomate2.vasp.schemas.elph.ElectronPhononRenormalisationDoc.from_band_structures",
"jobflow.Flow",
"dataclasses.field"
] | [((978, 1005), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (995, 1005), False, 'import logging\n'), ((7140, 7191), 'jobflow.job', 'job', ([], {'output_schema': 'ElectronPhononRenormalisationDoc'}), '(output_schema=ElectronPhononRenormalisationDoc)\n', (7143, 7191), False, 'from jobflow import Flow, Response, job\n'), ((3249, 3298), 'dataclasses.field', 'field', ([], {'default_factory': 'ElectronPhononSetGenerator'}), '(default_factory=ElectronPhononSetGenerator)\n', (3254, 3298), False, 'from dataclasses import dataclass, field\n'), ((7078, 7097), 'jobflow.Flow', 'Flow', (['jobs', 'outputs'], {}), '(jobs, outputs)\n', (7082, 7097), False, 'from jobflow import Flow, Response, job\n'), ((7109, 7136), 'jobflow.Response', 'Response', ([], {'replace': 'disp_flow'}), '(replace=disp_flow)\n', (7117, 7136), False, 'from jobflow import Flow, Response, job\n'), ((9883, 10159), 'atomate2.vasp.schemas.elph.ElectronPhononRenormalisationDoc.from_band_structures', 'ElectronPhononRenormalisationDoc.from_band_structures', (['temperatures', 'displacement_band_structures', 'displacement_structures', 'displacement_uuids', 'displacement_dirs', 'bulk_band_structure', 'bulk_structure', 'bulk_uuid', 'bulk_dir', 'elph_uuid', 'elph_dir', 'original_structure'], {}), '(temperatures,\n displacement_band_structures, displacement_structures,\n displacement_uuids, displacement_dirs, bulk_band_structure,\n bulk_structure, bulk_uuid, bulk_dir, elph_uuid, elph_dir,\n original_structure)\n', (9936, 10159), False, 'from atomate2.vasp.schemas.elph import ElectronPhononRenormalisationDoc\n'), ((4020, 4051), 'numpy.array', 'np.array', (['structure.lattice.abc'], {}), '(structure.lattice.abc)\n', (4028, 4051), True, 'import numpy as np\n')] |
from __future__ import division,print_function
import numpy as np
import tensorflow as tf
import sys
import os
import glob
import re
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from flask import Flask,redirect,url_for,request,render_template
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Create the Flask application instance.
app = Flask(__name__)

# Load the pre-trained VGG19 (ImageNet weights) once at startup so every
# request reuses the same in-memory model.
model_path = 'vgg19_imagenet_weights.h5'
model = load_model(model_path)
model.summary()
# model._make_predict_function() #Necessary for imagenet weights
def model_predict(img_path, model):
    """Classify a single image file with the given Keras model.

    Loads the image at ``img_path``, resizes it to the 224x224 input size
    expected by VGG19, applies the ImageNet channel preprocessing, and
    returns the raw prediction array from ``model.predict``.
    """
    img = image.load_img(img_path, target_size=(224, 224))
    batch = image.img_to_array(img)
    # Model expects a batch dimension: (1, 224, 224, 3).
    batch = np.expand_dims(batch, axis=0)
    # Same preprocessing the network saw at training time.
    batch = preprocess_input(batch)
    return model.predict(batch)
@app.route('/',methods = ['GET'])
def index():
    """Render the main upload page."""
    return render_template('./index.html')
@app.route('/',methods=["POST","GET"])
def upload():
    """Handle an uploaded image and return the top-1 ImageNet label.

    POST: save the uploaded file under ``./uploads`` (created on demand) and
    return the predicted class name as plain text.
    GET: render the upload page. The original code returned ``None`` here,
    which makes Flask raise a TypeError because a view must return a valid
    response.
    """
    if request.method == 'POST':
        f = request.files['file']

        # Save the file to ./uploads next to this script, creating the
        # directory if it does not exist (f.save fails otherwise).
        basepath = os.path.dirname(__file__)
        upload_dir = os.path.join(basepath, 'uploads')
        os.makedirs(upload_dir, exist_ok=True)
        file_path = os.path.join(upload_dir, secure_filename(f.filename))
        f.save(file_path)

        # Make prediction
        preds = model_predict(file_path, model)

        # Decode the raw probabilities into the single most likely
        # human-readable ImageNet class name.
        pred_class = decode_predictions(preds, top=1)
        return str(pred_class[0][0][1])

    # GET requests fall through to the upload page instead of returning None.
    return render_template('./index.html')
if __name__ == '__main__':
    # Development server only; for production use a real WSGI server such as
    # gevent's WSGIServer (imported above).
    app.run(debug=True)
"tensorflow.keras.preprocessing.image.load_img",
"flask.render_template",
"tensorflow.keras.applications.imagenet_utils.decode_predictions",
"flask.Flask",
"os.path.dirname",
"tensorflow.keras.models.load_model",
"werkzeug.utils.secure_filename",
"numpy.expand_dims",
"tensorflow.keras.preprocessing.... | [((519, 534), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (524, 534), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((602, 624), 'tensorflow.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (612, 624), False, 'from tensorflow.keras.models import load_model\n'), ((760, 808), 'tensorflow.keras.preprocessing.image.load_img', 'image.load_img', (['img_path'], {'target_size': '(224, 224)'}), '(img_path, target_size=(224, 224))\n', (774, 808), False, 'from tensorflow.keras.preprocessing import image\n'), ((849, 872), 'tensorflow.keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (867, 872), False, 'from tensorflow.keras.preprocessing import image\n'), ((882, 907), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (896, 907), True, 'import numpy as np\n'), ((1032, 1051), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {}), '(x)\n', (1048, 1051), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions\n'), ((1184, 1215), 'flask.render_template', 'render_template', (['"""./index.html"""'], {}), "('./index.html')\n", (1199, 1215), False, 'from flask import Flask, redirect, url_for, request, render_template\n'), ((1419, 1444), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1434, 1444), False, 'import os\n'), ((1741, 1773), 'tensorflow.keras.applications.imagenet_utils.decode_predictions', 'decode_predictions', (['preds'], {'top': '(1)'}), '(preds, top=1)\n', (1759, 1773), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions\n'), ((1498, 1525), 'werkzeug.utils.secure_filename', 'secure_filename', (['f.filename'], {}), '(f.filename)\n', (1513, 1525), False, 'from werkzeug.utils import secure_filename\n')] |
from ..meshio import form_mesh
import numpy as np
import logging
def merge_meshes(input_meshes):
    """ Merge multiple meshes into a single mesh.

    Args:
        input_meshes (``list``): a list of input :class:`Mesh` objects.

    Returns:
        A :py:class:`Mesh` consists of all vertices, faces and voxels
        from ``input_meshes``.  The following mesh attributes are defined:

        * ``vertex_sources``: Indices of source vertices from the input mesh.
        * ``face_sources``: Indices of source faces from the input mesh if the
          output contains at least 1 face.
        * ``voxel_sources``: Indices of source voxels from the input mesh if the
          output contains at least 1 voxel.
    """
    logger = logging.getLogger(__name__)
    vertices, faces, voxels = [], [], []
    vertex_sources, face_sources, voxel_sources = [], [], []
    vertex_offset = 0

    for i, mesh in enumerate(input_meshes):
        vertices.append(mesh.vertices)
        vertex_sources.append(np.ones(mesh.num_vertices) * i)
        if mesh.num_faces > 0:
            # Shift face indices so they reference the merged vertex array.
            faces.append(mesh.faces + vertex_offset)
            face_sources.append(np.ones(mesh.num_faces) * i)
        if mesh.num_voxels > 0:
            voxels.append(mesh.voxels + vertex_offset)
            voxel_sources.append(np.ones(mesh.num_voxels) * i)
        vertex_offset += mesh.num_vertices

    def _stack(chunks, width, dtype, sources):
        # Stack per-mesh arrays, falling back to empty arrays when no mesh
        # contributed anything.
        if chunks:
            return np.vstack(chunks), np.concatenate(sources)
        return np.zeros((0, width), dtype=dtype), np.array([])

    vertices, vertex_sources = _stack(vertices, 3, float, vertex_sources)
    faces, face_sources = _stack(faces, 3, int, face_sources)

    if len(voxels) > 0 and len(voxels) == len(input_meshes):
        voxels = np.vstack(voxels)
        voxel_sources = np.concatenate(voxel_sources)
    else:
        # Not all input meshes contains voxels.  So the merged mesh will not
        # be a valid volume representation.  It is probably base to drop all
        # voxels.
        if len(voxels) > 0:
            logger.warning("Not all input meshes represent a volume, "
                    "so dropping all voxels.")
        voxels = np.zeros((0, 4), dtype=int)
        voxel_sources = np.array([])

    output_mesh = form_mesh(vertices, faces, voxels)
    output_mesh.add_attribute("vertex_sources")
    output_mesh.set_attribute("vertex_sources", vertex_sources)
    if len(face_sources) > 0:
        output_mesh.add_attribute("face_sources")
        output_mesh.set_attribute("face_sources", face_sources)
    if len(voxel_sources) > 0:
        output_mesh.add_attribute("voxel_sources")
        output_mesh.set_attribute("voxel_sources", voxel_sources)
    return output_mesh
| [
"logging.getLogger",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"numpy.concatenate"
] | [((759, 786), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (776, 786), False, 'import logging\n'), ((1475, 1494), 'numpy.vstack', 'np.vstack', (['vertices'], {}), '(vertices)\n', (1484, 1494), True, 'import numpy as np\n'), ((1521, 1551), 'numpy.concatenate', 'np.concatenate', (['vertex_sources'], {}), '(vertex_sources)\n', (1535, 1551), True, 'import numpy as np\n'), ((1583, 1612), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {'dtype': 'float'}), '((0, 3), dtype=float)\n', (1591, 1612), True, 'import numpy as np\n'), ((1639, 1651), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1647, 1651), True, 'import numpy as np\n'), ((1695, 1711), 'numpy.vstack', 'np.vstack', (['faces'], {}), '(faces)\n', (1704, 1711), True, 'import numpy as np\n'), ((1736, 1764), 'numpy.concatenate', 'np.concatenate', (['face_sources'], {}), '(face_sources)\n', (1750, 1764), True, 'import numpy as np\n'), ((1793, 1820), 'numpy.zeros', 'np.zeros', (['(0, 3)'], {'dtype': 'int'}), '((0, 3), dtype=int)\n', (1801, 1820), True, 'import numpy as np\n'), ((1845, 1857), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1853, 1857), True, 'import numpy as np\n'), ((1940, 1957), 'numpy.vstack', 'np.vstack', (['voxels'], {}), '(voxels)\n', (1949, 1957), True, 'import numpy as np\n'), ((1983, 2012), 'numpy.concatenate', 'np.concatenate', (['voxel_sources'], {}), '(voxel_sources)\n', (1997, 2012), True, 'import numpy as np\n'), ((2370, 2397), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'int'}), '((0, 4), dtype=int)\n', (2378, 2397), True, 'import numpy as np\n'), ((2423, 2435), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2431, 2435), True, 'import numpy as np\n'), ((1052, 1078), 'numpy.ones', 'np.ones', (['mesh.num_vertices'], {}), '(mesh.num_vertices)\n', (1059, 1078), True, 'import numpy as np\n'), ((1202, 1225), 'numpy.ones', 'np.ones', (['mesh.num_faces'], {}), '(mesh.num_faces)\n', (1209, 1225), True, 'import numpy as np\n'), ((1353, 1377), 
'numpy.ones', 'np.ones', (['mesh.num_voxels'], {}), '(mesh.num_voxels)\n', (1360, 1377), True, 'import numpy as np\n')] |
from collections.abc import MutableMapping
import numpy as np
# HDF5/netCDF bookkeeping attributes that must never be exposed to users.
_HIDDEN_ATTRS = frozenset(
    [
        "REFERENCE_LIST",
        "CLASS",
        "DIMENSION_LIST",
        "NAME",
        "_Netcdf4Dimid",
        "_Netcdf4Coordinates",
        "_nc3_strict",
        "_NCProperties",
    ]
)


class Attributes(MutableMapping):
    """Mutable mapping over HDF5 attributes that hides netCDF bookkeeping keys."""

    def __init__(self, h5attrs, check_dtype):
        self._h5attrs = h5attrs
        self._check_dtype = check_dtype

    def __getitem__(self, key):
        import h5py

        if key in _HIDDEN_ATTRS:
            raise KeyError(key)
        value = self._h5attrs[key]
        # see https://github.com/h5netcdf/h5netcdf/issues/94 for details
        if isinstance(value, h5py.Empty):
            string_info = h5py.check_string_dtype(value.dtype)
            if string_info and string_info.length == 1:
                return b""
        return value

    def __setitem__(self, key, value):
        if key in _HIDDEN_ATTRS:
            raise AttributeError("cannot write attribute with reserved name %r" % key)
        dtype = value.dtype if hasattr(value, "dtype") else np.asarray(value).dtype
        self._check_dtype(dtype)
        self._h5attrs[key] = value

    def __delitem__(self, key):
        del self._h5attrs[key]

    def __iter__(self):
        return (key for key in self._h5attrs if key not in _HIDDEN_ATTRS)

    def __len__(self):
        # Hidden attributes present in the underlying store do not count.
        hidden_count = sum(attr in self._h5attrs for attr in _HIDDEN_ATTRS)
        return len(self._h5attrs) - hidden_count

    def __repr__(self):
        header = "%r" % type(self)
        entries = ["%s: %r" % (k, v) for k, v in self.items()]
        return "\n".join([header] + entries)
| [
"h5py.check_string_dtype",
"numpy.asarray"
] | [((721, 770), 'h5py.check_string_dtype', 'h5py.check_string_dtype', (['self._h5attrs[key].dtype'], {}), '(self._h5attrs[key].dtype)\n', (744, 770), False, 'import h5py\n'), ((1150, 1167), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (1160, 1167), True, 'import numpy as np\n')] |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A mock-up showing a ResNet50 network with training on synthetic data.
This file uses the stax neural network definition library and the optimizers
optimization library.
"""
import numpy.random as npr
import jax.numpy as jnp
from jax.config import config
from jax import jit, grad, random
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import (AvgPool, BatchNorm, Conv, Dense, FanInSum,
FanOut, Flatten, GeneralConv, Identity,
MaxPool, Relu, LogSoftmax)
# ResNet blocks compose other layers
def ConvBlock(kernel_size, filters, strides=(2, 2)):
ks = kernel_size
filters1, filters2, filters3 = filters
Main = stax.serial(
Conv(filters1, (1, 1), strides), BatchNorm(), Relu,
Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu,
Conv(filters3, (1, 1)), BatchNorm())
Shortcut = stax.serial(Conv(filters3, (1, 1), strides), BatchNorm())
return stax.serial(FanOut(2), stax.parallel(Main, Shortcut), FanInSum, Relu)
def IdentityBlock(kernel_size, filters):
ks = kernel_size
filters1, filters2 = filters
def make_main(input_shape):
# the number of output channels depends on the number of input channels
return stax.serial(
Conv(filters1, (1, 1)), BatchNorm(), Relu,
Conv(filters2, (ks, ks), padding='SAME'), BatchNorm(), Relu,
Conv(input_shape[3], (1, 1)), BatchNorm())
Main = stax.shape_dependent(make_main)
return stax.serial(FanOut(2), stax.parallel(Main, Identity), FanInSum, Relu)
# ResNet architectures compose layers and ResNet blocks
def ResNet50(num_classes):
return stax.serial(
GeneralConv(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME'),
BatchNorm(), Relu, MaxPool((3, 3), strides=(2, 2)),
ConvBlock(3, [64, 64, 256], strides=(1, 1)),
IdentityBlock(3, [64, 64]),
IdentityBlock(3, [64, 64]),
ConvBlock(3, [128, 128, 512]),
IdentityBlock(3, [128, 128]),
IdentityBlock(3, [128, 128]),
IdentityBlock(3, [128, 128]),
ConvBlock(3, [256, 256, 1024]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
IdentityBlock(3, [256, 256]),
ConvBlock(3, [512, 512, 2048]),
IdentityBlock(3, [512, 512]),
IdentityBlock(3, [512, 512]),
AvgPool((7, 7)), Flatten, Dense(num_classes), LogSoftmax)
if __name__ == "__main__":
rng_key = random.PRNGKey(0)
batch_size = 8
num_classes = 1001
input_shape = (224, 224, 3, batch_size)
step_size = 0.1
num_steps = 10
init_fun, predict_fun = ResNet50(num_classes)
_, init_params = init_fun(rng_key, input_shape)
def loss(params, batch):
inputs, targets = batch
logits = predict_fun(params, inputs)
return -jnp.sum(logits * targets)
def accuracy(params, batch):
inputs, targets = batch
target_class = jnp.argmax(targets, axis=-1)
predicted_class = jnp.argmax(predict_fun(params, inputs), axis=-1)
return jnp.mean(predicted_class == target_class)
def synth_batches():
rng = npr.RandomState(0)
while True:
images = rng.rand(*input_shape).astype('float32')
labels = rng.randint(num_classes, size=(batch_size, 1))
onehot_labels = labels == jnp.arange(num_classes)
yield images, onehot_labels
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=0.9)
batches = synth_batches()
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
opt_state = opt_init(init_params)
for i in range(num_steps):
opt_state = update(i, opt_state, next(batches))
trained_params = get_params(opt_state)
| [
"jax.experimental.stax.Conv",
"jax.random.PRNGKey",
"jax.experimental.stax.FanOut",
"jax.experimental.stax.BatchNorm",
"jax.experimental.stax.Dense",
"jax.numpy.arange",
"jax.experimental.stax.shape_dependent",
"jax.experimental.stax.parallel",
"jax.experimental.stax.AvgPool",
"jax.numpy.sum",
"... | [((2055, 2086), 'jax.experimental.stax.shape_dependent', 'stax.shape_dependent', (['make_main'], {}), '(make_main)\n', (2075, 2086), False, 'from jax.experimental import stax\n'), ((3102, 3119), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (3116, 3119), False, 'from jax import jit, grad, random\n'), ((4017, 4057), 'jax.experimental.optimizers.momentum', 'optimizers.momentum', (['step_size'], {'mass': '(0.9)'}), '(step_size, mass=0.9)\n', (4036, 4057), False, 'from jax.experimental import optimizers\n'), ((1340, 1371), 'jax.experimental.stax.Conv', 'Conv', (['filters1', '(1, 1)', 'strides'], {}), '(filters1, (1, 1), strides)\n', (1344, 1371), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1373, 1384), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (1382, 1384), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1398, 1438), 'jax.experimental.stax.Conv', 'Conv', (['filters2', '(ks, ks)'], {'padding': '"""SAME"""'}), "(filters2, (ks, ks), padding='SAME')\n", (1402, 1438), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1440, 1451), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (1449, 1451), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1465, 1487), 'jax.experimental.stax.Conv', 'Conv', (['filters3', '(1, 1)'], {}), '(filters3, (1, 1))\n', (1469, 1487), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1489, 1500), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], 
{}), '()\n', (1498, 1500), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1527, 1558), 'jax.experimental.stax.Conv', 'Conv', (['filters3', '(1, 1)', 'strides'], {}), '(filters3, (1, 1), strides)\n', (1531, 1558), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1560, 1571), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (1569, 1571), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1594, 1603), 'jax.experimental.stax.FanOut', 'FanOut', (['(2)'], {}), '(2)\n', (1600, 1603), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1605, 1634), 'jax.experimental.stax.parallel', 'stax.parallel', (['Main', 'Shortcut'], {}), '(Main, Shortcut)\n', (1618, 1634), False, 'from jax.experimental import stax\n'), ((2108, 2117), 'jax.experimental.stax.FanOut', 'FanOut', (['(2)'], {}), '(2)\n', (2114, 2117), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2119, 2148), 'jax.experimental.stax.parallel', 'stax.parallel', (['Main', 'Identity'], {}), '(Main, Identity)\n', (2132, 2148), False, 'from jax.experimental import stax\n'), ((2280, 2345), 'jax.experimental.stax.GeneralConv', 'GeneralConv', (["('HWCN', 'OIHW', 'NHWC')", '(64)', '(7, 7)', '(2, 2)', '"""SAME"""'], {}), "(('HWCN', 'OIHW', 'NHWC'), 64, (7, 7), (2, 2), 'SAME')\n", (2291, 2345), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2353, 2364), 
'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2362, 2364), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2372, 2403), 'jax.experimental.stax.MaxPool', 'MaxPool', (['(3, 3)'], {'strides': '(2, 2)'}), '((3, 3), strides=(2, 2))\n', (2379, 2403), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3003, 3018), 'jax.experimental.stax.AvgPool', 'AvgPool', (['(7, 7)'], {}), '((7, 7))\n', (3010, 3018), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3029, 3047), 'jax.experimental.stax.Dense', 'Dense', (['num_classes'], {}), '(num_classes)\n', (3034, 3047), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3549, 3577), 'jax.numpy.argmax', 'jnp.argmax', (['targets'], {'axis': '(-1)'}), '(targets, axis=-1)\n', (3559, 3577), True, 'import jax.numpy as jnp\n'), ((3660, 3701), 'jax.numpy.mean', 'jnp.mean', (['(predicted_class == target_class)'], {}), '(predicted_class == target_class)\n', (3668, 3701), True, 'import jax.numpy as jnp\n'), ((3736, 3754), 'numpy.random.RandomState', 'npr.RandomState', (['(0)'], {}), '(0)\n', (3751, 3754), True, 'import numpy.random as npr\n'), ((1883, 1905), 'jax.experimental.stax.Conv', 'Conv', (['filters1', '(1, 1)'], {}), '(filters1, (1, 1))\n', (1887, 1905), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1907, 1918), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (1916, 1918), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, 
Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1934, 1974), 'jax.experimental.stax.Conv', 'Conv', (['filters2', '(ks, ks)'], {'padding': '"""SAME"""'}), "(filters2, (ks, ks), padding='SAME')\n", (1938, 1974), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((1976, 1987), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (1985, 1987), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2003, 2031), 'jax.experimental.stax.Conv', 'Conv', (['input_shape[3]', '(1, 1)'], {}), '(input_shape[3], (1, 1))\n', (2007, 2031), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((2033, 2044), 'jax.experimental.stax.BatchNorm', 'BatchNorm', ([], {}), '()\n', (2042, 2044), False, 'from jax.experimental.stax import AvgPool, BatchNorm, Conv, Dense, FanInSum, FanOut, Flatten, GeneralConv, Identity, MaxPool, Relu, LogSoftmax\n'), ((3444, 3469), 'jax.numpy.sum', 'jnp.sum', (['(logits * targets)'], {}), '(logits * targets)\n', (3451, 3469), True, 'import jax.numpy as jnp\n'), ((3921, 3944), 'jax.numpy.arange', 'jnp.arange', (['num_classes'], {}), '(num_classes)\n', (3931, 3944), True, 'import jax.numpy as jnp\n'), ((4189, 4199), 'jax.grad', 'grad', (['loss'], {}), '(loss)\n', (4193, 4199), False, 'from jax import jit, grad, random\n')] |
from __future__ import print_function
from unittest import TestCase
import numpy as np
from nose import SkipTest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn.mixture import GMM
from sklearn.utils import check_random_state
from hmmlearn import hmm
from hmmlearn.utils import normalize, assert_raises
np.seterr(all='warn')
def fit_hmm_and_monitor_log_likelihood(h, X, lengths=None, n_iter=1):
h.n_iter = 1 # make sure we do a single iteration at a time
h.init_params = '' # and don't re-init params
loglikelihoods = np.empty(n_iter, dtype=float)
for i in range(n_iter):
h.fit(X, lengths=lengths)
loglikelihoods[i] = h.score(X, lengths=lengths)
return loglikelihoods
class GaussianHMMTestMixin(object):
covariance_type = None # set by subclasses
def setUp(self):
self.prng = prng = np.random.RandomState(10)
self.n_components = n_components = 3
self.n_features = n_features = 3
self.startprob = prng.rand(n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = prng.rand(n_components, n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, n_components))
self.means = prng.randint(-20, 20, (n_components, n_features))
self.covars = {
'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)),
'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
'full': np.array([make_spd_matrix(n_features, random_state=0)
+ np.eye(n_features)
for x in range(n_components)]),
}
self.expanded_covars = {
'spherical': [np.eye(n_features) * cov
for cov in self.covars['spherical']],
'diag': [np.diag(cov) for cov in self.covars['diag']],
'tied': [self.covars['tied']] * n_components,
'full': self.covars['full'],
}
def test_bad_covariance_type(self):
with assert_raises(ValueError):
h = hmm.GaussianHMM(20, covariance_type='badcovariance_type')
h.means_ = self.means
h.covars_ = []
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h._check()
def test_score_samples_and_decode(self):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.means_ = self.means
h.covars_ = self.covars[self.covariance_type]
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * h.means_
gaussidx = np.repeat(np.arange(self.n_components), 5)
n_samples = len(gaussidx)
X = self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]
h._init(X, params="st")
ll, posteriors = h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
viterbi_ll, stateseq = h.decode(X)
assert_array_equal(stateseq, gaussidx)
def test_sample(self, n=1000):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
h.means_ = 20 * self.means
h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
X, state_sequence = h.sample(n, random_state=self.prng)
self.assertEqual(X.shape, (n, self.n_features))
self.assertEqual(len(state_sequence), n)
def test_fit(self, params='stmc', n_iter=5, **kwargs):
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.means_ = 20 * self.means
h.covars_ = self.covars[self.covariance_type]
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(X, lengths=lengths)
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
# Check that the log-likelihood is always increasing during training.
diff = np.diff(trainll)
message = ("Decreasing log-likelihood for {0} covariance: {1}"
.format(self.covariance_type, diff))
self.assertTrue(np.all(diff >= -1e-6), message)
def test_fit_works_on_sequences_of_different_length(self):
lengths = [3, 4, 5]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(X, lengths=lengths)
def test_fit_with_length_one_signal(self):
lengths = [10, 8, 1]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
# This shouldn't raise
# ValueError: zero-size array to reduction operation maximum which
# has no identity
h.fit(X, lengths=lengths)
def test_fit_with_priors(self, params='stmc', n_iter=5):
startprob_prior = 10 * self.startprob + 2.0
transmat_prior = 10 * self.transmat + 2.0
means_prior = self.means
means_weight = 2.0
covars_weight = 2.0
if self.covariance_type in ('full', 'tied'):
covars_weight += self.n_features
covars_prior = self.covars[self.covariance_type]
h = hmm.GaussianHMM(self.n_components, self.covariance_type)
h.startprob_ = self.startprob
h.startprob_prior = startprob_prior
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.transmat_prior = transmat_prior
h.means_ = 20 * self.means
h.means_prior = means_prior
h.means_weight = means_weight
h.covars_ = self.covars[self.covariance_type]
h.covars_prior = covars_prior
h.covars_weight = covars_weight
lengths = [100] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Re-initialize the parameters and check that we can converge to the
# original parameter values.
h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type,
params=params)
h_learn.n_iter = 0
h_learn.fit(X, lengths=lengths)
fit_hmm_and_monitor_log_likelihood(
h_learn, X, lengths=lengths, n_iter=n_iter)
# Make sure we've converged to the right parameters.
# a) means
self.assertTrue(np.allclose(sorted(h.means_.tolist()),
sorted(h_learn.means_.tolist()),
0.01))
# b) covars are hard to estimate precisely from a relatively small
# sample, thus the large threshold
self.assertTrue(np.allclose(sorted(h._covars_.tolist()),
sorted(h_learn._covars_.tolist()),
10))
def test_fit_non_ergodic_transmat(self):
h = hmm.GaussianHMM(n_components=5, covariance_type='full',
n_iter=100, init_params='st')
h.startprob_ = np.array([1, 0, 0, 0, 0])
h.transmat_ = np.array([[0.9, 0.1, 0, 0, 0],
[0, 0.9, 0.1, 0, 0],
[0, 0, 0.9, 0.1, 0],
[0, 0, 0, 0.9, 0.1],
[0, 0, 0, 0, 1.0]])
h.means_ = np.zeros((5, 10))
h.covars_ = np.tile(np.identity(10), (5, 1, 1))
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
h.fit(X, lengths=lengths)
# TODO: write the actual test
class TestGaussianHMMWithSphericalCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'spherical'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'diag'
def test_covar_is_writeable(self):
h = hmm.GaussianHMM(n_components=1, covariance_type='diag')
X = np.random.normal(size=(1000, 5))
h._init(X, params="c")
# np.diag returns a read-only view of the array in NumPy 1.9.X.
# Make sure this doesn't prevent us from fitting an HMM with
# diagonal covariance matrix. See PR#44 on GitHub for details
# and discussion.
assert h._covars_.flags["WRITEABLE"]
class TestGaussianHMMWithTiedCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMTestMixin, TestCase):
covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 2 # ('Rainy', 'Sunny')
self.n_features = 3 # ('walk', 'shop', 'clean')
self.emissionprob = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
self.startprob = np.array([0.6, 0.4])
self.transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
self.h = hmm.MultinomialHMM(self.n_components)
self.h.startprob_ = self.startprob
self.h.transmat_ = self.transmat
self.h.emissionprob_ = self.emissionprob
def test_set_emissionprob(self):
h = hmm.MultinomialHMM(self.n_components)
emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
h.emissionprob = emissionprob
assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
X = [[0], [1], [2]]
logprob, state_sequence = self.h.decode(X)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
X = [[0], [1], [2]]
h = hmm.MultinomialHMM(self.n_components, algorithm="map")
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
_logprob, state_sequence = h.decode(X)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
X = [[0], [1], [2]]
state_sequence = self.h.predict(X)
posteriors = self.h.predict_proba(X)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
with assert_raises(ValueError):
h.emissionprob_ = []
h._check()
with assert_raises(ValueError):
h.emissionprob_ = np.zeros((self.n_components - 2,
self.n_features))
h._check()
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
n_samples = len(idx)
X = np.atleast_2d(
(self.prng.rand(n_samples) * self.n_features).astype(int)).T
ll, posteriors = self.h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
def test_sample(self, n=1000):
X, state_sequence = self.h.sample(n, random_state=self.prng)
self.assertEqual(X.ndim, 2)
self.assertEqual(len(X), n)
self.assertEqual(len(state_sequence), n)
self.assertEqual(len(np.unique(X)), self.n_features)
def test_fit(self, params='ste', n_iter=5, **kwargs):
h = self.h
h.params = params
lengths = np.array([10] * 10)
X, _state_sequence = h.sample(lengths.sum(), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = normalize(self.prng.rand(self.n_components))
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = normalize(
self.prng.rand(self.n_components, self.n_features), axis=1)
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
# Check that the log-likelihood is always increasing during training.
diff = np.diff(trainll)
self.assertTrue(np.all(diff >= -1e-6),
"Decreasing log-likelihood: {0}" .format(diff))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# use init_function to initialize paramerters
learner._init(X, lengths=lengths, params=params)
trainll = fit_hmm_and_monitor_log_likelihood(learner, X, n_iter=n_iter)
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test__check_input_symbols(self):
self.assertTrue(self.h._check_input_symbols([[0, 0, 2, 1, 3, 1, 1]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, 3, 5, 10]]))
self.assertFalse(self.h._check_input_symbols([[0]]))
self.assertFalse(self.h._check_input_symbols([[0., 2., 1., 3.]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, -2, 1, 3, 1, 1]]))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
prng = check_random_state(prng)
g = GMM(n_mix, covariance_type=covariance_type)
g.means_ = prng.randint(-20, 20, (n_mix, n_features))
mincv = 0.1
g.covars_ = {
'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
np.ones((1, n_features)))) ** 2,
'tied': (make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features)),
'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
'full': np.array(
[make_spd_matrix(n_features, random_state=prng)
+ mincv * np.eye(n_features) for x in range(n_mix)])
}[covariance_type]
g.weights_ = normalize(prng.rand(n_mix))
return g
class GMMHMMTestMixin(object):
def setUp(self):
self.prng = np.random.RandomState(9)
self.n_components = 3
self.n_mix = 2
self.n_features = 2
self.covariance_type = 'diag'
self.startprob = self.prng.rand(self.n_components)
self.startprob = self.startprob / self.startprob.sum()
self.transmat = self.prng.rand(self.n_components, self.n_components)
self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
(1, self.n_components))
self.gmms = []
for state in range(self.n_components):
self.gmms.append(create_random_gmm(
self.n_mix, self.n_features, self.covariance_type,
prng=self.prng))
def test_score_samples_and_decode(self):
h = hmm.GMMHMM(self.n_components)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.gmms_ = self.gmms
# Make sure the means are far apart so posteriors.argmax()
# picks the actual component used to generate the observations.
for g in h.gmms_:
g.means_ *= 20
refstateseq = np.repeat(np.arange(self.n_components), 5)
n_samples = len(refstateseq)
X = [h.gmms_[x].sample(1, random_state=self.prng).flatten()
for x in refstateseq]
_ll, posteriors = h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
_logprob, stateseq = h.decode(X)
assert_array_equal(stateseq, refstateseq)
def test_sample(self, n=1000):
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.gmms_ = self.gmms
X, state_sequence = h.sample(n, random_state=self.prng)
self.assertEqual(X.shape, (n, self.n_features))
self.assertEqual(len(state_sequence), n)
def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
h.startprob_ = self.startprob
h.transmat_ = normalize(
self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
h.gmms_ = self.gmms
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.n_iter = 0
h.fit(X, lengths=lengths)
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.startprob_ = normalize(self.prng.rand(self.n_components))
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
if not np.all(np.diff(trainll) > 0) and verbose:
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
# XXX: this test appears to check that training log likelihood should
# never be decreasing (up to a tolerance of 0.5, why?) but this is not
# the case when the seed changes.
raise SkipTest("Unstable test: trainll is not always increasing "
"depending on seed")
self.assertTrue(np.all(np.diff(trainll) > -0.5))
def test_fit_works_on_sequences_of_different_length(self):
lengths = [3, 4, 5]
X = self.prng.rand(sum(lengths), self.n_features)
h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
# This shouldn't raise
# ValueError: setting an array element with a sequence.
h.fit(X, lengths=lengths)
class TestGMMHMMWithDiagCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'diag'
def test_fit_startprob_and_transmat(self):
self.test_fit('st')
def test_fit_means(self):
self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMTestMixin, TestCase):
covariance_type = 'full'
| [
"numpy.array",
"sklearn.mixture.GMM",
"numpy.arange",
"numpy.random.RandomState",
"numpy.testing.assert_array_almost_equal",
"numpy.diff",
"numpy.exp",
"numpy.empty",
"numpy.maximum",
"numpy.testing.assert_array_equal",
"numpy.random.normal",
"hmmlearn.hmm.GaussianHMM",
"numpy.all",
"sklea... | [((406, 427), 'numpy.seterr', 'np.seterr', ([], {'all': '"""warn"""'}), "(all='warn')\n", (415, 427), True, 'import numpy as np\n'), ((643, 672), 'numpy.empty', 'np.empty', (['n_iter'], {'dtype': 'float'}), '(n_iter, dtype=float)\n', (651, 672), True, 'import numpy as np\n'), ((15400, 15424), 'sklearn.utils.check_random_state', 'check_random_state', (['prng'], {}), '(prng)\n', (15418, 15424), False, 'from sklearn.utils import check_random_state\n'), ((15433, 15476), 'sklearn.mixture.GMM', 'GMM', (['n_mix'], {'covariance_type': 'covariance_type'}), '(n_mix, covariance_type=covariance_type)\n', (15436, 15476), False, 'from sklearn.mixture import GMM\n'), ((952, 977), 'numpy.random.RandomState', 'np.random.RandomState', (['(10)'], {}), '(10)\n', (973, 977), True, 'import numpy as np\n'), ((2690, 2746), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (2705, 2746), False, 'from hmmlearn import hmm\n'), ((3460, 3498), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['stateseq', 'gaussidx'], {}), '(stateseq, gaussidx)\n', (3478, 3498), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((3547, 3603), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (3562, 3603), False, 'from hmmlearn import hmm\n'), ((3872, 3922), 'numpy.maximum', 'np.maximum', (['self.covars[self.covariance_type]', '(0.1)'], {}), '(self.covars[self.covariance_type], 0.1)\n', (3882, 3922), True, 'import numpy as np\n'), ((4165, 4221), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (4180, 4221), False, 'from hmmlearn import hmm\n'), ((4883, 4899), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (4890, 4899), True, 'import numpy as np\n'), ((5246, 
5302), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (5261, 5302), False, 'from hmmlearn import hmm\n'), ((5580, 5636), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (5595, 5636), False, 'from hmmlearn import hmm\n'), ((6235, 6291), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {}), '(self.n_components, self.covariance_type)\n', (6250, 6291), False, 'from hmmlearn import hmm\n'), ((7004, 7075), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['self.n_components', 'self.covariance_type'], {'params': 'params'}), '(self.n_components, self.covariance_type, params=params)\n', (7019, 7075), False, 'from hmmlearn import hmm\n'), ((7890, 7979), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', ([], {'n_components': '(5)', 'covariance_type': '"""full"""', 'n_iter': '(100)', 'init_params': '"""st"""'}), "(n_components=5, covariance_type='full', n_iter=100,\n init_params='st')\n", (7905, 7979), False, 'from hmmlearn import hmm\n'), ((8027, 8052), 'numpy.array', 'np.array', (['[1, 0, 0, 0, 0]'], {}), '([1, 0, 0, 0, 0])\n', (8035, 8052), True, 'import numpy as np\n'), ((8075, 8192), 'numpy.array', 'np.array', (['[[0.9, 0.1, 0, 0, 0], [0, 0.9, 0.1, 0, 0], [0, 0, 0.9, 0.1, 0], [0, 0, 0, \n 0.9, 0.1], [0, 0, 0, 0, 1.0]]'], {}), '([[0.9, 0.1, 0, 0, 0], [0, 0.9, 0.1, 0, 0], [0, 0, 0.9, 0.1, 0], [0,\n 0, 0, 0.9, 0.1], [0, 0, 0, 0, 1.0]])\n', (8083, 8192), True, 'import numpy as np\n'), ((8336, 8353), 'numpy.zeros', 'np.zeros', (['(5, 10)'], {}), '((5, 10))\n', (8344, 8353), True, 'import numpy as np\n'), ((8929, 8984), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', ([], {'n_components': '(1)', 'covariance_type': '"""diag"""'}), "(n_components=1, covariance_type='diag')\n", (8944, 8984), False, 'from hmmlearn import hmm\n'), ((8997, 9029), 
'numpy.random.normal', 'np.random.normal', ([], {'size': '(1000, 5)'}), '(size=(1000, 5))\n', (9013, 9029), True, 'import numpy as np\n'), ((9781, 9805), 'numpy.random.RandomState', 'np.random.RandomState', (['(9)'], {}), '(9)\n', (9802, 9805), True, 'import numpy as np\n'), ((9948, 9992), 'numpy.array', 'np.array', (['[[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]'], {}), '([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])\n', (9956, 9992), True, 'import numpy as np\n'), ((10018, 10038), 'numpy.array', 'np.array', (['[0.6, 0.4]'], {}), '([0.6, 0.4])\n', (10026, 10038), True, 'import numpy as np\n'), ((10063, 10097), 'numpy.array', 'np.array', (['[[0.7, 0.3], [0.4, 0.6]]'], {}), '([[0.7, 0.3], [0.4, 0.6]])\n', (10071, 10097), True, 'import numpy as np\n'), ((10116, 10153), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', (['self.n_components'], {}), '(self.n_components)\n', (10134, 10153), False, 'from hmmlearn import hmm\n'), ((10337, 10374), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', (['self.n_components'], {}), '(self.n_components)\n', (10355, 10374), False, 'from hmmlearn import hmm\n'), ((10398, 10442), 'numpy.array', 'np.array', (['[[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]]'], {}), '([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])\n', (10406, 10442), True, 'import numpy as np\n'), ((10496, 10537), 'numpy.allclose', 'np.allclose', (['emissionprob', 'h.emissionprob'], {}), '(emissionprob, h.emissionprob)\n', (10507, 10537), True, 'import numpy as np\n'), ((10977, 11022), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['state_sequence', '[1, 0, 0]'], {}), '(state_sequence, [1, 0, 0])\n', (10995, 11022), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((11105, 11159), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', (['self.n_components'], {'algorithm': '"""map"""'}), "(self.n_components, algorithm='map')\n", (11123, 11159), False, 'from hmmlearn import hmm\n'), ((11333, 11378), 'numpy.testing.assert_array_equal', 'assert_array_equal', 
(['state_sequence', '[1, 0, 0]'], {}), '(state_sequence, [1, 0, 0])\n', (11351, 11378), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((11532, 11577), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['state_sequence', '[1, 0, 0]'], {}), '(state_sequence, [1, 0, 0])\n', (11550, 11577), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((11586, 11708), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['posteriors', '[[0.23170303, 0.76829697], [0.62406281, 0.37593719], [0.86397706, 0.13602294]]'], {}), '(posteriors, [[0.23170303, 0.76829697], [\n 0.62406281, 0.37593719], [0.86397706, 0.13602294]])\n', (11611, 11708), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((11795, 11832), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', (['self.n_components'], {}), '(self.n_components)\n', (11813, 11832), False, 'from hmmlearn import hmm\n'), ((12021, 12082), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['h.emissionprob_', 'self.emissionprob'], {}), '(h.emissionprob_, self.emissionprob)\n', (12046, 12082), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((13199, 13218), 'numpy.array', 'np.array', (['([10] * 10)'], {}), '([10] * 10)\n', (13207, 13218), True, 'import numpy as np\n'), ((13880, 13896), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (13887, 13896), True, 'import numpy as np\n'), ((14229, 14266), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', (['self.n_components'], {}), '(self.n_components)\n', (14247, 14266), False, 'from hmmlearn import hmm\n'), ((16201, 16225), 'numpy.random.RandomState', 'np.random.RandomState', (['(9)'], {}), '(9)\n', (16222, 16225), True, 'import numpy as np\n'), ((16953, 16982), 'hmmlearn.hmm.GMMHMM', 'hmm.GMMHMM', (['self.n_components'], {}), '(self.n_components)\n', (16963, 16982), False, 'from hmmlearn import 
hmm\n'), ((17734, 17775), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['stateseq', 'refstateseq'], {}), '(stateseq, refstateseq)\n', (17752, 17775), False, 'from numpy.testing import assert_array_equal, assert_array_almost_equal\n'), ((17824, 17891), 'hmmlearn.hmm.GMMHMM', 'hmm.GMMHMM', (['self.n_components'], {'covariance_type': 'self.covariance_type'}), '(self.n_components, covariance_type=self.covariance_type)\n', (17834, 17891), False, 'from hmmlearn import hmm\n'), ((18251, 18298), 'hmmlearn.hmm.GMMHMM', 'hmm.GMMHMM', (['self.n_components'], {'covars_prior': '(1.0)'}), '(self.n_components, covars_prior=1.0)\n', (18261, 18298), False, 'from hmmlearn import hmm\n'), ((19425, 19502), 'nose.SkipTest', 'SkipTest', (['"""Unstable test: trainll is not always increasing depending on seed"""'], {}), "('Unstable test: trainll is not always increasing depending on seed')\n", (19433, 19502), False, 'from nose import SkipTest\n'), ((19750, 19817), 'hmmlearn.hmm.GMMHMM', 'hmm.GMMHMM', (['self.n_components'], {'covariance_type': 'self.covariance_type'}), '(self.n_components, covariance_type=self.covariance_type)\n', (19760, 19817), False, 'from hmmlearn import hmm\n'), ((2365, 2390), 'hmmlearn.utils.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (2378, 2390), False, 'from hmmlearn.utils import normalize, assert_raises\n'), ((2408, 2465), 'hmmlearn.hmm.GaussianHMM', 'hmm.GaussianHMM', (['(20)'], {'covariance_type': '"""badcovariance_type"""'}), "(20, covariance_type='badcovariance_type')\n", (2423, 2465), False, 'from hmmlearn import hmm\n'), ((3034, 3062), 'numpy.arange', 'np.arange', (['self.n_components'], {}), '(self.n_components)\n', (3043, 3062), True, 'import numpy as np\n'), ((3388, 3406), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (3395, 3406), True, 'import numpy as np\n'), ((5051, 5073), 'numpy.all', 'np.all', (['(diff >= -1e-06)'], {}), '(diff >= -1e-06)\n', (5057, 5073), True, 'import numpy as np\n'), 
((8382, 8397), 'numpy.identity', 'np.identity', (['(10)'], {}), '(10)\n', (8393, 8397), True, 'import numpy as np\n'), ((10943, 10958), 'numpy.exp', 'np.exp', (['logprob'], {}), '(logprob)\n', (10949, 10958), True, 'import numpy as np\n'), ((12096, 12121), 'hmmlearn.utils.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (12109, 12121), False, 'from hmmlearn.utils import normalize, assert_raises\n'), ((12192, 12217), 'hmmlearn.utils.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (12205, 12217), False, 'from hmmlearn.utils import normalize, assert_raises\n'), ((12249, 12299), 'numpy.zeros', 'np.zeros', (['(self.n_components - 2, self.n_features)'], {}), '((self.n_components - 2, self.n_features))\n', (12257, 12299), True, 'import numpy as np\n'), ((12422, 12450), 'numpy.arange', 'np.arange', (['self.n_components'], {}), '(self.n_components)\n', (12431, 12450), True, 'import numpy as np\n'), ((12769, 12787), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (12776, 12787), True, 'import numpy as np\n'), ((13921, 13943), 'numpy.all', 'np.all', (['(diff >= -1e-06)'], {}), '(diff >= -1e-06)\n', (13927, 13943), True, 'import numpy as np\n'), ((17311, 17339), 'numpy.arange', 'np.arange', (['self.n_components'], {}), '(self.n_components)\n', (17320, 17339), True, 'import numpy as np\n'), ((17664, 17682), 'numpy.ones', 'np.ones', (['n_samples'], {}), '(n_samples)\n', (17671, 17682), True, 'import numpy as np\n'), ((1628, 1671), 'sklearn.datasets.samples_generator.make_spd_matrix', 'make_spd_matrix', (['n_features'], {'random_state': '(0)'}), '(n_features, random_state=0)\n', (1643, 1671), False, 'from sklearn.datasets.samples_generator import make_spd_matrix\n'), ((1695, 1713), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1701, 1713), True, 'import numpy as np\n'), ((2156, 2168), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (2163, 2168), True, 'import numpy as np\n'), ((13044, 13056), 
'numpy.unique', 'np.unique', (['X'], {}), '(X)\n', (13053, 13056), True, 'import numpy as np\n'), ((15730, 15776), 'sklearn.datasets.samples_generator.make_spd_matrix', 'make_spd_matrix', (['n_features'], {'random_state': 'prng'}), '(n_features, random_state=prng)\n', (15745, 15776), False, 'from sklearn.datasets.samples_generator import make_spd_matrix\n'), ((2046, 2064), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (2052, 2064), True, 'import numpy as np\n'), ((14883, 14899), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (14890, 14899), True, 'import numpy as np\n'), ((15804, 15822), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (15810, 15822), True, 'import numpy as np\n'), ((19561, 19577), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (19568, 19577), True, 'import numpy as np\n'), ((1820, 1863), 'sklearn.datasets.samples_generator.make_spd_matrix', 'make_spd_matrix', (['n_features'], {'random_state': '(0)'}), '(n_features, random_state=0)\n', (1835, 1863), False, 'from sklearn.datasets.samples_generator import make_spd_matrix\n'), ((1896, 1914), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1902, 1914), True, 'import numpy as np\n'), ((14657, 14673), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (14664, 14673), True, 'import numpy as np\n'), ((14833, 14849), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (14840, 14849), True, 'import numpy as np\n'), ((15933, 15979), 'sklearn.datasets.samples_generator.make_spd_matrix', 'make_spd_matrix', (['n_features'], {'random_state': 'prng'}), '(n_features, random_state=prng)\n', (15948, 15979), False, 'from sklearn.datasets.samples_generator import make_spd_matrix\n'), ((19036, 19052), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (19043, 19052), True, 'import numpy as np\n'), ((19192, 19208), 'numpy.diff', 'np.diff', (['trainll'], {}), '(trainll)\n', (19199, 19208), True, 'import numpy as np\n'), ((1574, 
1598), 'numpy.ones', 'np.ones', (['(1, n_features)'], {}), '((1, n_features))\n', (1581, 1598), True, 'import numpy as np\n'), ((15680, 15704), 'numpy.ones', 'np.ones', (['(1, n_features)'], {}), '((1, n_features))\n', (15687, 15704), True, 'import numpy as np\n'), ((16003, 16021), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (16009, 16021), True, 'import numpy as np\n')] |
import numpy as np
import biorbd_casadi as biorbd
from casadi import MX, vertcat
from bioptim import (
OptimalControlProgram,
DynamicsFcn,
DynamicsList,
Bounds,
QAndQDotBounds,
InitialGuess,
ObjectiveFcn,
ObjectiveList,
ConstraintList,
ConstraintFcn,
InterpolationType,
Node,
BoundsList,
OdeSolver,
Solver,
CostType,
PhaseTransitionList,
PhaseTransitionFcn,
PhaseTransition,
OptimizationVariableList,
)
def anti_symmetric_cyclic_transition(
    transition: PhaseTransition,
    state_pre: OptimizationVariableList,
    state_post: OptimizationVariableList,
    first_index: int,
    second_index: int,
):
    """
    Custom phase-transition constraint coupling two states anti-symmetrically:
    the value of `first_index` at the end of a phase must equal the value of
    `second_index` at the start of the next one, and vice versa.

    Parameters
    ----------
    transition: PhaseTransition
        The phase transition (provides the states mapping)
    state_pre: OptimizationVariableList
        The states at the end of a phase
    state_post: OptimizationVariableList
        The states at the beginning of the next phase
    first_index: int
        Index of the first coupled state
    second_index: int
        Index of the second coupled state

    Returns
    -------
    The constraint vector such that: c(x) = 0
    """
    # The states mapping is applied purely for illustration purposes; with an
    # identity mapping this behaves like a crossed continuity condition.
    mapped_pre = transition.states_mapping.to_second.map(state_pre.cx_end)
    mapped_post = transition.states_mapping.to_first.map(state_post.cx)
    return vertcat(
        mapped_pre[first_index] - mapped_post[second_index],
        mapped_pre[second_index] - mapped_post[first_index],
    )
def prepare_ocp(
    biorbd_model_path: str,
    final_time: float,
    n_shooting: int,
    ode_solver: OdeSolver = None,
    use_sx: bool = False,
    n_threads: int = 1,
    implicit_dynamics: bool = False,
) -> OptimalControlProgram:
    """
    The initialization of an ocp

    Parameters
    ----------
    biorbd_model_path: str
        The path to the biorbd model
    final_time: float
        The time in second required to perform the task
    n_shooting: int
        The number of shooting points to define int the direct multiple shooting program
    ode_solver: OdeSolver
        Which type of OdeSolver to use; defaults to a fresh OdeSolver.RK4()
    use_sx: bool
        If the SX variable should be used instead of MX (can be extensive on RAM)
    n_threads: int
        The number of threads to use in the paralleling (1 = no parallel computing)
    implicit_dynamics: bool
        Whether implicit dynamics should be used
        (NOTE(review): currently accepted but not used by this function)

    Returns
    -------
    The OptimalControlProgram ready to be solved
    """
    # Avoid the evaluated-once default-argument pitfall: a signature default of
    # `OdeSolver.RK4()` would be instantiated a single time at import and the
    # same solver object shared by every call. Build a fresh one per call.
    if ode_solver is None:
        ode_solver = OdeSolver.RK4()

    model = biorbd.Model(biorbd_model_path)
    n_q = model.nbQ()
    n_qdot = model.nbQdot()
    n_tau = model.nbGeneralizedTorque()
    tau_min, tau_max, tau_init = -400, 400, 0

    # --- Dynamics --- #
    dynamics = DynamicsList()
    dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=True, phase=0)
    # dynamics.add(DynamicsFcn.TORQUE_DRIVEN, with_contact=True, phase=1)

    # --- Objective function --- #
    objective_functions = ObjectiveList()
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau", phase=0)
    # torso and head stability
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, phase=0, index=[0, 1, 2, 3], weight=0.01)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_QDDOT, derivative=True, phase=0, index=3, weight=0.01)
    objective_functions.add(ObjectiveFcn.Lagrange.MINIMIZE_STATE, key="qdot", phase=0, index=3, weight=0.01)
    # keep the CoM velocity at 1.5 m/s at both boundary nodes
    objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.START, weight=1000)
    objective_functions.add(ObjectiveFcn.Mayer.MINIMIZE_COM_VELOCITY, index=1, target=1.5, node=Node.END, weight=1000)

    # --- Constraints --- #
    constraints = ConstraintList()
    constraints.add(
        ConstraintFcn.TRACK_CONTACT_FORCES, min_bound=0, max_bound=np.inf, node=Node.ALL, contact_index=1, phase=0
    )  # FP0 > 0 en Z
    # constraints.add(ConstraintFcn.TRACK_MARKERS, node=Node.ALL_SHOOTING, marker_index="RFoot", phase=0)
    constraints.add(ConstraintFcn.TRACK_MARKERS, node=Node.START, marker_index="RFoot", phase=0)
    constraints.add(ConstraintFcn.TRACK_MARKERS_VELOCITY, node=Node.START, marker_index="RFoot", phase=0)
    constraints.add(
        ConstraintFcn.TRACK_MARKERS, target=np.array([0, -0.4, 0]), node=Node.START, marker_index="LFoot", phase=0
    )
    constraints.add(
        ConstraintFcn.TRACK_MARKERS, target=np.array([0, 0.4, 0]), node=Node.END, marker_index="LFoot", phase=0
    )

    # --- Phase transitions (cyclic gait conditions) --- #
    phase_transitions = PhaseTransitionList()
    phase_transitions.add(PhaseTransitionFcn.CYCLIC, index=[2, 3], weight=1000)
    phase_transitions.add(anti_symmetric_cyclic_transition, first_index=3, second_index=4, phase_pre_idx=0, weight=1000)

    # --- Bounds --- #
    x_bounds = BoundsList()
    x_bounds.add(bounds=QAndQDotBounds(model))
    x_bounds[0][n_q + 3, 0] = 0  # head velocity zero at the beginning
    u_bounds = BoundsList()
    u_bounds.add([tau_min] * n_tau, [tau_max] * n_tau)
    u_bounds[0][:3, :] = 0  # the first three generalized forces are not actuated

    # --- Initial guess --- #
    q0 = [0] * n_q
    q0[1] = 0.8
    qdot0 = [0] * n_qdot
    X0 = []
    X0.extend(q0)
    X0.extend(qdot0)
    x_init = InitialGuess(X0, interpolation=InterpolationType.CONSTANT)
    u_init = InitialGuess([tau_init] * n_tau)

    return OptimalControlProgram(
        biorbd_model=model,
        dynamics=dynamics,
        n_shooting=n_shooting,
        ode_solver=ode_solver,
        phase_time=final_time,
        x_init=x_init,
        u_init=u_init,
        x_bounds=x_bounds,
        u_bounds=u_bounds,
        objective_functions=objective_functions,
        constraints=constraints,
        phase_transitions=phase_transitions,
        use_sx=use_sx,
        n_threads=n_threads,
    )
def main():
    """Build and solve the humanoid walking OCP, then report the transitions."""
    model_path = "models/Humanoid4Dof.bioMod"
    n_shooting = 10
    ode_solver = OdeSolver.RK4(n_integration_steps=5)
    # ode_solver = OdeSolver.COLLOCATION()
    time = 0.3
    n_threads = 8
    # --- Solve the program --- #
    ocp = prepare_ocp(
        biorbd_model_path=model_path,
        final_time=time,
        n_shooting=n_shooting,
        ode_solver=ode_solver,
        implicit_dynamics=False,
        n_threads=n_threads,
    )
    # ocp.add_plot_penalty(CostType.ALL)

    # Plot CoM pos and velocity for every phase.
    # BUGFIX: bind `nlp` as a lambda default argument. A plain closure is
    # late-binding, so every callback would otherwise evaluate `nlp` after the
    # loop finished and all plots would use the *last* phase's nlp.
    for i, nlp in enumerate(ocp.nlp):
        ocp.add_plot("CoM", lambda t, x, u, p, nlp=nlp: plot_com(x, nlp), phase=i, legend=["CoM", "CoM_dot"])

    solv = Solver.IPOPT(show_online_optim=False, show_options=dict(show_bounds=True))
    sol = ocp.solve(solv)

    # --- Show results --- #
    sol.print()
    # sol.animate()
    # sol.graphs(show_bounds=True)
    print("verify phase transitions")
    print(sol.states["q"][3, 0] - sol.states["q"][4, -1])
    print(sol.states["q"][4, 0] - sol.states["q"][3, -1])
def plot_com(x, nlp):
    """
    Evaluate the model's CoM position and velocity for the live plot.

    Maps the state samples `x` back to full generalized coordinates, then
    returns the second (index 1) component of the CoM position stacked with
    the second component of the CoM velocity.
    """
    com_pos_func = biorbd.to_casadi_func(
        "CoMPlot", nlp.model.CoM, nlp.states["q"].mx, expand=False)
    com_vel_func = biorbd.to_casadi_func(
        "Compute_CoM", nlp.model.CoMdot, nlp.states["q"].mx, nlp.states["qdot"].mx, expand=False
    )
    q = nlp.states["q"].mapping.to_second.map(x[nlp.states["q"].index, :])
    qdot = nlp.states["qdot"].mapping.to_second.map(x[nlp.states["qdot"].index, :])
    return np.concatenate(
        (np.array(com_pos_func(q)[1, :]), np.array(com_vel_func(q, qdot)[1, :]))
    )
# Run the example only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| [
"bioptim.BoundsList",
"bioptim.ObjectiveList",
"bioptim.QAndQDotBounds",
"bioptim.InitialGuess",
"biorbd_casadi.to_casadi_func",
"casadi.vertcat",
"bioptim.DynamicsList",
"bioptim.ConstraintList",
"numpy.array",
"bioptim.PhaseTransitionList",
"bioptim.OdeSolver.RK4",
"bioptim.OptimalControlPro... | [((1819, 1863), 'casadi.vertcat', 'vertcat', (['first_constraint', 'second_constraint'], {}), '(first_constraint, second_constraint)\n', (1826, 1863), False, 'from casadi import MX, vertcat\n'), ((1983, 1998), 'bioptim.OdeSolver.RK4', 'OdeSolver.RK4', ([], {}), '()\n', (1996, 1998), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((2862, 2893), 'biorbd_casadi.Model', 'biorbd.Model', (['biorbd_model_path'], {}), '(biorbd_model_path)\n', (2874, 2893), True, 'import biorbd_casadi as biorbd\n'), ((3071, 3085), 'bioptim.DynamicsList', 'DynamicsList', ([], {}), '()\n', (3083, 3085), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((3294, 3309), 'bioptim.ObjectiveList', 'ObjectiveList', ([], {}), '()\n', (3307, 3309), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((4080, 4096), 'bioptim.ConstraintList', 'ConstraintList', ([], {}), '()\n', (4094, 4096), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, 
PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((4872, 4893), 'bioptim.PhaseTransitionList', 'PhaseTransitionList', ([], {}), '()\n', (4891, 4893), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((5111, 5123), 'bioptim.BoundsList', 'BoundsList', ([], {}), '()\n', (5121, 5123), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((5258, 5270), 'bioptim.BoundsList', 'BoundsList', ([], {}), '()\n', (5268, 5270), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((5508, 5566), 'bioptim.InitialGuess', 'InitialGuess', (['X0'], {'interpolation': 'InterpolationType.CONSTANT'}), '(X0, interpolation=InterpolationType.CONSTANT)\n', (5520, 5566), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((5580, 5612), 'bioptim.InitialGuess', 'InitialGuess', (['([tau_init] * n_tau)'], {}), '([tau_init] * n_tau)\n', (5592, 5612), False, 'from 
bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((5625, 5978), 'bioptim.OptimalControlProgram', 'OptimalControlProgram', ([], {'biorbd_model': 'model', 'dynamics': 'dynamics', 'n_shooting': 'n_shooting', 'ode_solver': 'ode_solver', 'phase_time': 'final_time', 'x_init': 'x_init', 'u_init': 'u_init', 'x_bounds': 'x_bounds', 'u_bounds': 'u_bounds', 'objective_functions': 'objective_functions', 'constraints': 'constraints', 'phase_transitions': 'phase_transitions', 'use_sx': 'use_sx', 'n_threads': 'n_threads'}), '(biorbd_model=model, dynamics=dynamics, n_shooting=\n n_shooting, ode_solver=ode_solver, phase_time=final_time, x_init=x_init,\n u_init=u_init, x_bounds=x_bounds, u_bounds=u_bounds,\n objective_functions=objective_functions, constraints=constraints,\n phase_transitions=phase_transitions, use_sx=use_sx, n_threads=n_threads)\n', (5646, 5978), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((6178, 6214), 'bioptim.OdeSolver.RK4', 'OdeSolver.RK4', ([], {'n_integration_steps': '(5)'}), '(n_integration_steps=5)\n', (6191, 6214), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n'), ((7162, 7248), 'biorbd_casadi.to_casadi_func', 'biorbd.to_casadi_func', 
(['"""CoMPlot"""', 'nlp.model.CoM', "nlp.states['q'].mx"], {'expand': '(False)'}), "('CoMPlot', nlp.model.CoM, nlp.states['q'].mx, expand=\n False)\n", (7183, 7248), True, 'import biorbd_casadi as biorbd\n'), ((7263, 7378), 'biorbd_casadi.to_casadi_func', 'biorbd.to_casadi_func', (['"""Compute_CoM"""', 'nlp.model.CoMdot', "nlp.states['q'].mx", "nlp.states['qdot'].mx"], {'expand': '(False)'}), "('Compute_CoM', nlp.model.CoMdot, nlp.states['q'].mx,\n nlp.states['qdot'].mx, expand=False)\n", (7284, 7378), True, 'import biorbd_casadi as biorbd\n'), ((4631, 4653), 'numpy.array', 'np.array', (['[0, -0.4, 0]'], {}), '([0, -0.4, 0])\n', (4639, 4653), True, 'import numpy as np\n'), ((4773, 4794), 'numpy.array', 'np.array', (['[0, 0.4, 0]'], {}), '([0, 0.4, 0])\n', (4781, 4794), True, 'import numpy as np\n'), ((5148, 5169), 'bioptim.QAndQDotBounds', 'QAndQDotBounds', (['model'], {}), '(model)\n', (5162, 5169), False, 'from bioptim import OptimalControlProgram, DynamicsFcn, DynamicsList, Bounds, QAndQDotBounds, InitialGuess, ObjectiveFcn, ObjectiveList, ConstraintList, ConstraintFcn, InterpolationType, Node, BoundsList, OdeSolver, Solver, CostType, PhaseTransitionList, PhaseTransitionFcn, PhaseTransition, OptimizationVariableList\n')] |
import pandas as pd
import numpy as np
class GridMDP:
    """Policy iteration on a 4x4 grid world.

    States are (row, col) tuples; the corners (0, 0) and (3, 3) are terminal.
    Every step earns `reward` (typically -1) and a move that would leave the
    grid keeps the agent in place.
    """

    def __init__(self,
                 state_space,
                 action_space,
                 reward,
                 gamma):
        """
        Parameters
        ----------
        state_space : list of (row, col) tuples (terminal states excluded)
        action_space : list of action labels, expected ["n", "s", "w", "e"]
        reward : per-step reward (e.g. -1)
        gamma : discount factor; must be < 1 so policy evaluation converges
        """
        self.state_space = state_space
        self.action_space = action_space
        self.reward = reward
        self.gamma = gamma
        # Displacement vector of each action: n/s move along rows, w/e along columns.
        self.__action_dir = pd.Series(data=[np.array((-1, 0)),
                                              np.array((1, 0)),
                                              np.array((0, -1)),
                                              np.array((0, 1))],
                                      index=self.action_space)
        self.terminal_states = [(0, 0), (3, 3)]
        # Value table and a random initial policy, both indexed by state.
        self.value_space = pd.Series(np.zeros(len(state_space)), index=self.state_space)
        self.policy = pd.Series(data=np.random.choice(self.action_space, size=len(self.state_space)),
                                index=self.state_space)

    def transform(self, state, action):
        """Apply `action` to `state`; moves off the 4x4 grid leave the state unchanged."""
        candidate = np.array(state) + self.__action_dir[action]
        if (candidate >= 0).all() and (candidate < 4).all():
            return tuple(candidate)
        return state

    def policy_evaluate(self):
        """Iteratively evaluate the current policy until the values converge."""
        while True:
            v_old = self.value_space.copy()
            for state in self.state_space:
                state_ = self.transform(state, self.policy[state])
                if state_ in self.terminal_states:
                    # Terminal successors contribute no future value.
                    self.value_space[state] = self.reward + 0.0
                else:
                    # Handles both a real move and bouncing off a wall
                    # (state_ == state) -- the formula is identical, so the
                    # original's duplicated elif/else branches are merged.
                    self.value_space[state] = self.reward + self.gamma * v_old[state_]
            if (np.abs(v_old - self.value_space) < 1e-8).all():
                break
        return self.value_space

    def policy_improve(self):
        """Return the greedy policy with respect to the current value function."""
        # Explicit index + dtype avoids the deprecated bare pd.Series() and the
        # original's throw-away random initialisation (every entry is overwritten).
        policy_ = pd.Series(index=self.state_space, dtype=object)
        for state in self.state_space:
            q_s_a = {}
            for action in self.action_space:
                state_ = self.transform(state, action)
                if state_ in self.terminal_states:
                    q_s_a[action] = self.reward
                else:
                    q_s_a[action] = self.reward + self.gamma * self.value_space[state_]
            q_s_a = pd.Series(q_s_a)
            # Several actions may tie for the maximum; pick one at random to
            # avoid always repeating the same choice.
            best_actions = q_s_a[q_s_a == q_s_a.max()].index
            policy_[state] = np.random.choice(best_actions)
        return policy_

    def policy_iterate(self):
        """Alternate evaluation and greedy improvement until the policy is stable."""
        while True:
            print(self.policy)
            self.policy_evaluate()
            policy_ = self.policy_improve()
            if (policy_ == self.policy).all():
                break
            self.policy = policy_
        return self.policy
def main():
    """Set up the 4x4 grid-world MDP and run policy iteration on it."""
    # Every cell of the 4x4 grid is a state, except the two terminal corners.
    states = [(row, col) for row in range(4) for col in range(4)]
    for terminal in ((0, 0), (3, 3)):
        states.remove(terminal)
    # The four cardinal moves.
    actions = ["n", "s", "w", "e"]
    # gamma must be < 1, otherwise the value table cannot converge.
    mdp = GridMDP(state_space=states,
                  action_space=actions,
                  reward=-1,
                  gamma=0.8)
    # Run until the policy is stable.
    mdp.policy_iterate()
# Run the example only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"pandas.Series",
"numpy.array",
"numpy.abs",
"numpy.random.choice"
] | [((1030, 1045), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1038, 1045), True, 'import numpy as np\n'), ((2256, 2267), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (2265, 2267), True, 'import pandas as pd\n'), ((2701, 2724), 'numpy.random.choice', 'np.random.choice', (['max_v'], {}), '(max_v)\n', (2717, 2724), True, 'import numpy as np\n'), ((372, 389), 'numpy.array', 'np.array', (['(-1, 0)'], {}), '((-1, 0))\n', (380, 389), True, 'import numpy as np\n'), ((435, 451), 'numpy.array', 'np.array', (['(1, 0)'], {}), '((1, 0))\n', (443, 451), True, 'import numpy as np\n'), ((497, 514), 'numpy.array', 'np.array', (['(0, -1)'], {}), '((0, -1))\n', (505, 514), True, 'import numpy as np\n'), ((560, 576), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (568, 576), True, 'import numpy as np\n'), ((1895, 1926), 'numpy.abs', 'np.abs', (['(v_s_ - self.value_space)'], {}), '(v_s_ - self.value_space)\n', (1901, 1926), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Calculate the Severe to critical infections column of Table 1
Needs the filter_SRAG.py csv output to run
The Comorbidities are written like the original database keywords:
'NENHUM': No Comorbidities
'PNEUMOPATI': Lung Disease
'IMUNODEPRE': Lung Disease
'OBESIDADE': Obesity
'SIND_DOWN': Down Syndrome
'RENAL': Kidney Disease
'NEUROLOGIC': Neurological chronic disease
'DIABETES': Diabetes
'PUERPERA': Post-partum
'HEMATOLOGI': Hematologic chronic disease
'ASMA': Asthma
'HEPATICA': Liver disease
'CARDIOPATI': Heart disease
'OUT_MORBI': other comorbidity
"""
import numpy as np
from scipy.optimize import minimize, root
import datetime
import pandas as pd
# Reference date used as day zero for the epidemic time axis.
ref = pd.to_datetime(datetime.date(2019, 12, 31))

# Load the pre-filtered SRAG database produced by filter_SRAG.py.
data_init = pd.read_csv('../Data/SRAG_filtered_morb.csv')
# EVOLUCAO == 2 encodes death in the SRAG database.
data_init['MORTE'] = (data_init.EVOLUCAO == 2)
# All hospitalization states plus 'BR' for the national aggregate.
states = np.r_[np.array([ 'BR' ]), data_init.SG_UF_INTE.unique()]
# Parse every date (DT*) and vaccine dose (DOSE*) column; invalid entries become NaT.
for col in data_init.columns:
    if (col[:2] == 'DT') or (col[:4] == 'DOSE'):
        data_init.loc[:,col] = pd.to_datetime(data_init[col], format='%Y/%m/%d', errors='coerce')

# Bin ages into the groups [0,18), [18,30), ..., [75,85), 85+.
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
data_init['AGE_GRP'] = ''
for i in range(nsep):
    if i == nsep-1:
        # Open-ended last bracket.
        data_init.loc[(data_init.NU_IDADE_N>=ages[i]),'AGE_GRP'] = 'AG85+'
    else:
        data_init.loc[(data_init.NU_IDADE_N>=ages[i])&(data_init.NU_IDADE_N<ages[i+1]), 'AGE_GRP'] = 'AG{}t{}'.format(ages[i],ages[i+1])

# Split the deprivation index `ibp` into quintiles BDI_0 ... BDI_4.
# NOTE(review): ibp is presumably the Brazilian Deprivation Index -- confirm upstream.
ibpv = [data_init.ibp.quantile(x) for x in [0.0,0.2,0.4,0.6,0.8,1.0]]
names = [ 'BDI_' + i for i in ['0', '1', '2', '3', '4']]
data_init['BDI_GRP'] = ''
for i in range(5):
    if i == 4:
        # Last quintile is closed at the top so the maximum is included.
        data_init.loc[(data_init.ibp>=ibpv[i]),'BDI_GRP'] = names[i]
    else:
        data_init.loc[(data_init.ibp>=ibpv[i])&(data_init.ibp<ibpv[i+1]), 'BDI_GRP'] = names[i]

# Map the numeric CS_RACA codes to English race/ethnicity labels
# (original Portuguese labels kept below for reference).
# trad_raca = {1:'Branca', 2:'Preta', 3:'Amarela', 4:'Parda', 5:'Indigena'}
trad_raca = {1:'White', 2:'Black', 3:'Yellow', 4:'Mixed', 5:'Indigenous'}
data_init['RACA'] = data_init['CS_RACA'].map(trad_raca)

# Count infections per age group, sex, race and deprivation quintile.
ages = {loc:(data_init.AGE_GRP==loc).sum() for loc in data_init.AGE_GRP.unique()}
print(ages)
sexs = {loc:(data_init.CS_SEXO==loc).sum() for loc in data_init.CS_SEXO.unique()}
print(sexs)
raca = {loc:(data_init.RACA==loc).sum() for loc in data_init.RACA.unique()}
# Unmapped codes become NaN; count them explicitly.
raca[np.nan] = pd.isna(data_init.RACA).sum()
print(raca)
bdi = {loc:(data_init.BDI_GRP==loc).sum() for loc in data_init.BDI_GRP.unique()}
print(bdi)

# Comorbidity columns (see module docstring for the key -> condition mapping).
gr_risco = ['PNEUMOPATI', 'IMUNODEPRE', 'OBESIDADE', 'SIND_DOWN', \
            'RENAL', 'NEUROLOGIC', 'DIABETES', 'PUERPERA', 'OUT_MORBI', \
            'HEMATOLOGI', 'ASMA', 'HEPATICA', 'CARDIOPATI']
# no_risco stays 1 only for patients with none of the comorbidities above.
no_risco = np.ones((len(data_init)))
grupos = dict()
for risco in gr_risco:
    grupos[risco] = (data_init[risco]==1).sum()
    no_risco = no_risco * (1-(data_init[risco] == 1))
grupos['NENHUM'] = no_risco.sum()  # 'NENHUM' = no comorbidities
print(grupos)
"pandas.read_csv",
"numpy.array",
"datetime.date",
"pandas.isna",
"pandas.to_datetime"
] | [((775, 820), 'pandas.read_csv', 'pd.read_csv', (['"""../Data/SRAG_filtered_morb.csv"""'], {}), "('../Data/SRAG_filtered_morb.csv')\n", (786, 820), True, 'import pandas as pd\n'), ((733, 760), 'datetime.date', 'datetime.date', (['(2019)', '(12)', '(31)'], {}), '(2019, 12, 31)\n', (746, 760), False, 'import datetime\n'), ((883, 899), 'numpy.array', 'np.array', (["['BR']"], {}), "(['BR'])\n", (891, 899), True, 'import numpy as np\n'), ((1044, 1110), 'pandas.to_datetime', 'pd.to_datetime', (['data_init[col]'], {'format': '"""%Y/%m/%d"""', 'errors': '"""coerce"""'}), "(data_init[col], format='%Y/%m/%d', errors='coerce')\n", (1058, 1110), True, 'import pandas as pd\n'), ((2321, 2344), 'pandas.isna', 'pd.isna', (['data_init.RACA'], {}), '(data_init.RACA)\n', (2328, 2344), True, 'import pandas as pd\n')] |
"""
Author: <NAME>
Copyright (c) 2019, <NAME>
All rights reserved.
Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG) is holder of all proprietary rights on this
computer program.
You can only use this computer program if you have closed a license agreement with MPG or you get the right to use
the computer program from someone who is authorized to grant you that right.
Any use of the computer program without a valid license is prohibited and liable to prosecution.
Copyright 2019 Max-Planck-Gesellschaft zur Foerderung der Wissenschaften e.V. (MPG). acting on behalf of its
Max Planck Institute for Intelligent Systems and the Max Planck Institute for Biological Cybernetics.
All rights reserved.
More information about RingNet is available at https://ringnet.is.tue.mpg.de.
based on github.com/akanazawa/hmr
"""
## Demo of RingNet.
## Note that RingNet requires a loose crop of the face in the image.
## Sample usage:
## Run the following command to generate check the RingNet predictions on loosely cropped face images
# python -m demo --img_path *.jpg --out_folder ./RingNet_output
## To output the meshes run the following command
# python -m demo --img_path *.jpg --out_folder ./RingNet_output --save_obj_file=True
## To output both meshes and flame parameters run the following command
# python -m demo --img_path *.jpg --out_folder ./RingNet_output --save_obj_file=True --save_flame_parameters=True
## To output both meshes and flame parameters and generate a neutralized mesh run the following command
# python -m demo --img_path *.jpg --out_folder ./RingNet_output --save_obj_file=True --save_flame_parameters=True --neutralize_expression=True
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
from absl import flags
import numpy as np
import skimage.io as io
import cv2
import matplotlib.pyplot as plt
import tensorflow as tf
from psbody.mesh import Mesh
from smpl_webuser.serialization import load_model
from util import renderer as vis_util
from util import image as img_util
from config_test import get_config
from run_RingNet import RingNet_inference
def visualize(img, proc_param, verts, cam, img_name='test_image'):
    """
    Renders the result in original image coordinate frame.

    Args:
        img: input image as loaded by skimage (H, W, 3).
        proc_param: preprocessing parameters returned by preprocess_image,
            used to undo the scale/crop so the render aligns with `img`.
        verts: predicted mesh vertices for one sample.
        cam: predicted camera parameters for one sample.
        img_name: basename (without extension) for the saved figure.

    Side effects: draws a 2x2 matplotlib figure and saves it to
    `img_name + '.png'`.
    """
    cam_for_render, vert_shifted = vis_util.get_original(
        proc_param, verts, cam, img_size=img.shape[:2])
    # Render results; `renderer` is the module-level SMPLRenderer built
    # in the __main__ block.
    rend_img_overlay = renderer(
        vert_shifted*1.0, cam=cam_for_render, img=img, do_alpha=True)
    rend_img = renderer(
        vert_shifted*1.0, cam=cam_for_render, img_size=img.shape[:2])
    rend_img_vp1 = renderer.rotated(
        vert_shifted, 30, cam=cam_for_render, img_size=img.shape[:2])
    # NOTE: `plt` is already imported at module level; the previous local
    # re-import was redundant and has been removed.
    fig = plt.figure(1)
    plt.clf()
    # One (subplot, image, title) triple per panel keeps the layout in
    # a single place instead of four copy-pasted stanzas.
    panels = [
        (221, img, 'input'),
        (222, rend_img_overlay, '3D Mesh overlay'),
        (223, rend_img, '3D mesh'),
        (224, rend_img_vp1, 'diff vp'),
    ]
    for position, panel_img, title in panels:
        plt.subplot(position)
        plt.imshow(panel_img)
        plt.title(title)
        plt.axis('off')
    plt.draw()
    plt.show(block=False)
    fig.savefig(img_name + '.png')
def preprocess_image(img_path):
    """Load an image and build the network input crop.

    Returns (crop, proc_param, img): `crop` is the model input scaled to
    [-1, 1], `proc_param` records the scale/crop that was applied (needed
    later to map predictions back), and `img` is the untouched original.
    """
    img = io.imread(img_path)
    # Rescale so that the longer image side equals config.img_size.
    if np.max(img.shape[:2]) != config.img_size:
        print('Resizing so the max image size is %d..' % config.img_size)
        scale = float(config.img_size) / np.max(img.shape[:2])
    else:
        scale = 1.0
    # Crop around the image center; note the center is given in (x, y).
    center = np.round(np.array(img.shape[:2]) / 2).astype(int)[::-1]
    crop, proc_param = img_util.scale_and_crop(img, scale, center,
                                               config.img_size)
    # Map pixel values from [0, 255] to [-1, 1].
    crop = 2 * ((crop / 255.) - 0.5)
    return crop, proc_param, img
def main(config, template_mesh):
    """Run RingNet on a single image and write the requested outputs.

    Depending on config flags this saves: a rendered overlay figure
    (always), an .obj mesh, the FLAME parameters (.npy) and a
    neutralized-expression mesh, all under config.out_folder.
    """
    sess = tf.Session()
    model = RingNet_inference(config, sess=sess)
    input_img, proc_param, img = preprocess_image(config.img_path)
    # The network expects a leading batch dimension; batch of one here.
    vertices, flame_parameters = model.predict(np.expand_dims(input_img, axis=0), get_parameters=True)
    # First 3 entries of the FLAME parameter vector are the camera
    # (matches the 'cam' slice in the params dict below).
    cams = flame_parameters[0][:3]
    visualize(img, proc_param, vertices[0], cams, img_name=config.out_folder + '/images/' + config.img_path.split('/')[-1][:-4])
    if config.save_obj_file:
        if not os.path.exists(config.out_folder + '/mesh'):
            os.mkdir(config.out_folder + '/mesh')
        # Predicted vertices share the template's face topology.
        mesh = Mesh(v=vertices[0], f=template_mesh.f)
        mesh.write_obj(config.out_folder + '/mesh/' + config.img_path.split('/')[-1][:-4] + '.obj')
    if config.save_flame_parameters:
        if not os.path.exists(config.out_folder + '/params'):
            os.mkdir(config.out_folder + '/params')
        # Split the flat parameter vector: [cam(3) | pose | shape | expression].
        flame_parameters_ = {'cam': flame_parameters[0][:3], 'pose': flame_parameters[0][3:3+config.pose_params], 'shape': flame_parameters[0][3+config.pose_params:3+config.pose_params+config.shape_params],
                             'expression': flame_parameters[0][3+config.pose_params+config.shape_params:]}
        np.save(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', flame_parameters_)
    if config.neutralize_expression:
        from util.using_flame_parameters import make_prdicted_mesh_neutral
        if not os.path.exists(config.out_folder + '/neutral_mesh'):
            os.mkdir(config.out_folder + '/neutral_mesh')
        # Reads back the .npy saved above — requires save_flame_parameters too.
        neutral_mesh = make_prdicted_mesh_neutral(config.out_folder + '/params/' + config.img_path.split('/')[-1][:-4] + '.npy', config.flame_model_path)
        neutral_mesh.write_obj(config.out_folder + '/neutral_mesh/' + config.img_path.split('/')[-1][:-4] + '.obj')
if __name__ == '__main__':
    config = get_config()
    # Template mesh supplies the fixed face topology used for all outputs.
    template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')
    # Module-level renderer consumed by visualize().
    renderer = vis_util.SMPLRenderer(faces=template_mesh.f)
    if not os.path.exists(config.out_folder):
        os.makedirs(config.out_folder)
    if not os.path.exists(config.out_folder + '/images'):
        os.mkdir(config.out_folder + '/images')
    main(config, template_mesh)
| [
"numpy.array",
"psbody.mesh.Mesh",
"matplotlib.pyplot.imshow",
"os.path.exists",
"tensorflow.Session",
"util.renderer.SMPLRenderer",
"numpy.max",
"os.mkdir",
"matplotlib.pyplot.axis",
"skimage.io.imread",
"run_RingNet.RingNet_inference",
"matplotlib.pyplot.title",
"matplotlib.pyplot.draw",
... | [((2353, 2422), 'util.renderer.get_original', 'vis_util.get_original', (['proc_param', 'verts', 'cam'], {'img_size': 'img.shape[:2]'}), '(proc_param, verts, cam, img_size=img.shape[:2])\n', (2374, 2422), True, 'from util import renderer as vis_util\n'), ((2806, 2819), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2816, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2824, 2833), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2831, 2833), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2854), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (2849, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2874), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2869, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2897), 'matplotlib.pyplot.title', 'plt.title', (['"""input"""'], {}), "('input')\n", (2888, 2897), True, 'import matplotlib.pyplot as plt\n'), ((2902, 2917), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2910, 2917), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2938), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (2933, 2938), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2971), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rend_img_overlay'], {}), '(rend_img_overlay)\n', (2953, 2971), True, 'import matplotlib.pyplot as plt\n'), ((2976, 3004), 'matplotlib.pyplot.title', 'plt.title', (['"""3D Mesh overlay"""'], {}), "('3D Mesh overlay')\n", (2985, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3009, 3024), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3017, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3029, 3045), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (3040, 3045), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3070), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rend_img'], {}), '(rend_img)\n', (3060, 3070), True, 
'import matplotlib.pyplot as plt\n'), ((3075, 3095), 'matplotlib.pyplot.title', 'plt.title', (['"""3D mesh"""'], {}), "('3D mesh')\n", (3084, 3095), True, 'import matplotlib.pyplot as plt\n'), ((3100, 3115), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3108, 3115), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3136), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (3131, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3165), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rend_img_vp1'], {}), '(rend_img_vp1)\n', (3151, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3170, 3190), 'matplotlib.pyplot.title', 'plt.title', (['"""diff vp"""'], {}), "('diff vp')\n", (3179, 3190), True, 'import matplotlib.pyplot as plt\n'), ((3195, 3210), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3203, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3215, 3225), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3223, 3225), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3251), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (3238, 3251), True, 'import matplotlib.pyplot as plt\n'), ((3371, 3390), 'skimage.io.imread', 'io.imread', (['img_path'], {}), '(img_path)\n', (3380, 3390), True, 'import skimage.io as io\n'), ((3764, 3824), 'util.image.scale_and_crop', 'img_util.scale_and_crop', (['img', 'scale', 'center', 'config.img_size'], {}), '(img, scale, center, config.img_size)\n', (3787, 3824), True, 'from util import image as img_util\n'), ((4104, 4116), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4114, 4116), True, 'import tensorflow as tf\n'), ((4129, 4165), 'run_RingNet.RingNet_inference', 'RingNet_inference', (['config'], {'sess': 'sess'}), '(config, sess=sess)\n', (4146, 4165), False, 'from run_RingNet import RingNet_inference\n'), ((5908, 5920), 'config_test.get_config', 'get_config', ([], {}), '()\n', (5918, 5920), False, 
'from config_test import get_config\n'), ((5941, 5988), 'psbody.mesh.Mesh', 'Mesh', ([], {'filename': '"""./flame_model/FLAME_sample.ply"""'}), "(filename='./flame_model/FLAME_sample.ply')\n", (5945, 5988), False, 'from psbody.mesh import Mesh\n'), ((6004, 6048), 'util.renderer.SMPLRenderer', 'vis_util.SMPLRenderer', ([], {'faces': 'template_mesh.f'}), '(faces=template_mesh.f)\n', (6025, 6048), True, 'from util import renderer as vis_util\n'), ((3398, 3419), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (3404, 3419), True, 'import numpy as np\n'), ((4280, 4313), 'numpy.expand_dims', 'np.expand_dims', (['input_img'], {'axis': '(0)'}), '(input_img, axis=0)\n', (4294, 4313), True, 'import numpy as np\n'), ((4655, 4693), 'psbody.mesh.Mesh', 'Mesh', ([], {'v': 'vertices[0]', 'f': 'template_mesh.f'}), '(v=vertices[0], f=template_mesh.f)\n', (4659, 4693), False, 'from psbody.mesh import Mesh\n'), ((6061, 6094), 'os.path.exists', 'os.path.exists', (['config.out_folder'], {}), '(config.out_folder)\n', (6075, 6094), False, 'import os\n'), ((6104, 6134), 'os.makedirs', 'os.makedirs', (['config.out_folder'], {}), '(config.out_folder)\n', (6115, 6134), False, 'import os\n'), ((6147, 6192), 'os.path.exists', 'os.path.exists', (["(config.out_folder + '/images')"], {}), "(config.out_folder + '/images')\n", (6161, 6192), False, 'import os\n'), ((6202, 6241), 'os.mkdir', 'os.mkdir', (["(config.out_folder + '/images')"], {}), "(config.out_folder + '/images')\n", (6210, 6241), False, 'import os\n'), ((3556, 3577), 'numpy.max', 'np.max', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (3562, 3577), True, 'import numpy as np\n'), ((4545, 4588), 'os.path.exists', 'os.path.exists', (["(config.out_folder + '/mesh')"], {}), "(config.out_folder + '/mesh')\n", (4559, 4588), False, 'import os\n'), ((4602, 4639), 'os.mkdir', 'os.mkdir', (["(config.out_folder + '/mesh')"], {}), "(config.out_folder + '/mesh')\n", (4610, 4639), False, 'import os\n'), ((4847, 4892), 
'os.path.exists', 'os.path.exists', (["(config.out_folder + '/params')"], {}), "(config.out_folder + '/params')\n", (4861, 4892), False, 'import os\n'), ((4906, 4945), 'os.mkdir', 'os.mkdir', (["(config.out_folder + '/params')"], {}), "(config.out_folder + '/params')\n", (4914, 4945), False, 'import os\n'), ((5483, 5534), 'os.path.exists', 'os.path.exists', (["(config.out_folder + '/neutral_mesh')"], {}), "(config.out_folder + '/neutral_mesh')\n", (5497, 5534), False, 'import os\n'), ((5548, 5593), 'os.mkdir', 'os.mkdir', (["(config.out_folder + '/neutral_mesh')"], {}), "(config.out_folder + '/neutral_mesh')\n", (5556, 5593), False, 'import os\n'), ((3646, 3669), 'numpy.array', 'np.array', (['img.shape[:2]'], {}), '(img.shape[:2])\n', (3654, 3669), True, 'import numpy as np\n')] |
"""
Convert tabular data from
Tabular Benchmarks for Joint Architecture and Hyperparameter Optimization
<NAME> <NAME>
https://arxiv.org/pdf/1905.04970.pdf.
"""
import urllib
import tarfile
from pathlib import Path
from typing import Optional
import pandas as pd
import numpy as np
import ast
import h5py
from syne_tune.blackbox_repository.blackbox_tabular import serialize, BlackboxTabular
from syne_tune.blackbox_repository.conversion_scripts.utils import repository_path
from syne_tune.util import catchtime
from syne_tune.config_space import choice, logfinrange, finrange, randint
BLACKBOX_NAME = 'fcnet'
# Column names of the serialized objectives (prefixed 'metric_' below).
METRIC_VALID_LOSS = 'metric_valid_loss'
METRIC_ELAPSED_TIME = 'metric_elapsed_time'
# Fidelity (resource) attribute: training epoch in 1..MAX_RESOURCE_LEVEL.
RESOURCE_ATTR = 'hp_epoch'
MAX_RESOURCE_LEVEL = 100
NUM_UNITS_1 = 'hp_n_units_1'
NUM_UNITS_2 = 'hp_n_units_2'
# Discrete configuration space of the FCNet tabular benchmark; every
# hyperparameter name carries the 'hp_' prefix applied in convert_dataset.
CONFIGURATION_SPACE = {
    "hp_activation_fn_1": choice(["tanh", "relu"]),
    "hp_activation_fn_2": choice(["tanh", "relu"]),
    "hp_batch_size": logfinrange(8, 64, 4, cast_int=True),
    "hp_dropout_1": finrange(0.0, 0.6, 3),
    "hp_dropout_2": finrange(0.0, 0.6, 3),
    "hp_init_lr": choice([0.0005, 0.001, 0.005, 0.01, 0.05, 0.1]),
    'hp_lr_schedule': choice(["cosine", "const"]),
    NUM_UNITS_1: logfinrange(16, 512, 6, cast_int=True),
    NUM_UNITS_2: logfinrange(16, 512, 6, cast_int=True),
}
def convert_dataset(dataset_path: Path, max_rows: Optional[int] = None) -> BlackboxTabular:
    """Convert one FCNet HDF5 benchmark file into a BlackboxTabular.

    The HDF5 file maps a stringified configuration dict (the key) to
    per-seed metric arrays. Each key is parsed back into a dict of
    hyperparameters; metrics are stacked into a single tensor of shape
    (n_hps, n_seeds, n_fidelities, n_objectives).

    :param dataset_path: path to one fcnet_*_data.hdf5 file.
    :param max_rows: if given, only convert the first `max_rows` configs.
    """
    data = h5py.File(dataset_path, "r")
    keys = data.keys()
    if max_rows is not None:
        keys = list(keys)[:max_rows]
    # HDF5 keys are stringified dicts, e.g. "{'n_units_1': 16, ...}".
    hyperparameters = pd.DataFrame(ast.literal_eval(key) for key in keys)
    hyperparameters.rename(columns={col: "hp_" + col for col in hyperparameters.columns}, inplace=True)
    objective_names = [
        'valid_loss',
        'train_loss',
        'final_test_error',
        'n_params',
        'elapsed_time',
    ]
    # todo for now only full metrics
    fidelity_values = np.arange(1, MAX_RESOURCE_LEVEL + 1)
    n_fidelities = len(fidelity_values)
    n_objectives = len(objective_names)
    # The FCNet benchmark provides 4 independent training repetitions.
    n_seeds = 4
    n_hps = len(keys)
    objective_evaluations = np.empty((n_hps, n_seeds, n_fidelities, n_objectives)).astype('float32')
    def save_objective_values_helper(name, values):
        # Write `values` into the slice of objective_evaluations that
        # corresponds to objective `name`.
        assert values.shape == (n_hps, n_seeds, n_fidelities)
        name_index = dict(zip(
            objective_names,
            range(len(objective_names)))
        )
        objective_evaluations[..., name_index[name]] = values
    # (n_hps, n_seeds,)
    final_test_error = np.stack([data[key]['final_test_error'][:].astype('float32') for key in keys])
    # final_test_error is fidelity-independent: broadcast it across epochs.
    # (n_hps, n_seeds, n_fidelities)
    final_test_error = np.repeat(np.expand_dims(final_test_error, axis=-1), n_fidelities, axis=-1)
    save_objective_values_helper('final_test_error', final_test_error)
    # (n_hps, n_seeds,)
    n_params = np.stack([data[key]['n_params'][:].astype('float32') for key in keys])
    # n_params is also constant in the fidelity: broadcast across epochs.
    # (n_hps, n_seeds, n_fidelities)
    n_params = np.repeat(np.expand_dims(n_params, axis=-1), n_fidelities, axis=-1)
    save_objective_values_helper('n_params', n_params)
    # (n_hps, n_seeds,)
    runtime = np.stack([data[key]['runtime'][:].astype('float32') for key in keys])
    # linear interpolation to go from total training time to training time per epoch as in fcnet code
    # (n_hps, n_seeds, n_epochs)
    # todo utilize expand dim instead of reshape
    epochs = np.repeat(fidelity_values.reshape(1, -1),
                       n_hps * n_seeds, axis=0).reshape(n_hps, n_seeds, -1)
    elapsed_time = (epochs / MAX_RESOURCE_LEVEL) * runtime.reshape(
        (n_hps, n_seeds, 1))
    save_objective_values_helper('elapsed_time', elapsed_time)
    # metrics that are fully observed, only use train/valid loss as mse are the same numbers
    # for m in ['train_loss', 'train_mse', 'valid_loss', 'valid_mse']:
    for m in ['train_loss', 'valid_loss']:
        save_objective_values_helper(
            m,
            np.stack([data[key][m][:].astype('float32') for key in keys])
        )
    fidelity_space = {
        RESOURCE_ATTR: randint(lower=1, upper=MAX_RESOURCE_LEVEL)
    }
    objective_names = [f"metric_{m}" for m in objective_names]
    # Sanity checks:
    assert objective_names[0] == METRIC_VALID_LOSS
    assert objective_names[4] == METRIC_ELAPSED_TIME
    return BlackboxTabular(
        hyperparameters=hyperparameters,
        configuration_space=CONFIGURATION_SPACE,
        fidelity_space=fidelity_space,
        objectives_evaluations=objective_evaluations,
        fidelity_values=fidelity_values,
        objectives_names=objective_names,
    )
def generate_fcnet(s3_root: Optional[str] = None):
    """Download, convert, serialize and upload the FCNet blackbox.

    Downloads the tarball if it is not cached under repository_path,
    converts all four datasets to BlackboxTabular objects, saves them
    to disk and uploads the result to S3.

    :param s3_root: optional S3 root passed through to upload().
    """
    blackbox_name = BLACKBOX_NAME
    fcnet_file = repository_path / "fcnet_tabular_benchmarks.tar.gz"
    if not fcnet_file.exists():
        src = "http://ml4aad.org/wp-content/uploads/2019/01/fcnet_tabular_benchmarks.tar.gz"
        print(f"did not find {fcnet_file}, downloading {src}")
        urllib.request.urlretrieve(src, fcnet_file)
    # NOTE(review): extractall on a downloaded archive trusts its member
    # paths; consider a filtered extraction if the source is untrusted.
    with tarfile.open(fcnet_file) as f:
        f.extractall(path=repository_path)
    with catchtime("converting"):
        bb_dict = {}
        for dataset in ['protein_structure', 'naval_propulsion', 'parkinsons_telemonitoring', 'slice_localization']:
            print(f"converting {dataset}")
            dataset_path = repository_path / "fcnet_tabular_benchmarks" / f"fcnet_{dataset}_data.hdf5"
            bb_dict[dataset] = convert_dataset(dataset_path=dataset_path)
    with catchtime("saving to disk"):
        serialize(bb_dict=bb_dict, path=repository_path / blackbox_name)
    with catchtime("uploading to s3"):
        from syne_tune.blackbox_repository.conversion_scripts.utils import upload
        upload(blackbox_name, s3_root=s3_root)
def plot_learning_curves():
    """Sanity check: plot one validation-loss learning curve.

    Loads the serialized fcnet blackbox, samples a random configuration
    from the 'naval_propulsion' dataset and plots its validation loss
    over all epochs.
    """
    import matplotlib.pyplot as plt
    from syne_tune.blackbox_repository.repository import load
    # plot one learning-curve for sanity-check
    bb_dict = load(BLACKBOX_NAME)
    b = bb_dict['naval_propulsion']
    configuration = {k: v.sample() for k, v in b.configuration_space.items()}
    print(configuration)
    errors = []
    for i in range(1, MAX_RESOURCE_LEVEL + 1):
        # Fix: the fidelity attribute declared in convert_dataset is
        # RESOURCE_ATTR ('hp_epoch'), not 'epochs'.
        res = b.objective_function(configuration=configuration, fidelity={RESOURCE_ATTR: i})
        errors.append(res[METRIC_VALID_LOSS])
    plt.plot(errors)
if __name__ == '__main__':
    generate_fcnet()
    # Uncomment to eyeball one learning curve after generation:
    # plot_learning_curves()
| [
"syne_tune.util.catchtime",
"tarfile.open",
"syne_tune.blackbox_repository.blackbox_tabular.BlackboxTabular",
"syne_tune.config_space.randint",
"syne_tune.config_space.finrange",
"syne_tune.blackbox_repository.repository.load",
"urllib.request.urlretrieve",
"matplotlib.pyplot.plot",
"syne_tune.confi... | [((867, 891), 'syne_tune.config_space.choice', 'choice', (["['tanh', 'relu']"], {}), "(['tanh', 'relu'])\n", (873, 891), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((919, 943), 'syne_tune.config_space.choice', 'choice', (["['tanh', 'relu']"], {}), "(['tanh', 'relu'])\n", (925, 943), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((966, 1002), 'syne_tune.config_space.logfinrange', 'logfinrange', (['(8)', '(64)', '(4)'], {'cast_int': '(True)'}), '(8, 64, 4, cast_int=True)\n', (977, 1002), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1024, 1045), 'syne_tune.config_space.finrange', 'finrange', (['(0.0)', '(0.6)', '(3)'], {}), '(0.0, 0.6, 3)\n', (1032, 1045), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1067, 1088), 'syne_tune.config_space.finrange', 'finrange', (['(0.0)', '(0.6)', '(3)'], {}), '(0.0, 0.6, 3)\n', (1075, 1088), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1108, 1155), 'syne_tune.config_space.choice', 'choice', (['[0.0005, 0.001, 0.005, 0.01, 0.05, 0.1]'], {}), '([0.0005, 0.001, 0.005, 0.01, 0.05, 0.1])\n', (1114, 1155), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1179, 1206), 'syne_tune.config_space.choice', 'choice', (["['cosine', 'const']"], {}), "(['cosine', 'const'])\n", (1185, 1206), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1225, 1263), 'syne_tune.config_space.logfinrange', 'logfinrange', (['(16)', '(512)', '(6)'], {'cast_int': '(True)'}), '(16, 512, 6, cast_int=True)\n', (1236, 1263), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1282, 1320), 'syne_tune.config_space.logfinrange', 'logfinrange', (['(16)', '(512)', '(6)'], {'cast_int': '(True)'}), '(16, 512, 6, cast_int=True)\n', 
(1293, 1320), False, 'from syne_tune.config_space import choice, logfinrange, finrange, randint\n'), ((1400, 1428), 'h5py.File', 'h5py.File', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (1409, 1428), False, 'import h5py\n'), ((1904, 1940), 'numpy.arange', 'np.arange', (['(1)', '(MAX_RESOURCE_LEVEL + 1)'], {}), '(1, MAX_RESOURCE_LEVEL + 1)\n', (1913, 1940), True, 'import numpy as np\n'), ((4303, 4550), 'syne_tune.blackbox_repository.blackbox_tabular.BlackboxTabular', 'BlackboxTabular', ([], {'hyperparameters': 'hyperparameters', 'configuration_space': 'CONFIGURATION_SPACE', 'fidelity_space': 'fidelity_space', 'objectives_evaluations': 'objective_evaluations', 'fidelity_values': 'fidelity_values', 'objectives_names': 'objective_names'}), '(hyperparameters=hyperparameters, configuration_space=\n CONFIGURATION_SPACE, fidelity_space=fidelity_space,\n objectives_evaluations=objective_evaluations, fidelity_values=\n fidelity_values, objectives_names=objective_names)\n', (4318, 4550), False, 'from syne_tune.blackbox_repository.blackbox_tabular import serialize, BlackboxTabular\n'), ((5935, 5954), 'syne_tune.blackbox_repository.repository.load', 'load', (['BLACKBOX_NAME'], {}), '(BLACKBOX_NAME)\n', (5939, 5954), False, 'from syne_tune.blackbox_repository.repository import load\n'), ((6296, 6312), 'matplotlib.pyplot.plot', 'plt.plot', (['errors'], {}), '(errors)\n', (6304, 6312), True, 'import matplotlib.pyplot as plt\n'), ((2648, 2689), 'numpy.expand_dims', 'np.expand_dims', (['final_test_error'], {'axis': '(-1)'}), '(final_test_error, axis=-1)\n', (2662, 2689), True, 'import numpy as np\n'), ((2959, 2992), 'numpy.expand_dims', 'np.expand_dims', (['n_params'], {'axis': '(-1)'}), '(n_params, axis=-1)\n', (2973, 2992), True, 'import numpy as np\n'), ((4054, 4096), 'syne_tune.config_space.randint', 'randint', ([], {'lower': '(1)', 'upper': 'MAX_RESOURCE_LEVEL'}), '(lower=1, upper=MAX_RESOURCE_LEVEL)\n', (4061, 4096), False, 'from syne_tune.config_space import 
choice, logfinrange, finrange, randint\n'), ((4944, 4987), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['src', 'fcnet_file'], {}), '(src, fcnet_file)\n', (4970, 4987), False, 'import urllib\n'), ((4998, 5022), 'tarfile.open', 'tarfile.open', (['fcnet_file'], {}), '(fcnet_file)\n', (5010, 5022), False, 'import tarfile\n'), ((5082, 5105), 'syne_tune.util.catchtime', 'catchtime', (['"""converting"""'], {}), "('converting')\n", (5091, 5105), False, 'from syne_tune.util import catchtime\n'), ((5475, 5502), 'syne_tune.util.catchtime', 'catchtime', (['"""saving to disk"""'], {}), "('saving to disk')\n", (5484, 5502), False, 'from syne_tune.util import catchtime\n'), ((5512, 5576), 'syne_tune.blackbox_repository.blackbox_tabular.serialize', 'serialize', ([], {'bb_dict': 'bb_dict', 'path': '(repository_path / blackbox_name)'}), '(bb_dict=bb_dict, path=repository_path / blackbox_name)\n', (5521, 5576), False, 'from syne_tune.blackbox_repository.blackbox_tabular import serialize, BlackboxTabular\n'), ((5587, 5615), 'syne_tune.util.catchtime', 'catchtime', (['"""uploading to s3"""'], {}), "('uploading to s3')\n", (5596, 5615), False, 'from syne_tune.util import catchtime\n'), ((5707, 5745), 'syne_tune.blackbox_repository.conversion_scripts.utils.upload', 'upload', (['blackbox_name'], {'s3_root': 's3_root'}), '(blackbox_name, s3_root=s3_root)\n', (5713, 5745), False, 'from syne_tune.blackbox_repository.conversion_scripts.utils import upload\n'), ((1554, 1575), 'ast.literal_eval', 'ast.literal_eval', (['key'], {}), '(key)\n', (1570, 1575), False, 'import ast\n'), ((2088, 2142), 'numpy.empty', 'np.empty', (['(n_hps, n_seeds, n_fidelities, n_objectives)'], {}), '((n_hps, n_seeds, n_fidelities, n_objectives))\n', (2096, 2142), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Gamma.
"""
import numpy as np
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
from mindspore import context
skip_flag = context.get_context("device_target") != "Ascend"
def test_gamma_shape_errpr():
    """Broadcast-incompatible parameter shapes must raise ValueError."""
    concentration1 = [[2.0], [1.0]]
    concentration0 = [[2.0], [3.0], [4.0]]
    with pytest.raises(ValueError):
        msd.Gamma(concentration1, concentration0, dtype=dtype.float32)
def test_type():
    """An integer dtype is rejected at construction time."""
    bad_dtype = dtype.int32
    with pytest.raises(TypeError):
        msd.Gamma([0.0], [1.0], dtype=bad_dtype)
def test_name():
    """A non-string distribution name must raise TypeError."""
    with pytest.raises(TypeError):
        msd.Gamma([0.0], [1.0], name=1.0)
def test_seed():
    """A non-integer seed must raise TypeError."""
    with pytest.raises(TypeError):
        msd.Gamma([0.0], [1.0], seed='seed')
def test_concentration1():
    """concentration1 must be strictly positive."""
    for bad_value in ([0.0], [-1.0]):
        with pytest.raises(ValueError):
            msd.Gamma(bad_value, [1.0])
def test_concentration0():
    """concentration0 must be strictly positive."""
    for bad_value in ([0.0], [-1.0]):
        with pytest.raises(ValueError):
            msd.Gamma([1.0], bad_value)
def test_scalar():
    """Bare Python scalars (not wrapped in a list) are rejected."""
    for c1, c0 in ((3.0, [4.0]), ([3.0], -4.0)):
        with pytest.raises(TypeError):
            msd.Gamma(c1, c0)
def test_arguments():
    """
    args passing during initialization.
    """
    # Parameter-less construction is allowed (parameters supplied later).
    no_args = msd.Gamma()
    assert isinstance(no_args, msd.Distribution)
    # Construction with explicit concentrations and dtype.
    with_args = msd.Gamma([3.0], [4.0], dtype=dtype.float32)
    assert isinstance(with_args, msd.Distribution)
class GammaProb(nn.Cell):
    """Gamma distribution with concentrations fixed at initialization."""

    def __init__(self):
        super(GammaProb, self).__init__()
        self.gamma = msd.Gamma([3.0, 4.0], [1.0, 1.0], dtype=dtype.float32)

    def construct(self, value):
        # Sum of pdf and log-pdf evaluated at `value`.
        return self.gamma.prob(value) + self.gamma.log_prob(value)
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_prob():
    """prob/log_prob via construct, parameters baked into the cell."""
    net = GammaProb()
    out = net(Tensor([0.5, 1.0], dtype=dtype.float32))
    assert isinstance(out, Tensor)
class GammaProb1(nn.Cell):
    """Gamma distribution with concentrations supplied per call."""

    def __init__(self):
        super(GammaProb1, self).__init__()
        self.gamma = msd.Gamma()

    def construct(self, value, concentration1, concentration0):
        # Sum of pdf and log-pdf with explicit distribution parameters.
        return (self.gamma.prob(value, concentration1, concentration0)
                + self.gamma.log_prob(value, concentration1, concentration0))
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_prob1():
    """prob/log_prob via construct, parameters passed at call time."""
    net = GammaProb1()
    value = Tensor([0.5, 1.0], dtype=dtype.float32)
    c1 = Tensor([2.0, 3.0], dtype=dtype.float32)
    c0 = Tensor([1.0], dtype=dtype.float32)
    assert isinstance(net(value, c1, c0), Tensor)
class GammaKl(nn.Cell):
    """kl_loss of Gamma: one cell with baked-in params, one without."""

    def __init__(self):
        super(GammaKl, self).__init__()
        self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
        self.g2 = msd.Gamma(dtype=dtype.float32)

    def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
        first = self.g1.kl_loss('Gamma', concentration1_b, concentration0_b)
        second = self.g2.kl_loss('Gamma', concentration1_b, concentration0_b,
                                 concentration1_a, concentration0_a)
        return first + second
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_kl():
    """KL divergence between two Gamma distributions yields a Tensor."""
    def scalar_tensor(v):
        return Tensor(np.array([v]).astype(np.float32), dtype=dtype.float32)
    net = GammaKl()
    ans = net(scalar_tensor(1.0), scalar_tensor(1.0),
              scalar_tensor(2.0), scalar_tensor(3.0))
    assert isinstance(ans, Tensor)
class GammaCrossEntropy(nn.Cell):
    """cross_entropy of Gamma: baked-in params and call-time params."""

    def __init__(self):
        super(GammaCrossEntropy, self).__init__()
        self.g1 = msd.Gamma(np.array([3.0]), np.array([4.0]), dtype=dtype.float32)
        self.g2 = msd.Gamma(dtype=dtype.float32)

    def construct(self, concentration1_b, concentration0_b, concentration1_a, concentration0_a):
        first = self.g1.cross_entropy('Gamma', concentration1_b, concentration0_b)
        second = self.g2.cross_entropy('Gamma', concentration1_b, concentration0_b,
                                       concentration1_a, concentration0_a)
        return first + second
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_cross_entropy():
    """Cross entropy between Gamma distributions yields a Tensor."""
    def scalar_tensor(v):
        return Tensor(np.array([v]).astype(np.float32), dtype=dtype.float32)
    net = GammaCrossEntropy()
    ans = net(scalar_tensor(1.0), scalar_tensor(1.0),
              scalar_tensor(2.0), scalar_tensor(3.0))
    assert isinstance(ans, Tensor)
class GammaBasics(nn.Cell):
    """Basic statistics of Gamma: mean, standard deviation and mode."""

    def __init__(self):
        super(GammaBasics, self).__init__()
        self.g = msd.Gamma(np.array([3.0, 4.0]), np.array([4.0, 6.0]), dtype=dtype.float32)

    def construct(self):
        # Combine the three statistics into a single output Tensor.
        return self.g.mean() + self.g.sd() + self.g.mode()
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_bascis():
    """mean/sd/mode of Gamma evaluate to a Tensor."""
    ans = GammaBasics()()
    assert isinstance(ans, Tensor)
class GammaConstruct(nn.Cell):
    """Query probabilities through the distribution cell's call interface."""

    def __init__(self):
        super(GammaConstruct, self).__init__()
        self.gamma = msd.Gamma([3.0], [4.0])
        self.gamma1 = msd.Gamma()

    def construct(self, value, concentration1, concentration0):
        # Same query three ways: baked-in parameters, overridden
        # parameters, and parameters required at call time.
        prob = self.gamma('prob', value)
        prob1 = self.gamma('prob', value, concentration1, concentration0)
        prob2 = self.gamma1('prob', value, concentration1, concentration0)
        return prob + prob1 + prob2
@pytest.mark.skipif(skip_flag, reason="not support running in CPU and GPU")
def test_gamma_construct():
    """Probability evaluated through the cell's construct path."""
    net = GammaConstruct()
    value = Tensor([0.5, 1.0], dtype=dtype.float32)
    ans = net(value,
              Tensor([0.0], dtype=dtype.float32),
              Tensor([1.0], dtype=dtype.float32))
    assert isinstance(ans, Tensor)
| [
"mindspore.context.get_context",
"numpy.array",
"pytest.raises",
"mindspore.nn.probability.distribution.Gamma",
"pytest.mark.skipif",
"mindspore.Tensor"
] | [((2566, 2640), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (2584, 2640), False, 'import pytest\n'), ((3341, 3415), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (3359, 3415), False, 'import pytest\n'), ((4429, 4503), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (4447, 4503), False, 'import pytest\n'), ((5673, 5747), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (5691, 5747), False, 'import pytest\n'), ((6731, 6805), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (6749, 6805), False, 'import pytest\n'), ((7512, 7586), 'pytest.mark.skipif', 'pytest.mark.skipif', (['skip_flag'], {'reason': '"""not support running in CPU and GPU"""'}), "(skip_flag, reason='not support running in CPU and GPU')\n", (7530, 7586), False, 'import pytest\n'), ((927, 963), 'mindspore.context.get_context', 'context.get_context', (['"""device_target"""'], {}), "('device_target')\n", (946, 963), False, 'from mindspore import context\n'), ((2007, 2018), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', ([], {}), '()\n', (2016, 2018), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2070, 2114), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[3.0]', '[4.0]'], {'dtype': 'dtype.float32'}), '([3.0], [4.0], dtype=dtype.float32)\n', (2079, 2114), True, 'import mindspore.nn.probability.distribution as 
msd\n'), ((2779, 2818), 'mindspore.Tensor', 'Tensor', (['[0.5, 1.0]'], {'dtype': 'dtype.float32'}), '([0.5, 1.0], dtype=dtype.float32)\n', (2785, 2818), False, 'from mindspore import Tensor\n'), ((3587, 3626), 'mindspore.Tensor', 'Tensor', (['[0.5, 1.0]'], {'dtype': 'dtype.float32'}), '([0.5, 1.0], dtype=dtype.float32)\n', (3593, 3626), False, 'from mindspore import Tensor\n'), ((3648, 3687), 'mindspore.Tensor', 'Tensor', (['[2.0, 3.0]'], {'dtype': 'dtype.float32'}), '([2.0, 3.0], dtype=dtype.float32)\n', (3654, 3687), False, 'from mindspore import Tensor\n'), ((3709, 3743), 'mindspore.Tensor', 'Tensor', (['[1.0]'], {'dtype': 'dtype.float32'}), '([1.0], dtype=dtype.float32)\n', (3715, 3743), False, 'from mindspore import Tensor\n'), ((7725, 7764), 'mindspore.Tensor', 'Tensor', (['[0.5, 1.0]'], {'dtype': 'dtype.float32'}), '([0.5, 1.0], dtype=dtype.float32)\n', (7731, 7764), False, 'from mindspore import Tensor\n'), ((7786, 7820), 'mindspore.Tensor', 'Tensor', (['[0.0]'], {'dtype': 'dtype.float32'}), '([0.0], dtype=dtype.float32)\n', (7792, 7820), False, 'from mindspore import Tensor\n'), ((7842, 7876), 'mindspore.Tensor', 'Tensor', (['[1.0]'], {'dtype': 'dtype.float32'}), '([1.0], dtype=dtype.float32)\n', (7848, 7876), False, 'from mindspore import Tensor\n'), ((1053, 1078), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1066, 1078), False, 'import pytest\n'), ((1088, 1157), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[[2.0], [1.0]]', '[[2.0], [3.0], [4.0]]'], {'dtype': 'dtype.float32'}), '([[2.0], [1.0]], [[2.0], [3.0], [4.0]], dtype=dtype.float32)\n', (1097, 1157), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1181, 1205), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1194, 1205), False, 'import pytest\n'), ((1215, 1257), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[0.0]', '[1.0]'], {'dtype': 'dtype.int32'}), '([0.0], [1.0], dtype=dtype.int32)\n', 
(1224, 1257), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1284, 1308), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1297, 1308), False, 'import pytest\n'), ((1318, 1351), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[0.0]', '[1.0]'], {'name': '(1.0)'}), '([0.0], [1.0], name=1.0)\n', (1327, 1351), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1378, 1402), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1391, 1402), False, 'import pytest\n'), ((1412, 1448), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[0.0]', '[1.0]'], {'seed': '"""seed"""'}), "([0.0], [1.0], seed='seed')\n", (1421, 1448), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1485, 1510), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1498, 1510), False, 'import pytest\n'), ((1520, 1543), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[0.0]', '[1.0]'], {}), '([0.0], [1.0])\n', (1529, 1543), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1551, 1576), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1564, 1576), False, 'import pytest\n'), ((1586, 1610), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[-1.0]', '[1.0]'], {}), '([-1.0], [1.0])\n', (1595, 1610), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1647, 1672), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1660, 1672), False, 'import pytest\n'), ((1682, 1705), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[1.0]', '[0.0]'], {}), '([1.0], [0.0])\n', (1691, 1705), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1713, 1738), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1726, 1738), False, 'import pytest\n'), ((1748, 1772), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[1.0]', 
'[-1.0]'], {}), '([1.0], [-1.0])\n', (1757, 1772), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1801, 1825), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1814, 1825), False, 'import pytest\n'), ((1835, 1856), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['(3.0)', '[4.0]'], {}), '(3.0, [4.0])\n', (1844, 1856), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1864, 1888), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1877, 1888), False, 'import pytest\n'), ((1898, 1920), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[3.0]', '(-4.0)'], {}), '([3.0], -4.0)\n', (1907, 1920), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2360, 2414), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[3.0, 4.0]', '[1.0, 1.0]'], {'dtype': 'dtype.float32'}), '([3.0, 4.0], [1.0, 1.0], dtype=dtype.float32)\n', (2369, 2414), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3082, 3093), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', ([], {}), '()\n', (3091, 3093), True, 'import mindspore.nn.probability.distribution as msd\n'), ((4086, 4116), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', ([], {'dtype': 'dtype.float32'}), '(dtype=dtype.float32)\n', (4095, 4116), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5322, 5352), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', ([], {'dtype': 'dtype.float32'}), '(dtype=dtype.float32)\n', (5331, 5352), True, 'import mindspore.nn.probability.distribution as msd\n'), ((7160, 7183), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', (['[3.0]', '[4.0]'], {}), '([3.0], [4.0])\n', (7169, 7183), True, 'import mindspore.nn.probability.distribution as msd\n'), ((7206, 7217), 'mindspore.nn.probability.distribution.Gamma', 'msd.Gamma', ([], {}), '()\n', (7215, 7217), True, 'import mindspore.nn.probability.distribution as msd\n'), 
((4013, 4028), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (4021, 4028), True, 'import numpy as np\n'), ((4030, 4045), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (4038, 4045), True, 'import numpy as np\n'), ((5249, 5264), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (5257, 5264), True, 'import numpy as np\n'), ((5266, 5281), 'numpy.array', 'np.array', (['[4.0]'], {}), '([4.0])\n', (5274, 5281), True, 'import numpy as np\n'), ((6522, 6542), 'numpy.array', 'np.array', (['[3.0, 4.0]'], {}), '([3.0, 4.0])\n', (6530, 6542), True, 'import numpy as np\n'), ((6544, 6564), 'numpy.array', 'np.array', (['[4.0, 6.0]'], {}), '([4.0, 6.0])\n', (6552, 6564), True, 'import numpy as np\n'), ((4603, 4618), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4611, 4618), True, 'import numpy as np\n'), ((4690, 4705), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4698, 4705), True, 'import numpy as np\n'), ((4777, 4792), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (4785, 4792), True, 'import numpy as np\n'), ((4864, 4879), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (4872, 4879), True, 'import numpy as np\n'), ((5902, 5917), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (5910, 5917), True, 'import numpy as np\n'), ((5989, 6004), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (5997, 6004), True, 'import numpy as np\n'), ((6076, 6091), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (6084, 6091), True, 'import numpy as np\n'), ((6163, 6178), 'numpy.array', 'np.array', (['[3.0]'], {}), '([3.0])\n', (6171, 6178), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 3 17:14:53 2019
@author: liuhongbing
"""
import pandas as pd
import numpy as np
from scipy import stats
from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc
import tensorflow as tf
# Load the WISDM dataset
def read_data(file_path):
    """Read the raw WISDM accelerometer file into a DataFrame.

    Parameters
    ----------
    file_path : str or file-like
        Path (or buffer) of the raw comma-separated WISDM recording.

    Returns
    -------
    pandas.DataFrame
        Columns: user-id, activity, timestamp, x-axis, y-axis, z-axis.
        The z-axis column is stripped of its trailing ';' and cast to float32.
    """
    column_names = ['user-id', 'activity', 'timestamp', 'x-axis', 'y-axis', 'z-axis']
    data = pd.read_csv(file_path, header=None, names=column_names)
    # Bug fix: operate on the local `data`; the original indexed the global
    # `dataset`, which does not exist yet when this function is first called
    # (the script assigns `dataset` only after read_data() returns).
    data['z-axis'] = data['z-axis'].apply(lambda x: str(x).split(";")[0])
    data['z-axis'] = data['z-axis'].astype('float32')
    return data
# Z-score standardization
def feature_normalize(dataset):
    """Return `dataset` standardized to zero mean and unit variance."""
    mean_value = np.mean(dataset, axis=0)
    print('mu:', mean_value)
    std_value = np.std(dataset, axis=0)
    print('sigma:', std_value)
    return (dataset - mean_value) / std_value
# Build time windows: 90 samples x 50 ms = 4.5 s per window, advancing by
# 45 records each step, i.e. half-overlapping windows.
def windows(data, size):
    """Yield half-overlapping (start, start + size) index pairs over `data`."""
    position = 0
    while position < data.count():
        yield position, position + size
        # Advance by half a window; this yields float positions,
        # which the caller truncates with int().
        position += (size / 2)
# Build the model inputs: each sample holds `window_size` consecutive
# x/y/z records; the most frequent activity inside the window (stats.mode)
# becomes the label of the whole window.
def segment_signal(data, window_size=90):
    """Cut the accelerometer signal into half-overlapping windows.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame with 'timestamp', 'x-axis', 'y-axis', 'z-axis' and 'activity'
        columns (as produced by read_data()).
    window_size : int
        Number of consecutive records per segment (default 90).

    Returns
    -------
    (segments, labels)
        segments has shape (n, window_size, 3); labels holds the modal
        activity of each window.  Incomplete tail windows are dropped.
    """
    segment_list = []
    label_list = []
    for (start, end) in windows(data['timestamp'], window_size):
        start = int(start)
        end = int(end)
        x = data["x-axis"][start:end]
        y = data["y-axis"][start:end]
        z = data["z-axis"][start:end]
        # Bug fix: the length must be checked on `data`, not on the global
        # `dataset` the original referenced (NameError if unset, and the
        # wrong frame otherwise).
        if len(data['timestamp'][start:end]) == window_size:
            segment_list.append(np.dstack([x, y, z]))
            label_list.append(stats.mode(data["activity"][start:end])[0][0])
    # Stack once at the end instead of np.vstack inside the loop
    # (O(n) instead of O(n^2) copying); debug prints removed.
    if segment_list:
        segments = np.vstack(segment_list)
    else:
        segments = np.empty((0, window_size, 3))
    labels = np.array(label_list)
    return segments, labels
# Initialize network parameters
def weight_variable(shape):
    """Create a trainable weight tensor drawn from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
# Initialize network parameters
def bias_variable(shape):
    """Create a trainable bias tensor initialized to zeros of the given shape."""
    return tf.Variable(tf.constant(0.0, shape=shape))
# Perform the convolution operation
def depthwise_conv2d(x, W):
    """Depthwise 2-D convolution of `x` with filter `W`, stride 1, VALID padding."""
    return tf.nn.depthwise_conv2d(x, W, [1, 1, 1, 1], padding='VALID')
# Run a one-dimensional convolution per input channel, then ReLU
def apply_depthwise_conv(x, kernel_size, num_channels, depth):
    """Depthwise (1 x kernel_size) convolution over `x` followed by ReLU."""
    conv_weights = weight_variable([1, kernel_size, num_channels, depth])
    conv_biases = bias_variable([depth * num_channels])
    convolved = depthwise_conv2d(x, conv_weights)
    return tf.nn.relu(tf.add(convolved, conv_biases))
# One-dimensional max pooling over the convolution output
def apply_max_pool(x, kernel_size, stride_size):
    """Max-pool `x` along the width dimension with VALID padding."""
    return tf.nn.max_pool(
        x,
        ksize=[1, 1, kernel_size, 1],
        strides=[1, 1, stride_size, 1],
        padding='VALID')
# --- Data preparation ----------------------------------------------------
root = "/Users/liuhongbing/Documents/tensorflow/data/WISDM_ar_v1.1/"
dataset2 = read_data(root +'WISDM_ar_v1.1_raw.txt')
dataset2.fillna(0, inplace=True)
# Only the first 200k rows are used for training.
dataset = dataset2[:200000]
dataset['x-axis'] = feature_normalize(dataset['x-axis'])
dataset['y-axis'] = feature_normalize(dataset['y-axis'])
dataset['z-axis'] = feature_normalize(dataset['z-axis'])
segments, labels = segment_signal(dataset)
# One-hot encode the activity labels.
labels = np.asarray(pd.get_dummies(labels), dtype = np.int8)
# Build the network input
## [batch_size, height, width, chanles]
reshaped_segments = segments.reshape(len(segments), 1, 90, 3)
# Split the prepared data into training and test sets, roughly 70/30.
train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]
# Input dimensions and number of label classes
input_height = 1
input_width = 90
num_labels = 4 # 6
num_channels = 3
batch_size = 10
kernel_size = 60
depth = 60
# Number of neurons in the hidden layer
num_hidden = 1000
learning_rate = 0.0001
# Number of epochs used to reduce the cost
training_epochs = 8
total_batchs = reshaped_segments.shape[0] // batch_size
# Build the network with TensorFlow (1.x-style graph API).
X = tf.placeholder(tf.float32, shape=[None,input_height,input_width,num_channels])
Y = tf.placeholder(tf.float32, shape=[None,num_labels])
c = apply_depthwise_conv(X,kernel_size,num_channels,depth)
p = apply_max_pool(c,20,2)
c = apply_depthwise_conv(p,6,depth*num_channels,depth//10)
shape = c.get_shape().as_list()
c_flat = tf.reshape(c, [-1, shape[1] * shape[2] * shape[3]])
# Fully connected hidden layer with tanh activation.
f_weights_l1 = weight_variable([shape[1] * shape[2] * depth * num_channels * (depth//10), num_hidden])
f_biases_l1 = bias_variable([num_hidden])
f = tf.nn.tanh(tf.add(tf.matmul(c_flat, f_weights_l1),f_biases_l1))
out_weights = weight_variable([num_hidden, num_labels])
out_biases = bias_variable([num_labels])
# Softmax output and cross-entropy loss.
y_ = tf.nn.softmax(tf.matmul(f, out_weights) + out_biases)
loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
cost_history = np.empty(shape=[1], dtype=float)
# Start training
with tf.Session() as session:
    tf.initialize_all_variables().run()
    # Iterate over epochs
    for epoch in range(training_epochs):
        for b in range(total_batchs):
            offset = (b * batch_size) % (train_y.shape[0] - batch_size)
            batch_x = train_x[offset:(offset + batch_size), :, :, :]
            batch_y = train_y[offset:(offset + batch_size), :]
            _, c = session.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
            cost_history = np.append(cost_history, c)
        print("Epoch {}: Training Loss = {}, Training Accuracy = {}".format(
            epoch, c, session.run(accuracy, feed_dict={X: train_x, Y: train_y})))
    # Evaluate on the held-out test split.
    y_p = tf.argmax(y_, 1)
    y_true = np.argmax(test_y, 1)
    final_acc, y_pred = session.run([accuracy, y_p], feed_dict={X: test_x, Y: test_y})
    print("Testing Accuracy: {}".format(final_acc))
    temp_y_true = np.unique(y_true)
    temp_y_pred = np.unique(y_pred)
    np.save("y_true", y_true)
    np.save("y_pred", y_pred)
    print("temp_y_true", temp_y_true)
    print( "temp_y_pred", temp_y_pred)
    # Compute the model metrics
    print( "Precision", precision_score(y_true.tolist(), y_pred.tolist(), average='weighted'))
    print( "Recall", recall_score(y_true, y_pred, average='weighted'))
    print( "f1_score", f1_score(y_true, y_pred, average='weighted'))
    print( "confusion_matrix")
print( confusion_matrix(y_true, y_pred)) | [
"pandas.read_csv",
"sklearn.metrics.recall_score",
"tensorflow.cast",
"tensorflow.log",
"numpy.save",
"numpy.mean",
"tensorflow.nn.depthwise_conv2d",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.empty",
"tensorflow.matmul",
"sklearn.metrics.confusion_matrix",
"tensorflow.initialize... | [((3860, 3945), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, input_height, input_width, num_channels]'}), '(tf.float32, shape=[None, input_height, input_width,\n num_channels])\n', (3874, 3945), True, 'import tensorflow as tf\n'), ((3943, 3995), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, num_labels]'}), '(tf.float32, shape=[None, num_labels])\n', (3957, 3995), True, 'import tensorflow as tf\n'), ((4183, 4234), 'tensorflow.reshape', 'tf.reshape', (['c', '[-1, shape[1] * shape[2] * shape[3]]'], {}), '(c, [-1, shape[1] * shape[2] * shape[3]])\n', (4193, 4234), True, 'import tensorflow as tf\n'), ((4884, 4916), 'numpy.empty', 'np.empty', ([], {'shape': '[1]', 'dtype': 'float'}), '(shape=[1], dtype=float)\n', (4892, 4916), True, 'import numpy as np\n'), ((435, 490), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'header': 'None', 'names': 'column_names'}), '(file_path, header=None, names=column_names)\n', (446, 490), True, 'import pandas as pd\n'), ((690, 714), 'numpy.mean', 'np.mean', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (697, 714), True, 'import numpy as np\n'), ((747, 770), 'numpy.std', 'np.std', (['dataset'], {'axis': '(0)'}), '(dataset, axis=0)\n', (753, 770), True, 'import numpy as np\n'), ((1216, 1245), 'numpy.empty', 'np.empty', (['(0, window_size, 3)'], {}), '((0, window_size, 3))\n', (1224, 1245), True, 'import numpy as np\n'), ((1259, 1270), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1267, 1270), True, 'import numpy as np\n'), ((1894, 1932), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (1913, 1932), True, 'import tensorflow as tf\n'), ((1944, 1964), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1955, 1964), True, 'import tensorflow as tf\n'), ((2018, 2047), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': 'shape'}), '(0.0, 
shape=shape)\n', (2029, 2047), True, 'import tensorflow as tf\n'), ((2059, 2079), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (2070, 2079), True, 'import tensorflow as tf\n'), ((2129, 2188), 'tensorflow.nn.depthwise_conv2d', 'tf.nn.depthwise_conv2d', (['x', 'W', '[1, 1, 1, 1]'], {'padding': '"""VALID"""'}), "(x, W, [1, 1, 1, 1], padding='VALID')\n", (2151, 2188), True, 'import tensorflow as tf\n'), ((2568, 2668), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, 1, kernel_size, 1]', 'strides': '[1, 1, stride_size, 1]', 'padding': '"""VALID"""'}), "(x, ksize=[1, 1, kernel_size, 1], strides=[1, 1, stride_size,\n 1], padding='VALID')\n", (2582, 2668), True, 'import tensorflow as tf\n'), ((3118, 3140), 'pandas.get_dummies', 'pd.get_dummies', (['labels'], {}), '(labels)\n', (3132, 3140), True, 'import pandas as pd\n'), ((4768, 4784), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (4777, 4784), True, 'import tensorflow as tf\n'), ((4785, 4800), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (4794, 4800), True, 'import tensorflow as tf\n'), ((4827, 4866), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (4834, 4866), True, 'import tensorflow as tf\n'), ((4930, 4942), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4940, 4942), True, 'import tensorflow as tf\n'), ((5598, 5614), 'tensorflow.argmax', 'tf.argmax', (['y_', '(1)'], {}), '(y_, 1)\n', (5607, 5614), True, 'import tensorflow as tf\n'), ((5628, 5648), 'numpy.argmax', 'np.argmax', (['test_y', '(1)'], {}), '(test_y, 1)\n', (5637, 5648), True, 'import numpy as np\n'), ((5806, 5823), 'numpy.unique', 'np.unique', (['y_true'], {}), '(y_true)\n', (5815, 5823), True, 'import numpy as np\n'), ((5842, 5859), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (5851, 5859), True, 'import numpy as np\n'), ((5864, 5889), 'numpy.save', 'np.save', (['"""y_true"""', 
'y_true'], {}), "('y_true', y_true)\n", (5871, 5889), True, 'import numpy as np\n'), ((5894, 5919), 'numpy.save', 'np.save', (['"""y_pred"""', 'y_pred'], {}), "('y_pred', y_pred)\n", (5901, 5919), True, 'import numpy as np\n'), ((4403, 4434), 'tensorflow.matmul', 'tf.matmul', (['c_flat', 'f_weights_l1'], {}), '(c_flat, f_weights_l1)\n', (4412, 4434), True, 'import tensorflow as tf\n'), ((4566, 4591), 'tensorflow.matmul', 'tf.matmul', (['f', 'out_weights'], {}), '(f, out_weights)\n', (4575, 4591), True, 'import tensorflow as tf\n'), ((4657, 4719), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (4690, 4719), True, 'import tensorflow as tf\n'), ((6133, 6181), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, average='weighted')\n", (6145, 6181), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc\n'), ((6206, 6250), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""weighted"""'}), "(y_true, y_pred, average='weighted')\n", (6214, 6250), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc\n'), ((6294, 6326), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6310, 6326), False, 'from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix, roc_curve, auc\n'), ((4633, 4643), 'tensorflow.log', 'tf.log', (['y_'], {}), '(y_)\n', (4639, 4643), True, 'import tensorflow as tf\n'), ((4959, 4988), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (4986, 4988), True, 'import tensorflow as tf\n'), ((5402, 5428), 'numpy.append', 'np.append', (['cost_history', 'c'], {}), '(cost_history, c)\n', (5411, 5428), True, 'import numpy as np\n'), 
((1702, 1722), 'numpy.dstack', 'np.dstack', (['[x, y, z]'], {}), '([x, y, z])\n', (1711, 1722), True, 'import numpy as np\n'), ((1764, 1803), 'scipy.stats.mode', 'stats.mode', (["data['activity'][start:end]"], {}), "(data['activity'][start:end])\n", (1774, 1803), False, 'from scipy import stats\n')] |
import aiohttp
import asyncio
import time
import argparse
import numpy as np
import pandas as pd
import os
# Parse CLI arguments: -t/--dir output directory, -s/--service service type.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--dir', action="store")
parser.add_argument('-s', '--service', action="store")
args = parser.parse_args()
result_dir = args.dir
# Create the result directory on first use.
if not os.path.exists(result_dir):
    os.makedirs(result_dir)
serviceType = args.service
# Map the service exposure type to the endpoint to benchmark (the names
# suggest Kubernetes service types).
# NOTE(review): if --service matches none of the values below, SERVER_URL
# stays undefined and get_action() fails with a NameError — confirm intended.
if serviceType == "local":
    SERVER_URL = "http://localhost:8080/tcp"
if serviceType == "clusterIP":
    SERVER_URL = 'http://172.16.17.32:31619/tcp'
if serviceType == "nodePort":
    SERVER_URL = 'http://192.168.3.11:31234/tcp'
if serviceType == "Host":
    SERVER_URL = 'http://192.168.3.11:8080/tcp'
async def get_action(seq_no):
    """POST a small payload to SERVER_URL and time the round trip.

    Returns a tuple ``(seq_no, delay)`` where ``delay`` is the elapsed time
    in seconds between issuing the request and fully reading the body.
    """
    started = time.monotonic()
    async with aiohttp.ClientSession() as session:
        print('get')
        async with session.post(SERVER_URL, data=b'ddd') as response:
            await response.text()
    finished = time.monotonic()
    return seq_no, finished - started
async def main(period):
    """Issue one request every `period` seconds and record timing samples.

    Collects up to 15000 rows of per-request (seq_no, delay) measurements
    and up to 15000 rows of per-tick loop bookkeeping, then returns both
    arrays as a tuple.
    """
    seq_no = 0
    pending_tasks = set()
    done_tasks = set()
    # SERVER_URL = 'http://192.168.3.11:31234'
    # SERVER_URL = 'http://192.168.3.11:8080'
    # df_array rows: (seq_no, delay) of each completed request
    df_array = np.empty([15000, 2])
    # uf_array rows: (tick start, client execution time, #pending, #done)
    uf_array = np.empty([15000, 4])
    ind = 0
    indu = 0
    current_time = time.monotonic()
    next_step = current_time
    while True:
        start_time = time.monotonic()
        next_step += period
        # Harvest results of the tasks that completed during the last wait.
        for task in done_tasks:
            seq_num, delay = task.result()
            df_array[ind] = [seq_num, delay]
            ind += 1
            if ind >= 15000:
                break
        if indu >= 15000:
            break
        seq_no += 1
        await asyncio.sleep(0.002)
        uf_array[indu] = [start_time, time.monotonic() - start_time, len(pending_tasks), len(done_tasks)]
        indu += 1
        # Fire off the next request without awaiting it directly.
        pending_tasks.add(asyncio.create_task(get_action(seq_no)))
        # Sleep until the next tick boundary, gathering finished tasks.
        (done_tasks, pending_tasks) = await asyncio.wait(
            pending_tasks,
            return_when=asyncio.ALL_COMPLETED,
            timeout=max(0, next_step - time.monotonic())
        )
    return df_array, uf_array
if __name__ == "__main__":
    # Run the benchmark loop with a 20 ms request period.
    loop = asyncio.get_event_loop()
    period = 0.02
    main_group = asyncio.gather(main(period))
    result = loop.run_until_complete(main_group)
    columns = ['seq_no', 'delay']
    ucolumns = ['time', 'clien_execution', 'pending', 'done']
    print(result)
    # result[0] is main()'s return value: (df_array, uf_array).
    df = pd.DataFrame(result[0][0], columns=columns)
    uf = pd.DataFrame(result[0][1], columns=ucolumns)
    # Persist both tables for later offline analysis.
    df.to_pickle(result_dir + "/tcp_{}_delay.pkl".format(serviceType))
    uf.to_pickle(result_dir + "/tcp_{}_execution.pkl".format(serviceType))
| [
"os.path.exists",
"aiohttp.ClientSession",
"os.makedirs",
"argparse.ArgumentParser",
"time.monotonic",
"numpy.empty",
"asyncio.sleep",
"pandas.DataFrame",
"asyncio.get_event_loop"
] | [((117, 142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (140, 142), False, 'import argparse\n'), ((307, 333), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (321, 333), False, 'import os\n'), ((339, 362), 'os.makedirs', 'os.makedirs', (['result_dir'], {}), '(result_dir)\n', (350, 362), False, 'import os\n'), ((743, 759), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (757, 759), False, 'import time\n'), ((1259, 1279), 'numpy.empty', 'np.empty', (['[15000, 2]'], {}), '([15000, 2])\n', (1267, 1279), True, 'import numpy as np\n'), ((1295, 1315), 'numpy.empty', 'np.empty', (['[15000, 4]'], {}), '([15000, 4])\n', (1303, 1315), True, 'import numpy as np\n'), ((1360, 1376), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1374, 1376), False, 'import time\n'), ((2338, 2362), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2360, 2362), False, 'import asyncio\n'), ((2599, 2642), 'pandas.DataFrame', 'pd.DataFrame', (['result[0][0]'], {'columns': 'columns'}), '(result[0][0], columns=columns)\n', (2611, 2642), True, 'import pandas as pd\n'), ((2652, 2696), 'pandas.DataFrame', 'pd.DataFrame', (['result[0][1]'], {'columns': 'ucolumns'}), '(result[0][1], columns=ucolumns)\n', (2664, 2696), True, 'import pandas as pd\n'), ((775, 798), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (796, 798), False, 'import aiohttp\n'), ((1443, 1459), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1457, 1459), False, 'import time\n'), ((967, 983), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (981, 983), False, 'import time\n'), ((1758, 1778), 'asyncio.sleep', 'asyncio.sleep', (['(0.002)'], {}), '(0.002)\n', (1771, 1778), False, 'import asyncio\n'), ((1817, 1833), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (1831, 1833), False, 'import time\n'), ((2213, 2229), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2227, 2229), False, 'import time\n')] |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
def autostring(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):
    """
        Format number (array) with given decimal precision.

        Definition
        ----------
        def autostring(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):

        There is a wrapper function for convenience with the short name 'astr' that calls autostring
          def astr(num, prec=0, zero=False, set_printoptions=False, pp=False, join=False, joinall=False, sep=' '):

        Input
        -----
        num                 number array

        Optional Input
        --------------
        prec                number of decimal places of formatted values
                            minimum field width for integers (default: 0)
        zero                if True, pad values with zeros rather than blanks (default: False)
        set_printoptions    if True, sets linewidth to the format times size of 1st dimension (default: False)
        pp                  shortcut for set_printoptions (default: False)
                            it will be checked for (pp | set_printoptions)
        join                if True, joins all individual strings of last (fastest) dimension into one string (default: False)
        joinall             if True, joins all individual strings into single string,
                            i.e. first flattens the array and then joins it (default: False, overwrites join)
        sep                 separator used when joining (default: space=' ')

        Output
        ------
        string (array) of formatted numbers

        Restrictions
        ------------
        None

        Examples
        --------
        >>> print(autostring(3.5967, 3))
        3.597

        >>> print(autostring(3.5967))
        4

        >>> print(autostring(3, 3))
          3

        >>> print(autostring(np.array([3.5967, 3.5964]), 3))
        ['3.597' '3.596']

        >>> print(autostring(np.array([3.59, 1.123456e12]), 3))
        ['3.590e+00' '1.123e+12']

        >>> print(autostring(np.array([3.59, 11.1234]), 3, zero=True))
        ['03.590' '11.123']

        >>> print(autostring(np.array([3, 11])))
        [' 3' '11']

        >>> print(autostring(np.array([3, 11]), 3))
        ['  3' ' 11']

        >>> print(autostring(np.zeros((2,2), dtype=float), 1))
        [['0.0' '0.0']
         ['0.0' '0.0']]

        >>> np.set_printoptions(threshold=10)
        >>> print(autostring(np.zeros((2,10), dtype=float), 1))
        [['0.0' '0.0' '0.0' ..., '0.0' '0.0' '0.0']
         ['0.0' '0.0' '0.0' ..., '0.0' '0.0' '0.0']]

        >>> print(autostring(np.zeros((2,10), dtype=float), 1, set_printoptions=True))
        [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
         ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]

        >>> print(autostring(np.zeros((2,10), dtype=float), 1, pp=True))
        [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
         ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]

        >>> print(autostring(np.array([3.5967, 3.5964]), 3, join=True))
        3.597 3.596

        >>> print(autostring(np.zeros((2,10), dtype=float), 1, join=True, sep=';'))
        ['0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0'
         '0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0']

        >>> print(autostring(np.reshape(np.arange(20,dtype=float),(2,10)), 1, joinall=True, sep=';'))
         0.0; 1.0; 2.0; 3.0; 4.0; 5.0; 6.0; 7.0; 8.0; 9.0;10.0;11.0;12.0;13.0;14.0;15.0;16.0;17.0;18.0;19.0

        >>> print(autostring(np.array([3, 11, np.inf])))
        ['  3' ' 11' 'inf']

        >>> print(autostring(np.array([3, 11, np.nan])))
        ['  3' ' 11' 'nan']

        >>> print(autostring(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
        ['  3' '-- ' 'nan']

        >>> print(autostring(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
        [' 3' '11' '--']


        License
        -------
        This file is part of the JAMS Python package.

        The JAMS Python package is free software: you can redistribute it and/or modify
        it under the terms of the GNU Lesser General Public License as published by
        the Free Software Foundation, either version 3 of the License, or
        (at your option) any later version.

        The JAMS Python package is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU Lesser General Public License for more details.

        You should have received a copy of the GNU Lesser General Public License
        along with the JAMS Python package (cf. gpl.txt and lgpl.txt).
        If not, see <http://www.gnu.org/licenses/>.

        Copyright 2011-2013 <NAME>


        History
        -------
        Written,  MC, Nov 2011 - from autostring.pro
        Modified, MC, May 2012 - pp
                  MC, Dec 2012 - special treatment of -0.0 on output
                  MC, Feb 2013 - nan, inf and masked arrays
                  MC, Feb 2013 - ported to Python 3
                  MC, Oct 2014 - isinstance
                  MC, Dec 2014 - tuple input
                  Review       - replaced removed NumPy aliases np.float/np.int
                                 and fixed the lexicographic version check
    """
    #
    # Check input
    if isinstance(num, (list, tuple)): num = np.array(num)
    isarr = np.ndim(num)
    if (isarr > 2):
        print("AUTOSTRING WARNING: autostring only works with scalars, 1D- and 2D arrays: return original array.")
        return num
    # Only treat int and float
    if (isarr==0):
        try:
            typ = num.dtype
        except AttributeError:
            if (type(num) == float):
                typ = np.float64
            elif (type(num) == int):
                typ = np.int32
            else:
                typ = type(num)
    else:
        typ = num.dtype
    try:
        lfloat = np.float128    # Mac/*nix
    except AttributeError:
        try:
            lfloat = np.float96 # Windows
        except AttributeError:
            lfloat = np.float64
    # Bug fix: the original compared version *strings* lexicographically,
    # which misclassifies e.g. "1.10" as older than "1.6".
    try:
        newnumpy = tuple(int(v) for v in np.__version__.split('.')[:2]) >= (1, 6)
    except ValueError:  # non-numeric parts (dev/rc builds): assume modern
        newnumpy = True
    if newnumpy:
        if (typ in [np.float16, np.float32, np.float64, lfloat]):
            isfloat = True
        elif (typ in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]):
            isfloat = False
        else:
            print("AUTOSTRING WARNING: autostring cannot work with input type: return original array.")
            return num
    else:
        if (typ in [np.float32, np.float64, lfloat]):
            isfloat = True
        elif (typ in [np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64]):
            isfloat = False
        else:
            print("AUTOSTRING WARNING: autostring cannot work with input type: return original array.")
            return num
    # Scalar to array if necessary; Special treatment of -0.0
    if (isarr==0):
        if (num == 0):
            num = np.abs(num)
    else:
        if isinstance(num, np.ma.masked_array):
            num = np.ma.where(num == 0, 0, num)
        else:
            num = np.where(num == 0, 0, num)
    # Zero padding
    if zero:
        nix = '0'
    else:
        nix = ''
    #
    # If we deal with an array of numbers we take the largest for the format
    # deal with inf and nan
    hasmask = False
    hasnan  = False
    if (isarr==0):
        if np.isnan(num): return 'nan'
        if np.isinf(num): return 'inf'
        abs_num = np.ma.abs(num)
        # leave room for the decimal point and the negative sign, if any
        if (num < 0.):
            num_sign_chars = 1
        else:
            num_sign_chars = 0
    else:
        if isinstance(num, np.ma.masked_array):
            if np.sum(num.mask) > 0: hasmask = True
            if num.count() > np.ma.sum(np.isfinite(num)): hasnan = True
        else:
            if num.size > np.sum(np.isfinite(num)): hasnan = True
        inum = np.ma.array(num, mask=~np.isfinite(num), keep_mask=True)
        abs_num = np.ma.max(np.ma.abs(inum))
        # leave room for the decimal point and the negative sign, if any
        if (np.ma.min(inum) < 0.):
            num_sign_chars = 1
        else:
            num_sign_chars = 0
    #
    # Floating point
    if isfloat: # number is a float, more or less
        if abs_num >= 1.e6:
            num_prefix_chars  = 1
            num_sci_not_chars = 4
            format_type       = 'e'
        elif ((abs_num < 1.e6) & (abs_num >= 1.)):
            nprefix = np.int_(np.log10(np.int32(abs_num)))+1
            # special treatment: the output prefix digits could
            # be one digit longer as the input prefix digits: e.g. 99.99 => 100.0
            val = np.around(abs_num*(10.**prec))/(10.**prec)
            nprefixval = np.int_(np.log10(val))+1
            # Bug fix: np.int was removed in NumPy >= 1.24; the builtin int
            # is exactly what the deprecated alias pointed to.
            nprefix = np.amax(np.array([nprefix,nprefixval], dtype=int))
            num_prefix_chars  = nprefix
            num_sci_not_chars = 0
            format_type       = 'f'
        elif ((abs_num < 1.) & (abs_num >= 1.e-3)):
            num_prefix_chars  = 1
            num_sci_not_chars = 0
            format_type       = 'f'
        elif (abs_num == 0):
            num_prefix_chars  = 1
            num_sci_not_chars = 0
            format_type       = 'f'
        else:
            num_prefix_chars  = 1
            num_sci_not_chars = 4
            format_type       = 'e'
        #
        num_postfix_chars = prec
        num_total_chars   = num_sign_chars + num_prefix_chars + 1 + num_postfix_chars + num_sci_not_chars
        if (prec == 0): # no dot if prec=0
            num_total_chars -= 1
        if hasmask: # need space for --
            if num_total_chars < 2: num_total_chars = 2
        if hasnan:  # need space for nan or inf
            if num_total_chars < 3: num_total_chars = 3
        format_string = ("{0:s}{1:s}{2:d}{3:s}{4:d}{5:s}{6:s}".format('{0:', nix, num_total_chars,
                                                                      '.', num_postfix_chars, format_type, '}'))
    else: # number is an integer
        format_type = 'd'
        if abs_num != 0:
            num_digits = np.int_(np.log10(abs_num))+1
        else:
            num_digits = 1
        num_total_chars = np.maximum(num_digits + num_sign_chars, prec)
        if hasmask: # need space for --
            if num_total_chars < 2: num_total_chars = 2
        if hasnan:  # need space for nan or inf
            if num_total_chars < 3: num_total_chars = 3
        format_string = ("{0:s}{1:s}{2:d}{3:s}{4:s}".format('{0:', nix, num_total_chars, format_type, '}'))
    #
    if (isarr == 0):
        out = format_string.format(num)
        # Special treatment of -0.0 (builtin float; np.float was removed
        # in NumPy >= 1.24)
        if float(out) == 0:
            out = format_string.format(0)
    else:
        fnum = num.flatten()
        nnum = fnum.size
        import sys
        if sys.hexversion > int('0x3000000',base=16):
            styp = 'U{0:d}'.format(num_total_chars)
        else:
            styp = 'S{0:d}'.format(num_total_chars)
        out = np.empty(nnum, dtype=styp)
        for i in range(nnum):
            if str(fnum[i]) == '--':
                sformat_string = ("{0:s}{1:d}s{2:s}".format('{0:', num_total_chars, '}'))
                out[i] = sformat_string.format('--')
            else:
                out[i] = format_string.format(fnum[i])
                # Special treatment of -0.0
                if float(out[i]) == 0:
                    out[i] = format_string.format(0)
        out = np.reshape(out, num.shape)
        if (set_printoptions | pp):
            # num_total_chars+3 for '' and space, +isarr for []
            np.set_printoptions(linewidth=num.shape[-1]*(num_total_chars+3)+isarr, threshold=nnum+1)
        if (join | joinall): # There should be reduction routines in numpy
            if ((isarr == 1) | ((isarr==2) & joinall)):
                if (isarr == 2):
                    out = out.flatten()
                for i in range(out.size):
                    if (i==0):
                        outc = out[i]
                    else:
                        outc = outc+sep+out[i]
            else:
                if sys.hexversion > int('0x3000000',base=16):
                    sform = 'U{0:d}'.format((len(out[0,0])+len(sep))*out.shape[1])
                else:
                    sform = 'S{0:d}'.format((len(out[0,0])+len(sep))*out.shape[1])
                outc = np.zeros(out.shape[0], dtype=sform)
                for j in range(out.shape[0]):
                    for i in range(out.shape[1]):
                        if (i==0):
                            outc[j] = out[j,i]
                        else:
                            outc[j] = outc[j]+sep+out[j,i]
            out = outc
    # return formatted string
    return out
def astr(num, prec=0, zero=False, set_printoptions=False, pp=True, join=False, joinall=False, sep=' '):
    """
    Convenience wrapper around ``autostring`` that enables pretty printing
    by default (``pp=True``); every other argument is passed through
    unchanged.  The wrapped function has the signature::

        autostring(num, prec=0, zero=False, set_printoptions=False,
                   pp=False, join=False, joinall=False, sep=' ')

    Examples
    --------
    >>> print(astr(3.5967, 3))
    3.597
    >>> print(astr(3.5967))
    4
    >>> print(astr(3, 3))
    3
    >>> print(astr(np.array([3.5967, 3.5964]), 3))
    ['3.597' '3.596']
    >>> print(astr(np.array([3.59, 1.123456e12]), 3))
    ['3.590e+00' '1.123e+12']
    >>> print(astr(np.array([3.59, 11.1234]), 3, zero=True))
    ['03.590' '11.123']
    >>> print(astr(np.array([3, 11])))
    [' 3' '11']
    >>> print(astr(np.array([3, 11]), 3))
    ['  3' ' 11']
    >>> print(astr(np.zeros((2,2), dtype=np.float), 1))
    [['0.0' '0.0']
     ['0.0' '0.0']]
    >>> np.set_printoptions(threshold=10)
    >>> print(astr(np.zeros((2,10), dtype=np.float), 1))
    [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
     ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
    >>> print(astr(np.zeros((2,10), dtype=np.float), 1, set_printoptions=True))
    [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
     ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
    >>> print(astr(np.zeros((2,10), dtype=np.float), 1, pp=True))
    [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
     ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
    >>> print(astr(np.zeros((2,10), dtype=np.float), 1, set_printoptions=False, pp=True))
    [['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']
     ['0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0' '0.0']]
    >>> print(astr(np.array([3.5967, 3.5964]), 3, join=True))
    3.597 3.596
    >>> print(astr(np.zeros((2,10), dtype=np.float), 1, join=True, sep=';'))
    ['0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0'
     '0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0;0.0']
    >>> print(astr(np.reshape(np.arange(20,dtype=np.float),(2,10)), 1, joinall=True, sep=';'))
     0.0; 1.0; 2.0; 3.0; 4.0; 5.0; 6.0; 7.0; 8.0; 9.0;10.0;11.0;12.0;13.0;14.0;15.0;16.0;17.0;18.0;19.0
    >>> print(astr(np.array([3, 11, np.inf])))
    ['  3' ' 11' 'inf']
    >>> print(astr(np.array([3, 11, np.nan])))
    ['  3' ' 11' 'nan']
    >>> print(astr(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
    ['  3' '-- ' 'nan']
    >>> print(astr(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
    [' 3' '11' '--']
    """
    # Pure delegation: only the default of ``pp`` differs from autostring.
    return autostring(num, prec=prec, zero=zero, set_printoptions=set_printoptions,
                      pp=pp, join=join, joinall=joinall, sep=sep)
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    # NORMALIZE_WHITESPACE makes the expected/actual comparison tolerant
    # of differing runs of whitespace in the printed arrays.
    import doctest
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
    # print(autostring(np.array([3, 11, np.nan])))
    # #['  3' ' 11' 'nan']
    # print(autostring(np.ma.array([3, 11, np.nan], mask=[False,True,False])))
    # #['  3' '-- ' 'nan']
    # print(autostring(np.ma.array([3, 11, np.nan], mask=[False,False,True])))
    # #[' 3' ' 11' '-- ']
| [
"numpy.log10",
"numpy.int32",
"numpy.array",
"numpy.isfinite",
"numpy.ma.min",
"numpy.reshape",
"numpy.where",
"numpy.ndim",
"doctest.testmod",
"numpy.empty",
"numpy.ma.abs",
"numpy.maximum",
"numpy.isinf",
"numpy.abs",
"numpy.ma.where",
"numpy.isnan",
"numpy.around",
"numpy.set_pr... | [((5953, 5965), 'numpy.ndim', 'np.ndim', (['num'], {}), '(num)\n', (5960, 5965), True, 'import numpy as np\n'), ((16341, 16398), 'doctest.testmod', 'doctest.testmod', ([], {'optionflags': 'doctest.NORMALIZE_WHITESPACE'}), '(optionflags=doctest.NORMALIZE_WHITESPACE)\n', (16356, 16398), False, 'import doctest\n'), ((5927, 5940), 'numpy.array', 'np.array', (['num'], {}), '(num)\n', (5935, 5940), True, 'import numpy as np\n'), ((7984, 7997), 'numpy.isnan', 'np.isnan', (['num'], {}), '(num)\n', (7992, 7997), True, 'import numpy as np\n'), ((8023, 8036), 'numpy.isinf', 'np.isinf', (['num'], {}), '(num)\n', (8031, 8036), True, 'import numpy as np\n'), ((8069, 8083), 'numpy.ma.abs', 'np.ma.abs', (['num'], {}), '(num)\n', (8078, 8083), True, 'import numpy as np\n'), ((10872, 10917), 'numpy.maximum', 'np.maximum', (['(num_digits + num_sign_chars)', 'prec'], {}), '(num_digits + num_sign_chars, prec)\n', (10882, 10917), True, 'import numpy as np\n'), ((11670, 11696), 'numpy.empty', 'np.empty', (['nnum'], {'dtype': 'styp'}), '(nnum, dtype=styp)\n', (11678, 11696), True, 'import numpy as np\n'), ((12089, 12115), 'numpy.reshape', 'np.reshape', (['out', 'num.shape'], {}), '(out, num.shape)\n', (12099, 12115), True, 'import numpy as np\n'), ((7549, 7560), 'numpy.abs', 'np.abs', (['num'], {}), '(num)\n', (7555, 7560), True, 'import numpy as np\n'), ((7637, 7666), 'numpy.ma.where', 'np.ma.where', (['(num == 0)', '(0)', 'num'], {}), '(num == 0, 0, num)\n', (7648, 7666), True, 'import numpy as np\n'), ((7699, 7725), 'numpy.where', 'np.where', (['(num == 0)', '(0)', 'num'], {}), '(num == 0, 0, num)\n', (7707, 7725), True, 'import numpy as np\n'), ((8620, 8635), 'numpy.ma.abs', 'np.ma.abs', (['inum'], {}), '(inum)\n', (8629, 8635), True, 'import numpy as np\n'), ((8722, 8737), 'numpy.ma.min', 'np.ma.min', (['inum'], {}), '(inum)\n', (8731, 8737), True, 'import numpy as np\n'), ((11339, 11352), 'numpy.float', 'np.float', (['out'], {}), '(out)\n', (11347, 11352), True, 
'import numpy as np\n'), ((12228, 12328), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'linewidth': '(num.shape[-1] * (num_total_chars + 3) + isarr)', 'threshold': '(nnum + 1)'}), '(linewidth=num.shape[-1] * (num_total_chars + 3) + isarr,\n threshold=nnum + 1)\n', (12247, 12328), True, 'import numpy as np\n'), ((8329, 8345), 'numpy.sum', 'np.sum', (['num.mask'], {}), '(num.mask)\n', (8335, 8345), True, 'import numpy as np\n'), ((12996, 13031), 'numpy.zeros', 'np.zeros', (['out.shape[0]'], {'dtype': 'sform'}), '(out.shape[0], dtype=sform)\n', (13004, 13031), True, 'import numpy as np\n'), ((8405, 8421), 'numpy.isfinite', 'np.isfinite', (['num'], {}), '(num)\n', (8416, 8421), True, 'import numpy as np\n'), ((8485, 8501), 'numpy.isfinite', 'np.isfinite', (['num'], {}), '(num)\n', (8496, 8501), True, 'import numpy as np\n'), ((8558, 8574), 'numpy.isfinite', 'np.isfinite', (['num'], {}), '(num)\n', (8569, 8574), True, 'import numpy as np\n'), ((9320, 9353), 'numpy.around', 'np.around', (['(abs_num * 10.0 ** prec)'], {}), '(abs_num * 10.0 ** prec)\n', (9329, 9353), True, 'import numpy as np\n'), ((9460, 9505), 'numpy.array', 'np.array', (['[nprefix, nprefixval]'], {'dtype': 'np.int'}), '([nprefix, nprefixval], dtype=np.int)\n', (9468, 9505), True, 'import numpy as np\n'), ((10784, 10801), 'numpy.log10', 'np.log10', (['abs_num'], {}), '(abs_num)\n', (10792, 10801), True, 'import numpy as np\n'), ((11999, 12015), 'numpy.float', 'np.float', (['out[i]'], {}), '(out[i])\n', (12007, 12015), True, 'import numpy as np\n'), ((9403, 9416), 'numpy.log10', 'np.log10', (['val'], {}), '(val)\n', (9411, 9416), True, 'import numpy as np\n'), ((9120, 9137), 'numpy.int32', 'np.int32', (['abs_num'], {}), '(abs_num)\n', (9128, 9137), True, 'import numpy as np\n')] |
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 <NAME> and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
from __future__ import absolute_import
import sys
import numpy as np
from ..common import ut, TestCase
from h5py.highlevel import File, Group, Dataset
import h5py
class BaseDataset(TestCase):
    """
    Shared fixture for the dimension-scale tests.

    ``data`` is a 3-d dataset with axes ordered [z, y, x]: the z axis is
    labelled but has no attached scale, the y axis has one attached scale
    and no label, and the x axis is labelled and carries two attached
    scales.  ``data2`` is a 3-d dataset with no dimension scales at all.
    """
    def setUp(self):
        # One fresh temporary HDF5 file per test.
        self.f = f = File(self.mktemp(), 'w')
        f['data'] = np.ones((4, 3, 2), 'f')
        f['data2'] = np.ones((4, 3, 2), 'f')
        # x axis (dim 2): one anonymous scale plus one named scale.
        f['x1'] = np.ones(2, 'f')
        h5py.h5ds.set_scale(f['x1'].id)
        h5py.h5ds.attach_scale(f['data'].id, f['x1'].id, 2)
        f['x2'] = np.ones(2, 'f')
        h5py.h5ds.set_scale(f['x2'].id, b'x2 name')
        h5py.h5ds.attach_scale(f['data'].id, f['x2'].id, 2)
        # y axis (dim 1): a single named scale, no label.
        f['y1'] = np.ones(3, 'f')
        h5py.h5ds.set_scale(f['y1'].id, b'y1 name')
        h5py.h5ds.attach_scale(f['data'].id, f['y1'].id, 1)
        # z axis (dim 0): labelled, but no scale is ever attached to it.
        f['z1'] = np.ones(4, 'f')
        h5py.h5ds.set_label(f['data'].id, 0, b'z')
        h5py.h5ds.set_label(f['data'].id, 2, b'x')
    def tearDown(self):
        if self.f:
            self.f.close()
class TestH5DSBindings(BaseDataset):
    """
    Exercise the low-level h5py.h5ds dimension-scale bindings directly.
    """
    def test_create_dimensionscale(self):
        """ A dataset converted to a scale is recognised as one """
        x1 = self.f['x1']
        self.assertTrue(h5py.h5ds.is_scale(x1.id))
        # The anonymous scale reports an empty name...
        self.assertEqual(h5py.h5ds.get_scale_name(x1.id), b'')
        self.assertEqual(x1.attrs['CLASS'], b"DIMENSION_SCALE")
        # ...while the named scale reports the name it was given.
        self.assertEqual(h5py.h5ds.get_scale_name(self.f['x2'].id), b'x2 name')
    def test_attach_dimensionscale(self):
        data_id = self.f['data'].id
        x1_id = self.f['x1'].id
        self.assertTrue(h5py.h5ds.is_attached(data_id, x1_id, 2))
        self.assertFalse(h5py.h5ds.is_attached(data_id, x1_id, 1))
        # Per the BaseDataset fixture: no scale on z, one on y, two on x.
        for dim, count in ((0, 0), (1, 1), (2, 2)):
            self.assertEqual(h5py.h5ds.get_num_scales(data_id, dim), count)
    def test_detach_dimensionscale(self):
        data_id = self.f['data'].id
        x1_id = self.f['x1'].id
        self.assertTrue(h5py.h5ds.is_attached(data_id, x1_id, 2))
        h5py.h5ds.detach_scale(data_id, x1_id, 2)
        self.assertFalse(h5py.h5ds.is_attached(data_id, x1_id, 2))
        # Only the named x2 scale remains on the x axis.
        self.assertEqual(h5py.h5ds.get_num_scales(data_id, 2), 1)
    # TODO: update condition once the bug is fixed upstream
    @ut.skipUnless(
        h5py.version.hdf5_version_tuple > (2, 0, 0),
        "Reading non-existent label segfaults"
    )
    def test_label_dimensionscale(self):
        data_id = self.f['data'].id
        self.assertEqual(h5py.h5ds.get_label(data_id, 0), b'z')
        self.assertEqual(h5py.h5ds.get_label(data_id, 1), b'')
        self.assertEqual(h5py.h5ds.get_label(data_id, 2), b'x')
    def test_iter_dimensionscales(self):
        def match_x2(dsid):
            # Returning a non-None value stops h5ds.iterate early.
            if h5py.h5ds.get_scale_name(dsid) == b'x2 name':
                return dsid
        found = h5py.h5ds.iterate(self.f['data'].id, 2, match_x2, 0)
        self.assertEqual(h5py.h5ds.get_scale_name(found), b'x2 name')
class TestDimensionManager(BaseDataset):
    """
    Tests for the high-level ``Dataset.dims`` proxy object itself.
    """
    def test_create_scale(self):
        # Renaming a dataset that is already a scale...
        self.f['data'].dims.create_scale(self.f['x1'], b'foobar')
        self.assertEqual(self.f['data'].dims[2]['foobar'], self.f['x1'])
        # ...and turning a plain dataset into a brand-new scale.
        self.f['data'].dims.create_scale(self.f['data2'], b'foobaz')
        self.f['data'].dims[2].attach_scale(self.f['data2'])
        self.assertEqual(self.f['data'].dims[2]['foobaz'], self.f['data2'])
    def test_get_dimension(self):
        # Only axes 0..2 exist on a 3-d dataset.
        with self.assertRaises(IndexError):
            self.f['data'].dims[3]
    def test_len(self):
        # Both datasets are 3-dimensional regardless of attached scales.
        for name in ('data', 'data2'):
            self.assertEqual(len(self.f[name].dims), 3)
    def test_iter(self):
        dims = self.f['data'].dims
        self.assertEqual(list(dims), [dims[0], dims[1], dims[2]])
class TestDimensionsHighLevel(BaseDataset):
    """
    Tests for individual dimension proxies: labels, scale attachment and
    the mapping-style access to attached scales.
    """
    def test_len(self):
        # 'data' carries 0, 1 and 2 scales on z, y and x respectively.
        dims = self.f['data'].dims
        self.assertEqual(len(dims[0]), 0)
        self.assertEqual(len(dims[1]), 1)
        self.assertEqual(len(dims[2]), 2)
        # 'data2' has no scales on any axis.
        for dim in self.f['data2'].dims:
            self.assertEqual(len(dim), 0)
    def test_get_label(self):
        dims = self.f['data'].dims
        self.assertEqual(dims[2].label, 'x')
        self.assertEqual(dims[1].label, '')
        self.assertEqual(dims[0].label, 'z')
        # 'data2' was never labelled.
        for dim in self.f['data2'].dims:
            self.assertEqual(dim.label, '')
    def test_set_label(self):
        dims = self.f['data'].dims
        dims[0].label = 'foo'
        # Only dimension 0 changes; the others keep their fixture labels.
        self.assertEqual(dims[2].label, 'x')
        self.assertEqual(dims[1].label, '')
        self.assertEqual(dims[0].label, 'foo')
    def test_detach_scale(self):
        dims = self.f['data'].dims
        dims[2].detach_scale(self.f['x1'])
        self.assertEqual(len(dims[2]), 1)
        self.assertEqual(dims[2][0], self.f['x2'])
        dims[2].detach_scale(self.f['x2'])
        self.assertEqual(len(dims[2]), 0)
    def test_attach_scale(self):
        # Clone x2's data and attach the copy as a third x-axis scale.
        self.f['x3'] = self.f['x2'][...]
        dims = self.f['data'].dims
        dims[2].attach_scale(self.f['x3'])
        self.assertEqual(len(dims[2]), 3)
        self.assertEqual(dims[2][2], self.f['x3'])
    def test_get_dimension_scale(self):
        dims = self.f['data'].dims
        self.assertEqual(dims[2][0], self.f['x1'])
        # Positional access fails where no scales are attached.
        with self.assertRaises(RuntimeError):
            self.f['data2'].dims[2][0], self.f['x2']
        # Lookup by scale name; the anonymous scale has the empty name.
        self.assertEqual(dims[2][''], self.f['x1'])
        self.assertEqual(dims[2]['x2 name'], self.f['x2'])
    def test_get_items(self):
        expected = [('', self.f['x1']), ('x2 name', self.f['x2'])]
        self.assertEqual(self.f['data'].dims[2].items(), expected)
    def test_get_keys(self):
        self.assertEqual(self.f['data'].dims[2].keys(), ['', 'x2 name'])
    def test_get_values(self):
        self.assertEqual(self.f['data'].dims[2].values(),
                         [self.f['x1'], self.f['x2']])
    def test_iter(self):
        self.assertEqual(list(self.f['data'].dims[2]), ['', 'x2 name'])
    def test_repr(self):
        self.assertEqual(repr(self.f['data'].dims[2])[1:16], '"x" dimension 2')
    def test_attributes(self):
        # Copying DIMENSION_LIST wholesale carries the scale references
        # over to 'data2'.
        self.f["data2"].attrs["DIMENSION_LIST"] = self.f["data"].attrs[
            "DIMENSION_LIST"]
        dims = self.f['data2'].dims
        self.assertEqual(len(dims[0]), 0)
        self.assertEqual(len(dims[1]), 1)
        self.assertEqual(len(dims[2]), 2)
| [
"h5py.h5ds.is_attached",
"h5py.h5ds.get_scale_name",
"numpy.ones",
"h5py.h5ds.attach_scale",
"h5py.h5ds.get_label",
"h5py.h5ds.is_scale",
"h5py.h5ds.detach_scale",
"h5py.h5ds.iterate",
"h5py.h5ds.get_num_scales",
"h5py.h5ds.set_label",
"h5py.h5ds.set_scale"
] | [((893, 916), 'numpy.ones', 'np.ones', (['(4, 3, 2)', '"""f"""'], {}), "((4, 3, 2), 'f')\n", (900, 916), True, 'import numpy as np\n'), ((943, 966), 'numpy.ones', 'np.ones', (['(4, 3, 2)', '"""f"""'], {}), "((4, 3, 2), 'f')\n", (950, 966), True, 'import numpy as np\n'), ((990, 1005), 'numpy.ones', 'np.ones', (['(2)', '"""f"""'], {}), "(2, 'f')\n", (997, 1005), True, 'import numpy as np\n'), ((1016, 1052), 'h5py.h5ds.set_scale', 'h5py.h5ds.set_scale', (["self.f['x1'].id"], {}), "(self.f['x1'].id)\n", (1035, 1052), False, 'import h5py\n'), ((1061, 1122), 'h5py.h5ds.attach_scale', 'h5py.h5ds.attach_scale', (["self.f['data'].id", "self.f['x1'].id", '(2)'], {}), "(self.f['data'].id, self.f['x1'].id, 2)\n", (1083, 1122), False, 'import h5py\n'), ((1146, 1161), 'numpy.ones', 'np.ones', (['(2)', '"""f"""'], {}), "(2, 'f')\n", (1153, 1161), True, 'import numpy as np\n'), ((1172, 1220), 'h5py.h5ds.set_scale', 'h5py.h5ds.set_scale', (["self.f['x2'].id", "b'x2 name'"], {}), "(self.f['x2'].id, b'x2 name')\n", (1191, 1220), False, 'import h5py\n'), ((1229, 1290), 'h5py.h5ds.attach_scale', 'h5py.h5ds.attach_scale', (["self.f['data'].id", "self.f['x2'].id", '(2)'], {}), "(self.f['data'].id, self.f['x2'].id, 2)\n", (1251, 1290), False, 'import h5py\n'), ((1314, 1329), 'numpy.ones', 'np.ones', (['(3)', '"""f"""'], {}), "(3, 'f')\n", (1321, 1329), True, 'import numpy as np\n'), ((1340, 1388), 'h5py.h5ds.set_scale', 'h5py.h5ds.set_scale', (["self.f['y1'].id", "b'y1 name'"], {}), "(self.f['y1'].id, b'y1 name')\n", (1359, 1388), False, 'import h5py\n'), ((1397, 1458), 'h5py.h5ds.attach_scale', 'h5py.h5ds.attach_scale', (["self.f['data'].id", "self.f['y1'].id", '(1)'], {}), "(self.f['data'].id, self.f['y1'].id, 1)\n", (1419, 1458), False, 'import h5py\n'), ((1482, 1497), 'numpy.ones', 'np.ones', (['(4)', '"""f"""'], {}), "(4, 'f')\n", (1489, 1497), True, 'import numpy as np\n'), ((1509, 1556), 'h5py.h5ds.set_label', 'h5py.h5ds.set_label', (["self.f['data'].id", '(0)', "b'z'"], {}), 
"(self.f['data'].id, 0, b'z')\n", (1528, 1556), False, 'import h5py\n'), ((1565, 1612), 'h5py.h5ds.set_label', 'h5py.h5ds.set_label', (["self.f['data'].id", '(2)', "b'x'"], {}), "(self.f['data'].id, 2, b'x')\n", (1584, 1612), False, 'import h5py\n'), ((2840, 2901), 'h5py.h5ds.detach_scale', 'h5py.h5ds.detach_scale', (["self.f['data'].id", "self.f['x1'].id", '(2)'], {}), "(self.f['data'].id, self.f['x1'].id, 2)\n", (2862, 2901), False, 'import h5py\n'), ((3736, 3784), 'h5py.h5ds.iterate', 'h5py.h5ds.iterate', (["self.f['data'].id", '(2)', 'func', '(0)'], {}), "(self.f['data'].id, 2, func, 0)\n", (3753, 3784), False, 'import h5py\n'), ((1930, 1965), 'h5py.h5ds.is_scale', 'h5py.h5ds.is_scale', (["self.f['x1'].id"], {}), "(self.f['x1'].id)\n", (1948, 1965), False, 'import h5py\n'), ((1992, 2033), 'h5py.h5ds.get_scale_name', 'h5py.h5ds.get_scale_name', (["self.f['x1'].id"], {}), "(self.f['x1'].id)\n", (2016, 2033), False, 'import h5py\n'), ((2139, 2180), 'h5py.h5ds.get_scale_name', 'h5py.h5ds.get_scale_name', (["self.f['x2'].id"], {}), "(self.f['x2'].id)\n", (2163, 2180), False, 'import h5py\n'), ((2274, 2334), 'h5py.h5ds.is_attached', 'h5py.h5ds.is_attached', (["self.f['data'].id", "self.f['x1'].id", '(2)'], {}), "(self.f['data'].id, self.f['x1'].id, 2)\n", (2295, 2334), False, 'import h5py\n'), ((2387, 2447), 'h5py.h5ds.is_attached', 'h5py.h5ds.is_attached', (["self.f['data'].id", "self.f['x1'].id", '(1)'], {}), "(self.f['data'].id, self.f['x1'].id, 1)\n", (2408, 2447), False, 'import h5py\n'), ((2474, 2520), 'h5py.h5ds.get_num_scales', 'h5py.h5ds.get_num_scales', (["self.f['data'].id", '(0)'], {}), "(self.f['data'].id, 0)\n", (2498, 2520), False, 'import h5py\n'), ((2550, 2596), 'h5py.h5ds.get_num_scales', 'h5py.h5ds.get_num_scales', (["self.f['data'].id", '(1)'], {}), "(self.f['data'].id, 1)\n", (2574, 2596), False, 'import h5py\n'), ((2626, 2672), 'h5py.h5ds.get_num_scales', 'h5py.h5ds.get_num_scales', (["self.f['data'].id", '(2)'], {}), "(self.f['data'].id, 2)\n", 
(2650, 2672), False, 'import h5py\n'), ((2757, 2817), 'h5py.h5ds.is_attached', 'h5py.h5ds.is_attached', (["self.f['data'].id", "self.f['x1'].id", '(2)'], {}), "(self.f['data'].id, self.f['x1'].id, 2)\n", (2778, 2817), False, 'import h5py\n'), ((2940, 3000), 'h5py.h5ds.is_attached', 'h5py.h5ds.is_attached', (["self.f['data'].id", "self.f['x1'].id", '(2)'], {}), "(self.f['data'].id, self.f['x1'].id, 2)\n", (2961, 3000), False, 'import h5py\n'), ((3040, 3086), 'h5py.h5ds.get_num_scales', 'h5py.h5ds.get_num_scales', (["self.f['data'].id", '(2)'], {}), "(self.f['data'].id, 2)\n", (3064, 3086), False, 'import h5py\n'), ((3348, 3389), 'h5py.h5ds.get_label', 'h5py.h5ds.get_label', (["self.f['data'].id", '(0)'], {}), "(self.f['data'].id, 0)\n", (3367, 3389), False, 'import h5py\n'), ((3422, 3463), 'h5py.h5ds.get_label', 'h5py.h5ds.get_label', (["self.f['data'].id", '(1)'], {}), "(self.f['data'].id, 1)\n", (3441, 3463), False, 'import h5py\n'), ((3495, 3536), 'h5py.h5ds.get_label', 'h5py.h5ds.get_label', (["self.f['data'].id", '(2)'], {}), "(self.f['data'].id, 2)\n", (3514, 3536), False, 'import h5py\n'), ((3628, 3658), 'h5py.h5ds.get_scale_name', 'h5py.h5ds.get_scale_name', (['dsid'], {}), '(dsid)\n', (3652, 3658), False, 'import h5py\n'), ((3810, 3839), 'h5py.h5ds.get_scale_name', 'h5py.h5ds.get_scale_name', (['res'], {}), '(res)\n', (3834, 3839), False, 'import h5py\n')] |
"""Driver for gradient calculations."""
__authors__ = "<NAME>, <NAME>, <NAME>"
__copyright__ = "(c) 2011, Universite de Montreal"
__license__ = "3-clause BSD License"
__contact__ = "theano-dev <<EMAIL>>"
__docformat__ = "restructuredtext en"
import __builtin__
import logging
import warnings
_logger = logging.getLogger('theano.gradient')
import sys
import numpy # for numeric_grad
import theano
from theano.raise_op import Raise
from theano import gof
from theano.gof import Variable
from theano.gof.python25 import all
import theano.gof.utils
# Error messages raised when an Op's grad() implementation misbehaves:
# returning something other than a list/tuple, or the wrong count.
_msg_retType = 'op.grad(...) returned a non-list'
_msg_badlen = 'op.grad(...) returned wrong number of gradients'
def format_as(use_list, use_tuple, outputs):
    """
    Coerce `outputs` to the container kind requested by the flags.

    When ``use_list`` (resp. ``use_tuple``) is True, the result is always
    a list (resp. tuple); a bare value is wrapped in a one-element
    container.  When both flags are False, a one-element list/tuple is
    unwrapped to its single element and anything else is returned as-is.
    At most one of the two flags may be set.
    """
    assert not (use_list and use_tuple), \
        "Both flags cannot be simultaneously True"
    is_container = isinstance(outputs, (list, tuple))
    if use_list or use_tuple:
        wrapper = list if use_list else tuple
        if is_container:
            return wrapper(outputs)
        return wrapper([outputs])
    if is_container:
        assert len(outputs) == 1, \
            "Wrong arguments. Expected a one element list"
        return outputs[0]
    return outputs
def grad_sources_inputs(sources, graph_inputs, warn_type=True):
    """
    A gradient source is a pair (``v``, ``g_v``), in which ``v`` is
    a `Variable`, and ``g_v`` is a `Variable` that is a gradient wrt
    ``v``. More specifically, ``g_v`` is the gradient of an external
    scalar cost, ``cost`` (that is not explicitly used), wrt ``v``.
    This function traverses the graph backward from the ``r`` sources,
    calling ``op.grad(...)`` for all ops with some non-None gradient
    on an output, to compute gradients of ``cost`` wrt intermediate
    variables and ``graph_inputs``.
    The ``op.grad(...)`` functions are called like this:
    .. code-block:: python
        op.grad(op.inputs[:], [total_gradient(v) for v in op.outputs])
    This call to ``op.grad`` should return a list or tuple: one symbolic
    gradient per input. These gradients represent the gradients of
    the same implicit ``cost`` mentionned above, wrt ``op.inputs``. Note
    that this is **not** the same as the gradient of ``op.outputs`` wrt
    ``op.inputs``.
    If ``op`` has a single input, then ``op.grad`` should return a list
    or tuple of length 1.
    For each input wrt to which ``op`` is not differentiable, it should
    return ``None`` instead of a `Variable` instance.
    If a source ``r`` receives a gradient from another source ``r2``,
    then the effective gradient on ``r`` is the sum of both gradients.
    :type sources: list of pairs of Variable: (v, gradient-on-v) to
        initialize the total_gradient dictionary
    :param sources: gradients to back-propagate using chain rule
    :type graph_inputs: list of Variable
    :param graph_inputs: variables considered to be constant
        (do not backpropagate through them)
    :type warn_type: bool
    :param warn_type: True will trigger warnings via the logging module when
        the gradient on an expression has a different type than the original
        expression
    :rtype: dictionary whose keys and values are of type Variable
    :return: mapping from each Variable encountered in the backward
        traversal to the gradient with respect to that Variable.
    It is assumed that there is some objective J shared between all members of
    sources, so that for each v, gradient-on-v is the gradient of J with
    respect to v
    """
    # gmap accumulates the total gradient on each Variable; a gradient
    # arriving on a variable that already has one is added symbolically.
    gmap = {}
    for (r, g_r) in sources:
        if not hasattr(r, 'type'):
            raise TypeError('sources must be Variables', r)
        if g_r is not None:
            if r in gmap:
                gmap[r] = gmap[r] + g_r
            else:
                gmap[r] = g_r
    # The distinct source variables are the outputs of the subgraph we
    # will traverse backward.
    graph_outputs = gof.utils.uniq([r for r, g in sources])
    if graph_inputs is None:
        graph_inputs = gof.graph.inputs(graph_outputs)
    # Walk the graph in reverse topological order so that every node's
    # output gradients are fully accumulated before its grad() is called.
    for node in gof.graph.io_toposort(graph_inputs,
                                      graph_outputs).__reversed__():
        g_outputs = [gmap.get(o, None) for o in node.outputs]
        #if all output gradients are None, continue
        if all(map(lambda x: x is None, g_outputs)): continue
        output_arg = g_outputs
        input_arg = node.inputs
        # Each Op's grad function requires inputs and output_grads
        # If the Op destroys any input, but the grad expression uses it,
        # then chances are the resulting graph will have a dependency
        # cycle. We avoid this cycle by passing (symbolic) copies of
        # each destroyed input.
        try:
            dinputs = [node.inputs[x[0]] for x in node.op.destroy_map.values()]
        except AttributeError:
            # Ops without a destroy_map destroy nothing.
            dinputs = []
        new_input_arg = []
        for input in input_arg:
            if input in dinputs and hasattr(input, 'copy'):
                new_input_arg.append(input.copy())
            else:
                new_input_arg.append(input)
        input_arg = new_input_arg
        #note that this function is not in a try-except block
        # the rationale:
        # If the op implements grad, then any exception should be passed to
        # the caller
        # If the op doesn't implement grad, this entire function should fail.
        # Other possibilities:
        #   * return a partial back-prop
        #
        op_grad = node.op.grad(input_arg, output_arg)
        if not isinstance(op_grad, (list, tuple)):
            raise ValueError(_msg_retType, node.op)
        g_inputs = op_grad
        assert isinstance(g_inputs, (list, tuple))
        # grad() must return exactly one gradient (possibly None) per input.
        if len(g_inputs) != len(node.inputs):
            raise ValueError(_msg_badlen,
                             node.op,
                             len(g_inputs),
                             len(node.inputs))
        for ii, (r, g_r) in enumerate(zip(node.inputs, g_inputs)):
            if warn_type:
                # Warn when a gradient's type differs from its variable's
                # type; the sentinel defaults 0/1 make missing attrs unequal.
                if g_r and (getattr(r, 'type', 0) != getattr(g_r, 'type', 1)):
                    r_type = getattr(r, 'type', None)
                    g_r_type = getattr(g_r, 'type', None)
                    _logger.warning('%s.grad returned a different type (%s) '
                                    'for input %i of type (%s)',
                                    node.op, g_r_type, ii, r_type)
            # Give the gradient a readable name like "(dcost/dx)" when
            # there is a single named source and the input is named.
            if g_r and len(sources) == 1 and sources[0][0].name and r.name:
                g_r.name = "(d%s/d%s)" % (sources[0][0].name, r.name)
            if g_r is not None:
                assert r is not None
                if r in gmap:
                    gmap[r] = gmap[r] + g_r
                else:
                    gmap[r] = g_r
    return gmap
def unimplemented_grad(op, x_pos, x):
    """
    DO NOT USE. Deprecated helper, kept only until all callers inside
    theano have been removed.

    Build a symbolic variable of type `x.type` that cannot be computed:
    evaluating it raises an exception (NotImplementedError) stating that
    the gradient on the `x_pos`'th input of `op` is not implemented.
    """
    # Raise acts as an identity op whose evaluation errors out with `msg`.
    return Raise(msg='%s.grad not implemented for input %i' % (op, x_pos))(x)
########################
# R Operator
########################
def Rop(f, wrt, eval_points):
    """
    Computes the R operation on `f` wrt to `wrt` evaluated at points given
    in `eval_points`. Mathematically this stands for the jacobian of `f` wrt
    to `wrt` right muliplied by the eval points.
    :type f: Variable or list of Variables
        `f` stands for the output of the computational graph to which you
        want to apply the R operator
    :type wrt: Variable or list of `Variables`s
        variables for which you compute the R operator of the expression
        described by `f`
    :type eval_points: Variable or list of Variables
        evalutation points for each of the variables in `wrt`
    :rtype: Variable or list/tuple of Variables depending on type of f
    :return: symbolic expression such that
        R_op[i] = sum_j ( d f[i] / d wrt[j]) eval_point[j]
        where the indices in that expression are magic multidimensional
        indices that specify both the position within a list and all
        coordinates of the tensor element in the last.
        If `wrt` is a list/tuple, then return a list/tuple with the results.
    """
    from theano.tensor import as_tensor_variable
    # Remember the container kind of `f` so the result can be returned
    # in the same form at the end.
    using_list = isinstance(f, list)
    using_tuple = isinstance(f, tuple)
    # Normalize all three arguments to lists.
    if not isinstance(wrt, (list, tuple)):
        wrt = [wrt]
    if not isinstance(eval_points, (list, tuple)):
        eval_points = [eval_points]
    if not isinstance(f, (list, tuple)):
        f = [f]
    assert len(wrt) == len(eval_points)
    # Check that each element of wrt corresponds to an element
    # of eval_points with the same dimensionality.
    for pack in enumerate(zip(wrt, eval_points)):
        i = pack[0]
        wrt_elem, eval_point = pack[1]
        if not isinstance(wrt_elem, gof.Variable):
            wrt_elem = as_tensor_variable(wrt_elem)
        if not isinstance(eval_point, gof.Variable):
            eval_point = as_tensor_variable(eval_point)
        try:
            if wrt_elem.type.ndim != eval_point.type.ndim:
                raise ValueError('Element ' +
                                 str(i) +
                                 ' of wrt/eval_point have mismatched ' +
                                 'dimensionality: ' +
                                 str(wrt_elem.type.ndim) +
                                 ' versus ' +
                                 str(eval_point.type.ndim))
        except AttributeError:
            # wrt_elem and eval_point don't always have ndim like random type
            # Tensor, Sparse and CudaNdArray have the ndim attribute
            pass
    # Maps each visited Apply node to the list of R_op results for its
    # outputs, filled in by _traverse below.
    seen_nodes = {}

    def _traverse(node):
        """ Recursively compute the R operation results for `node`'s
        outputs and cache them in `seen_nodes`. """
        if node is None:
            return None
        else:
            op = node.op
            inputs = node.inputs
            # Compute the evaluation points corresponding to each of the
            # inputs of the node
            local_eval_points = []
            for inp in inputs:
                if inp in wrt:
                    local_eval_points.append(eval_points[wrt.index(inp)])
                elif inp.owner is None:
                    try:
                        local_eval_points.append(inp.zeros_like())
                    # Narrowed from a bare `except:` so that
                    # KeyboardInterrupt/SystemExit propagate instead of
                    # being silently converted into a None eval point.
                    except Exception:
                        # None should be used for non-differentiable
                        # arguments, like for example random states
                        local_eval_points.append(None)
                elif inp.owner in seen_nodes:
                    local_eval_points.append(
                        seen_nodes[inp.owner][inp.owner.outputs.index(inp)])
                else:
                    # We actually need to compute the R_op for this node
                    _traverse(inp.owner)
                    local_eval_points.append(
                        seen_nodes[inp.owner][inp.owner.outputs.index(inp)])
            # Coerce each non-None evaluation point to the exact type of
            # the corresponding input before calling op.R_op.
            same_type_eval_points = []
            for x, y in zip(inputs, local_eval_points):
                if y is not None:
                    if not isinstance(x, gof.Variable):
                        x = as_tensor_variable(x)
                    if not isinstance(y, gof.Variable):
                        y = as_tensor_variable(y)
                    y = x.type.filter_variable(y)
                    assert x.type == y.type
                    same_type_eval_points.append(y)
                else:
                    same_type_eval_points.append(y)
            seen_nodes[node] = op.R_op(node.inputs, same_type_eval_points)
        return None
    # Populate the dictionary
    for out in f:
        _traverse(out.owner)
    # Assemble the results, in the same order as `f`.
    rval = []
    for out in f:
        if out in wrt:
            # d out / d out is the identity: the result is the eval point.
            rval.append(eval_points[wrt.index(out)])
        elif seen_nodes[out.owner][out.owner.outputs.index(out)] is None:
            raise ValueError(('The function is not differentiable with '
                              'respect to the provided inputs !'))
        else:
            rval.append(seen_nodes[out.owner][out.owner.outputs.index(out)])
    return format_as(using_list, using_tuple, rval)
def Lop(f, wrt, eval_points, consider_constant=None, warn_type=False,
        disconnected_inputs='raise'):
    """
    Computes the L operation on `f` wrt to `wrt` evaluated at points given
    in `eval_points`. Mathematically this stands for the jacobian of `f` wrt
    to `wrt` left muliplied by the eval points.
    :type f: Variable or list of Variables
        `f` stands for the output of the computational graph to which you
        want to apply the L operator
    :type wrt: Variable or list of `Variables`s
        variables for which you compute the L operator of the expression
        described by `f`
    :type eval_points: Variable or list of Variables
        evalutation points for each of the variables in `f`
    :rtype: Variable or list/tuple of Variables depending on type of f
    :return: symbolic expression such that
        L_op[i] = sum_i ( d f[i] / d wrt[j]) eval_point[i]
        where the indices in that expression are magic multidimensional
        indices that specify both the position within a list and all
        coordinates of the tensor element in the last
        If `f` is a list/tuple, then return a list/tuple with the results.
    """
    if consider_constant is None:
        consider_constant = []
    # Use isinstance for consistency with Rop/grad and so that subclasses
    # of list/tuple are accepted too (was: type(eval_points) not in ...).
    if not isinstance(eval_points, (list, tuple)):
        eval_points = [eval_points]
    # Remember the container kind of `wrt` so the result can be returned
    # in the same form at the end.
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    if not isinstance(f, (list, tuple)):
        f = [f]
    inputs = gof.graph.inputs(f)
    # Back-propagate the supplied eval points through the graph of f;
    # consider_constant variables are treated as graph inputs (no
    # gradient flows through them).
    gmap = grad_sources_inputs(
        zip(f, eval_points),
        list(inputs) + list(consider_constant),
        warn_type=warn_type)
    # Note : If p is not in gmap there can be several reasons, among which
    # is the fact that p might not be part of the computational graph. A
    # simple example is that for a+b for e.g. a[0] is not part of the graph,
    # so Theano does not know how to compute TT.grad(TT.sum(a+b), a[0])
    # such subtle cases can be fixed by a more careful implementation of the
    # gradient, but for now Theano needs to throw an exception, and make the
    # user aware that it does not know how to compute that gradient
    if not isinstance(wrt, (list, tuple)):
        wrt = [wrt]
    ret = []
    for p in wrt:
        if p in gmap:
            ret.append(gmap[p])
        else:
            # p received no gradient: honour the disconnected_inputs policy.
            message = ("Lop method was asked to compute the gradient "
                       "with respect to a variable that is not part of "
                       "the computational graph of the cost, or is used "
                       "only by a non-differentiable operator: %s" % p)
            if disconnected_inputs == 'ignore':
                pass
            elif disconnected_inputs == 'warn':
                warnings.warn(message, stacklevel=1)
            elif disconnected_inputs == 'raise':
                raise ValueError(message)
            else:
                raise ValueError("Invalid value for keyword "
                                 "'disconnected_inputs', valid values are "
                                 "'ignore', 'warn' and 'raise'.")
            # In the 'ignore'/'warn' cases the gradient is taken to be zero.
            ret.append(p.zeros_like())
    return format_as(using_list, using_tuple, ret)
#########################
# Gradient
#########################
def grad(cost, wrt, g_cost=None, consider_constant=None, warn_type=False,
         disconnected_inputs='raise'):
    """Return symbolic gradient(s) of the scalar `cost` with respect to `wrt`.

    :type cost: scalar (0-dimensional) Variable
    :type wrt: Variable or list/tuple of Variables
    :type g_cost: scalar Variable, or None
    :param g_cost: expression for the gradient flowing into `cost`;
        defaults to ``ones_like(cost)``.
    :param consider_constant: iterable of Variables that backpropagation
        must not traverse.
    :param warn_type: when True, log a warning for any Op whose gradient
        does not match its input type.
    :type disconnected_inputs: string
    :param disconnected_inputs: behaviour when an element of `wrt` is not
        reachable from `cost` (or reachable only through non-differentiable
        Ops): 'ignore' (use a zero gradient silently), 'warn' (zero
        gradient plus a warning) or 'raise' (raise a ValueError).
    :rtype: Variable or list/tuple of Variables, mirroring the type of `wrt`
    :return: symbolic gradient(s) of `cost` with respect to `wrt`; a zero
        variable is substituted for any non-differentiable element.

    This function is a thin wrapper around the more general
    `theano.gradient.grad_sources_inputs`.
    """
    # Validate consider_constant eagerly: a nested data structure with
    # variables at the leaves would otherwise be silently mishandled
    # (only the root would be treated as constant).
    if consider_constant is None:
        consider_constant = []
    else:
        if not hasattr(consider_constant, '__iter__'):
            raise TypeError('consider_constant must be an iterable collection,'
                            ' got ' + str(type(consider_constant)))
        for elem in consider_constant:
            if not isinstance(elem, gof.Variable):
                raise TypeError('Elements of consider_constant must be '
                                'variables, but got ' + str(type(elem)))
    if not isinstance(cost, Variable):
        raise TypeError(('In grad(), cost argument should be '
                         'a Variable.'), cost)
    if cost.type.ndim:
        raise TypeError(
            'In theano.gradient.grad, "cost" argument should be a scalar,'
            ' but ndim is %i (should be 0). If you want to compute the'
            ' gradient of the sum of cost, you should use cost.sum().'
            % cost.type.ndim)
    if g_cost is None:
        from theano import tensor
        g_cost = tensor.ones_like(cost)
    graph_inputs = gof.graph.inputs([cost])
    gmap = grad_sources_inputs(
        [(cost, g_cost)],
        list(graph_inputs) + list(consider_constant),
        warn_type=warn_type)
    # Note : If a variable is missing from gmap there can be several
    # reasons, among which is the fact that it might not be part of the
    # computational graph. A simple example: for a+b, a[0] is not part of
    # the graph, so Theano does not know how to compute
    # TT.grad(TT.sum(a+b), a[0]). Such subtle cases can be fixed by a more
    # careful implementation of the gradient, but for now Theano needs to
    # throw an exception, and make the user aware that it does not know
    # how to compute that gradient.
    wrt_is_list = isinstance(wrt, list)
    wrt_is_tuple = isinstance(wrt, tuple)
    if not (wrt_is_list or wrt_is_tuple):
        wrt = [wrt]
    grads = []
    for param in wrt:
        if param in gmap:
            grads.append(gmap[param])
            continue
        message = ("grad method was asked to compute the gradient "
                   "with respect to a variable that is not part of "
                   "the computational graph of the cost, or is used "
                   "only by a non-differentiable operator: %s" % param)
        if disconnected_inputs == 'raise':
            raise ValueError(message)
        elif disconnected_inputs == 'warn':
            warnings.warn(message, stacklevel=1)
        elif disconnected_inputs != 'ignore':
            raise ValueError("Invalid value for keyword "
                             "'disconnected_inputs', valid values are "
                             "'ignore', 'warn' and 'raise'.")
        # Disconnected but tolerated: substitute an all-zero gradient.
        grads.append(param.zeros_like())
    return format_as(wrt_is_list, wrt_is_tuple, grads)
class numeric_grad(object):
    """
    Compute the numeric derivative of a scalar-valued function at a particular
    point.
    """
    # Note on step sizes and tolerances:
    #
    # There is a relationship between the step size and the function value and
    # the measurement error that is incurred due to rounding. The finite
    # difference we measure is
    # delta = f(x0) - f(x0+eps)
    #
    # For maximum precision, f should be close to zero.
    # For every power of 2 that f departs from zero, we lose a bit of precision
    # in delta.
    #
    # Even in this case of maximum accuracy, there is a tradeoff between
    # stepsize and measurement error.
    # Taking small steps allows us to measure large derivatives accuractly,
    # but longer steps are required to measure small derivatives accurately.
    # However longer steps introduce bias into our measurement in general
    # for non-linear functions.
    #
    # It would be interesting to have a version of numeric grad that used an
    # adaptive stepsize.
    #
    # For now, we use a heuristic that catches very bad gradients, but is not
    # perfectly accurate.
    # Per-dtype default finite-difference step. Both string and numpy.dtype
    # keys are present so lookups work with either representation.
    type_eps = {'float64': 1e-7,
                'float32': 3e-4,
                numpy.dtype('float64'): 1e-7,
                numpy.dtype('float32'): 3e-4}

    def __init__(self, f, pt, eps=None, out_type=None):
        """Return the gradient of f at pt.
        :param f: a differentiable function such that f(*pt) is a scalar
        :param pt: an ndarray, a list of ndarrays or tuple of ndarrays
        :param out_type: dtype of output, if complex (i.e. 'complex32' or
            'complex64')
        This function computes the gradient by a one-sided finite
        differences of a fixed step size (eps).
        It is assumed that f(...) will return a scalar.
        It is assumed that all f's inputs are numpy.ndarray objects.
        :param eps: the stepsize for the finite differencing. None means
            input dtype-dependent. See `type_eps`.
        """
        def prod(inputs):
            # Product of the elements of `inputs` (used for array sizes).
            rval = 1
            for i in inputs:
                rval *= i
            return rval
        packed_pt = False
        if not isinstance(pt, (list, tuple)):
            # A bare array was passed; remember to unpack the result too.
            pt = [pt]
            packed_pt = True
        apt = [numpy.array(p) for p in pt]
        shapes = [p.shape for p in apt]
        dtypes = [str(p.dtype) for p in apt]
        # TODO: remove this eventually (why was this here in the first place ?)
        # In the case of CSM, the arguments are a mixture of floats and
        # integers...
        # if not dtypes == [dtypes[0]] * len(apt):
        #      raise TypeError('All function arguments must have same dtype')
        total_size = __builtin__.sum(prod(sh) for sh in shapes)
        # Work in the dtype with the smallest eps (highest precision)
        # among the input dtypes.
        working_dtype = __builtin__.min((self.type_eps[dt], dt)
                                        for dt in dtypes)[1]
        # create un-initialized memory
        x = numpy.ndarray((total_size,), dtype=working_dtype)
        if (not out_type is None) and (out_type.startswith('complex')):
            gx = numpy.ndarray((total_size,), dtype=out_type)
        else:
            gx = numpy.ndarray((total_size,), dtype=working_dtype)
        if eps is None:
            # Most conservative (largest) step among the input dtypes.
            eps = __builtin__.max(self.type_eps[dt] for dt in dtypes)
        # set up aliases so that apt[i] is backed by memory in x
        # and self.gf is backed by memory in gx
        cur_pos = 0
        self.gf = []
        for i, p in enumerate(apt):
            p_size = prod(p.shape)
            # set up alias
            apt[i] = x[cur_pos: cur_pos + p_size].reshape(p.shape)
            self.gf.append(gx[cur_pos: cur_pos + p_size].reshape(p.shape))
            # initialize with p's value
            apt[i][...] = p
            cur_pos += p_size
        f_x = f(*[p.copy() for p in apt])
        # now iterate over the elements of x, and call f on apt.
        x_copy = x.copy()
        for i in xrange(total_size):
            # Perturb one flat element at a time; because apt aliases x,
            # the perturbation is visible to f through apt.
            x[:] = x_copy
            x[i] += eps
            f_eps = f(*apt)
            # TODO: remove this when it is clear that the next
            # replacemement does not pose problems of its own. It was replaced
            # for its inability to handle complex variables.
            # gx[i] = numpy.asarray((f_eps - f_x) / eps)
            gx[i] = ((f_eps - f_x) / eps)
        if packed_pt:
            # Mirror the input packing: return a bare gradient array.
            self.gf = self.gf[0]

    @staticmethod
    def abs_rel_err(a, b):
        """Return absolute and relative error between a and b.
        The relative error is a small number when a and b are close, relative
        to how big they are.
        Formulas used:
        abs_err = abs(a - b)
        rel_err = abs_err / max(abs(a) + abs(b), 1e-8)
        The denominator is clipped at 1e-8 to avoid dividing by 0 when a and b
        are both close to 0.
        The tuple (abs_err, rel_err) is returned
        """
        abs_err = abs(a - b)
        rel_err = abs_err / numpy.maximum(abs(a) + abs(b), 1e-8)
        return (abs_err, rel_err)

    def abs_rel_errors(self, g_pt):
        """Return the abs and rel error of gradient estimate `g_pt`
        `g_pt` must be a list of ndarrays of the same length as self.gf,
        otherwise a ValueError is raised.
        Corresponding ndarrays in `g_pt` and `self.gf` must have the same
        shape or ValueError is raised.
        """
        if len(g_pt) != len(self.gf):
            raise ValueError(
                'argument has wrong number of elements',
                len(g_pt))
        errs = []
        for i, (a, b) in enumerate(zip(g_pt, self.gf)):
            if a.shape != b.shape:
                raise ValueError(
                    'argument element %i has wrong shape %s' % (
                        i, str((a.shape, b.shape))))
            errs.append(numeric_grad.abs_rel_err(a, b))
        return errs

    def max_err(self, g_pt, abs_tol, rel_tol):
        """Find the biggest error between g_pt and self.gf.
        What is measured is the violation of relative and absolute errors,
        wrt the provided tolerances (abs_tol, rel_tol).
        A value > 1 means both tolerances are exceeded.
        Return the argmax of min(abs_err / abs_tol, rel_err / rel_tol) over
        g_pt, as well as abs_err and rel_err at this point.
        """
        pos = []
        errs = []
        abs_errs = []
        rel_errs = []
        abs_rel_errs = self.abs_rel_errors(g_pt)
        for abs_err, rel_err in abs_rel_errs:
            if not numpy.all(numpy.isfinite(abs_err)):
                raise ValueError('abs_err not finite', repr(abs_err))
            if not numpy.all(numpy.isfinite(rel_err)):
                raise ValueError('rel_err not finite', repr(rel_err))
            # Scale each error by its tolerance; values > 1 mean "failed".
            scaled_err = numpy.minimum(abs_err / abs_tol, rel_err / rel_tol)
            max_i = scaled_err.argmax()
            pos.append(max_i)
            errs.append(scaled_err.flatten()[max_i])
            abs_errs.append(abs_err.flatten()[max_i])
            rel_errs.append(rel_err.flatten()[max_i])
        # max over the arrays in g_pt
        max_arg = numpy.argmax(errs)
        max_pos = pos[max_arg]  # NOTE(review): unused; the return reads pos[max_arg] directly
        return (max_arg, pos[max_arg], abs_errs[max_arg], rel_errs[max_arg])
def verify_grad(fun, pt, n_tests=2, rng=None, eps=None, out_type=None, abs_tol=None,
                rel_tol=None, mode=None, cast_to_output_type=False):
    """ Test a gradient by Finite Difference Method. Raise error on failure.
    Example:
    >>> verify_grad(theano.tensor.tanh,
                    (numpy.asarray([[2,3,4], [-1, 3.3, 9.9]]),),
                    rng=numpy.random)
    Raises an Exception if the difference between the analytic gradient and
    numerical gradient (computed through the Finite Difference Method) of a
    random projection of the fun's output to a scalar exceeds the given
    tolerance.
    :param fun: a Python function that takes Theano variables as inputs,
        and returns a Theano variable. For instance, an Op instance with
        a single output.
    :param pt: the list of numpy.ndarrays to use as input values.
        These arrays must be either float32 or float64 arrays.
    :param n_tests: number of times to run the test
    :param rng: random number generator used to sample u, we test gradient
        of sum(u * fun) at pt
    :param eps: stepsize used in the Finite Difference Method (Default
        None is type-dependent)
    :param out_type: dtype of output, if complex (i.e. 'complex32' or
        'complex64')
    :param abs_tol: absolute tolerance used as threshold for gradient
        comparison
    :param rel_tol: relative tolerance used as threshold for gradient
        comparison
    :note: WARNING to unit-test writers: if `op` is a function that builds
        a graph, try to make it a SMALL graph. Often verify grad is run
        in debug mode, which can be very slow if it has to verify a lot of
        intermediate computations.
    :note: This op does not support multiple outputs. In tests/test_scan.py
        there is an experimental verify_grad that covers that case as well
        by using random projections.
    """
    from theano import compile, shared
    import theano.tensor
    from theano.tensor import as_tensor_variable, cast, TensorType
    assert isinstance(pt, (list, tuple))
    pt = [numpy.array(p) for p in pt]
    for i, p in enumerate(pt):
        if p.dtype not in ('float32', 'float64'):
            raise TypeError(('verify_grad can work only with floating point '
                             'inputs, but input %i has dtype "%s".') % (i, p.dtype))
    _type_tol = dict(  # relative error tolerances for different types
        float32=1e-2,
        float64=1e-4)
    if abs_tol is None:
        # Use the loosest tolerance among the input dtypes.
        abs_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
    if rel_tol is None:
        rel_tol = __builtin__.max(_type_tol[str(p.dtype)] for p in pt)
    if rng is None:
        raise TypeError(('rng should be a valid instance of '
                         'numpy.random.RandomState. You may '
                         'want to use theano.tests.unittest'
                         '_tools.verify_grad instead of '
                         'theano.gradient.verify_grad.'))
    # We allow input downcast in function, because numeric_grad works in the
    # most precise dtype used among the inputs, so we may need to cast some.
    def function(inputs, output):
        # Compile a theano function, honouring the requested mode if any.
        if mode is None:
            f = compile.function(inputs, output, accept_inplace=True,
                                 allow_input_downcast=True, on_unused_input='ignore')
        else:
            f = compile.function(inputs, output, accept_inplace=True,
                                 allow_input_downcast=True, mode=mode,
                                 on_unused_input='ignore')
        return f
    # Symbolic inputs mirroring the dtype/broadcastable pattern of each pt.
    tensor_pt = [TensorType(
        as_tensor_variable(p).dtype,
        as_tensor_variable(p).broadcastable)(name='input %i' % i)
        for i, p in enumerate(pt)]
    #fun can be either a function or an actual Op instance
    o_output = fun(*tensor_pt)
    if isinstance(o_output, list):
        raise NotImplementedError(('cant (yet) autotest gradient of fun '
                                   'with multiple outputs'))
    # we could make loop over outputs making random projections R for each,
    # but this doesn't handle the case where not all the outputs are
    # differentiable... so I leave this as TODO for now -JB.
    o_fn = function(tensor_pt, o_output)
    o_fn_out = o_fn(*[p.copy() for p in pt])
    if isinstance(o_fn_out, tuple) or isinstance(o_fn_out, list):
        raise TypeError('It seems like you are trying to use verify_grad '
                        'on an op or a function which outputs a list: there should'
                        ' be a single (array-like) output instead')
    # random_projection should not have elements too small,
    # otherwise too much precision is lost in numerical gradient
    def random_projection():
        plain = rng.rand(*o_fn_out.shape) + 0.5
        if cast_to_output_type:
            return numpy.array(plain, o_output.dtype)
        return plain
    t_r = shared(random_projection())
    # random projection of o onto t_r
    # This sum() is defined above, it's not the builtin sum.
    cost = theano.tensor.sum(t_r * o_output)
    cost_fn = function(tensor_pt, cost)
    #todo-- determine if this is actually needed
    g_cost = as_tensor_variable(1.0, name='g_cost')
    if cast_to_output_type:
        g_cost = cast(g_cost, o_output.dtype)
    symbolic_grad = grad(cost, tensor_pt, g_cost,
                         disconnected_inputs='ignore')
    grad_fn = function(tensor_pt, symbolic_grad)
    for test_num in xrange(n_tests):
        # Compare the finite-difference and analytic gradients at pt.
        num_grad = numeric_grad(cost_fn, [p.copy() for p in pt], eps, out_type)
        analytic_grad = grad_fn(*[p.copy() for p in pt])
        # Since `tensor_pt` is a list, `analytic_grad` should be one too.
        assert isinstance(analytic_grad, list)
        max_arg, max_err_pos, max_abs_err, max_rel_err =\
            num_grad.max_err(analytic_grad, abs_tol, rel_tol)
        if max_abs_err > abs_tol and max_rel_err > rel_tol:
            raise verify_grad.E_grad(max_arg, max_err_pos,
                                     max_abs_err, max_rel_err, abs_tol, rel_tol)
        #get new random projection for next test
        if test_num < n_tests - 1:
            t_r.set_value(random_projection(), borrow=True)
class GradientError(Exception):
    """Raised when a computed gradient disagrees with the numeric estimate
    beyond the requested absolute and relative tolerances."""

    def __init__(self, arg, err_pos, abs_err, rel_err, abs_tol, rel_tol):
        # Record which argument failed, where, and by how much.
        self.arg = arg
        self.err_pos = err_pos
        self.abs_err = abs_err
        self.rel_err = rel_err
        self.abs_tol = abs_tol
        self.rel_tol = rel_tol

    def __str__(self):
        # self.args may carry extra context (e.g. inserted by makeTester).
        joined_args = ", ".join([str(item) for item in self.args])
        template = """\
GradientError: numeric gradient and analytic gradient exceed tolerance:
At position %i of argument %i,
abs. error = %f, abs. tolerance = %f
rel. error = %f, rel. tolerance = %f
Exception args: %s"""
        return template % (self.err_pos, self.arg,
                           self.abs_err, self.abs_tol,
                           self.rel_err, self.rel_tol,
                           joined_args)
# Expose the failure exception on verify_grad itself, so callers can catch
# verify_grad.E_grad without importing GradientError directly.
verify_grad.E_grad = GradientError
def jacobian(expression, wrt, consider_constant=None, warn_type=False,
             disconnected_inputs='raise'):
    """
    :type expression: Vector (1-dimensional) Variable
    :type wrt: Variable or list of Variables
    :param consider_constant: a list of expressions not to backpropagate
        through
    :param warn_type: a value of True will cause warnings to be logged for any
        Op that emits a gradient that does not match its input type.
    :type disconnected_inputs: string
    :param disconnected_inputs: Defines the behaviour if some of the variables
        in ``wrt`` are not part of the computational graph computing ``cost``
        (or if all links are non-differentiable). The possible values are:
        - 'ignore': considers that the gradient on these parameters is zero.
        - 'warn': consider the gradient zero, and print a warning.
        - 'raise': raise an exception.
    :return: either a instance of Variable or list/tuple of Variables
        (depending upon `wrt`) representing the jacobian of `expression`
        with respect to (elements of) `wrt`. If an element of `wrt` is not
        differentiable with respect to the output, then a zero
        variable is returned. The return value is of same type
        as `wrt`: a list/tuple or TensorVariable in all cases.
    """
    from theano.tensor import arange
    # Check inputs have the right format
    assert isinstance(expression, Variable), \
        "tensor.jacobian expects a Variable as `expression`"
    assert expression.ndim < 2, \
        ("tensor.jacobian expects a 1 dimensional variable as "
         "`expression`. If not use flatten to make it a vector")
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    if isinstance(wrt, (list, tuple)):
        wrt = list(wrt)
    else:
        wrt = [wrt]
    if expression.ndim == 0:
        # expression is just a scalar, use grad
        return format_as(using_list, using_tuple, grad(expression, wrt))
    def inner_function(*args):
        # Scan body: differentiate one entry of `expression` with respect
        # to every element of `wrt`. args = (row index, expression, *wrt).
        idx = args[0]
        expr = args[1]
        rvals = []
        for inp in args[2:]:
            rval = grad(expr[idx],
                        inp,
                        consider_constant=consider_constant,
                        warn_type=warn_type,
                        disconnected_inputs=disconnected_inputs)
            rvals.append(rval)
        return rvals
    # Computing the gradients does not affect the random seeds on any random
    # generator used n expression (because during computing gradients we are
    # just backtracking over old values. (rp Jan 2012 - if anyone has a
    # counter example please show me)
    jacobs, updates = theano.scan(inner_function,
                                  sequences=arange(expression.shape[0]),
                                  non_sequences=[expression] + wrt)
    assert not updates, \
        ("Scan has returned a list of updates. This should not "
         "happen! Report this to theano-users (also include the "
         "script that generated the error)")
    return format_as(using_list, using_tuple, jacobs)
def hessian(cost, wrt, consider_constant=None, warn_type=False,
            disconnected_inputs='raise'):
    """
    :type cost: Scalar (0-dimensional) Variable.
    :type wrt: Vector (1-dimensional tensor) 'Variable' or list of
        vectors (1-dimensional tensors) Variables
    :param consider_constant: a list of expressions not to backpropagate
        through
    :param warn_type: a value of True will cause warnings to be logged for any
        Op that emits a gradient that does not match its input type.
    :type disconnected_inputs: string
    :param disconnected_inputs: Defines the behaviour if some of the variables
        in ``wrt`` are not part of the computational graph computing ``cost``
        (or if all links are non-differentiable). The possible values are:
        - 'ignore': considers that the gradient on these parameters is zero.
        - 'warn': consider the gradient zero, and print a warning.
        - 'raise': raise an exception.
    :return: either a instance of Variable or list/tuple of Variables
        (depending upon `wrt`) representing the Hessian of the `cost`
        with respect to (elements of) `wrt`. If an element of `wrt` is not
        differentiable with respect to the output, then a zero
        variable is returned. The return value is of same type
        as `wrt`: a list/tuple or TensorVariable in all cases.
    """
    from theano.tensor import arange
    # Check inputs have the right format
    assert isinstance(cost, Variable), \
        "tensor.hessian expects a Variable as `cost`"
    assert cost.ndim == 0, \
        "tensor.hessian expects a 0 dimensional variable as `cost`"
    using_list = isinstance(wrt, list)
    using_tuple = isinstance(wrt, tuple)
    if isinstance(wrt, (list, tuple)):
        wrt = list(wrt)
    else:
        wrt = [wrt]
    hessians = []
    for input in wrt:
        assert isinstance(input, Variable), \
            "tensor.hessian expects a (list of) Variable as `wrt`"
        assert input.ndim == 1, \
            "tensor.hessian expects a (list of) 1 dimensional variable "\
            "as `wrt`"
        # First derivative of cost w.r.t. this input: a vector.
        expr = grad(cost, input)
        # Differentiate each entry of that gradient vector again; each
        # scan step yields one row of the Hessian.
        hess, updates = theano.scan(lambda i, y, x: grad(
            y[i],
            x,
            consider_constant=consider_constant,
            warn_type=warn_type,
            disconnected_inputs=disconnected_inputs),
            sequences=arange(expr.shape[0]),
            non_sequences=[expr, input])
        assert not updates, \
            ("Scan has returned a list of updates. This should not "
             "happen! Report this to theano-users (also include the "
             "script that generated the error)")
        hessians.append(hess)
    return format_as(using_list, using_tuple, hessians)
| [
"logging.getLogger",
"numpy.array",
"__builtin__.min",
"numpy.isfinite",
"theano.tensor.arange",
"theano.tensor.as_tensor_variable",
"warnings.warn",
"theano.compile.function",
"numpy.dtype",
"theano.raise_op.Raise",
"__builtin__.max",
"theano.tensor.sum",
"theano.gof.utils.uniq",
"numpy.a... | [((305, 341), 'logging.getLogger', 'logging.getLogger', (['"""theano.gradient"""'], {}), "('theano.gradient')\n", (322, 341), False, 'import logging\n'), ((4439, 4478), 'theano.gof.utils.uniq', 'gof.utils.uniq', (['[r for r, g in sources]'], {}), '([r for r, g in sources])\n', (4453, 4478), False, 'from theano import gof\n'), ((14430, 14449), 'theano.gof.graph.inputs', 'gof.graph.inputs', (['f'], {}), '(f)\n', (14446, 14449), False, 'from theano import gof\n'), ((19124, 19148), 'theano.gof.graph.inputs', 'gof.graph.inputs', (['[cost]'], {}), '([cost])\n', (19140, 19148), False, 'from theano import gof\n'), ((33218, 33251), 'theano.tensor.sum', 'theano.tensor.sum', (['(t_r * o_output)'], {}), '(t_r * o_output)\n', (33235, 33251), False, 'import theano\n'), ((33356, 33394), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['(1.0)'], {'name': '"""g_cost"""'}), "(1.0, name='g_cost')\n", (33374, 33394), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((4532, 4563), 'theano.gof.graph.inputs', 'gof.graph.inputs', (['graph_outputs'], {}), '(graph_outputs)\n', (4548, 4563), False, 'from theano import gof\n'), ((7770, 7784), 'theano.raise_op.Raise', 'Raise', ([], {'msg': 'msg'}), '(msg=msg)\n', (7775, 7784), False, 'from theano.raise_op import Raise\n'), ((19088, 19110), 'theano.tensor.ones_like', 'tensor.ones_like', (['cost'], {}), '(cost)\n', (19104, 19110), False, 'from theano import tensor\n'), ((22111, 22133), 'numpy.dtype', 'numpy.dtype', (['"""float64"""'], {}), "('float64')\n", (22122, 22133), False, 'import numpy\n'), ((22153, 22175), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (22164, 22175), False, 'import numpy\n'), ((23835, 23884), 'numpy.ndarray', 'numpy.ndarray', (['(total_size,)'], {'dtype': 'working_dtype'}), '((total_size,), dtype=working_dtype)\n', (23848, 23884), False, 'import numpy\n'), ((28042, 28060), 'numpy.argmax', 'numpy.argmax', (['errs'], {}), '(errs)\n', (28054, 
28060), False, 'import numpy\n'), ((30279, 30293), 'numpy.array', 'numpy.array', (['p'], {}), '(p)\n', (30290, 30293), False, 'import numpy\n'), ((33440, 33468), 'theano.tensor.cast', 'cast', (['g_cost', 'o_output.dtype'], {}), '(g_cost, o_output.dtype)\n', (33444, 33468), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((4581, 4631), 'theano.gof.graph.io_toposort', 'gof.graph.io_toposort', (['graph_inputs', 'graph_outputs'], {}), '(graph_inputs, graph_outputs)\n', (4602, 4631), False, 'from theano import gof\n'), ((9675, 9703), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['wrt_elem'], {}), '(wrt_elem)\n', (9693, 9703), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((9782, 9812), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['eval_point'], {}), '(eval_point)\n', (9800, 9812), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((23166, 23180), 'numpy.array', 'numpy.array', (['p'], {}), '(p)\n', (23177, 23180), False, 'import numpy\n'), ((23682, 23739), '__builtin__.min', '__builtin__.min', (['((self.type_eps[dt], dt) for dt in dtypes)'], {}), '((self.type_eps[dt], dt) for dt in dtypes)\n', (23697, 23739), False, 'import __builtin__\n'), ((23974, 24018), 'numpy.ndarray', 'numpy.ndarray', (['(total_size,)'], {'dtype': 'out_type'}), '((total_size,), dtype=out_type)\n', (23987, 24018), False, 'import numpy\n'), ((24050, 24099), 'numpy.ndarray', 'numpy.ndarray', (['(total_size,)'], {'dtype': 'working_dtype'}), '((total_size,), dtype=working_dtype)\n', (24063, 24099), False, 'import numpy\n'), ((24151, 24202), '__builtin__.max', '__builtin__.max', (['(self.type_eps[dt] for dt in dtypes)'], {}), '(self.type_eps[dt] for dt in dtypes)\n', (24166, 24202), False, 'import __builtin__\n'), ((27701, 27752), 'numpy.minimum', 'numpy.minimum', (['(abs_err / abs_tol)', '(rel_err / rel_tol)'], {}), '(abs_err / abs_tol, rel_err / rel_tol)\n', (27714, 27752), False, 'import 
numpy\n'), ((31405, 31516), 'theano.compile.function', 'compile.function', (['inputs', 'output'], {'accept_inplace': '(True)', 'allow_input_downcast': '(True)', 'on_unused_input': '"""ignore"""'}), "(inputs, output, accept_inplace=True, allow_input_downcast=\n True, on_unused_input='ignore')\n", (31421, 31516), False, 'from theano import compile, shared\n'), ((31562, 31684), 'theano.compile.function', 'compile.function', (['inputs', 'output'], {'accept_inplace': '(True)', 'allow_input_downcast': '(True)', 'mode': 'mode', 'on_unused_input': '"""ignore"""'}), "(inputs, output, accept_inplace=True, allow_input_downcast=\n True, mode=mode, on_unused_input='ignore')\n", (31578, 31684), False, 'from theano import compile, shared\n'), ((33012, 33046), 'numpy.array', 'numpy.array', (['plain', 'o_output.dtype'], {}), '(plain, o_output.dtype)\n', (33023, 33046), False, 'import numpy\n'), ((38124, 38151), 'theano.tensor.arange', 'arange', (['expression.shape[0]'], {}), '(expression.shape[0])\n', (38130, 38151), False, 'from theano.tensor import arange\n'), ((41023, 41044), 'theano.tensor.arange', 'arange', (['expr.shape[0]'], {}), '(expr.shape[0])\n', (41029, 41044), False, 'from theano.tensor import arange\n'), ((15684, 15720), 'warnings.warn', 'warnings.warn', (['message'], {'stacklevel': '(1)'}), '(message, stacklevel=1)\n', (15697, 15720), False, 'import warnings\n'), ((20462, 20498), 'warnings.warn', 'warnings.warn', (['message'], {'stacklevel': '(1)'}), '(message, stacklevel=1)\n', (20475, 20498), False, 'import warnings\n'), ((27455, 27478), 'numpy.isfinite', 'numpy.isfinite', (['abs_err'], {}), '(abs_err)\n', (27469, 27478), False, 'import numpy\n'), ((27580, 27603), 'numpy.isfinite', 'numpy.isfinite', (['rel_err'], {}), '(rel_err)\n', (27594, 27603), False, 'import numpy\n'), ((31779, 31800), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['p'], {}), '(p)\n', (31797, 31800), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), 
((31820, 31841), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['p'], {}), '(p)\n', (31838, 31841), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((11934, 11955), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['x'], {}), '(x)\n', (11952, 11955), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n'), ((12040, 12061), 'theano.tensor.as_tensor_variable', 'as_tensor_variable', (['y'], {}), '(y)\n', (12058, 12061), False, 'from theano.tensor import as_tensor_variable, cast, TensorType\n')] |
from taurex.log import Logger
from taurex.util import get_molecular_weight
from taurex.data.fittable import Fittable
import numpy as np
from taurex.output.writeable import Writeable
from taurex.cache import OpacityCache
class Chemistry(Fittable, Logger, Writeable):
    """
    *Abstract Class*

    Skeleton for defining chemistry. Must implement
    methods:

        - :func:`activeGases`
        - :func:`inactiveGases`
        - :func:`activeGasMixProfile`
        - :func:`inactiveGasMixProfile`

    *Active* are those that are actively
    absorbing in the atmosphere. In technical terms they are molecules
    that have absorption cross-sections.

    You can find out what molecules can actively absorb by doing:

    >>> avail_active_mols = OpacityCache().find_list_of_molecules()

    Parameters
    ----------
    name : str
        Name used in logging

    """

    def __init__(self, name):
        Logger.__init__(self, name)
        Fittable.__init__(self)
        # Mean molecular weight per layer; filled by compute_mu_profile().
        self.mu_profile = None
        # Molecules with available cross-sections, i.e. candidates for the
        # 'active' list.
        self._avail_active = OpacityCache().find_list_of_molecules()

    @property
    def availableActive(self):
        """
        Returns a list of available
        actively absorbing molecules

        Returns
        -------
        molecules: :obj:`list`
            Actively absorbing molecules

        """
        return self._avail_active

    @property
    def activeGases(self):
        """
        **Requires implementation**

        Should return a list of molecule names

        Returns
        -------
        active : :obj:`list`
            List of active gases

        """
        raise NotImplementedError

    @property
    def inactiveGases(self):
        """
        **Requires implementation**

        Should return a list of molecule names

        Returns
        -------
        inactive : :obj:`list`
            List of inactive gases

        """
        raise NotImplementedError

    def initialize_chemistry(self, nlayers=100, temperature_profile=None,
                             pressure_profile=None, altitude_profile=None):
        """
        **Requires implementation**

        Derived classes should implement this to compute the active and
        inactive gas profiles

        Parameters
        ----------
        nlayers: int
            Number of layers in atmosphere
        temperature_profile: :obj:`array`
            Temperature profile in K, must have length ``nlayers``
        pressure_profile: :obj:`array`
            Pressure profile in Pa, must have length ``nlayers``
        altitude_profile: :obj:`array`
            Altitude profile in m, must have length ``nlayers``

        """
        self.compute_mu_profile(nlayers)

    @property
    def activeGasMixProfile(self):
        """
        **Requires implementation**

        Should return profiles of shape ``(nactivegases,nlayers)``. Active
        refers to gases that are actively absorbing in the atmosphere.
        Another way to put it these are gases where molecular cross-sections
        are used.

        """
        raise NotImplementedError

    @property
    def inactiveGasMixProfile(self):
        """
        **Requires implementation**

        Should return profiles of shape ``(ninactivegases,nlayers)``.
        These general refer to gases: ``H2``, ``He`` and ``N2``

        """
        raise NotImplementedError

    @property
    def muProfile(self):
        """
        Molecular weight for each layer of atmosphere

        Returns
        -------
        mix_profile : :obj:`array`

        """
        return self.mu_profile

    def get_gas_mix_profile(self, gas_name):
        """
        Returns the mix profile of a particular gas

        Parameters
        ----------
        gas_name : str
            Name of gas

        Returns
        -------
        mixprofile : :obj:`array`
            Mix profile of gas with shape ``(nlayer)``

        Raises
        ------
        KeyError
            If ``gas_name`` is neither an active nor an inactive gas.

        """
        if gas_name in self.activeGases:
            idx = self.activeGases.index(gas_name)
            return self.activeGasMixProfile[idx]
        elif gas_name in self.inactiveGases:
            idx = self.inactiveGases.index(gas_name)
            return self.inactiveGasMixProfile[idx]
        else:
            # Fix: include the offending name in the exception (the original
            # raised a bare KeyError with no message, which made failures
            # hard to diagnose). Callers catching KeyError are unaffected.
            raise KeyError(gas_name)

    def compute_mu_profile(self, nlayers):
        """
        Computes molecular weight of atmosphere
        for each layer

        Parameters
        ----------
        nlayers: int
            Number of layers

        """
        self.mu_profile = np.zeros(shape=(nlayers,))
        # Sum the mass-weighted contribution of every gas, active first.
        if self.activeGasMixProfile is not None:
            for idx, gasname in enumerate(self.activeGases):
                self.mu_profile += self.activeGasMixProfile[idx] * \
                    get_molecular_weight(gasname)
        if self.inactiveGasMixProfile is not None:
            for idx, gasname in enumerate(self.inactiveGases):
                self.mu_profile += self.inactiveGasMixProfile[idx] * \
                    get_molecular_weight(gasname)

    def write(self, output):
        """
        Writes chemistry class and arguments to file

        Parameters
        ----------
        output: :class:`~taurex.output.output.Output`

        """
        gas_entry = output.create_group('Chemistry')
        gas_entry.write_string('chemistry_type', self.__class__.__name__)
        gas_entry.write_string_array('active_gases', self.activeGases)
        gas_entry.write_string_array('inactive_gases', self.inactiveGases)
        return gas_entry
| [
"taurex.data.fittable.Fittable.__init__",
"taurex.cache.OpacityCache",
"taurex.util.get_molecular_weight",
"taurex.log.Logger.__init__",
"numpy.zeros"
] | [((978, 1005), 'taurex.log.Logger.__init__', 'Logger.__init__', (['self', 'name'], {}), '(self, name)\n', (993, 1005), False, 'from taurex.log import Logger\n'), ((1014, 1037), 'taurex.data.fittable.Fittable.__init__', 'Fittable.__init__', (['self'], {}), '(self)\n', (1031, 1037), False, 'from taurex.data.fittable import Fittable\n'), ((4572, 4598), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nlayers,)'}), '(shape=(nlayers,))\n', (4580, 4598), True, 'import numpy as np\n'), ((1099, 1113), 'taurex.cache.OpacityCache', 'OpacityCache', ([], {}), '()\n', (1111, 1113), False, 'from taurex.cache import OpacityCache\n'), ((4798, 4827), 'taurex.util.get_molecular_weight', 'get_molecular_weight', (['gasname'], {}), '(gasname)\n', (4818, 4827), False, 'from taurex.util import get_molecular_weight\n'), ((5033, 5062), 'taurex.util.get_molecular_weight', 'get_molecular_weight', (['gasname'], {}), '(gasname)\n', (5053, 5062), False, 'from taurex.util import get_molecular_weight\n')] |
# --------------
# Importing header files
import numpy as np
# Path of the file has been stored in variable called 'path'
#New record
# One extra census row (8 columns) to append to the loaded data.
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
# Load the CSV (skipping the header row) and append the new record,
# converted to an ndarray so shapes line up for concatenation.
data = np.genfromtxt(path, delimiter=',' ,skip_header=1)
census = np.concatenate((data,np.asarray(new_record)))
print(census, type(census))
# --------------
#Code starts here
import numpy as np
# Column 0 holds age; compute simple summary statistics over it.
age = census[:,0] # indexing an array, all rows and only the first column
max_age = max(age)
min_age = min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print(age)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
# --------------
#Code starts here
# Column 2 encodes race as integers 0-4; split rows by race value and
# find the least-represented group (index of the smallest subset).
race_0 = census[census[:,2] == 0]
race_1 = census[census[:,2] == 1]
race_2 = census[census[:,2] == 2]
race_3 = census[census[:,2] == 3]
race_4 = census[census[:,2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
len_race = [len_0,len_1,len_2,len_3,len_4]
minority_race = len_race.index(min(len_race))
print(minority_race)
# --------------
#Code starts here
# Rows with age > 60; column 6 holds working hours (hours-per-week).
senior_citizens = census[census[:,0]>60]
working_hours_sum = sum(senior_citizens[:,6])
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum/senior_citizens_len
# print(working_hours_sum)
print(avg_working_hours)
# --------------
#Code starts here
import numpy as np
# Column 1 presumably holds education level; split at >10 vs <=10 and
# compare mean of column 7 (pay/income indicator) between the groups.
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = np.mean(high[:,7])
avg_pay_low = np.mean(low[:,7])
print(avg_pay_high)
print(avg_pay_low)
| [
"numpy.mean",
"numpy.asarray",
"numpy.genfromtxt",
"numpy.std"
] | [((217, 266), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (230, 266), True, 'import numpy as np\n'), ((540, 552), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (547, 552), True, 'import numpy as np\n'), ((564, 575), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (570, 575), True, 'import numpy as np\n'), ((1519, 1538), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1526, 1538), True, 'import numpy as np\n'), ((1553, 1571), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1560, 1571), True, 'import numpy as np\n'), ((300, 322), 'numpy.asarray', 'np.asarray', (['new_record'], {}), '(new_record)\n', (310, 322), True, 'import numpy as np\n')] |
import os, pickle, json
from collections import deque
import numpy as np
import tensorflow as tf
import torch
import torch.nn.functional as F
from guacamol.distribution_matching_generator import DistributionMatchingGenerator
from rdkit import Chem
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from data.gen_targets import get_symbol_list
from src.data.loader import SizeSampler, graph_collate_fn
from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties,\
dct_to_cuda_inplace, copy_graph_remove_data
# TensorFlow 1.x only: ConfigProto/Session were removed from the TF2 top-level
# API, so guard on the major version and enable incremental GPU memory
# allocation instead of letting TF reserve all GPU memory up front.
if int(tf.__version__.split('.')[0]) <= 1:
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
class MockGenerator(DistributionMatchingGenerator):
    """Generator stub that replays a fixed pool of SMILES strings.

    Successive ``generate`` calls rotate through the pool so repeated
    requests cycle over the same molecules in order.
    """

    def __init__(self, smiles_list, num_samples_to_generate, train_smiles_list=None, remove_non_novel=False):
        candidates = smiles_list
        if remove_non_novel is True:
            # keep only molecules not seen in the training set
            candidates = [s for s in candidates if s not in train_smiles_list]
        self.smiles_list = candidates[:num_samples_to_generate]

    def generate(self, number_samples):
        batch = self.smiles_list[:number_samples]
        # rotate the pool so the next call continues where this one stopped
        self.smiles_list = self.smiles_list[number_samples:] + batch
        return batch
class GenDataset(Dataset):
    """Wrap an existing dataset but report an arbitrary virtual length.

    Item access is simply delegated to the wrapped dataset; only ``len``
    is overridden to ``number_samples``.
    """

    def __init__(self, dataset, number_samples):
        self.dataset = dataset
        self.number_samples = number_samples

    def __len__(self):
        return self.number_samples

    def __getitem__(self, index):
        return self.dataset[index]
class GraphGenerator(DistributionMatchingGenerator):
    """Generate molecules by iteratively refining masked/randomised graphs.

    Runs ``num_iters`` refinement passes over an initial set of graphs: the
    first ``num_sampling_iters`` passes sample node/edge properties from the
    model's predicted distributions, the remaining passes take the argmax.
    Final graphs are converted to RDKit molecules and SMILES strings.
    ``generate_with_evaluation`` additionally checkpoints intermediate graphs,
    saves SMILES files and runs guacamol evaluation at configurable periods.
    """
    def __init__(self, train_data, model, generation_algorithm, random_init, num_iters, num_sampling_iters, batch_size,
                 edges_per_batch=-1, retrieve_train_graphs=False, local_cpu=False, cp_save_dir=None,
                 set_seed_at_load_iter=False, graph_type='QM9', sample_uniformly=False, mask_comp_to_predict=False,
                 maintain_minority_proportion=False, no_edge_present_type='learned', mask_independently=False,
                 one_property_per_loop=False, checkpointing_period=1, save_period=1, evaluation_period=1,
                 evaluate_finegrained=False, save_finegrained=False, variables_per_gibbs_iteration=1, top_k=-1,
                 save_init=False, cond_property_values={}):
        """Store configuration; see generate()/generate_with_evaluation().

        NOTE(review): the mutable default ``cond_property_values={}`` is shared
        across calls; it is only read here (converted to a new dict below), so
        this is safe, but ``None`` would be the safer idiom.
        """
        super().__init__()
        self.model = model
        self.generation_algorithm = generation_algorithm
        self.random_init = random_init
        self.sample_uniformly = sample_uniformly
        self.num_iters = num_iters
        self.num_sampling_iters = num_sampling_iters
        # argmax passes are whatever remains after the sampling passes
        self.num_argmax_iters = self.num_iters - self.num_sampling_iters
        self.train_data = train_data
        self.batch_size = batch_size
        self.edges_per_batch = edges_per_batch
        self.local_cpu = local_cpu
        self.cp_save_dir = cp_save_dir
        self.calculate_length_dist()
        self.get_special_inds()
        self.set_seed_at_load_iter = set_seed_at_load_iter
        self.symbol_list = get_symbol_list(graph_type)[:self.train_data.num_node_types]
        self.retrieve_train_graphs = retrieve_train_graphs
        self.mask_comp_to_predict = mask_comp_to_predict
        self.maintain_minority_proportion = maintain_minority_proportion
        self.no_edge_present_type = no_edge_present_type
        self.mask_independently = mask_independently
        self.one_property_per_loop = one_property_per_loop
        self.index_method = get_index_method()
        self.checkpointing_period = checkpointing_period
        self.save_period = save_period
        self.evaluation_period = evaluation_period
        self.evaluate_finegrained = evaluate_finegrained
        self.save_finegrained = save_finegrained
        self.variables_per_gibbs_iteration = variables_per_gibbs_iteration
        self.top_k = top_k
        self.save_init = save_init
        self.model_forward = self.model_forward_mgm
        # Integer tags per property: distinct tags when each loop updates one
        # property at a time, a shared tag otherwise — presumably consumed by
        # the corruption/masking pipeline; confirm against the data loader.
        if self.one_property_per_loop is True:
            self.node_property_ints = {'node_type': 1, 'hydrogens': 2, 'charge': 3, 'is_in_ring': 4, 'is_aromatic': 5,
                                       'chirality': 6}
            self.edge_property_ints = {'edge_type': 7}
        else:
            self.node_property_ints = {'node_type': 1, 'hydrogens': 1, 'charge': 1, 'is_in_ring': 1, 'is_aromatic': 1,
                                       'chirality': 1}
            self.edge_property_ints = {'edge_type': 2}
        self.cond_property_values = {k: float(v) for k, v in cond_property_values.items()}
    def generate(self, number_samples):
        """Resume (if checkpoints exist) and run the remaining refinement
        iterations, returning the final list of SMILES strings."""
        load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)
        all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
            self.get_all_init_variables(load_path, number_samples)
        if self.set_seed_at_load_iter is True:
            set_seed_if(load_iters)
        retrieve_train_graphs = self.retrieve_train_graphs
        for j in range(load_iters, self.num_iters):
            if j > 0:
                # train graphs are only retrieved on the very first iteration
                retrieve_train_graphs = False
                if self.generation_algorithm == 'Gibbs':
                    self.train_data.do_not_corrupt = True
            loader = self.get_dataloader(all_init_node_properties, all_node_masks, all_init_edge_properties,
                                         number_samples, retrieve_train_graphs)
            # sampling passes first, then deterministic argmax passes
            use_argmax = (j >= self.num_sampling_iters)
            all_init_node_properties, all_init_edge_properties, all_node_masks, \
                smiles_list = self.carry_out_iteration(loader, use_argmax)
        return smiles_list
    def generate_with_evaluation(self, num_samples_to_generate, smiles_dataset_path, output_dir,
                                 num_samples_to_evaluate, evaluate_connected_only=False):
        """Same loop as generate(), with periodic checkpointing, SMILES dumps
        and guacamol evaluation written under *output_dir*."""
        load_path, load_iters = get_load_path(self.num_sampling_iters, self.num_argmax_iters, self.cp_save_dir)
        all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
            self.get_all_init_variables(load_path, num_samples_to_generate)
        if self.save_init is True and self.random_init is True and load_iters == 0:
            # Save smiles representations of initialised molecules
            smiles_list = []
            num_nodes = all_node_masks.sum(-1)
            for i in range(len(all_init_node_properties['node_type'])):
                mol = graph_to_mol({k: v[i][:int(num_nodes[i])].astype(int) \
                                   for k, v in all_init_node_properties.items()},
                                   {k: v[i][:int(num_nodes[i]), :int(num_nodes[i])].astype(int) \
                                   for k, v in all_init_edge_properties.items()},
                                   min_charge=self.train_data.min_charge, symbol_list=self.symbol_list)
                smiles_list.append(Chem.MolToSmiles(mol))
            save_smiles_list(smiles_list, os.path.join(output_dir, 'smiles_0_0.txt'))
            del smiles_list, mol, num_nodes
        if self.set_seed_at_load_iter is True:
            set_seed_if(load_iters)
        retrieve_train_graphs = self.retrieve_train_graphs
        for j in tqdm(range(load_iters, self.num_iters)):
            if j > 0:
                retrieve_train_graphs = False
                if self.generation_algorithm == 'Gibbs':
                    self.train_data.do_not_corrupt = True
            loader = self.get_dataloader(all_init_node_properties, all_node_masks, all_init_edge_properties,
                                         num_samples_to_generate, retrieve_train_graphs)
            use_argmax = (j >= self.num_sampling_iters)
            all_init_node_properties, all_init_edge_properties, all_node_masks,\
                smiles_list = self.carry_out_iteration(loader, use_argmax)
            sampling_iters_completed = min(j + 1, self.num_sampling_iters)
            argmax_iters_completed = max(0, j + 1 - self.num_sampling_iters)
            # periodic checkpoint / SMILES dump / evaluation, counted from the
            # resumed iteration; finegrained flags force the first 10 iterations
            if (j + 1 - load_iters) % self.checkpointing_period == 0:
                self.save_checkpoints(all_init_node_properties, all_init_edge_properties,
                                      sampling_iters_completed, argmax_iters_completed)
            if (j + 1 - load_iters) % self.save_period == 0 or (self.save_finegrained is True and (j + 1) <= 10):
                smiles_output_path = os.path.join(output_dir, 'smiles_{}_{}.txt'.format(
                    sampling_iters_completed, argmax_iters_completed))
                save_smiles_list(smiles_list, smiles_output_path)
            if (j + 1 - load_iters) % self.evaluation_period == 0 or \
                    (self.evaluate_finegrained is True and (j + 1) <= 10):
                json_output_path = os.path.join(output_dir, 'distribution_results_{}_{}.json'.format(
                    sampling_iters_completed, argmax_iters_completed))
                evaluate_uncond_generation(MockGenerator(smiles_list, num_samples_to_generate),
                                           smiles_dataset_path, json_output_path, num_samples_to_evaluate,
                                           evaluate_connected_only)
                if self.cond_property_values:
                    cond_json_output_path = os.path.join(output_dir, 'cond_results_{}_{}.json'.format(
                        sampling_iters_completed, argmax_iters_completed))
                    self.evaluate_cond_generation(smiles_list[:num_samples_to_evaluate], cond_json_output_path)
    def carry_out_iteration(self, loader, use_argmax):
        """Run one full refinement pass over *loader*, returning updated node
        properties, edge properties, node masks and the resulting SMILES."""
        mols, smiles_list = [], []
        all_final_node_properties = {name: [] for name in self.train_data.node_property_names}
        all_final_edge_properties = {name: [] for name in self.train_data.edge_property_names}
        all_final_node_masks = []
        print('Generator length: {}'.format(len(loader)), flush=True)
        for batch_init_graph, _, batch_target_type_graph, batch_target_type_transpose_graph, \
                graph_properties, binary_graph_properties in tqdm(loader):
            if self.local_cpu is False:
                batch_init_graph = batch_init_graph.to(torch.device('cuda'))
                dct_to_cuda_inplace(graph_properties)
                if binary_graph_properties: binary_graph_properties = binary_graph_properties.cuda()
            batch_init_graph = self.sample_simultaneously(batch_init_graph,
                                batch_target_type_graph, batch_target_type_transpose_graph,
                                graph_properties, binary_graph_properties, use_argmax)
            # move back to CPU before numpy conversion below
            batch_init_graph = batch_init_graph.to(torch.device('cpu'))
            self.append_and_convert_graphs(batch_init_graph, all_final_node_properties, all_final_edge_properties,
                                           all_final_node_masks, mols, smiles_list)
        return all_final_node_properties, all_final_edge_properties, all_final_node_masks, smiles_list
    def get_all_init_variables(self, load_path, number_samples):
        """Load checkpointed graphs from *load_path*, or initialise fresh
        masked/random graphs when no checkpoint is available."""
        if load_path is not None:
            with open(load_path, 'rb') as f:
                load_info = pickle.load(f)
            all_init_node_properties, all_init_edge_properties = load_info
            # reconstruct masks: a position is real iff it is not the padding index
            all_node_masks = [(node_type != self.train_data.node_properties['node_type']['empty_index']) \
                              for node_type in all_init_node_properties['node_type']]
            all_edge_masks = [(edge_type != self.train_data.edge_properties['edge_type']['empty_index']) \
                              for edge_type in all_init_edge_properties['edge_type']]
        else:
            lengths = self.sample_lengths(number_samples)
            all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks = \
                self.get_masked_variables(lengths, number_samples, pad=False)
        return all_init_node_properties, all_init_edge_properties, all_node_masks, all_edge_masks
    def get_dataloader(self, all_init_node_properties, all_node_masks, all_init_edge_properties, number_samples,
                       retrieve_train_graphs):
        """Build a DataLoader over the current graphs (unless retrieving train
        graphs), applying any conditional property overrides."""
        gen_dataset = GenDataset(self.train_data, number_samples)
        if retrieve_train_graphs is False:
            # overwrite the dataset's stored graphs with the current iterates,
            # trimmed to each graph's true node count
            for name, node_property in all_init_node_properties.items():
                data = []
                for i, single_data_property in enumerate(node_property):
                    if name == 'charge': single_data_property -= abs(self.train_data.min_charge)
                    data.append(single_data_property[:int(all_node_masks[i].sum())])
                gen_dataset.dataset.node_properties[name]['data'] = data
            for name, edge_property in all_init_edge_properties.items():
                data = []
                for i, single_data_property in enumerate(edge_property):
                    data.append(single_data_property[:int(all_node_masks[i].sum()), :int(all_node_masks[i].sum())])
                gen_dataset.dataset.edge_properties[name]['data'] = data
        # force conditional targets: every graph gets the requested value
        for name, value in self.cond_property_values.items():
            gen_dataset.dataset.graph_properties[name] = np.ones_like(gen_dataset.dataset.graph_properties[name]) \
                                                         * value
        if self.edges_per_batch > 0:
            batch_sampler = SizeSampler(gen_dataset, self.edges_per_batch)
            batch_sampler.batches.reverse()
            loader = DataLoader(gen_dataset, batch_sampler=batch_sampler, collate_fn=graph_collate_fn)
        else:
            loader = DataLoader(gen_dataset, batch_size=self.batch_size, collate_fn=graph_collate_fn)
        return loader
    def sample_simultaneously(self, batch_init_graph, batch_target_type_graph, batch_target_type_transpose_graph,
                              graph_properties=None, binary_graph_properties=None, use_argmax=False):
        """Predict all masked components in one model call and write the
        predictions back into the graph at the targeted positions only.

        NOTE(review): graph_property_scores is computed but unused here.
        """
        batch_preds_graph = copy_graph_remove_data(batch_init_graph)
        with torch.no_grad():
            _, batch_scores_graph, graph_property_scores = self.model_forward(batch_init_graph, graph_properties,
                                                                              binary_graph_properties)
        # This breaks symmetry for edge data
        batch_preds_graph = self.predict_from_scores(batch_scores_graph, batch_preds_graph, use_argmax)
        for name, target_type in batch_target_type_graph.ndata.items():
            batch_init_graph.ndata[name][target_type.numpy() != 0] = \
                batch_preds_graph.ndata[name][target_type.numpy() != 0]
        for name, target_type in batch_target_type_graph.edata.items():
            # write both edge directions so the graph stays symmetric
            batch_init_graph.edata[name][target_type.numpy() != 0] = \
                batch_preds_graph.edata[name][target_type.numpy() != 0]
            batch_init_graph.edata[name][batch_target_type_transpose_graph.edata[name].numpy() != 0] = \
                batch_preds_graph.edata[name][batch_target_type_transpose_graph.edata[name].numpy() != 0]
        return batch_init_graph
    def model_forward_mgm(self, batch_init_graph, graph_properties=None, binary_graph_properties=None):
        """Thin wrapper so self.model_forward can be swapped out."""
        return self.model(batch_init_graph, graph_properties, binary_graph_properties)
    def predict_from_scores(self, batch_scores_graph, batch_preds_graph, use_argmax=False):
        """Turn per-property score tensors into concrete category predictions,
        either greedily (argmax) or by (optionally top-k filtered) sampling."""
        for property_name, scores in batch_scores_graph.ndata.items():
            if use_argmax is True:
                batch_preds_graph.ndata[property_name] = torch.argmax(F.softmax(scores, -1), dim=-1)
            else:
                if self.top_k > 0:
                    scores = filter_top_k(scores, self.top_k)
                batch_preds_graph.ndata[property_name] = torch.distributions.Categorical(F.softmax(scores, -1)).sample()
        for property_name, scores in batch_scores_graph.edata.items():
            if use_argmax is True:
                batch_preds_graph.edata[property_name] = torch.argmax(F.softmax(scores, -1), dim=-1)
            else:
                if self.top_k > 0:
                    scores = filter_top_k(scores, self.top_k)
                batch_preds_graph.edata[property_name] = torch.distributions.Categorical(F.softmax(scores, -1)).sample()
        return batch_preds_graph
    def append_and_convert_graphs(self, batch_init_graph,
                                  all_final_node_properties, all_final_edge_properties, all_final_node_masks,
                                  mols, smiles_list):
        """Split a batched graph back into individual graphs (as dense numpy
        adjacency data), appending molecules and SMILES as they are built."""
        all_final_len = len(all_final_node_properties[list(all_final_node_properties.keys())[0]])
        node_start, edge_start = 0, 0
        for i, num_nodes in enumerate(batch_init_graph.batch_num_nodes()):
            num_edges = batch_init_graph.batch_num_edges()[i]
            for name, property in batch_init_graph.ndata.items():
                all_final_node_properties[name].append(property[node_start:node_start+num_nodes].numpy())
            all_final_node_masks.append(np.ones(num_nodes)) # redundant but needs to remain until node masks removed
                                                            # from generation
            for name, property in batch_init_graph.edata.items():
                # scatter the sparse edge list into a dense num_nodes x num_nodes matrix
                single_datapoint_fc_data = np.zeros((num_nodes, num_nodes))
                single_datapoint_fc_data[
                    batch_init_graph.edges()[0][edge_start:edge_start+num_edges].numpy() - node_start,
                    batch_init_graph.edges()[1][edge_start:edge_start+num_edges].numpy() - node_start] = \
                    property[edge_start:edge_start+num_edges].numpy()
                # force symmetry, which is broken earlier by sampling edge predictions
                single_datapoint_fc_data = np.triu(single_datapoint_fc_data) + np.tril(single_datapoint_fc_data.T, -1)
                all_final_edge_properties[name].append(single_datapoint_fc_data)
            node_start += num_nodes
            edge_start += num_edges
            mol = graph_to_mol({k: v[all_final_len+i] for k, v in all_final_node_properties.items()},
                               {k: v[all_final_len+i] for k, v in all_final_edge_properties.items()},
                               min_charge=self.train_data.min_charge, symbol_list=self.symbol_list)
            mols.append(mol)
            smiles_list.append(Chem.MolToSmiles(mol))
    def save_checkpoints(self, all_final_node_properties, all_final_edge_properties, num_sampling_iters,
                         num_argmax_iters):
        """Pickle the current graph state as gen_checkpoint_<s>_<a>.p (no-op
        when no checkpoint directory is configured)."""
        if self.cp_save_dir is not None:
            save_path = os.path.join(self.cp_save_dir, 'gen_checkpoint_{}_{}.p'.format(
                num_sampling_iters, num_argmax_iters))
            with open(save_path, 'wb') as f:
                pickle.dump([all_final_node_properties, all_final_edge_properties], f)
    def calculate_length_dist(self):
        """Compute the empirical distribution of graph sizes in the training
        data, stored as self.length_dist: {num_nodes: probability}."""
        lengths_dict = {}
        for node_type in self.train_data.node_properties['node_type']['data']:
            length = len(node_type)
            if length not in lengths_dict:
                lengths_dict[length] = 1
            else:
                lengths_dict[length] += 1
        # Normalise
        for key in lengths_dict:
            lengths_dict[key] /= len(self.train_data)
        self.length_dist = lengths_dict
    def get_special_inds(self):
        """Cache the maximum node count and the implied undirected edge count."""
        self.max_nodes = self.train_data.max_nodes
        self.max_edges = int(self.max_nodes * (self.max_nodes-1)/2)
    def sample_lengths(self, number_samples=1):
        """Draw graph sizes from the empirical training-length distribution."""
        lengths = np.array(list(self.length_dist.keys()))
        probs = np.array(list(self.length_dist.values()))
        samples = np.random.choice(lengths, number_samples, p=probs)
        return samples
    def get_masked_variables(self, lengths, number_samples, pad=True):
        """Build initial graphs of the given sizes: random categories when
        random_init is set, otherwise all-masked; padded to max_nodes when
        *pad* is True, ragged lists otherwise."""
        if pad is True:
            all_init_node_properties, all_init_edge_properties = {}, {}
            for name, property_info in self.train_data.node_properties.items():
                all_init_node_properties[name] = np.ones((number_samples, self.max_nodes)) * \
                                                 property_info['empty_index']
            for name, property_info in self.train_data.edge_properties.items():
                all_init_edge_properties[name] = np.ones((number_samples, self.max_nodes, self.max_nodes)) * \
                                                 property_info['empty_index']
            node_mask = np.zeros((number_samples, self.max_nodes))
            edge_mask = np.zeros((number_samples, self.max_nodes, self.max_nodes))
        else:
            all_init_node_properties = {name: [] for name in self.train_data.node_property_names}
            all_init_edge_properties = {name: [] for name in self.train_data.edge_property_names}
            node_mask, edge_mask = [], []
        for sample_num, length in enumerate(lengths):
            if pad is False:
                for name, property_info in self.train_data.node_properties.items():
                    all_init_node_properties[name].append(np.ones(length) * property_info['empty_index'])
                for name, property_info in self.train_data.edge_properties.items():
                    all_init_edge_properties[name].append(np.ones((length, length)) * property_info['empty_index'])
                node_mask.append(np.zeros(length))
                edge_mask.append(np.zeros((length, length)))
            if self.random_init:
                for name, property_info in self.train_data.node_properties.items():
                    # uniform or training-frequency-weighted random categories
                    if self.sample_uniformly is True:
                        samples = np.random.randint(0, property_info['num_categories'], size=length)
                    else:
                        samples = torch.distributions.Categorical(1/self.train_data.node_property_weights[name]).sample(
                            [length]).numpy()
                    all_init_node_properties[name][sample_num][:length] = samples
                for name, property_info in self.train_data.edge_properties.items():
                    if self.sample_uniformly is True:
                        samples = np.random.randint(0, property_info['num_categories'],
                                                    size=int(length * (length - 1) / 2))
                    else:
                        samples = torch.distributions.Categorical(1/self.train_data.edge_property_weights[name]).sample(
                            [int(length * (length - 1) / 2)]).numpy()
                    # fill the strict upper triangle and mirror it for symmetry
                    rand_edges = deque(samples)
                    for i in range(length):
                        all_init_edge_properties[name][sample_num][i, i] = 0
                        for j in range(i, length):
                            if i != j:
                                all_init_edge_properties[name][sample_num][i, j] = \
                                    all_init_edge_properties[name][sample_num][j, i] = rand_edges.pop()
            else:
                for name, init_node_property in all_init_node_properties.items():
                    init_node_property[sample_num][:length] = self.train_data.node_properties[name]['mask_index']
                for name, init_edge_property in all_init_edge_properties.items():
                    init_edge_property[sample_num][:length, :length] = \
                        self.train_data.edge_properties[name]['mask_index']
            node_mask[sample_num][:length] = 1
            edge_mask[sample_num][:length, :length] = 1
        return all_init_node_properties, all_init_edge_properties, node_mask, edge_mask
    def evaluate_cond_generation(self, smiles_list, json_output_path):
        """Compute mean/median/std of each conditioned property over the valid
        molecules in *smiles_list* and dump the stats to JSON."""
        valid_mols = []
        for s in smiles_list:
            mol = Chem.MolFromSmiles(s)
            if mol is not None: valid_mols.append(mol)
        graph_properties = calculate_graph_properties(valid_mols, self.cond_property_values.keys())
        graph_property_stats = {name: {'mean': np.mean(graph_property), 'median': np.median(graph_property),
                                       'std': np.std(graph_property)}
                                for name, graph_property in graph_properties.items()}
        with open(json_output_path, 'w') as f:
            json.dump(graph_property_stats, f)
def get_load_path(num_sampling_iters, num_argmax_iters, cp_save_dir):
    """
    Locate the most advanced usable generation checkpoint in *cp_save_dir*.

    Checkpoint files are named ``gen_checkpoint_<sampling>_<argmax>.p``.
    Returns a (path, completed_iterations) pair, or (None, 0) when no
    usable checkpoint exists.
    """
    # Map each sampling-iteration count found on disk to its argmax counts.
    found = {}
    for fname in os.listdir(cp_save_dir):
        if 'gen_checkpoint' not in fname: continue
        parts = os.path.splitext(fname)[0].split('_')
        s_iters, a_iters = int(parts[2]), int(parts[3])
        found.setdefault(s_iters, []).append(a_iters)
    if not found:
        return None, 0
    # Never resume beyond the number of sampling iterations requested.
    sampling_to_load = min(max(found), num_sampling_iters)
    if sampling_to_load == num_sampling_iters and sampling_to_load in found:
        argmax_to_load = min(max(found[sampling_to_load]), num_argmax_iters)
    else:
        argmax_to_load = 0
    if sampling_to_load == 0 and argmax_to_load == 0:
        return None, 0
    load_path = os.path.join(cp_save_dir,
                              'gen_checkpoint_{}_{}.p'.format(sampling_to_load, argmax_to_load))
    return load_path, sampling_to_load + argmax_to_load
def get_shuffled_array(arrays, length=None):
"""
:arg
arrays: list of generation_arrays
length: length of an output generation array with padding
:returns
shuffled_arrays: padded matrix of shape (number of generation arrays, length)
"""
if type(arrays[0][0]) == tuple:
shuffled_arrays = np.ones((len(arrays), length), dtype=(int, 2)) * -1
else:
shuffled_arrays = np.ones((len(arrays), length)) * -1
for i, array in enumerate(arrays):
array = np.random.permutation(array)
shuffled_arrays[i, :len(array)] = array
return shuffled_arrays
def save_smiles_list(smiles_list, smiles_output_path):
    """Write one SMILES string per line to *smiles_output_path* (overwriting)."""
    lines = [smiles + '\n' for smiles in smiles_list]
    with open(smiles_output_path, 'w') as f:
        f.writelines(lines)
def evaluate_uncond_generation(mock_generator, smiles_dataset_path,
                               json_output_path, num_samples_to_evaluate, evaluate_connected_only=False):
    """Run the guacamol v1 distribution-learning benchmarks on the generator's
    SMILES pool and write the results to *json_output_path*.

    When *evaluate_connected_only* is True, disconnected molecules (SMILES
    containing '.') are dropped from the pool before evaluation.
    """
    # function-local import keeps guacamol optional until evaluation is requested
    from guacamol.assess_distribution_learning import _assess_distribution_learning
    if evaluate_connected_only is True:
        mock_generator.smiles_list = [s for s in mock_generator.smiles_list if '.' not in s]
    _assess_distribution_learning(mock_generator, smiles_dataset_path, json_output_file=json_output_path,
                                  benchmark_version='v1', number_samples=num_samples_to_evaluate) | [
"torch.distributions.Categorical",
"data.gen_targets.get_symbol_list",
"torch.nn.functional.softmax",
"numpy.mean",
"os.listdir",
"tensorflow.__version__.split",
"collections.deque",
"src.utils.set_seed_if",
"tensorflow.Session",
"rdkit.Chem.MolToSmiles",
"src.utils.filter_top_k",
"tensorflow.... | [((639, 655), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (653, 655), True, 'import tensorflow as tf\n'), ((710, 735), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (720, 735), True, 'import tensorflow as tf\n'), ((24563, 24586), 'os.listdir', 'os.listdir', (['cp_save_dir'], {}), '(cp_save_dir)\n', (24573, 24586), False, 'import os, pickle, json\n'), ((26893, 27066), 'guacamol.assess_distribution_learning._assess_distribution_learning', '_assess_distribution_learning', (['mock_generator', 'smiles_dataset_path'], {'json_output_file': 'json_output_path', 'benchmark_version': '"""v1"""', 'number_samples': 'num_samples_to_evaluate'}), "(mock_generator, smiles_dataset_path,\n json_output_file=json_output_path, benchmark_version='v1',\n number_samples=num_samples_to_evaluate)\n", (26922, 27066), False, 'from guacamol.assess_distribution_learning import _assess_distribution_learning\n'), ((3587, 3605), 'src.utils.get_index_method', 'get_index_method', ([], {}), '()\n', (3603, 3605), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((10280, 10292), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (10284, 10292), False, 'from tqdm import tqdm\n'), ((14181, 14221), 'src.utils.copy_graph_remove_data', 'copy_graph_remove_data', (['batch_init_graph'], {}), '(batch_init_graph)\n', (14203, 14221), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((19856, 19906), 'numpy.random.choice', 'np.random.choice', (['lengths', 'number_samples'], {'p': 'probs'}), '(lengths, number_samples, p=probs)\n', (19872, 19906), True, 'import numpy as np\n'), ((26221, 26249), 'numpy.random.permutation', 'np.random.permutation', (['array'], {}), '(array)\n', (26242, 26249), True, 'import numpy 
as np\n'), ((590, 615), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (610, 615), True, 'import tensorflow as tf\n'), ((3140, 3167), 'data.gen_targets.get_symbol_list', 'get_symbol_list', (['graph_type'], {}), '(graph_type)\n', (3155, 3167), False, 'from data.gen_targets import get_symbol_list\n'), ((5035, 5058), 'src.utils.set_seed_if', 'set_seed_if', (['load_iters'], {}), '(load_iters)\n', (5046, 5058), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((7251, 7274), 'src.utils.set_seed_if', 'set_seed_if', (['load_iters'], {}), '(load_iters)\n', (7262, 7274), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((13604, 13650), 'src.data.loader.SizeSampler', 'SizeSampler', (['gen_dataset', 'self.edges_per_batch'], {}), '(gen_dataset, self.edges_per_batch)\n', (13615, 13650), False, 'from src.data.loader import SizeSampler, graph_collate_fn\n'), ((13716, 13802), 'torch.utils.data.DataLoader', 'DataLoader', (['gen_dataset'], {'batch_sampler': 'batch_sampler', 'collate_fn': 'graph_collate_fn'}), '(gen_dataset, batch_sampler=batch_sampler, collate_fn=\n graph_collate_fn)\n', (13726, 13802), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((13833, 13918), 'torch.utils.data.DataLoader', 'DataLoader', (['gen_dataset'], {'batch_size': 'self.batch_size', 'collate_fn': 'graph_collate_fn'}), '(gen_dataset, batch_size=self.batch_size, collate_fn=graph_collate_fn\n )\n', (13843, 13918), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((14235, 14250), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14248, 14250), False, 'import torch\n'), ((20644, 20686), 'numpy.zeros', 'np.zeros', (['(number_samples, self.max_nodes)'], {}), '((number_samples, self.max_nodes))\n', (20652, 20686), 
True, 'import numpy as np\n'), ((20711, 20769), 'numpy.zeros', 'np.zeros', (['(number_samples, self.max_nodes, self.max_nodes)'], {}), '((number_samples, self.max_nodes, self.max_nodes))\n', (20719, 20769), True, 'import numpy as np\n'), ((23916, 23937), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['s'], {}), '(s)\n', (23934, 23937), False, 'from rdkit import Chem\n'), ((24418, 24452), 'json.dump', 'json.dump', (['graph_property_stats', 'f'], {}), '(graph_property_stats, f)\n', (24427, 24452), False, 'import os, pickle, json\n'), ((7103, 7145), 'os.path.join', 'os.path.join', (['output_dir', '"""smiles_0_0.txt"""'], {}), "(output_dir, 'smiles_0_0.txt')\n", (7115, 7145), False, 'import os, pickle, json\n'), ((10427, 10464), 'src.utils.dct_to_cuda_inplace', 'dct_to_cuda_inplace', (['graph_properties'], {}), '(graph_properties)\n', (10446, 10464), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((10906, 10925), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10918, 10925), False, 'import torch\n'), ((11412, 11426), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11423, 11426), False, 'import os, pickle, json\n'), ((13414, 13470), 'numpy.ones_like', 'np.ones_like', (['gen_dataset.dataset.graph_properties[name]'], {}), '(gen_dataset.dataset.graph_properties[name])\n', (13426, 13470), True, 'import numpy as np\n'), ((17198, 17216), 'numpy.ones', 'np.ones', (['num_nodes'], {}), '(num_nodes)\n', (17205, 17216), True, 'import numpy as np\n'), ((17462, 17494), 'numpy.zeros', 'np.zeros', (['(num_nodes, num_nodes)'], {}), '((num_nodes, num_nodes))\n', (17470, 17494), True, 'import numpy as np\n'), ((18541, 18562), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (18557, 18562), False, 'from rdkit import Chem\n'), ((18980, 19050), 'pickle.dump', 'pickle.dump', (['[all_final_node_properties, 
all_final_edge_properties]', 'f'], {}), '([all_final_node_properties, all_final_edge_properties], f)\n', (18991, 19050), False, 'import os, pickle, json\n'), ((24141, 24164), 'numpy.mean', 'np.mean', (['graph_property'], {}), '(graph_property)\n', (24148, 24164), True, 'import numpy as np\n'), ((24176, 24201), 'numpy.median', 'np.median', (['graph_property'], {}), '(graph_property)\n', (24185, 24201), True, 'import numpy as np\n'), ((24249, 24271), 'numpy.std', 'np.std', (['graph_property'], {}), '(graph_property)\n', (24255, 24271), True, 'import numpy as np\n'), ((7038, 7059), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (7054, 7059), False, 'from rdkit import Chem\n'), ((10389, 10409), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (10401, 10409), False, 'import torch\n'), ((15742, 15763), 'torch.nn.functional.softmax', 'F.softmax', (['scores', '(-1)'], {}), '(scores, -1)\n', (15751, 15763), True, 'import torch.nn.functional as F\n'), ((15855, 15887), 'src.utils.filter_top_k', 'filter_top_k', (['scores', 'self.top_k'], {}), '(scores, self.top_k)\n', (15867, 15887), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((16185, 16206), 'torch.nn.functional.softmax', 'F.softmax', (['scores', '(-1)'], {}), '(scores, -1)\n', (16194, 16206), True, 'import torch.nn.functional as F\n'), ((16298, 16330), 'src.utils.filter_top_k', 'filter_top_k', (['scores', 'self.top_k'], {}), '(scores, self.top_k)\n', (16310, 16330), False, 'from src.utils import set_seed_if, graph_to_mol, get_index_method, filter_top_k, calculate_graph_properties, dct_to_cuda_inplace, copy_graph_remove_data\n'), ((17947, 17980), 'numpy.triu', 'np.triu', (['single_datapoint_fc_data'], {}), '(single_datapoint_fc_data)\n', (17954, 17980), True, 'import numpy as np\n'), ((17983, 18022), 'numpy.tril', 'np.tril', (['single_datapoint_fc_data.T', 
'(-1)'], {}), '(single_datapoint_fc_data.T, -1)\n', (17990, 18022), True, 'import numpy as np\n'), ((20227, 20268), 'numpy.ones', 'np.ones', (['(number_samples, self.max_nodes)'], {}), '((number_samples, self.max_nodes))\n', (20234, 20268), True, 'import numpy as np\n'), ((20480, 20537), 'numpy.ones', 'np.ones', (['(number_samples, self.max_nodes, self.max_nodes)'], {}), '((number_samples, self.max_nodes, self.max_nodes))\n', (20487, 20537), True, 'import numpy as np\n'), ((21528, 21544), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (21536, 21544), True, 'import numpy as np\n'), ((21579, 21605), 'numpy.zeros', 'np.zeros', (['(length, length)'], {}), '((length, length))\n', (21587, 21605), True, 'import numpy as np\n'), ((22720, 22734), 'collections.deque', 'deque', (['samples'], {}), '(samples)\n', (22725, 22734), False, 'from collections import deque\n'), ((24661, 24684), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (24677, 24684), False, 'import os, pickle, json\n'), ((21812, 21878), 'numpy.random.randint', 'np.random.randint', (['(0)', "property_info['num_categories']"], {'size': 'length'}), "(0, property_info['num_categories'], size=length)\n", (21829, 21878), True, 'import numpy as np\n'), ((15977, 15998), 'torch.nn.functional.softmax', 'F.softmax', (['scores', '(-1)'], {}), '(scores, -1)\n', (15986, 15998), True, 'import torch.nn.functional as F\n'), ((16420, 16441), 'torch.nn.functional.softmax', 'F.softmax', (['scores', '(-1)'], {}), '(scores, -1)\n', (16429, 16441), True, 'import torch.nn.functional as F\n'), ((21247, 21262), 'numpy.ones', 'np.ones', (['length'], {}), '(length)\n', (21254, 21262), True, 'import numpy as np\n'), ((21437, 21462), 'numpy.ones', 'np.ones', (['(length, length)'], {}), '((length, length))\n', (21444, 21462), True, 'import numpy as np\n'), ((21939, 22024), 'torch.distributions.Categorical', 'torch.distributions.Categorical', (['(1 / self.train_data.node_property_weights[name])'], {}), '(1 / 
self.train_data.node_property_weights[name]\n )\n', (21970, 22024), False, 'import torch\n'), ((22530, 22615), 'torch.distributions.Categorical', 'torch.distributions.Categorical', (['(1 / self.train_data.edge_property_weights[name])'], {}), '(1 / self.train_data.edge_property_weights[name]\n )\n', (22561, 22615), False, 'import torch\n')] |
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import operator
import numpy as np
from ... import opcodes as OperandDef
from ...serialize import KeyField, StringField, AnyField, Int64Field, Int32Field
from ...config import options
from ...compat import enum
from ..operands import TensorOperand, TensorOperandMixin
from ..core import TENSOR_TYPE, TensorOrder
from ..datasource.array import tensor as astensor
from ..array_utils import as_same_device, device
class Stage(enum.Enum):
map = 'map'
combine = 'combine'
reduce = 'reduce'
class TensorSearchsorted(TensorOperand, TensorOperandMixin):
_op_type_ = OperandDef.SEARCHSORTED
_input = KeyField('input')
_values = AnyField('values')
_side = StringField('side')
_combine_size = Int32Field('combine_size')
_stage = StringField('stage', on_serialize=operator.attrgetter('value'),
on_deserialize=Stage)
# offset is used only for map stage
_offset = Int64Field('offset')
def __init__(self, values=None, side=None, dtype=None, gpu=None, combine_size=None,
stage=None, offset=None, **kw):
super(TensorSearchsorted, self).__init__(_values=values, _side=side, _dtype=dtype,
_gpu=gpu, _combine_size=combine_size,
_stage=stage, _offset=offset, **kw)
def _set_inputs(self, inputs):
super(TensorSearchsorted, self)._set_inputs(inputs)
self._input = self._inputs[0]
if len(self._inputs) == 2:
self._values = self._inputs[1]
@property
def input(self):
return self._input
@property
def values(self):
return self._values
@property
def side(self):
return self._side
@property
def offset(self):
return self._offset
@property
def combine_size(self):
return self._combine_size
@property
def stage(self):
return self._stage
def __call__(self, a, v):
inputs = [a]
if isinstance(v, TENSOR_TYPE):
inputs.append(v)
shape = v.shape
else:
shape = ()
return self.new_tensor(inputs, shape=shape, order=TensorOrder.C_ORDER)
@classmethod
def _tile_one_chunk(cls, op, a, v, out):
chunks = []
if len(op.inputs) == 1:
v_chunks = [v]
else:
v_chunks = v.chunks
for v_chunk in v_chunks:
chunk_op = op.copy().reset_key()
in_chunks = [a.chunks[0]]
if len(op.inputs) == 2:
in_chunks.append(v_chunk)
v_shape = v_chunk.shape if hasattr(v_chunk, 'shape') else ()
chunk_idx = v_chunk.index if len(op.inputs) == 2 else (0,)
chunk = chunk_op.new_chunk(in_chunks, shape=v_shape,
index=chunk_idx, order=out.order)
chunks.append(chunk)
new_op = op.copy().reset_key()
nsplits = ((s,) for s in out.shape) if len(op.inputs) == 1 else v.nsplits
return new_op.new_tensors(op.inputs, out.shape,
chunks=chunks, nsplits=nsplits)
@classmethod
def _combine_chunks(cls, to_combine, op, stage, v, idx):
from ..merge import TensorStack
v_shape = v.shape if hasattr(v, 'shape') else ()
combine_op = TensorStack(axis=0, dtype=op.outputs[0].dtype)
combine_chunk = combine_op.new_chunk(to_combine, shape=v_shape)
chunk_op = op.copy().reset_key()
chunk_op._stage = stage
in_chunks = [combine_chunk]
if len(op.inputs) == 2:
in_chunks.append(v)
return chunk_op.new_chunk(in_chunks, shape=v_shape, index=idx,
order=op.outputs[0].order)
@classmethod
def _tile_tree_reduction(cls, op, a, v, out):
combine_size = op.combine_size or options.combine_size
input_len = len(op.inputs)
v_chunks = [v] if input_len == 1 else v.chunks
out_chunks = []
for v_chunk in v_chunks:
offsets = [0] + np.cumsum(a.nsplits[0]).tolist()[:-1]
v_shape = v_chunk.shape if hasattr(v_chunk, 'shape') else ()
v_index = v_chunk.index if hasattr(v_chunk, 'index') else 0
chunks = []
for i, c in enumerate(a.chunks):
chunk_op = op.copy().reset_key()
chunk_op._stage = Stage.map
chunk_op._offset = offsets[i]
in_chunks = [c]
if input_len == 2:
in_chunks.append(v_chunk)
chunks.append(chunk_op.new_chunk(in_chunks, shape=v_shape,
index=c.index, order=out.order))
while len(chunks) > combine_size:
new_chunks = []
it = itertools.count(0)
while True:
j = next(it)
to_combine = chunks[j * combine_size: (j + 1) * combine_size]
if len(to_combine) == 0:
break
new_chunks.append(
cls._combine_chunks(to_combine, op, Stage.combine, v_chunk, (j,)))
chunks = new_chunks
chunk = cls._combine_chunks(chunks, op, Stage.reduce, v_chunk, v_index)
out_chunks.append(chunk)
new_op = op.copy().reset_key()
nsplits = ((s,) for s in out.shape) if len(op.inputs) == 1 else v.nsplits
return new_op.new_tensors(op.inputs, out.shape,
chunks=out_chunks, nsplits=nsplits)
@classmethod
def tile(cls, op):
a = op.inputs[0]
out = op.outputs[0]
input_len = len(op.inputs)
if input_len == 1:
v = op.values
else:
v = op.inputs[1]
if len(a.chunks) == 1:
return cls._tile_one_chunk(op, a, v, out)
return cls._tile_tree_reduction(op, a, v, out)
@classmethod
def _execute_without_stage(cls, xp, a, v, op):
return xp.searchsorted(a, v, side=op.side)
@classmethod
def _execute_map(cls, xp, a, v, op):
# in the map phase, calculate the indices and positions
# for instance, a=[1, 4, 6], v=5, return will be (2, 6)
indices = xp.atleast_1d(xp.searchsorted(a, v, side=op.side))
data_indices = indices.copy()
# if the value is larger than all data
# for instance, a=[1, 4, 6], v=7
# return will be (2, 6), not (3, 6), thus needs to subtract 1
data_indices = xp.subtract(data_indices, 1, out=data_indices,
where=data_indices >= len(a))
data = a[data_indices]
if op.offset > 0:
indices = xp.add(indices, op.offset, out=indices)
if np.isscalar(v):
indices, data = indices[0], data[0]
return indices, data
@classmethod
def _execute_combine(cls, xp, a, v, op):
inp_indices, inp_data = a
if np.isscalar(v):
ind = xp.searchsorted(inp_data, v, side=op.side)
if ind >= len(inp_data):
ind -= 1
return inp_indices[ind], inp_data[ind]
else:
ret_indices = np.empty(v.shape, dtype=int)
ret_data = np.empty(v.shape, dtype=inp_data.dtype)
for idx in itertools.product(*(range(s) for s in v.shape)):
ind = xp.searchsorted(inp_data[(slice(None),) + idx], v[idx], side=op.side)
if ind >= len(inp_indices):
ind -= 1
ret_indices[idx] = inp_indices[(ind,) + idx]
ret_data[idx] = inp_data[(ind,) + idx]
return ret_indices, ret_data
@classmethod
def _execute_reduce(cls, xp, a, v, op):
inp_indices, inp_data = a
if np.isscalar(v):
ind = xp.searchsorted(inp_data, v, side=op.side)
if ind >= len(inp_indices):
ind -= 1
return inp_indices[ind]
else:
indices = np.empty(v.shape, dtype=int)
for idx in itertools.product(*(range(s) for s in v.shape)):
ind = xp.searchsorted(inp_data[(slice(None),) + idx], v[idx], side=op.side)
if ind >= len(inp_indices):
ind -= 1
indices[idx] = inp_indices[(ind,) + idx]
return indices
@classmethod
def execute(cls, ctx, op):
a = ctx[op.inputs[0].key]
v = ctx[op.inputs[1].key] if len(op.inputs) == 2 else op.values
data = []
if isinstance(a, tuple):
data.extend(a)
else:
data.append(a)
if len(op.inputs) == 2:
data.append(v)
data, device_id, xp = as_same_device(
data, device=op.device, ret_extra=True)
if isinstance(a, tuple):
a = data[:2]
else:
a = data[0]
if len(op.inputs) == 2:
v = data[-1]
with device(device_id):
if op.stage is None:
ret = cls._execute_without_stage(xp, a, v, op)
elif op.stage == Stage.map:
ret = cls._execute_map(xp, a, v, op)
elif op.stage == Stage.combine:
ret = cls._execute_combine(xp, a, v, op)
else:
ret = cls._execute_reduce(xp, a, v, op)
ctx[op.outputs[0].key] = ret
def searchsorted(a, v, side='left', sorter=None, combine_size=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted tensor `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input tensor. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional tensor of integer indices that sort array a into ascending
order. They are typically the result of argsort.
combine_size: int, optional
The number of chunks to combine.
Returns
-------
indices : tensor of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of a tensor.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
This function is a faster version of the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> import mars.tensor as mt
>>> mt.searchsorted([1,2,3,4,5], 3).execute()
2
>>> mt.searchsorted([1,2,3,4,5], 3, side='right').execute()
3
>>> mt.searchsorted([1,2,3,4,5], [-10, 10, 2, 3]).execute()
array([0, 5, 1, 2])
"""
if not isinstance(a, TENSOR_TYPE) and sorter is not None and \
not isinstance(sorter, TENSOR_TYPE):
a = astensor(np.asarray(a)[sorter])
else:
a = astensor(a)
if sorter is not None:
a = a[sorter]
if a.ndim != 1:
raise ValueError('`a` should be 1-d tensor')
if a.issparse():
# does not support sparse tensor
raise ValueError('`a` should be a dense tensor')
if side not in {'left', 'right'}:
raise ValueError("'{0}' is an invalid value for keyword 'side'".format(side))
if not np.isscalar(v):
v = astensor(v)
op = TensorSearchsorted(values=v, side=side, dtype=np.dtype(np.int64),
combine_size=combine_size)
return op(a, v)
| [
"operator.attrgetter",
"numpy.isscalar",
"numpy.asarray",
"itertools.count",
"numpy.empty",
"numpy.cumsum",
"numpy.dtype"
] | [((7440, 7454), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (7451, 7454), True, 'import numpy as np\n'), ((7642, 7656), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (7653, 7656), True, 'import numpy as np\n'), ((8465, 8479), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (8476, 8479), True, 'import numpy as np\n'), ((12815, 12829), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (12826, 12829), True, 'import numpy as np\n'), ((1409, 1437), 'operator.attrgetter', 'operator.attrgetter', (['"""value"""'], {}), "('value')\n", (1428, 1437), False, 'import operator\n'), ((7872, 7900), 'numpy.empty', 'np.empty', (['v.shape'], {'dtype': 'int'}), '(v.shape, dtype=int)\n', (7880, 7900), True, 'import numpy as np\n'), ((7924, 7963), 'numpy.empty', 'np.empty', (['v.shape'], {'dtype': 'inp_data.dtype'}), '(v.shape, dtype=inp_data.dtype)\n', (7932, 7963), True, 'import numpy as np\n'), ((8679, 8707), 'numpy.empty', 'np.empty', (['v.shape'], {'dtype': 'int'}), '(v.shape, dtype=int)\n', (8687, 8707), True, 'import numpy as np\n'), ((12911, 12929), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (12919, 12929), True, 'import numpy as np\n'), ((5463, 5481), 'itertools.count', 'itertools.count', (['(0)'], {}), '(0)\n', (5478, 5481), False, 'import itertools\n'), ((12372, 12385), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (12382, 12385), True, 'import numpy as np\n'), ((4702, 4725), 'numpy.cumsum', 'np.cumsum', (['a.nsplits[0]'], {}), '(a.nsplits[0])\n', (4711, 4725), True, 'import numpy as np\n')] |
import numpy as np
import mixem
def logsumexp(X,axis=None,keepdims=1,log=1):
'''
log(
sum(
exp(X)
)
)
'''
xmax = np.max(X,axis=axis,keepdims=keepdims)
y = np.exp(X-xmax)
S = y.sum(axis=axis,keepdims=keepdims)
if log:
S = np.log(S) + xmax
else:
S = S*np.exp(xmax)
return S
def em(data, distributions, initial_weights=None, max_iterations=100, tol=1e-15, tol_iters=10, progress_callback=mixem.simple_progress):
"""Fit a mixture of probability distributions using the Expectation-Maximization (EM) algorithm.
:param data: The data to fit the distributions for. Can be an array-like or a :class:`numpy.ndarray`
:type data: numpy.ndarray
:param distributions: The list of distributions to fit to the data.
:type distributions: list of :class:`mixem.distribution.Distribution`
:param initial_weights: Inital weights for the distributions. Must be the same size as distributions. If None, will use uniform initial weights for all distributions.
:type initial_weights: numpy.ndarray
:param max_iterations: The maximum number of iterations to compute for.
:type max_iterations: int
:param tol: The minimum relative increase in log-likelihood after tol_iters iterations
:type tol: float
:param tol_iters: The number of iterations to go back in comparing log-likelihood change
:type tol_iters: int
:param progress_callback: A function to call to report progress after every iteration.
:type progress_callback: function or None
:rtype: tuple (weights, distributitons, log_likelihood)
"""
n_distr = len(distributions)
n_data = data.shape[0]
if initial_weights is not None:
weight = np.array(initial_weights)
else:
weight = np.ones((n_distr,))
last_ll = np.zeros((tol_iters, ))
resp = np.empty((n_data, n_distr))
log_density = np.empty((n_data, n_distr))
iteration = 0
while True:
# E-step #######
# compute responsibilities
for d in range(n_distr):
log_density[:, d] = distributions[d].log_density(data)
# normalize responsibilities of distributions so they sum up to one for example
logResp = np.log(weight[None,: ]) + log_density
logResp = logResp - logsumexp(logResp,axis=1)
resp = np.exp(logResp)
# resp = weight[np.newaxis, :] * np.exp(log_density)
# resp /= np.sum(resp, axis=1)[:, np.newaxis]
log_likelihood = np.sum( resp * log_density)
# M-step #######
for d in range(n_distr):
distributions[d].estimate_parameters(data, resp[:, d])
weight = np.mean(resp, axis=0)
if progress_callback:
progress_callback(iteration, weight, distributions, log_likelihood)
# Convergence check #######
if np.isnan(log_likelihood):
last_ll[0] = log_likelihood
break
if iteration >= tol_iters and (last_ll[-1] - log_likelihood) / last_ll[-1] <= tol:
last_ll[0] = log_likelihood
break
if iteration >= max_iterations:
last_ll[0] = log_likelihood
break
# store value of current iteration in last_ll[0]
# and shift older values to the right
last_ll[1:] = last_ll[:-1]
last_ll[0] = log_likelihood
iteration += 1
### Return full history for debugging
return weight, distributions, last_ll[::-1]
| [
"numpy.mean",
"numpy.ones",
"numpy.log",
"numpy.max",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"numpy.isnan"
] | [((168, 207), 'numpy.max', 'np.max', (['X'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(X, axis=axis, keepdims=keepdims)\n', (174, 207), True, 'import numpy as np\n'), ((214, 230), 'numpy.exp', 'np.exp', (['(X - xmax)'], {}), '(X - xmax)\n', (220, 230), True, 'import numpy as np\n'), ((1851, 1873), 'numpy.zeros', 'np.zeros', (['(tol_iters,)'], {}), '((tol_iters,))\n', (1859, 1873), True, 'import numpy as np\n'), ((1886, 1913), 'numpy.empty', 'np.empty', (['(n_data, n_distr)'], {}), '((n_data, n_distr))\n', (1894, 1913), True, 'import numpy as np\n'), ((1932, 1959), 'numpy.empty', 'np.empty', (['(n_data, n_distr)'], {}), '((n_data, n_distr))\n', (1940, 1959), True, 'import numpy as np\n'), ((1763, 1788), 'numpy.array', 'np.array', (['initial_weights'], {}), '(initial_weights)\n', (1771, 1788), True, 'import numpy as np\n'), ((1816, 1835), 'numpy.ones', 'np.ones', (['(n_distr,)'], {}), '((n_distr,))\n', (1823, 1835), True, 'import numpy as np\n'), ((2370, 2385), 'numpy.exp', 'np.exp', (['logResp'], {}), '(logResp)\n', (2376, 2385), True, 'import numpy as np\n'), ((2528, 2554), 'numpy.sum', 'np.sum', (['(resp * log_density)'], {}), '(resp * log_density)\n', (2534, 2554), True, 'import numpy as np\n'), ((2700, 2721), 'numpy.mean', 'np.mean', (['resp'], {'axis': '(0)'}), '(resp, axis=0)\n', (2707, 2721), True, 'import numpy as np\n'), ((2881, 2905), 'numpy.isnan', 'np.isnan', (['log_likelihood'], {}), '(log_likelihood)\n', (2889, 2905), True, 'import numpy as np\n'), ((297, 306), 'numpy.log', 'np.log', (['S'], {}), '(S)\n', (303, 306), True, 'import numpy as np\n'), ((339, 351), 'numpy.exp', 'np.exp', (['xmax'], {}), '(xmax)\n', (345, 351), True, 'import numpy as np\n'), ((2263, 2286), 'numpy.log', 'np.log', (['weight[None, :]'], {}), '(weight[None, :])\n', (2269, 2286), True, 'import numpy as np\n')] |
"""
==========
ISOMAP neighbours parameter CV pipeline
==========
Use a pipeline to find the best neighbourhood size parameter for ISOMAP.
Adapted from:
http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html#example-decomposition-plot-kernel-pca-py
http://scikit-learn.org/stable/auto_examples/grid_search_digits.html#example-grid-search-digits-py
"""
import numpy as np
import pickle
from optparse import OptionParser
from tables import *
from sklearn.manifold import Isomap
from sklearn.cluster import KMeans
from sklearn.metrics import v_measure_score, make_scorer, homogeneity_score
from extract_datasets import extract_labeled_chunkrange
from sklearn.preprocessing import scale
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
np.random.seed(0)
# parse commandline arguments
op = OptionParser()
op.add_option("--h5file",
dest="inputfile", help="Read data input from this hdf5 file.")
op.add_option("--size",
dest="size", type="int", help="Extract the first size chunks of the data set and labels.")
op.add_option("--sample-size",
dest="samplesize", type="int", help="The max size of the samples")
op.add_option("--output",
dest="outfile", help="Write the estimator model to this file.")
op.add_option("--num-jobs",
dest="jobs", type="int", help="Use these number of jobs in parallel for GridSearchCV")
(opts, args) = op.parse_args()
###############################################################################
# Load a training set from the given .h5 file
datafile = openFile(opts.inputfile, mode = "r", title = "Data is stored here")
# Extract some of the dataset from the datafile
X, labels = extract_labeled_chunkrange(datafile, opts.size)
# Sample from the dataset
wt_points = np.nonzero(labels[:,0] == 0)[0]
foci_points = np.nonzero(labels[:,0] == 1)[0]
ab_nuclei_points = np.nonzero(labels[:,0] == 2)[0]
wt_data = X[wt_points,5:]
foci_data = X[foci_points,5:]
ab_nuclei_data = X[ab_nuclei_points,5:]
wt_labels = labels[wt_points,0]
foci_labels = labels[foci_points,0]
ab_nuclei_labels = labels[ab_nuclei_points,0]
# Figure out the sample sizes based on the shape of the *_labels arrays and the
# sample size argument
wt_samplesize = min(opts.samplesize,wt_data.shape[0])
foci_samplesize = min(opts.samplesize,foci_data.shape[0])
ab_nuclei_samplesize = min(opts.samplesize, ab_nuclei_data.shape[0])
# Use np.random.permutation(array)[0:size,:] to sample u at random
# from the strata.
wt_data_sample = np.random.permutation(wt_data)[0:wt_samplesize,:]
foci_data_sample = np.random.permutation(foci_data)[0:foci_samplesize,:]
ab_nuclei_sample = np.random.permutation(ab_nuclei_data)[0:ab_nuclei_samplesize,:]
D = np.vstack((wt_data_sample,foci_data_sample,ab_nuclei_sample))
D_labels = np.hstack((wt_labels[0:wt_samplesize],foci_labels[0:foci_samplesize],ab_nuclei_labels[0:ab_nuclei_samplesize]))
D_scaled = scale(D)
datafile.close()
##################
# Range of parameters to consider for neighbours
neighbours = np.arange(5,50,5)
# Set up the method -> kmeans -> h-measure && LLE -> kmeans -> h-measure pipelines
isomap = Isomap(n_neighbors=5, n_components=30)
kmeans = KMeans(n_clusters=3)
# Make a scoring function for the pipeline
v_measure_scorer = make_scorer(v_measure_score)
homogeneity_scorer = make_scorer(homogeneity_score)
pipe = Pipeline(steps=[('isomap', isomap), ('kmeans', kmeans)])
# Set the model parameters to cycle over using '__' a prefix
estimator = GridSearchCV(pipe, dict(isomap__n_neighbors=neighbours), scoring=homogeneity_scorer, n_jobs=opts.jobs)
estimator.fit(D_scaled,D_labels)
# Dump the estimator to a file
f = file(opts.outfile, 'wb')
pickle.dump(estimator, f)
f.close()
# Report the best parameter values
print("Best estimator found on test data set:")
print()
print(estimator.best_estimator_)
print()
print("Best parameters fond on test data set:")
print()
print(estimator.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in estimator.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() / 2, params))
print()
| [
"sklearn.cluster.KMeans",
"pickle.dump",
"numpy.hstack",
"numpy.arange",
"sklearn.manifold.Isomap",
"optparse.OptionParser",
"extract_datasets.extract_labeled_chunkrange",
"sklearn.metrics.make_scorer",
"numpy.vstack",
"numpy.random.seed",
"numpy.nonzero",
"sklearn.pipeline.Pipeline",
"sklea... | [((807, 824), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (821, 824), True, 'import numpy as np\n'), ((861, 875), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (873, 875), False, 'from optparse import OptionParser\n'), ((1752, 1799), 'extract_datasets.extract_labeled_chunkrange', 'extract_labeled_chunkrange', (['datafile', 'opts.size'], {}), '(datafile, opts.size)\n', (1778, 1799), False, 'from extract_datasets import extract_labeled_chunkrange\n'), ((2783, 2846), 'numpy.vstack', 'np.vstack', (['(wt_data_sample, foci_data_sample, ab_nuclei_sample)'], {}), '((wt_data_sample, foci_data_sample, ab_nuclei_sample))\n', (2792, 2846), True, 'import numpy as np\n'), ((2856, 2973), 'numpy.hstack', 'np.hstack', (['(wt_labels[0:wt_samplesize], foci_labels[0:foci_samplesize],\n ab_nuclei_labels[0:ab_nuclei_samplesize])'], {}), '((wt_labels[0:wt_samplesize], foci_labels[0:foci_samplesize],\n ab_nuclei_labels[0:ab_nuclei_samplesize]))\n', (2865, 2973), True, 'import numpy as np\n'), ((2979, 2987), 'sklearn.preprocessing.scale', 'scale', (['D'], {}), '(D)\n', (2984, 2987), False, 'from sklearn.preprocessing import scale\n'), ((3090, 3109), 'numpy.arange', 'np.arange', (['(5)', '(50)', '(5)'], {}), '(5, 50, 5)\n', (3099, 3109), True, 'import numpy as np\n'), ((3201, 3239), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_neighbors': '(5)', 'n_components': '(30)'}), '(n_neighbors=5, n_components=30)\n', (3207, 3239), False, 'from sklearn.manifold import Isomap\n'), ((3249, 3269), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': '(3)'}), '(n_clusters=3)\n', (3255, 3269), False, 'from sklearn.cluster import KMeans\n'), ((3333, 3361), 'sklearn.metrics.make_scorer', 'make_scorer', (['v_measure_score'], {}), '(v_measure_score)\n', (3344, 3361), False, 'from sklearn.metrics import v_measure_score, make_scorer, homogeneity_score\n'), ((3383, 3413), 'sklearn.metrics.make_scorer', 'make_scorer', (['homogeneity_score'], {}), 
'(homogeneity_score)\n', (3394, 3413), False, 'from sklearn.metrics import v_measure_score, make_scorer, homogeneity_score\n'), ((3422, 3478), 'sklearn.pipeline.Pipeline', 'Pipeline', ([], {'steps': "[('isomap', isomap), ('kmeans', kmeans)]"}), "(steps=[('isomap', isomap), ('kmeans', kmeans)])\n", (3430, 3478), False, 'from sklearn.pipeline import Pipeline\n'), ((3750, 3775), 'pickle.dump', 'pickle.dump', (['estimator', 'f'], {}), '(estimator, f)\n', (3761, 3775), False, 'import pickle\n'), ((1839, 1868), 'numpy.nonzero', 'np.nonzero', (['(labels[:, 0] == 0)'], {}), '(labels[:, 0] == 0)\n', (1849, 1868), True, 'import numpy as np\n'), ((1885, 1914), 'numpy.nonzero', 'np.nonzero', (['(labels[:, 0] == 1)'], {}), '(labels[:, 0] == 1)\n', (1895, 1914), True, 'import numpy as np\n'), ((1936, 1965), 'numpy.nonzero', 'np.nonzero', (['(labels[:, 0] == 2)'], {}), '(labels[:, 0] == 2)\n', (1946, 1965), True, 'import numpy as np\n'), ((2572, 2602), 'numpy.random.permutation', 'np.random.permutation', (['wt_data'], {}), '(wt_data)\n', (2593, 2602), True, 'import numpy as np\n'), ((2641, 2673), 'numpy.random.permutation', 'np.random.permutation', (['foci_data'], {}), '(foci_data)\n', (2662, 2673), True, 'import numpy as np\n'), ((2714, 2751), 'numpy.random.permutation', 'np.random.permutation', (['ab_nuclei_data'], {}), '(ab_nuclei_data)\n', (2735, 2751), True, 'import numpy as np\n')] |
import numpy as np
a = np.arange(24).reshape(3, 2, 4) + 10
for val in a:
print('item:', val)
# N维枚举
for i, val in np.ndenumerate(a):
if sum(i) % 5 == 0:
print(i, val)
| [
"numpy.ndenumerate",
"numpy.arange"
] | [((120, 137), 'numpy.ndenumerate', 'np.ndenumerate', (['a'], {}), '(a)\n', (134, 137), True, 'import numpy as np\n'), ((24, 37), 'numpy.arange', 'np.arange', (['(24)'], {}), '(24)\n', (33, 37), True, 'import numpy as np\n')] |
"""MCTS module: where MuZero thinks inside the tree."""
import math
import random
import numpy as np
from xt.agent.muzero.default_config import PB_C_BASE, PB_C_INIT
from xt.agent.muzero.default_config import ROOT_DIRICHLET_ALPHA
from xt.agent.muzero.default_config import ROOT_EXPLORATION_FRACTION
from xt.agent.muzero.default_config import GAMMA
from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample
from xt.model.muzero.muzero_model import NetworkOutput
class Mcts(object):
"""MCTS operation."""
def __init__(self, agent, root_state):
self.network = agent.alg.actor
self.action_dim = agent.alg.action_dim
self.num_simulations = agent.num_simulations
self.min_max_stats = MinMaxStats(None)
self.discount = GAMMA
self.actions = range(self.action_dim)
self.pb_c_base = PB_C_BASE
self.pb_c_init = PB_C_INIT
self.root_dirichlet_alpha = ROOT_DIRICHLET_ALPHA
self.root_exploration_fraction = ROOT_EXPLORATION_FRACTION
self.root = Node(0)
root_state = root_state.reshape((1, ) + root_state.shape)
network_output = self.network.initial_inference(root_state)
self.init_node(self.root, network_output)
def init_node(self, node, network_output):
node.hidden_state = network_output.hidden_state
node.reward = network_output.reward
policy = [p for p in network_output.policy]
for action in self.actions:
node.children[action] = Node(policy[action])
def backpropagate(self, search_path, value):
"""Propagate the evaluation all the way up the tree to the root at the end of a simulation."""
for node in search_path[::-1]:
node.value_sum += value
node.visit_count += 1
self.min_max_stats.update(node.value())
value = node.reward + self.discount * value
def run_mcts(self):
"""
Run Core Monte Carlo Tree Search algorithm.
To decide on an action, we run N simulations, always starting at the root of
the search tree and traversing the tree according to the UCB formula until we
reach a leaf node.
"""
for _ in range(self.num_simulations):
node = self.root
search_path = [node]
history = []
while node.expanded():
action, node = self.select_child(node)
search_path.append(node)
history.append(action)
# Inside the search tree we use the dynamics function to obtain the next
# hidden state given an action and the previous hidden state.
parent = search_path[-2]
network_output = self.network.recurrent_inference(parent.hidden_state, history[-1])
self.init_node(node, network_output)
self.backpropagate(search_path, network_output.value)
def select_action(self, mode='softmax'):
"""
Select action.
After running simulations inside in MCTS, we select an action based on the root's children visit counts.
During training we use a softmax sample for exploration.
During evaluation we select the most visited child.
"""
node = self.root
visit_counts = [child.visit_count for child in node.children.values()]
actions = self.actions
action = None
if mode == 'softmax':
action = soft_max_sample(visit_counts, actions, 1)
elif mode == 'max':
action = np.argmax(visit_counts)
return action
def ucb_score(self, parent, child):
"""
Calculate UCB score.
The score for a node is based on its value, plus an exploration bonus based on the prior.
"""
pb_c = math.log((parent.visit_count + self.pb_c_base + 1) / self.pb_c_base) + self.pb_c_init
pb_c *= math.sqrt(parent.visit_count) / (child.visit_count + 1)
prior_score = pb_c * child.prior
if child.visit_count > 0:
value_score = self.min_max_stats.normalize(child.value())
else:
value_score = 0
return prior_score + value_score
def add_exploration_noise(self, node):
actions = self.actions
noise = np.random.dirichlet([self.root_dirichlet_alpha] * self.action_dim)
frac = self.root_exploration_fraction
for i, _noise in zip(actions, noise):
node.children[i].prior = node.children[i].prior * (1 - frac) + _noise * frac
def get_info(self):
"""Get train info from mcts tree."""
child_visits = [self.root.children[a].visit_count for a in self.actions]
sum_visits = sum(child_visits)
child_visits = [visits / sum_visits for visits in child_visits]
return {"child_visits": child_visits, "root_value": self.root.value()}
def select_child(self, node):
"""Select the child with the highest UCB score."""
_, action, child = max((self.ucb_score(node, child), action, child) for action, child in node.children.items())
return action, child
| [
"xt.agent.muzero.util.soft_max_sample",
"math.sqrt",
"numpy.argmax",
"math.log",
"numpy.random.dirichlet",
"xt.agent.muzero.util.MinMaxStats",
"xt.agent.muzero.util.Node"
] | [((733, 750), 'xt.agent.muzero.util.MinMaxStats', 'MinMaxStats', (['None'], {}), '(None)\n', (744, 750), False, 'from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample\n'), ((1042, 1049), 'xt.agent.muzero.util.Node', 'Node', (['(0)'], {}), '(0)\n', (1046, 1049), False, 'from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample\n'), ((4277, 4343), 'numpy.random.dirichlet', 'np.random.dirichlet', (['([self.root_dirichlet_alpha] * self.action_dim)'], {}), '([self.root_dirichlet_alpha] * self.action_dim)\n', (4296, 4343), True, 'import numpy as np\n'), ((1510, 1530), 'xt.agent.muzero.util.Node', 'Node', (['policy[action]'], {}), '(policy[action])\n', (1514, 1530), False, 'from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample\n'), ((3454, 3495), 'xt.agent.muzero.util.soft_max_sample', 'soft_max_sample', (['visit_counts', 'actions', '(1)'], {}), '(visit_counts, actions, 1)\n', (3469, 3495), False, 'from xt.agent.muzero.util import MinMaxStats, Node, soft_max_sample\n'), ((3799, 3867), 'math.log', 'math.log', (['((parent.visit_count + self.pb_c_base + 1) / self.pb_c_base)'], {}), '((parent.visit_count + self.pb_c_base + 1) / self.pb_c_base)\n', (3807, 3867), False, 'import math\n'), ((3901, 3930), 'math.sqrt', 'math.sqrt', (['parent.visit_count'], {}), '(parent.visit_count)\n', (3910, 3930), False, 'import math\n'), ((3545, 3568), 'numpy.argmax', 'np.argmax', (['visit_counts'], {}), '(visit_counts)\n', (3554, 3568), True, 'import numpy as np\n')] |
'''
AUTHORS:
NORSTRÖM, ARVID 19940206-3193,
HISELIUS, LEO 9402214192
'''
from collections import namedtuple
import numpy as np
import gym
import torch
import matplotlib.pyplot as plt
from tqdm import trange
from DDPG_agent import RandomAgent, Critic, Actor
from DDPG_agent import ExperienceReplayBuffer
import torch.optim as optim
import torch.nn as nn
class LunarLander():
env=gym.make('LunarLanderContinuous-v2')
env.reset()
#"cuda:0" if torch.cuda.is_available() else
ddev = "cpu"
dev = torch.device(ddev)
n_ep_running_average = 50
def __init__(self, gamma, L, N_episodes, N, net_dims):
# HYPERPARAMETERS
self.gamma = gamma
self.L = L
self.N_episodes = N_episodes
self.N = N
self.C = int(self.L/self.N_episodes)
self.C = 2
self.tau = 1e-3
# MISC
self.episode_reward_list = []
self.episode_number_of_steps = []
self.EPISODES = trange(self.N_episodes, desc='Episode: ', leave=True)
self.Experience = namedtuple('Experience',
['state', 'action', 'reward', 'next_state', 'done'])
self.buffert = self.init_buffert(ExperienceReplayBuffer(self.L),self.L)
self.best_reward_run_av = np.NINF
self.prev_noise = np.zeros((2,))
# FOR TRAINING
self.net_dims = net_dims
self.main_critic = Critic().to(self.ddev)
self.target_critic = Critic().to(self.ddev)
self.main_actor = Actor(self.net_dims).to(self.ddev)
self.target_actor = Actor(self.net_dims).to(self.ddev)
self.optimizer_critic = optim.Adam(self.main_critic.parameters(), lr=5e-4)
self.optimizer_actor = optim.Adam(self.main_actor.parameters(), lr=5e-5)
self.loss_critic = nn.MSELoss()
def running_average(self,x, N):
''' Function used to compute the running average
of the last N elements of a vector x
'''
if len(x) >= N:
y = np.copy(x)
y[N - 1:] = np.convolve(x, np.ones((N,)) / N, mode='valid')
else:
y = np.zeros_like(x)
return y
def UOnoise(self, mu = 0.15, sigma = 0.2):
return -mu*self.prev_noise+np.random.multivariate_normal([0,0],sigma**2*np.eye(2), size = 1)
def train(self, PATH = 'models/best.pt', SAVE=False):
for i in self.EPISODES:
# Reset enviroment data and initialize variables
done = False
state = self.env.reset()
total_episode_reward = 0.
t = 0
self.prev_noise = np.array([0,0])
while not done:
UOnoise = self.UOnoise()
with torch.no_grad():
action = self.main_actor(torch.tensor([state], device=self.dev,requires_grad = False, dtype = torch.float32))
self.prev_noise = UOnoise
# Get next state and reward. The done variable
# will be True if you reached the goal position,
# False otherwise
action = action.detach().numpy().reshape(2,)+UOnoise.reshape(2,)
action = action.clip(-1,1)
next_state, reward, done, x_ = self.env.step(action)
exp = self.Experience(state, action, reward, next_state, done)
self.buffert.append(exp)
states, actions, rewards, next_states, dones = self.buffert.sample_batch(self.N)
# Training process, set gradients to 0
self.optimizer_critic.zero_grad()
# Compute output of the network given the states batch
Q_pred_values = self.main_critic(torch.tensor(states, device=self.dev, requires_grad = True, dtype=torch.float32),
torch.tensor(actions, device=self.dev, requires_grad = False, dtype = torch.float32).detach()).view(self.N,1)
Q_target = self.target_critic(torch.tensor(next_states,device=self.dev, requires_grad = False, dtype=torch.float32).detach(),
torch.tensor(self.target_actor(torch.tensor(next_states,device=self.dev, requires_grad = False, dtype = torch.float32).detach()), requires_grad = False, dtype = torch.float32).detach()).max(1)[0]
#print(dones)
Q_target_values = (torch.tensor(rewards, device=self.dev,dtype=torch.float32)+self.gamma*Q_target*(1-torch.tensor(list(map(int,dones)),device=self.dev))).view(self.N,1)
# Compute loss function
self.main_critic.train(mode=True)
loss_critic = self.loss_critic(Q_pred_values,Q_target_values)
# Compute gradient
loss_critic.backward()
# Clip gradient norm to 1
nn.utils.clip_grad_norm_(self.main_critic.parameters(), max_norm=1.)
# Perform backward pass (backpropagation)
self.optimizer_critic.step()
if t%self.C == 0:
self.optimizer_actor.zero_grad()
self.main_actor.train(mode = True)
loss_actor = -torch.mean(self.main_critic(torch.tensor(states, device=self.dev,requires_grad = False, dtype = torch.float32).detach(),
self.main_actor(torch.tensor(states, device=self.dev,requires_grad = True, dtype = torch.float32))))
loss_actor.backward()
#print(loss_actor)
nn.utils.clip_grad_norm(self.main_actor.parameters(), max_norm=1.)
self.optimizer_actor.step()
tgt_state = self.target_actor.state_dict()
for k, v in self.main_actor.state_dict().items():
tgt_state[k] = (1 - self.tau)*tgt_state[k] + self.tau*v
self.target_actor.load_state_dict(tgt_state)
tgt_state = self.target_critic.state_dict()
for k,v in self.main_critic.state_dict().items():
tgt_state[k] = (1 - self.tau)*tgt_state[k] + self.tau*v
self.target_critic.load_state_dict(tgt_state)
# Update episode reward
total_episode_reward += reward
# Update state for next iteration
state = next_state
t += 1
# Append episode reward and total number of steps
self.episode_reward_list.append(total_episode_reward)
self.episode_number_of_steps.append(t)
# Close environment
self.env.close()
# Updates the tqdm update bar with fresh information
# (episode number, total reward of the last episode, total number of Steps
# of the last episode, average reward, average number of steps)
reward_run_av = self.running_average(self.episode_reward_list, self.n_ep_running_average)[-1]
steps_run_av = self.running_average(self.episode_number_of_steps, self.n_ep_running_average)[-1]
self.EPISODES.set_description("Episode {} - Reward/Steps: {:.1f}/{} - Avg. Reward/Steps: {:.1f}/{}".format(
i, total_episode_reward, t,
reward_run_av,
steps_run_av))
#save model
if SAVE:
if reward_run_av > self.best_reward_run_av:
self.best_reward_run_av = reward_run_av
torch.save(self.main_critic, 'models/best_critic.pt')
torch.save(self.main_actor, 'models/best_actor.pt')
def plot(self):
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 9))
ax[0].plot([i for i in range(1, self.N_episodes + 1)], self.episode_reward_list, label='Episode reward')
ax[0].plot([i for i in range(1, self.N_episodes + 1)], self.running_average(
self.episode_reward_list, self.n_ep_running_average), label='Avg. episode reward')
ax[0].set_xlabel('Episodes')
ax[0].set_ylabel('Total reward')
ax[0].set_title('Total Reward vs Episodes')
ax[0].legend()
ax[0].grid(alpha=0.3)
ax[1].plot([i for i in range(1, self.N_episodes + 1)], self.episode_number_of_steps, label='Steps per episode')
ax[1].plot([i for i in range(1, self.N_episodes + 1)], self.running_average(
self.episode_number_of_steps, self.n_ep_running_average), label='Avg. number of steps per episode')
ax[1].set_xlabel('Episodes')
ax[1].set_ylabel('Total number of steps')
ax[1].set_title('Total number of steps vs Episodes')
ax[1].legend()
ax[1].grid(alpha=0.3)
plt.savefig(str(self.gamma)+'_'+str(self.L)+'_'+str(self.N_episodes)+'_'+str(self.N)+'_'+str(self.net_dims)+'.png')
#plt.show()
def init_buffert(self,buffert,L):
filled = False
counter = 0
while not filled:
done = False
state = self.env.reset()
while not done:
action = np.clip(-1 + 2 * np.random.rand(2), -1, 1)
next_state,reward,done,_ = self.env.step(action)
exp = self.Experience(state, action, reward, next_state, done)
buffert.append(exp)
counter +=1
if counter == L:
filled = True
break
return buffert
ll = LunarLander(0.99, 60000, 500, 64, [8,400,200 ,2])
ll.train()
ll.plot()
| [
"numpy.random.rand",
"DDPG_agent.ExperienceReplayBuffer",
"torch.nn.MSELoss",
"numpy.array",
"gym.make",
"DDPG_agent.Actor",
"numpy.eye",
"collections.namedtuple",
"numpy.ones",
"torch.save",
"tqdm.trange",
"torch.device",
"DDPG_agent.Critic",
"numpy.copy",
"torch.tensor",
"numpy.zeros... | [((397, 433), 'gym.make', 'gym.make', (['"""LunarLanderContinuous-v2"""'], {}), "('LunarLanderContinuous-v2')\n", (405, 433), False, 'import gym\n'), ((525, 543), 'torch.device', 'torch.device', (['ddev'], {}), '(ddev)\n', (537, 543), False, 'import torch\n'), ((970, 1023), 'tqdm.trange', 'trange', (['self.N_episodes'], {'desc': '"""Episode: """', 'leave': '(True)'}), "(self.N_episodes, desc='Episode: ', leave=True)\n", (976, 1023), False, 'from tqdm import trange\n'), ((1050, 1127), 'collections.namedtuple', 'namedtuple', (['"""Experience"""', "['state', 'action', 'reward', 'next_state', 'done']"], {}), "('Experience', ['state', 'action', 'reward', 'next_state', 'done'])\n", (1060, 1127), False, 'from collections import namedtuple\n'), ((1304, 1318), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (1312, 1318), True, 'import numpy as np\n'), ((1794, 1806), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1804, 1806), True, 'import torch.nn as nn\n'), ((7680, 7727), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(16, 9)'}), '(nrows=1, ncols=2, figsize=(16, 9))\n', (7692, 7727), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1227), 'DDPG_agent.ExperienceReplayBuffer', 'ExperienceReplayBuffer', (['self.L'], {}), '(self.L)\n', (1219, 1227), False, 'from DDPG_agent import ExperienceReplayBuffer\n'), ((2001, 2011), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (2008, 2011), True, 'import numpy as np\n'), ((2114, 2130), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2127, 2130), True, 'import numpy as np\n'), ((2595, 2611), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2603, 2611), True, 'import numpy as np\n'), ((1403, 1411), 'DDPG_agent.Critic', 'Critic', ([], {}), '()\n', (1409, 1411), False, 'from DDPG_agent import RandomAgent, Critic, Actor\n'), ((1455, 1463), 'DDPG_agent.Critic', 'Critic', ([], {}), '()\n', (1461, 1463), False, 'from DDPG_agent import 
RandomAgent, Critic, Actor\n'), ((1504, 1524), 'DDPG_agent.Actor', 'Actor', (['self.net_dims'], {}), '(self.net_dims)\n', (1509, 1524), False, 'from DDPG_agent import RandomAgent, Critic, Actor\n'), ((1567, 1587), 'DDPG_agent.Actor', 'Actor', (['self.net_dims'], {}), '(self.net_dims)\n', (1572, 1587), False, 'from DDPG_agent import RandomAgent, Critic, Actor\n'), ((2051, 2064), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (2058, 2064), True, 'import numpy as np\n'), ((2275, 2284), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (2281, 2284), True, 'import numpy as np\n'), ((2702, 2717), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2715, 2717), False, 'import torch\n'), ((7499, 7552), 'torch.save', 'torch.save', (['self.main_critic', '"""models/best_critic.pt"""'], {}), "(self.main_critic, 'models/best_critic.pt')\n", (7509, 7552), False, 'import torch\n'), ((7573, 7624), 'torch.save', 'torch.save', (['self.main_actor', '"""models/best_actor.pt"""'], {}), "(self.main_actor, 'models/best_actor.pt')\n", (7583, 7624), False, 'import torch\n'), ((2764, 2849), 'torch.tensor', 'torch.tensor', (['[state]'], {'device': 'self.dev', 'requires_grad': '(False)', 'dtype': 'torch.float32'}), '([state], device=self.dev, requires_grad=False, dtype=torch.float32\n )\n', (2776, 2849), False, 'import torch\n'), ((3725, 3803), 'torch.tensor', 'torch.tensor', (['states'], {'device': 'self.dev', 'requires_grad': '(True)', 'dtype': 'torch.float32'}), '(states, device=self.dev, requires_grad=True, dtype=torch.float32)\n', (3737, 3803), False, 'import torch\n'), ((4379, 4438), 'torch.tensor', 'torch.tensor', (['rewards'], {'device': 'self.dev', 'dtype': 'torch.float32'}), '(rewards, device=self.dev, dtype=torch.float32)\n', (4391, 4438), False, 'import torch\n'), ((9107, 9124), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (9121, 9124), True, 'import numpy as np\n'), ((3828, 3913), 'torch.tensor', 'torch.tensor', (['actions'], {'device': 'self.dev', 
'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(actions, device=self.dev, requires_grad=False, dtype=torch.float32\n )\n', (3840, 3913), False, 'import torch\n'), ((5342, 5420), 'torch.tensor', 'torch.tensor', (['states'], {'device': 'self.dev', 'requires_grad': '(True)', 'dtype': 'torch.float32'}), '(states, device=self.dev, requires_grad=True, dtype=torch.float32)\n', (5354, 5420), False, 'import torch\n'), ((4001, 4090), 'torch.tensor', 'torch.tensor', (['next_states'], {'device': 'self.dev', 'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(next_states, device=self.dev, requires_grad=False, dtype=torch\n .float32)\n', (4013, 4090), False, 'import torch\n'), ((5209, 5288), 'torch.tensor', 'torch.tensor', (['states'], {'device': 'self.dev', 'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(states, device=self.dev, requires_grad=False, dtype=torch.float32)\n', (5221, 5288), False, 'import torch\n'), ((4149, 4238), 'torch.tensor', 'torch.tensor', (['next_states'], {'device': 'self.dev', 'requires_grad': '(False)', 'dtype': 'torch.float32'}), '(next_states, device=self.dev, requires_grad=False, dtype=torch\n .float32)\n', (4161, 4238), False, 'import torch\n')] |
import cv2
print(cv2.__version__)
rows = int(input('Enter Number of ROWS: '))
columns = int(input('Enter Number of COLUMNS: '))
width = 1000
height = 1000
import numpy as np
while True:
frame = np.zeros([width,height,3],dtype=np.uint8)
WhiteW = width // columns
WhiteH = height // rows
for i in range(0,rows):
for j in range(0,columns):
frame[WhiteH*(2*i):WhiteH*(2*i+1),WhiteW*(2*j):WhiteW*(2*j+1)] = (255,255,255)
for i in range(0,rows):
for j in range(0,columns):
frame[WhiteH*(2*i+1):WhiteH*(2*i+2),WhiteW*(2*j+1):WhiteW*(2*j+2)] = (255,255,255)
cv2.imshow('myWindow', frame)
if cv2.waitKey(1) & 0xff == ord('q'):
break
| [
"numpy.zeros",
"cv2.waitKey",
"cv2.imshow"
] | [((198, 242), 'numpy.zeros', 'np.zeros', (['[width, height, 3]'], {'dtype': 'np.uint8'}), '([width, height, 3], dtype=np.uint8)\n', (206, 242), True, 'import numpy as np\n'), ((614, 643), 'cv2.imshow', 'cv2.imshow', (['"""myWindow"""', 'frame'], {}), "('myWindow', frame)\n", (624, 643), False, 'import cv2\n'), ((652, 666), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (663, 666), False, 'import cv2\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.