code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""
Filename: matching.py
Author: <NAME>
Matching algorithms.
"""
import numpy as np
from numba import jit
# NOTE(review): bare @jit (no nopython=True) may fall back to object mode in
# modern numba because of the ValueError raises and out= kwargs — verify.
@jit
def deferred_acceptance(prop_prefs, resp_prefs, caps=None):
    """
    Compute a stable matching by the deferred acceptance (Gale-Shapley)
    algorithm. Support both one-to-one (marriage) and many-to-one
    (college admission) matchings.
    Parameters
    ----------
    prop_prefs : array_like(int, ndim=2)
        Array of shape (m, n+1) containing the proposers' preference
        orders as rows, where m is the number of proposers and n is that
        of the respondants. prop_prefs[i, j] is the j-th preferred
        respondant for the i-th proposer, where "respondant n"
        represents "being single".
    resp_prefs : array_like(int, ndim=2)
        Array of shape (n, m+1) containing the respondants' preference
        orders as rows. resp_prefs[j, i] is the i-th preferred proposer
        for the j-th respondant, where "proposer m" represents
        "being single" (or "vacancy" in the context of college
        admissions).
    caps : array_like(int, ndim=1), optional(default=None)
        Array of shape (n,) containing the respondants' capacities. If
        None, the capacities are all regarded as one (i.e., the matching
        is one-to-one).
    Returns
    -------
    prop_matches : ndarray(int, ndim=1)
        Array of length m representing the matches for the proposals,
        where prop_matches[i] is the respondant who proposer i is
        matched with.
    resp_matches : ndarray(int, ndim=1)
        Array of length n representing the matches for the respondants:
        if caps=None, resp_matches[j] is the proposer who respondant j
        is matched with; if caps is specified, the proposers who
        respondant j is matched with are contained in
        resp_matches[indptr[j]:indptr[j+1]].
    indptr : ndarray(int, ndim=1)
        Returned only when caps is specified. Contains index pointers
        for resp_matches.
    """
    prop_prefs = np.asarray(prop_prefs)
    resp_prefs = np.asarray(resp_prefs)
    num_props, num_resps = prop_prefs.shape[0], resp_prefs.shape[0]
    if not (prop_prefs.shape == (num_props, num_resps+1) and
            resp_prefs.shape == (num_resps, num_props+1)):
        raise ValueError('shapes of preferences arrays do not match')
    if (caps is not None) and (len(caps) != num_resps):
        raise ValueError('length of caps must be equal to that of resp_prefs')
    # Convert preference orders to rankings:
    # resp_ranks[r, p] is the rank of proposer p in respondant r's list;
    # a smaller rank means more preferred.
    resp_ranks = np.empty((num_resps, num_props+1), dtype=int)
    prefs2ranks(resp_prefs, out=resp_ranks)
    # IDs representing unmatched
    prop_unmatched, resp_unmatched = num_resps, num_props
    is_single_prop = np.ones(num_props, dtype=bool)
    # Next resp to propose to
    next_resp = np.zeros(num_props, dtype=int)
    # Set up index pointers: seats of respondant r occupy
    # current_prop[indptr[r]:indptr[r+1]].
    if caps is None:  # One-to-one
        indptr = np.arange(num_resps+1)
    else:  # Many-to-one
        indptr = np.empty(num_resps+1, dtype=int)
        indptr[0] = 0
        np.cumsum(caps, out=indptr[1:])
    num_caps = indptr[-1]
    # Prop currently matched with (initially every seat is vacant)
    current_prop = np.ones(num_caps, dtype=int) * resp_unmatched
    # Numbers of occupied seats
    nums_occupied = np.zeros(num_resps, dtype=int)
    # Main loop: while any proposer is single, each single proposer
    # proposes to the next respondant on its preference list.
    while(is_single_prop.sum() > 0):
        for p in range(num_props):
            if is_single_prop[p]:
                r = prop_prefs[p, next_resp[p]]  # p proposes to r
                # Prefers to be unmatched
                if r == prop_unmatched:
                    is_single_prop[p] = False
                # Unacceptable for r
                elif resp_ranks[r, p] > resp_ranks[r, resp_unmatched]:
                    pass
                # Some seats vacant
                elif nums_occupied[r] < indptr[r+1] - indptr[r]:
                    current_prop[indptr[r]+nums_occupied[r]] = p
                    is_single_prop[p] = False
                    nums_occupied[r] += 1
                # All seats occupied
                else:
                    # Find the least preferred among the currently accepted
                    least_ptr = indptr[r]
                    least = current_prop[least_ptr]
                    for i in range(indptr[r]+1, indptr[r+1]):
                        compared = current_prop[i]
                        if resp_ranks[r, least] < resp_ranks[r, compared]:
                            least_ptr = i
                            least = compared
                    # r displaces its least preferred match if p ranks higher
                    if resp_ranks[r, p] < resp_ranks[r, least]:
                        current_prop[least_ptr] = p
                        is_single_prop[p] = False
                        is_single_prop[least] = True
                # Advance past this respondant whether or not p was accepted,
                # so next_resp[p]-1 always indexes p's latest proposal.
                next_resp[p] += 1
    # When the loop terminates every proposer's latest proposal was
    # accepted (or was "being single"), hence the next_resp-1 lookup.
    prop_matches = prop_prefs[np.arange(num_props), next_resp-1]
    resp_matches = current_prop
    if caps is None:
        return prop_matches, resp_matches
    else:
        return prop_matches, resp_matches, indptr
@jit(nopython=True)
def prefs2ranks(prefs, out):
    """Fill *out* with rankings derived from preference orders.

    After the call, out[i, k] is the position (rank) of alternative k in
    row i of *prefs*; i.e. out[i, prefs[i, j]] == j.
    """
    num_rows, num_cols = prefs.shape
    for row in range(num_rows):
        for rank in range(num_cols):
            out[row, prefs[row, rank]] = rank
| [
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"numpy.cumsum",
"numba.jit",
"numpy.arange"
] | [((5002, 5020), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5005, 5020), False, 'from numba import jit\n'), ((2024, 2046), 'numpy.asarray', 'np.asarray', (['prop_prefs'], {}), '(prop_prefs)\n', (2034, 2046), True, 'import numpy as np\n'), ((2064, 2086), 'numpy.asarray', 'np.asarray', (['resp_prefs'], {}), '(resp_prefs)\n', (2074, 2086), True, 'import numpy as np\n'), ((2544, 2591), 'numpy.empty', 'np.empty', (['(num_resps, num_props + 1)'], {'dtype': 'int'}), '((num_resps, num_props + 1), dtype=int)\n', (2552, 2591), True, 'import numpy as np\n'), ((2748, 2778), 'numpy.ones', 'np.ones', (['num_props'], {'dtype': 'bool'}), '(num_props, dtype=bool)\n', (2755, 2778), True, 'import numpy as np\n'), ((2826, 2856), 'numpy.zeros', 'np.zeros', (['num_props'], {'dtype': 'int'}), '(num_props, dtype=int)\n', (2834, 2856), True, 'import numpy as np\n'), ((3278, 3308), 'numpy.zeros', 'np.zeros', (['num_resps'], {'dtype': 'int'}), '(num_resps, dtype=int)\n', (3286, 3308), True, 'import numpy as np\n'), ((2938, 2962), 'numpy.arange', 'np.arange', (['(num_resps + 1)'], {}), '(num_resps + 1)\n', (2947, 2962), True, 'import numpy as np\n'), ((3003, 3037), 'numpy.empty', 'np.empty', (['(num_resps + 1)'], {'dtype': 'int'}), '(num_resps + 1, dtype=int)\n', (3011, 3037), True, 'import numpy as np\n'), ((3066, 3097), 'numpy.cumsum', 'np.cumsum', (['caps'], {'out': 'indptr[1:]'}), '(caps, out=indptr[1:])\n', (3075, 3097), True, 'import numpy as np\n'), ((3179, 3207), 'numpy.ones', 'np.ones', (['num_caps'], {'dtype': 'int'}), '(num_caps, dtype=int)\n', (3186, 3207), True, 'import numpy as np\n'), ((4808, 4828), 'numpy.arange', 'np.arange', (['num_props'], {}), '(num_props)\n', (4817, 4828), True, 'import numpy as np\n')] |
import os
import numpy as np
from matplotlib import pyplot as plt
import sys
sys.path.append("../utilities")
import constants
import utils
import data
def plot_histograms(run_dir, metadata_sig, metadata_bg):
    """Plot overlaid signal/background histograms for each metadata column.

    Parameters
    ----------
    run_dir : str
        Directory where the histogram PNGs are written.
    metadata_sig : ndarray
        Signal ('qq') metadata, one row per event; columns 0-3 are
        pull1, pull2, jet_mass, jet_delta_R.
    metadata_bg : ndarray
        Background ('gg') metadata with the same column layout.
    """
    # Trim the larger sample so both have the same number of rows.
    n = min(metadata_sig.shape[0], metadata_bg.shape[0])
    metadata_sig = metadata_sig[:n, :]
    metadata_bg = metadata_bg[:n, :]
    # Column index -> feature name (replaces the original if/elif chain).
    names = ['pull1', 'pull2', 'jet_mass', 'jet_delta_R']
    for j, name in enumerate(names):
        hist, bins = np.histogram(metadata_sig[:, j], bins=100)
        plt.plot(bins[:-1], hist, drawstyle='steps-post', color='blue', label='qq')
        hist, bins = np.histogram(metadata_bg[:, j], bins=100)
        plt.plot(bins[:-1], hist, drawstyle='steps-post', color='red', label='gg')
        plt.title(name)
        plt.legend(loc='upper right')
        # os.path.join works whether or not run_dir has a trailing slash
        # (the original raw concatenation produced a bad path without one).
        plt.savefig(os.path.join(run_dir, name + '.png'))
        plt.clf()
def main():
    """Parse CLI options, load signal/background metadata, and plot histograms."""
    import argparse
    cli = argparse.ArgumentParser(description='Plot histograms on given data.')
    cli.add_argument('--run_dir', default='../histograms/',
                     help='The directory in which histogram plots should be saved.')
    args = cli.parse_args()
    if not args.run_dir:
        # Only reachable when --run_dir is explicitly given an empty string.
        args.run_dir = utils.make_run_dir()
        print(f'[clustering] New run directory created at {args.run_dir}')
    _, metadata_sig = data.get_pixels_metadata(octet=False)
    _, metadata_bg = data.get_pixels_metadata(octet=True)
    plot_histograms(args.run_dir, np.array(metadata_sig), np.array(metadata_bg))
if __name__ == '__main__':
    main()
| [
"sys.path.append",
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"data.get_pixels_metadata",
"matplotlib.pyplot.legend",
"numpy.histogram",
"utils.make_run_dir",
"numpy.array",
"matplotlib.pyplot.savefig"
] | [((78, 109), 'sys.path.append', 'sys.path.append', (['"""../utilities"""'], {}), "('../utilities')\n", (93, 109), False, 'import sys\n'), ((1026, 1095), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot histograms on given data."""'}), "(description='Plot histograms on given data.')\n", (1049, 1095), False, 'import argparse\n'), ((1413, 1450), 'data.get_pixels_metadata', 'data.get_pixels_metadata', ([], {'octet': '(False)'}), '(octet=False)\n', (1437, 1450), False, 'import data\n'), ((1470, 1506), 'data.get_pixels_metadata', 'data.get_pixels_metadata', ([], {'octet': '(True)'}), '(octet=True)\n', (1494, 1506), False, 'import data\n'), ((612, 654), 'numpy.histogram', 'np.histogram', (['metadata_sig[:, j]'], {'bins': '(100)'}), '(metadata_sig[:, j], bins=100)\n', (624, 654), True, 'import numpy as np\n'), ((661, 736), 'matplotlib.pyplot.plot', 'plt.plot', (['bins[:-1]', 'hist'], {'drawstyle': '"""steps-post"""', 'color': '"""blue"""', 'label': '"""qq"""'}), "(bins[:-1], hist, drawstyle='steps-post', color='blue', label='qq')\n", (669, 736), True, 'from matplotlib import pyplot as plt\n'), ((754, 795), 'numpy.histogram', 'np.histogram', (['metadata_bg[:, j]'], {'bins': '(100)'}), '(metadata_bg[:, j], bins=100)\n', (766, 795), True, 'import numpy as np\n'), ((802, 876), 'matplotlib.pyplot.plot', 'plt.plot', (['bins[:-1]', 'hist'], {'drawstyle': '"""steps-post"""', 'color': '"""red"""', 'label': '"""gg"""'}), "(bins[:-1], hist, drawstyle='steps-post', color='red', label='gg')\n", (810, 876), True, 'from matplotlib import pyplot as plt\n'), ((882, 897), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (891, 897), True, 'from matplotlib import pyplot as plt\n'), ((902, 931), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (912, 931), True, 'from matplotlib import pyplot as plt\n'), ((936, 972), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(run_dir + name + 
'.png')"], {}), "(run_dir + name + '.png')\n", (947, 972), True, 'from matplotlib import pyplot as plt\n'), ((973, 982), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (980, 982), True, 'from matplotlib import pyplot as plt\n'), ((1292, 1312), 'utils.make_run_dir', 'utils.make_run_dir', ([], {}), '()\n', (1310, 1312), False, 'import utils\n'), ((1539, 1561), 'numpy.array', 'np.array', (['metadata_sig'], {}), '(metadata_sig)\n', (1547, 1561), True, 'import numpy as np\n'), ((1563, 1584), 'numpy.array', 'np.array', (['metadata_bg'], {}), '(metadata_bg)\n', (1571, 1584), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, show, draw, figure, cm
import seaborn as sns
from sklearn.manifold import TSNE
import umap
from sklearn.datasets import load_iris
from mpl_toolkits.mplot3d import Axes3D
sns.set_style("whitegrid", {'axes.grid' : False})
def plot_elbow(min_num, max_num, distortions, y_label, title):
    """Draw an elbow plot to help choose the number of clusters.

    Plots *distortions* against cluster counts min_num .. max_num-1.
    """
    cluster_counts = range(min_num, max_num)
    plt.plot(cluster_counts, distortions, marker='o')
    plt.xlabel('Number of clusters')
    plt.ylabel(y_label)
    plt.title(title)
    plt.show()
def d3_tsne(X, y_km, heat=None):
    """3D scatter of X embedded with t-SNE, colored by cluster labels y_km."""
    fig = plt.figure(figsize=(6, 6))
    ax = Axes3D(fig)  # Method 1
    # ax = fig.add_subplot(111, projection='3d') # Method 2
    embedded = TSNE(n_components=3, random_state=1234).fit_transform(X)
    ax.scatter(embedded[:, 0], embedded[:, 1], embedded[:, 2], marker='o',
               c=y_km, cmap=plt.cm.get_cmap('Set1', 5))
    ax.set_xlabel('tSNE-X')
    ax.set_ylabel('tSNE-Y')
    ax.set_zlabel('tSNE-Z')
    plt.show()
def d3_umap(X, y_km, heat=None):
    """3D scatter of X embedded with UMAP, colored by cluster labels y_km.

    Returns the fitted UMAP reducer so new points can be projected later.
    """
    fig = plt.figure(figsize=(6, 6))
    ax = Axes3D(fig)  # Method 1
    # ax = fig.add_subplot(111, projection='3d') # Method 2
    reducer = umap.UMAP(random_state=1234, n_components=3)
    embedded = reducer.fit_transform(X)
    ax.scatter(embedded[:, 0], embedded[:, 1], embedded[:, 2], marker='o',
               c=y_km, cmap=plt.cm.get_cmap('Set1', 5))
    ax.set_xlabel('UMAP-X')
    ax.set_ylabel('UMAP-Y')
    ax.set_zlabel('UMAP-Z')
    plt.show()
    return reducer
def new_d3_umap(X, y_km, reducer, heat=None):
    """Project X with an already-fitted UMAP *reducer* and scatter in 3D.

    The last four rows of X are drawn larger and colored (two red, two
    blue); every other point is small and grey.
    """
    fig = plt.figure(figsize=(6, 6))
    ax = Axes3D(fig)  # Method 1
    # ax = fig.add_subplot(111, projection='3d') # Method 2
    embedded = reducer.transform(X)
    num_background = len(y_km) - 4
    # #1f77b4 -> blue, #d62728 -> red, #808080 -> grey
    point_colors = ['#808080'] * num_background + ['#d62728'] * 2 + ['#1f77b4'] * 2
    point_sizes = [20] * num_background + [200] * 4
    ax.scatter(embedded[:, 0], embedded[:, 1], embedded[:, 2], marker='o',
               c=point_colors, cmap=plt.cm.get_cmap('Set1', 5), s=point_sizes)
    ax.set_xlabel('UMAP-X')
    ax.set_ylabel('UMAP-Y')
    ax.set_zlabel('UMAP-Z')
    plt.show()
def vis_tsne(X, y_km):
    """
    Visualize the final clusters in 2D with t-SNE.

    Embeds X with a fixed-seed t-SNE, scatter-plots the two components
    colored by cluster membership y_km, and saves figures/tSNE.pdf.
    """
    X_embedded = TSNE(n_components=2, random_state=1234).fit_transform(X)
    # Removed dead code: the original computed an unused `random_heat`
    # array (np.random.normal) that was never plotted.
    sns.scatterplot(X_embedded[:,0], X_embedded[:,1], legend='full', hue=y_km, palette="Set1")
    plt.title('Visualized by t-SNE')
    plt.savefig('figures/tSNE.pdf')
    plt.show()
def vis_umap(X, y_km):
    """Visualize the clusters in 2D using a UMAP embedding, saved to figures/UMAP.pdf."""
    mapper = umap.UMAP(random_state=1234)
    projected = mapper.fit_transform(X)
    sns.scatterplot(projected[:, 0], projected[:, 1], legend='full', hue=y_km, palette="Set1")
    plt.title('Visualized by UMAP')
    plt.savefig('figures/UMAP.pdf')
    plt.show()
def csv2X(fname):
    """
    Load a CSV file and return one feature row per patient.

    The first value of every row is dropped (it is an index/id column);
    the remaining values are kept as the feature vector.
    """
    frame = pd.read_csv(fname, sep=',')
    return [row[1:] for row in frame.values]
def patient_cluster(patient_id, cluster_arr, num_clu, method):
    """
    Join patient ids with their cluster memberships and save as CSV.

    Parameters
    ----------
    patient_id : str
        Path to a text file with one patient id per line.
    cluster_arr : str
        Path to a .npy file holding the cluster-membership array
        (same order as the id file).
    num_clu : int
        Number of clusters, used only in the output file name.
    method : str
        Clustering method name, used only in the output file name.

    Writes "<method>_patient_cluster<num_clu>.csv" with columns
    patient_id and cluster in the current working directory.
    """
    with open(patient_id) as in_f:
        # Strip trailing newlines; the original kept them, which embedded
        # literal line breaks inside the patient_id CSV column.
        pid = [line.rstrip('\n') for line in in_f]
    cluster = np.load(cluster_arr)
    # (Removed the unused `p_c = list(zip(pid, cluster))` variable.)
    new_df = pd.DataFrame()
    new_df['patient_id'] = pid
    new_df['cluster'] = cluster
    new_df.to_csv("%s_patient_cluster%d.csv"%(method,num_clu),index=None)
if __name__ == "__main__":
    # Script entry point: load the PCA projections CSV and cache the
    # feature matrix as a .npy file for later clustering runs.
    dat_type ="train"
    fname = "/Users/yuexichen/Desktop/School/UThackathon/datafiles/training_pca_projs.csv"
    X = csv2X(fname)
    #print(np.shape(X))
    num_clu = 5
    method = "kmeans"
    #method = "kmeans"
    np.save("%s_pca_good_projection"%dat_type,X)
    # NOTE(review): the triple-quoted block below is dead code disabled by
    # wrapping it in a string literal; it shows how the patient/cluster
    # CSV export was previously driven.
    """
    with open("patients_id.txt",'w') as out_f:
        for r in row:
            out_f.write(r + '\n')
    patient_id = "patients_id.txt"
    #cluster_arr = "kmeans_membership_5clusters.npy"
    cluster_arr = "%s_membership_%d.npy"%(method,num_clu)
    patient_cluster(patient_id, cluster_arr, num_clu, method)
    """
| [
"matplotlib.pyplot.title",
"seaborn.set_style",
"numpy.load",
"pandas.DataFrame",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"numpy.save",
"seaborn.scatterplot",
"pandas.read_csv",
"sklearn.manifold.TSNE",
"umap.UMAP",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.py... | [((312, 360), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': False}"], {}), "('whitegrid', {'axes.grid': False})\n", (325, 360), True, 'import seaborn as sns\n'), ((563, 595), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of clusters"""'], {}), "('Number of clusters')\n", (573, 595), True, 'import matplotlib.pyplot as plt\n'), ((600, 619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (610, 619), True, 'import matplotlib.pyplot as plt\n'), ((624, 640), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (633, 640), True, 'import matplotlib.pyplot as plt\n'), ((645, 655), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (653, 655), True, 'import matplotlib.pyplot as plt\n'), ((700, 726), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (710, 726), True, 'import matplotlib.pyplot as plt\n'), ((735, 746), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (741, 746), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1201, 1211), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1209, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1256, 1282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1266, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1291, 1302), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1297, 1302), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((1388, 1432), 'umap.UMAP', 'umap.UMAP', ([], {'random_state': '(1234)', 'n_components': '(3)'}), '(random_state=1234, n_components=3)\n', (1397, 1432), False, 'import umap\n'), ((1751, 1761), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1759, 1761), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (1847, 1863), True, 'import 
matplotlib.pyplot as plt\n'), ((1872, 1883), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (1878, 1883), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((2830, 2840), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2838, 2840), True, 'import matplotlib.pyplot as plt\n'), ((3187, 3283), 'seaborn.scatterplot', 'sns.scatterplot', (['X_embedded[:, 0]', 'X_embedded[:, 1]'], {'legend': '"""full"""', 'hue': 'y_km', 'palette': '"""Set1"""'}), "(X_embedded[:, 0], X_embedded[:, 1], legend='full', hue=y_km,\n palette='Set1')\n", (3202, 3283), True, 'import seaborn as sns\n'), ((3283, 3315), 'matplotlib.pyplot.title', 'plt.title', (['"""Visualized by t-SNE"""'], {}), "('Visualized by t-SNE')\n", (3292, 3315), True, 'import matplotlib.pyplot as plt\n'), ((3320, 3351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/tSNE.pdf"""'], {}), "('figures/tSNE.pdf')\n", (3331, 3351), True, 'import matplotlib.pyplot as plt\n'), ((3356, 3366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3364, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3444, 3472), 'umap.UMAP', 'umap.UMAP', ([], {'random_state': '(1234)'}), '(random_state=1234)\n', (3453, 3472), False, 'import umap\n'), ((3590, 3686), 'seaborn.scatterplot', 'sns.scatterplot', (['X_embedded[:, 0]', 'X_embedded[:, 1]'], {'legend': '"""full"""', 'hue': 'y_km', 'palette': '"""Set1"""'}), "(X_embedded[:, 0], X_embedded[:, 1], legend='full', hue=y_km,\n palette='Set1')\n", (3605, 3686), True, 'import seaborn as sns\n'), ((3686, 3717), 'matplotlib.pyplot.title', 'plt.title', (['"""Visualized by UMAP"""'], {}), "('Visualized by UMAP')\n", (3695, 3717), True, 'import matplotlib.pyplot as plt\n'), ((3722, 3753), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/UMAP.pdf"""'], {}), "('figures/UMAP.pdf')\n", (3733, 3753), True, 'import matplotlib.pyplot as plt\n'), ((3758, 3768), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3766, 3768), True, 'import 
matplotlib.pyplot as plt\n'), ((3882, 3909), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'sep': '""","""'}), "(fname, sep=',')\n", (3893, 3909), True, 'import pandas as pd\n'), ((4374, 4394), 'numpy.load', 'np.load', (['cluster_arr'], {}), '(cluster_arr)\n', (4381, 4394), True, 'import numpy as np\n'), ((4442, 4456), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4454, 4456), True, 'import pandas as pd\n'), ((4846, 4893), 'numpy.save', 'np.save', (["('%s_pca_good_projection' % dat_type)", 'X'], {}), "('%s_pca_good_projection' % dat_type, X)\n", (4853, 4893), True, 'import numpy as np\n'), ((835, 874), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(3)', 'random_state': '(1234)'}), '(n_components=3, random_state=1234)\n', (839, 874), False, 'from sklearn.manifold import TSNE\n'), ((1037, 1063), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Set1"""', '(5)'], {}), "('Set1', 5)\n", (1052, 1063), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1613), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Set1"""', '(5)'], {}), "('Set1', 5)\n", (1602, 1613), True, 'import matplotlib.pyplot as plt\n'), ((2317, 2343), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""Set1"""', '(5)'], {}), "('Set1', 5)\n", (2332, 2343), True, 'import matplotlib.pyplot as plt\n'), ((2927, 2966), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'random_state': '(1234)'}), '(n_components=2, random_state=1234)\n', (2931, 2966), False, 'from sklearn.manifold import TSNE\n'), ((3160, 3171), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (3168, 3171), True, 'import numpy as np\n')] |
import chainer
import chainer.functions as F
import chainer.links as L
import chainer.serializers
from chainer.datasets import tuple_dataset
from chainer import Chain, Variable, optimizers
from chainer import training
from chainer.training import extensions
from PIL import Image
import argparse
import numpy as np
import glob
import alexLike
import cv2
def main():
    """Evaluate a trained AlexNet-style classifier on labelled test images.

    Loads RGB images from ./test/<class>/ directories, runs each through
    the Chainer model given by --model, prints per-image correctness, and
    writes an annotated copy of every misclassified image to wrong/.
    """
    #=======================chainer setting=======================
    parse = argparse.ArgumentParser(description='test human position detection')
    parse.add_argument('--batchsize', '-b', type=int, default=100)
    parse.add_argument('--gpu', '-g', type=int, default=0) #change to -1 for use only CPU
    parse.add_argument('--model','-m', default='my_output_5.model')
    parse.add_argument('--channel', '-c', default=3)
    args = parse.parse_args()
    #=======================chainer setting=======================
    #=======================read images & labels set=======================
    # Each entry pairs a directory of test images with its class label.
    pathsAndLabels = []
    pathsAndLabels.append(np.asarray(['./test/center/', 0]))
    pathsAndLabels.append(np.asarray(['./test/left/', 1]))
    pathsAndLabels.append(np.asarray(['./test/right/', 2]))
    pathsAndLabels.append(np.asarray(['./test/near/', 3]))
    pathsAndLabels.append(np.asarray(['./test/none/', 4]))
    allData = []
    for pathAndLabel in pathsAndLabels:
        path = pathAndLabel[0]
        label = pathAndLabel[1]
        imagelist = glob.glob(path + "*")
        for imgName in imagelist:
            allData.append([imgName, label])
    print('Number of datas is ' + str(len(allData)))
    print('')
    #=======================read images & labels set=======================
    #=======================testing program=======================
    # The number of output classes is encoded in the model file name,
    # e.g. "my_output_5.model" -> 5 classes.
    outNumStr = args.model.split(".")[0].split("_")
    outnum = int(outNumStr[ len(outNumStr)-1 ])
    correct = 0
    model = L.Classifier(alexLike.AlexLike(outnum))
    chainer.serializers.load_npz(args.model, model)
    count = 1
    val = ['center', 'left', 'right', 'near', 'none']
    for pathAndLabel in allData:
        img = Image.open(pathAndLabel[0])
        r,g,b = img.split()
        # Normalise each channel to [0, 1] float32.
        rImgData = np.asarray(np.float32(r)/255.0)
        gImgData = np.asarray(np.float32(g)/255.0)
        bImgData = np.asarray(np.float32(b)/255.0)
        imgData = np.asarray([[[rImgData, gImgData, bImgData]]])
        x = Variable(imgData)
        y = F.softmax(model.predictor(x.data[0]))
        # Rounding the softmax output yields a one-hot-like vector;
        # predR[pre_i] == 1 marks the predicted class.
        predR = np.round(y.data[0])
        for pre_i in np.arange(len(predR)):
            if predR[pre_i] == 1:
                if pathAndLabel[1].astype(int) == pre_i:
                    correct += 1
                    print('image number ', count, 'is correct')
                else:
                    print('image number', count, 'is incorrect')
                    # Rebuild an HWC 0-255 image from the CHW float tensor
                    # so the misclassified sample can be saved with OpenCV.
                    a = imgData[0][0]
                    a = np.swapaxes(a,0,2)
                    a = np.swapaxes(a,0,1)
                    a = a*255
                    a = cv2.cvtColor(a, cv2.COLOR_BGR2RGB)
                    a = cv2.resize(a, (640, 480))
                    # Red text (bottom right): predicted class;
                    # green text (bottom left): ground-truth class.
                    cv2.putText(a,val[pre_i],(550,450), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),2)
                    cv2.putText(a,val[pathAndLabel[1].astype(int)],(20,450), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
                    cv2.imwrite('wrong/'+str(count)+'.png',a)
        count += 1
    print('correct = ', correct/len(allData)*100, '%')
    #=======================testing program=======================
if __name__ == '__main__':
    main()
| [
"chainer.Variable",
"cv2.putText",
"argparse.ArgumentParser",
"chainer.serializers.load_npz",
"cv2.cvtColor",
"alexLike.AlexLike",
"numpy.asarray",
"numpy.float32",
"PIL.Image.open",
"numpy.swapaxes",
"glob.glob",
"numpy.round",
"cv2.resize"
] | [((442, 510), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""test human position detection"""'}), "(description='test human position detection')\n", (465, 510), False, 'import argparse\n'), ((1806, 1853), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (['args.model', 'model'], {}), '(args.model, model)\n', (1834, 1853), False, 'import chainer\n'), ((986, 1019), 'numpy.asarray', 'np.asarray', (["['./test/center/', 0]"], {}), "(['./test/center/', 0])\n", (996, 1019), True, 'import numpy as np\n'), ((1044, 1075), 'numpy.asarray', 'np.asarray', (["['./test/left/', 1]"], {}), "(['./test/left/', 1])\n", (1054, 1075), True, 'import numpy as np\n'), ((1100, 1132), 'numpy.asarray', 'np.asarray', (["['./test/right/', 2]"], {}), "(['./test/right/', 2])\n", (1110, 1132), True, 'import numpy as np\n'), ((1157, 1188), 'numpy.asarray', 'np.asarray', (["['./test/near/', 3]"], {}), "(['./test/near/', 3])\n", (1167, 1188), True, 'import numpy as np\n'), ((1213, 1244), 'numpy.asarray', 'np.asarray', (["['./test/none/', 4]"], {}), "(['./test/none/', 4])\n", (1223, 1244), True, 'import numpy as np\n'), ((1363, 1384), 'glob.glob', 'glob.glob', (["(path + '*')"], {}), "(path + '*')\n", (1372, 1384), False, 'import glob\n'), ((1778, 1803), 'alexLike.AlexLike', 'alexLike.AlexLike', (['outnum'], {}), '(outnum)\n', (1795, 1803), False, 'import alexLike\n'), ((1955, 1982), 'PIL.Image.open', 'Image.open', (['pathAndLabel[0]'], {}), '(pathAndLabel[0])\n', (1965, 1982), False, 'from PIL import Image\n'), ((2152, 2198), 'numpy.asarray', 'np.asarray', (['[[[rImgData, gImgData, bImgData]]]'], {}), '([[[rImgData, gImgData, bImgData]]])\n', (2162, 2198), True, 'import numpy as np\n'), ((2205, 2222), 'chainer.Variable', 'Variable', (['imgData'], {}), '(imgData)\n', (2213, 2222), False, 'from chainer import Chain, Variable, optimizers\n'), ((2277, 2296), 'numpy.round', 'np.round', (['y.data[0]'], {}), '(y.data[0])\n', (2285, 2296), True, 'import 
numpy as np\n'), ((2029, 2042), 'numpy.float32', 'np.float32', (['r'], {}), '(r)\n', (2039, 2042), True, 'import numpy as np\n'), ((2074, 2087), 'numpy.float32', 'np.float32', (['g'], {}), '(g)\n', (2084, 2087), True, 'import numpy as np\n'), ((2119, 2132), 'numpy.float32', 'np.float32', (['b'], {}), '(b)\n', (2129, 2132), True, 'import numpy as np\n'), ((2564, 2584), 'numpy.swapaxes', 'np.swapaxes', (['a', '(0)', '(2)'], {}), '(a, 0, 2)\n', (2575, 2584), True, 'import numpy as np\n'), ((2592, 2612), 'numpy.swapaxes', 'np.swapaxes', (['a', '(0)', '(1)'], {}), '(a, 0, 1)\n', (2603, 2612), True, 'import numpy as np\n'), ((2635, 2669), 'cv2.cvtColor', 'cv2.cvtColor', (['a', 'cv2.COLOR_BGR2RGB'], {}), '(a, cv2.COLOR_BGR2RGB)\n', (2647, 2669), False, 'import cv2\n'), ((2679, 2704), 'cv2.resize', 'cv2.resize', (['a', '(640, 480)'], {}), '(a, (640, 480))\n', (2689, 2704), False, 'import cv2\n'), ((2710, 2798), 'cv2.putText', 'cv2.putText', (['a', 'val[pre_i]', '(550, 450)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 0, 255)', '(2)'], {}), '(a, val[pre_i], (550, 450), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, \n 255), 2)\n', (2721, 2798), False, 'import cv2\n')] |
import os
import os.path
from PIL import Image
import numpy as np
from numpy import asarray
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as kl
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
#%%
# Root directory of the resized radiography dataset (Windows path).
path_ = 'I:\\Empresas\\2stars\\IA\\Diagnostico_de_Radiografias\\Datasets\\01\\images_resized\\'
#%%
# Load the pre-processed training tensors cached as .npy files.
train_labels = np.load(path_ + 'numpy_files/train_labels.npy')
print(train_labels.shape)
train_images = np.load(path_ + 'numpy_files/train_images.npy')
print(train_images.shape)
#%%
from tensorflow.keras.applications.densenet import DenseNet121
# 224x224 RGB input tensor expected by DenseNet121.
img_in = kl.Input((224, 224, 3))
model = DenseNet121(include_top= False , # remove the 3 fully-connected layers at the top of the network
                    weights='imagenet', # pre-trained ImageNet weights
                    input_tensor=img_in,
                    input_shape=(224, 224, 3),
                    pooling ='avg')
x = model.output
predictions = kl.Dense(2, activation="sigmoid", name="predictions")(x) # fully-connected layer that predicts the class
model = Model(inputs=img_in, outputs=predictions)
# Stop early when validation loss stalls and keep the best checkpoint on disk.
callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=5), keras.callbacks.ModelCheckpoint(filepath=path_ + 'model/best_model.h5', monitor='val_loss', save_best_only=True)]
# NOTE(review): `lr` is deprecated in newer Keras in favour of `learning_rate`.
opt = keras.optimizers.Adam(lr=0.001, epsilon = 1e-8, beta_1=0.9, beta_2=0.999, decay=0.0, amsgrad=False)
print(model.summary())
#%%
model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
# Train for one epoch, holding out 20% of the data for validation.
history = model.fit(train_images, train_labels, batch_size=32, validation_split=0.2, epochs=1, callbacks=callbacks)
#%%
model.save(path_ + 'model/model.h5')
#%%
# Plot training vs. validation accuracy and loss curves from the fit history.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training accuracy')
plt.legend()
plt.savefig(path_ + 'plots/accuracy.png')
plt.show()

plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training loss')
plt.legend()
plt.savefig(path_ + 'plots/loss.png')
plt.show()
#%%
# Free the training arrays before loading the test set to reduce memory use.
train_images = []
train_labels = []
test_labels = np.load(path_ + 'numpy_files/test_labels.npy')
print(test_labels.shape)
test_images = np.load(path_ + 'numpy_files/test_images.npy')
print(test_images.shape)
#%%
# Round the sigmoid outputs to hard 0/1 predictions (abs() folds -0.0 to 0.0).
predictions = model.predict(test_images)
predictions = abs(np.rint(predictions))
hits = 0
for i in range(len(predictions)):
    print(test_labels[i], " ", predictions[i])
    if (test_labels[i] == predictions[i]).all():
        hits += 1
# Bug fix: divide by the actual number of test samples instead of the
# hard-coded 624, so the accuracy stays correct if the test set changes.
accuracy = 100 * hits / len(predictions)
print('accuracy', accuracy)
model.evaluate(test_images, test_labels)
#%%
# Convert the trained Keras model to TensorFlow Lite format.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
# Use a context manager so the output file handle is closed (the original
# open(...).write(...) left the handle dangling).
with open(path_ + "modelTFlite/moses1.tflite", "wb") as out_file:
    out_file.write(tflite_model)
| [
"matplotlib.pyplot.title",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.legend",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"matplotlib.pyplot.figure",
"tensorflow.keras.applications.densen... | [((396, 443), 'numpy.load', 'np.load', (["(path_ + 'numpy_files/train_labels.npy')"], {}), "(path_ + 'numpy_files/train_labels.npy')\n", (403, 443), True, 'import numpy as np\n'), ((485, 532), 'numpy.load', 'np.load', (["(path_ + 'numpy_files/train_images.npy')"], {}), "(path_ + 'numpy_files/train_images.npy')\n", (492, 532), True, 'import numpy as np\n'), ((636, 659), 'tensorflow.keras.layers.Input', 'kl.Input', (['(224, 224, 3)'], {}), '((224, 224, 3))\n', (644, 659), True, 'import tensorflow.keras.layers as kl\n'), ((668, 785), 'tensorflow.keras.applications.densenet.DenseNet121', 'DenseNet121', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_tensor': 'img_in', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(include_top=False, weights='imagenet', input_tensor=img_in,\n input_shape=(224, 224, 3), pooling='avg')\n", (679, 785), False, 'from tensorflow.keras.applications.densenet import DenseNet121\n'), ((1078, 1119), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'img_in', 'outputs': 'predictions'}), '(inputs=img_in, outputs=predictions)\n', (1083, 1119), False, 'from tensorflow.keras.models import Model\n'), ((1316, 1418), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)', 'epsilon': '(1e-08)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'decay': '(0.0)', 'amsgrad': '(False)'}), '(lr=0.001, epsilon=1e-08, beta_1=0.9, beta_2=0.999,\n decay=0.0, amsgrad=False)\n', (1337, 1418), False, 'from tensorflow import keras\n'), ((1866, 1915), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""bo"""'], {'label': '"""Training acc"""'}), "(epochs, acc, 'bo', label='Training acc')\n", (1874, 1915), True, 'import matplotlib.pyplot as plt\n'), ((1916, 1970), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""b"""'], {'label': '"""Validation acc"""'}), "(epochs, val_acc, 'b', label='Validation acc')\n", (1924, 1970), True, 'import 
matplotlib.pyplot as plt\n'), ((1971, 2001), 'matplotlib.pyplot.title', 'plt.title', (['"""Training accuracy"""'], {}), "('Training accuracy')\n", (1980, 2001), True, 'import matplotlib.pyplot as plt\n'), ((2002, 2014), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2012, 2014), True, 'import matplotlib.pyplot as plt\n'), ((2015, 2056), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_ + 'plots/accuracy.png')"], {}), "(path_ + 'plots/accuracy.png')\n", (2026, 2056), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2065, 2067), True, 'import matplotlib.pyplot as plt\n'), ((2069, 2081), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2079, 2081), True, 'import matplotlib.pyplot as plt\n'), ((2083, 2134), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'loss', '"""bo"""'], {'label': '"""Training loss"""'}), "(epochs, loss, 'bo', label='Training loss')\n", (2091, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2191), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_loss', '"""b"""'], {'label': '"""Validation loss"""'}), "(epochs, val_loss, 'b', label='Validation loss')\n", (2143, 2191), True, 'import matplotlib.pyplot as plt\n'), ((2192, 2218), 'matplotlib.pyplot.title', 'plt.title', (['"""Training loss"""'], {}), "('Training loss')\n", (2201, 2218), True, 'import matplotlib.pyplot as plt\n'), ((2219, 2231), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2229, 2231), True, 'import matplotlib.pyplot as plt\n'), ((2232, 2269), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_ + 'plots/loss.png')"], {}), "(path_ + 'plots/loss.png')\n", (2243, 2269), True, 'import matplotlib.pyplot as plt\n'), ((2270, 2280), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2278, 2280), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2382), 'numpy.load', 'np.load', (["(path_ + 'numpy_files/test_labels.npy')"], {}), "(path_ + 
'numpy_files/test_labels.npy')\n", (2343, 2382), True, 'import numpy as np\n'), ((2422, 2468), 'numpy.load', 'np.load', (["(path_ + 'numpy_files/test_images.npy')"], {}), "(path_ + 'numpy_files/test_images.npy')\n", (2429, 2468), True, 'import numpy as np\n'), ((2883, 2930), 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), '(model)\n', (2923, 2930), True, 'import tensorflow as tf\n'), ((969, 1022), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(2)'], {'activation': '"""sigmoid"""', 'name': '"""predictions"""'}), "(2, activation='sigmoid', name='predictions')\n", (977, 1022), True, 'import tensorflow.keras.layers as kl\n'), ((1133, 1194), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)'}), "(monitor='val_loss', patience=5)\n", (1162, 1194), False, 'from tensorflow import keras\n'), ((1196, 1312), 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': "(path_ + 'model/best_model.h5')", 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=path_ + 'model/best_model.h5',\n monitor='val_loss', save_best_only=True)\n", (1227, 1312), False, 'from tensorflow import keras\n'), ((2558, 2578), 'numpy.rint', 'np.rint', (['predictions'], {}), '(predictions)\n', (2565, 2578), True, 'import numpy as np\n')] |
# Copyright 2020 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test that force field files are properly read.
"""
import math
import pytest
import numpy as np
from numpy.linalg import norm
import networkx as nx
import polyply
from polyply import TEST_DATA
from polyply.src.topology import Topology
from polyply.src.nonbond_engine import NonBondEngine
from polyply.src.random_walk import (fulfill_geometrical_constraints,
pbc_complete,
not_exceeds_max_dimensions,
_take_step,
RandomWalk,
is_restricted)
@pytest.mark.parametrize('restraint_dict, point, result', (
    # test single geometrical constraint
    # sphere: ["in"/"out", center, radius, "sphere"]
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 1.0, "sphere"]]},
     np.array([0, 0, 0.5]),
     True
     ),
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 1.0, "sphere"]]},
     np.array([0, 0, 1.5]),
     True
     ),
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 1.0, "sphere"]]},
     np.array([0, 0, 1.5]),
     False
     ),
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 1.0, "sphere"]]},
     np.array([0.0, 0.0, 0.5]),
     False
     ),
    # cylinder: ["in"/"out", center, radius, half-height, "cylinder"]
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 1.0, 2.0, "cylinder"]]},
     np.array([0, 0.5, 0.5]),
     True
     ),
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 1.0, 2.0, "cylinder"]]},
     np.array([0, 1.5, 1.5]),
     True
     ),
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 1.0, 2.0, "cylinder"]]},
     np.array([0, 1.5, 1.5]),
     False
     ),
    # rectangle: ["in"/"out", center, half-extents a, b, c, "rectangle"]
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 2.0, 2.0, 4.0, "rectangle"]]},
     np.array([0, 0.5, 0.5]),
     False
     ),
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 2.0, 2.0, 4.0, "rectangle"]]},
     np.array([0, 1.0, 0.5]),
     True
     ),
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 2.0, 2.0, 4.0, "rectangle"]]},
     np.array([0, 1.5, 4.5]),
     True
     ),
    ({"restraints": [["in", np.array([0.0, 0.0, 0.0]), 2.0, 2.0, 4.0, "rectangle"]]},
     np.array([0, 1.5, 4.5]),
     False
     ),
    ({"restraints": [["out", np.array([0.0, 0.0, 0.0]), 2.0, 2.0, 4.0, "rectangle"]]},
     np.array([0, 1.5, 3.9]),
     False
     ),
    # test default empty dict
    ({},
     np.array([0, 1.5, 3.9]),
     True
     ),
    ))
def test_geometric_restrictions(restraint_dict, point, result):
    # A point is accepted only when it satisfies every geometrical
    # restraint; an empty restraint dict accepts any point.
    assert fulfill_geometrical_constraints(point, restraint_dict) == result
@pytest.mark.parametrize('box_vect, point, result', (
    # point already inside the box -> returned unchanged
    (np.array([5., 5., 10.]),
     np.array([0, 0, 0.5]),
     np.array([0, 0, 0.5])
     ),
    # point past the +y face -> wrapped back into the box
    (np.array([5., 5., 10.]),
     np.array([5., 6., 0.5]),
     np.array([0., 1.0, 0.5])
     ),
    # negative coordinate -> wrapped to the positive image
    (np.array([5., 5., 10.]),
     np.array([5., -3., 0.5]),
     np.array([0., 2.0, 0.5])
     )))
def test_pbc_complete(box_vect, point, result):
    """pbc_complete must map a point into the primary periodic image."""
    wrapped = pbc_complete(point, box_vect)
    assert (wrapped == result).all()
@pytest.mark.parametrize('box_vect, point, result', (
    # every coordinate lies within the box
    (np.array([5., 5., 10.]),
     np.array([1., 1., 0.5]),
     True
     ),
    # y coordinate sticks out of the box
    (np.array([5., 5., 10.]),
     np.array([5., 6., 0.5]),
     False
     )))
def test_not_exceeds_max_dimensions(box_vect, point, result):
    """A point is valid only when it lies inside the maximum box dimensions."""
    inside = not_exceeds_max_dimensions(point, box_vect)
    assert inside == result
def test__take_step():
    """A single random-walk step must displace the point by exactly the
    requested step length."""
    start = np.array([1.0, 1.0, 1.0])
    box = np.array([5.0, 5.0, 5.0])
    length = 0.5
    sphere = polyply.src.linalg_functions.norm_sphere(50)
    end, _ = _take_step(sphere, length, start, box)
    # displacement magnitude equals the step length
    assert math.isclose(norm(end - start), length)
@pytest.fixture
def nonbond_matrix():
    """Build a NonBondEngine from the struc_build test topology with a
    cubic 10 nm box and a fixed PEO bead volume."""
    top_file = TEST_DATA + "/struc_build/system.top"
    top = Topology.from_gmx_topfile(name="test", path=top_file)
    top.preprocess()
    top.volumes = {"PEO": 0.43}
    box = np.array([10., 10., 10.])
    return NonBondEngine.from_topology(top.molecules, top, box=box)
@pytest.fixture
def molecule():
    """Return the first molecule of the struc_build test topology."""
    top_file = TEST_DATA + "/struc_build/system.top"
    top = Topology.from_gmx_topfile(name="test", path=top_file)
    return top.molecules[0]
def add_positions(nb_matrix, ncoords, pos=None):
    """Register the first ``ncoords`` coordinates of molecule 0 in *nb_matrix*.

    Parameters
    ----------
    nb_matrix:
        polyply NonBondEngine instance to fill with positions.
    ncoords: int
        number of rows of ``pos`` to register.
    pos: numpy.ndarray, optional
        (n, 3) coordinate array; rows equal to (inf, inf, inf) mark
        undefined positions and are skipped. Defaults to a straight
        chain along z with 0.37 nm spacing.

    Returns
    -------
    the same nb_matrix with the positions added.
    """
    if pos is None:  # idiomatic None check (was isinstance(pos, type(None)))
        pos = np.array([[1.0, 1.0, 0.37],
                        [1.0, 1.0, 0.74],
                        [1.0, 1.0, 1.11],
                        [1.0, 1.0, 1.48],
                        [1.0, 1.0, 1.85],
                        [1.0, 1.0, 2.22],
                        [1.0, 1.0, 2.59],
                        [1.0, 1.0, 2.96],
                        [1.0, 1.0, 3.33],
                        [1.0, 1.0, 3.70],
                        [1.0, 1.0, 4.07]])
    # hoisted loop-invariant sentinel for "position not yet defined"
    undefined = np.array([np.inf, np.inf, np.inf])
    nb_matrix.add_positions(pos[0], mol_idx=0, node_key=0, start=True)
    for idx, point in enumerate(pos[1:ncoords]):
        # skip rows flagged as not yet placed
        if all(point != undefined):
            nb_matrix.add_positions(point, mol_idx=0, node_key=idx+1, start=False)
    return nb_matrix
def test_rewind(nonbond_matrix):
    """Rewinding by nrewind=3 steps must reset the corresponding placed
    coordinates back to the inf sentinel."""
    engine = add_positions(nonbond_matrix, 6)
    walker = RandomWalk(mol_idx=0, nonbond_matrix=engine, nrewind=3)
    # node 4 is already placed and hence is skipped over
    walker.placed_nodes = [(0, 0), (1, 1), (2, 2), (3, 3), (4, 5), (5, 6)]
    assert walker._rewind(current_step=5) == 3
    undefined = np.array([np.inf, np.inf, np.inf])
    for node in (6, 5, 3):
        assert all(engine.positions[node] == undefined)
@pytest.mark.parametrize('new_point, result', (
    # well separated from the placed beads -> no overlap
    (np.array([1., 1., 2.96]),
     False
     ),
    # too close to the last placed bead -> overlap
    (np.array([1., 1., 2.3]),
     True
     )))
def test_is_overlap(nonbond_matrix, molecule, new_point, result):
    """_is_overlap must flag candidate points that clash with already
    placed beads of the chain."""
    engine = add_positions(nonbond_matrix, 6)
    walker = RandomWalk(mol_idx=0, nonbond_matrix=engine)
    walker.molecule = molecule
    assert walker._is_overlap(new_point, 7, nrexcl=1) == result
@pytest.mark.parametrize('new_point, restraint, result', (
    # distance restraint true upper_bound
    # ref_node, upper_bound, lower_bound
    (np.array([1., 1., 2.96]),
     [(0, 4.0, 0.0)],
     True
     ),
    #distance restraint false upper_bound
    (np.array([1., 1.0, 2.96]),
     [(0, 1.43, 0.0)],
     False
     ),
    # distance restraint false lower_bound
    (np.array([1., 1.0, 2.96]),
     [(5, 2.00, 1.0)],
     False
     ),
    # distance restraint true lower_bound
    (np.array([1., 1.0, 2.96]),
     [(5, 2.00, 0.47)],
     True
     ),
    # two restraints true
    (np.array([1., 1.0, 2.96]),
     [(5, 2.00, 0.47), (0, 4.0, 0.0)],
     True
     ),
    # two restraints 1 false
    (np.array([1., 1.0, 2.96]),
     [(5, 2.00, 1.0), (0, 4.0, 0.0)],
     False
     ),
    # two restraints 1 false
    (np.array([1., 1.0, 2.96]),
     [(5, 2.00, 0.47), (0, 1.43, 0.0)],
     False
     ),
    ))
def test_checks_milestone(nonbond_matrix, molecule, new_point, restraint, result):
    # Verify checks_milestones honours distance restraints on node 7:
    # each restraint is a (ref_node, upper_bound, lower_bound) tuple and
    # the candidate point must satisfy all of them at once.
    nb_matrix = add_positions(nonbond_matrix, 6)
    proccessor = RandomWalk(mol_idx=0, nonbond_matrix=nb_matrix)
    molecule.nodes[7]["distance_restraints"] = restraint
    proccessor.molecule = molecule
    assert proccessor.checks_milestones(7, new_point) == result
@pytest.mark.parametrize('pos, expected', (
    # simple test; should just work
    (np.array([[1.0, 1.0, 0.37],
               [1.0, 1.0, 0.74],
               [1.0, 1.0, 1.11],
               [1.0, 1.0, 1.48],
               [1.0, 1.0, 1.85],
               [1.0, 1.0, 2.22],
               [1.0, 1.0, 2.59]]),
     True),
    # this will fail because all space is blocked
    (np.array([[1.0, 1.0, 0.67],
               [1.0, 1.0, 1.37],
               [1.0, 1.37, 1.0],
               [1.37, 1.0, 1.0],
               [1.0, 0.63, 1.0],
               [0.63, 1.0, 1.0],
               [1.0, 1.0, 1.0]]),
     False
     )))
def test_update_positions(nonbond_matrix, molecule, pos, expected):
    # update_positions must place node 7 next to node 6 when free space
    # exists, and report failure otherwise (position stays at inf).
    # add positions of the rest of the chain
    nb_matrix = add_positions(nonbond_matrix, 7, pos=pos)
    # create instance of processor
    proccessor = RandomWalk(mol_idx=0,
                            nonbond_matrix=nb_matrix,
                            maxdim=np.array([10., 10., 10.]),
                            max_force=100.0,
                            maxiter=49)
    # set molecule attribute which is normally set by the run_molecule class
    proccessor.molecule = molecule
    vector_bundle = polyply.src.linalg_functions.norm_sphere(50)
    status = proccessor.update_positions(vector_bundle=vector_bundle, current_node=7, prev_node=6)
    assert status == expected
    if status:
        # success -> a finite coordinate was stored for node 7
        assert all(nb_matrix.positions[7] != np.array([np.inf, np.inf, np.inf]))
    else:
        # failure -> node 7 remains unplaced (inf sentinel)
        assert all(nb_matrix.positions[7] == np.array([np.inf, np.inf, np.inf]))
@pytest.mark.parametrize('build_attr, pos, start, npos', (
    # simple test; create all coordinates
    ({0: True, 1: True, 2: True, 3: True,
      4: True, 5: True, 6: True, 7: True, 8: True, 9: True},
     None,
     None,
     0),
    # start in the middle of the chain
    ({0: True, 1: True, 2: True, 3: True,
      4: True, 5: True, 6: True, 7: True, 8: True, 9: True},
     None,
     5,
     0),
    # here we look for a starting point and build the rest
    ({0: False, 1: False, 2: True, 3: True,
      4: True, 5: True, 6: True, 7: True, 8: True, 9: True},
     np.array([[1.0, 1.0, 0.67],
               [1.0, 1.0, 1.37]]),
     None,
     2,
     ),
    # here we need to skip one already defined position
    ({0: False, 1: True, 2: False, 3: True,
      4: True, 5: True, 6: True, 7: True, 8: True, 9: True},
     np.array([[1.0, 1.0, 0.67],
               [np.inf, np.inf, np.inf],
               [1.0, 1.0, 1.30]]),
     None,
     3),
    # here we trigger a rewind
    # ({0: False, 1: False, 2: False, 3: False,
    #   4: False, 5: False, 6: False, 7: True, 8: True, 9: True},
    #  np.array([[1.0, 1.0, 0.67],
    #            [1.0, 1.0, 1.37],
    #            [1.0, 1.37, 1.0],
    #            [1.37, 1.0, 1.0],
    #            [1.0, 0.63, 1.0],
    #            [0.63, 1.0, 1.0],
    #            [1.0, 1.0, 1.0],
    #            ]),
    #  None,
    #  7),
    ))
def test_run_molecule(nonbond_matrix, molecule, build_attr, npos, pos, start):
    # End-to-end check: after run_molecule every node (those pre-placed
    # via ``pos`` and those generated by the walk) has a finite position.
    # ``build_attr`` marks which nodes the walk must create; ``start``
    # optionally fixes the first node to be placed.
    # add positions of the rest of the chain
    nb_matrix = add_positions(nonbond_matrix, npos, pos=pos)
    # create instance of processor
    vector_bundle = polyply.src.linalg_functions.norm_sphere(500)
    proccessor = RandomWalk(mol_idx=0,
                            nonbond_matrix=nb_matrix,
                            maxdim=np.array([10., 10., 10.]),
                            max_force=100.0,
                            maxiter=49,
                            vector_sphere=vector_bundle,
                            start_node=start)
    # set molecule attribute which is normally set by the run_molecule class
    nx.set_node_attributes(molecule, build_attr, "build")
    proccessor.run_molecule(molecule)
    for pos in proccessor.nonbond_matrix.positions:
        assert all(pos != np.array([np.inf, np.inf, np.inf]))
@pytest.mark.parametrize('point, old_point, node_dict, expected', (
    # basic example
    (np.array([1., 1., 2.]),
     np.array([1., 1., 1.]),
     {"rw_options": [[np.array([0., 0., 1.0]), 90.0]]},
     True),
    # false because goes back
    (np.array([1., 1., 0.5]),
     np.array([1., 1., 1.]),
     {"rw_options": [[np.array([0., 0., 1.0]), 90.0]]},
     False),
    # false because angle not large enough
    (np.array([1.5, 1.5, 1.5]),
     np.array([1., 1., 1.]),
     {"rw_options": [[np.array([0., 0., 1.0]), 50.0]]},
     False),
    ))
def test_vector_push(point, old_point, node_dict, expected):
    # is_restricted accepts a step only when the displacement from
    # old_point to point lies within the cone given by rw_options:
    # [reference direction, maximum angle in degrees].
    status = is_restricted(point, old_point, node_dict)
    assert status == expected
| [
"polyply.src.topology.Topology.from_gmx_topfile",
"polyply.src.linalg_functions.norm_sphere",
"networkx.set_node_attributes",
"polyply.src.random_walk.pbc_complete",
"polyply.src.random_walk.is_restricted",
"numpy.array",
"numpy.linalg.norm",
"polyply.src.random_walk.not_exceeds_max_dimensions",
"po... | [((3920, 3945), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (3928, 3945), True, 'import numpy as np\n'), ((3982, 4026), 'polyply.src.linalg_functions.norm_sphere', 'polyply.src.linalg_functions.norm_sphere', (['(50)'], {}), '(50)\n', (4022, 4026), False, 'import polyply\n'), ((4291, 4343), 'polyply.src.topology.Topology.from_gmx_topfile', 'Topology.from_gmx_topfile', ([], {'name': '"""test"""', 'path': 'toppath'}), "(name='test', path=toppath)\n", (4316, 4343), False, 'from polyply.src.topology import Topology\n'), ((4683, 4735), 'polyply.src.topology.Topology.from_gmx_topfile', 'Topology.from_gmx_topfile', ([], {'name': '"""test"""', 'path': 'toppath'}), "(name='test', path=toppath)\n", (4708, 4735), False, 'from polyply.src.topology import Topology\n'), ((5702, 5760), 'polyply.src.random_walk.RandomWalk', 'RandomWalk', ([], {'mol_idx': '(0)', 'nonbond_matrix': 'nb_matrix', 'nrewind': '(3)'}), '(mol_idx=0, nonbond_matrix=nb_matrix, nrewind=3)\n', (5712, 5760), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((6359, 6406), 'polyply.src.random_walk.RandomWalk', 'RandomWalk', ([], {'mol_idx': '(0)', 'nonbond_matrix': 'nb_matrix'}), '(mol_idx=0, nonbond_matrix=nb_matrix)\n', (6369, 6406), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((7592, 7639), 'polyply.src.random_walk.RandomWalk', 'RandomWalk', ([], {'mol_idx': '(0)', 'nonbond_matrix': 'nb_matrix'}), '(mol_idx=0, nonbond_matrix=nb_matrix)\n', (7602, 7639), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((8999, 9043), 'polyply.src.linalg_functions.norm_sphere', 'polyply.src.linalg_functions.norm_sphere', (['(50)'], {}), '(50)\n', 
(9039, 9043), False, 'import polyply\n'), ((10983, 11028), 'polyply.src.linalg_functions.norm_sphere', 'polyply.src.linalg_functions.norm_sphere', (['(500)'], {}), '(500)\n', (11023, 11028), False, 'import polyply\n'), ((11453, 11506), 'networkx.set_node_attributes', 'nx.set_node_attributes', (['molecule', 'build_attr', '"""build"""'], {}), "(molecule, build_attr, 'build')\n", (11475, 11506), True, 'import networkx as nx\n'), ((12283, 12325), 'polyply.src.random_walk.is_restricted', 'is_restricted', (['point', 'old_point', 'node_dict'], {}), '(point, old_point, node_dict)\n', (12296, 12325), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((3027, 3081), 'polyply.src.random_walk.fulfill_geometrical_constraints', 'fulfill_geometrical_constraints', (['point', 'restraint_dict'], {}), '(point, restraint_dict)\n', (3058, 3081), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((3829, 3872), 'polyply.src.random_walk.not_exceeds_max_dimensions', 'not_exceeds_max_dimensions', (['point', 'box_vect'], {}), '(point, box_vect)\n', (3855, 3872), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((4095, 4120), 'numpy.array', 'np.array', (['[5.0, 5.0, 5.0]'], {}), '([5.0, 5.0, 5.0])\n', (4103, 4120), True, 'import numpy as np\n'), ((4146, 4169), 'numpy.linalg.norm', 'norm', (['(new_coord - coord)'], {}), '(new_coord - coord)\n', (4150, 4169), False, 'from numpy.linalg import norm\n'), ((4869, 5085), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.37], [1.0, 1.0, 0.74], [1.0, 1.0, 1.11], [1.0, 1.0, 1.48], [\n 1.0, 1.0, 1.85], [1.0, 1.0, 2.22], [1.0, 1.0, 2.59], [1.0, 1.0, 2.96],\n [1.0, 1.0, 3.33], [1.0, 1.0, 3.7], [1.0, 1.0, 4.07]]'], {}), '([[1.0, 
1.0, 0.37], [1.0, 1.0, 0.74], [1.0, 1.0, 1.11], [1.0, 1.0, \n 1.48], [1.0, 1.0, 1.85], [1.0, 1.0, 2.22], [1.0, 1.0, 2.59], [1.0, 1.0,\n 2.96], [1.0, 1.0, 3.33], [1.0, 1.0, 3.7], [1.0, 1.0, 4.07]])\n', (4877, 5085), True, 'import numpy as np\n'), ((1394, 1415), 'numpy.array', 'np.array', (['[0, 0, 0.5]'], {}), '([0, 0, 0.5])\n', (1402, 1415), True, 'import numpy as np\n'), ((1514, 1535), 'numpy.array', 'np.array', (['[0, 0, 1.5]'], {}), '([0, 0, 1.5])\n', (1522, 1535), True, 'import numpy as np\n'), ((1633, 1654), 'numpy.array', 'np.array', (['[0, 0, 1.5]'], {}), '([0, 0, 1.5])\n', (1641, 1654), True, 'import numpy as np\n'), ((1754, 1779), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (1762, 1779), True, 'import numpy as np\n'), ((1885, 1908), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (1893, 1908), True, 'import numpy as np\n'), ((2014, 2037), 'numpy.array', 'np.array', (['[0, 1.5, 1.5]'], {}), '([0, 1.5, 1.5])\n', (2022, 2037), True, 'import numpy as np\n'), ((2142, 2165), 'numpy.array', 'np.array', (['[0, 1.5, 1.5]'], {}), '([0, 1.5, 1.5])\n', (2150, 2165), True, 'import numpy as np\n'), ((2278, 2301), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (2286, 2301), True, 'import numpy as np\n'), ((2413, 2436), 'numpy.array', 'np.array', (['[0, 1.0, 0.5]'], {}), '([0, 1.0, 0.5])\n', (2421, 2436), True, 'import numpy as np\n'), ((2548, 2571), 'numpy.array', 'np.array', (['[0, 1.5, 4.5]'], {}), '([0, 1.5, 4.5])\n', (2556, 2571), True, 'import numpy as np\n'), ((2682, 2705), 'numpy.array', 'np.array', (['[0, 1.5, 4.5]'], {}), '([0, 1.5, 4.5])\n', (2690, 2705), True, 'import numpy as np\n'), ((2818, 2841), 'numpy.array', 'np.array', (['[0, 1.5, 3.9]'], {}), '([0, 1.5, 3.9])\n', (2826, 2841), True, 'import numpy as np\n'), ((2906, 2929), 'numpy.array', 'np.array', (['[0, 1.5, 3.9]'], {}), '([0, 1.5, 3.9])\n', (2914, 2929), True, 'import numpy as np\n'), ((3501, 3530), 
'polyply.src.random_walk.pbc_complete', 'pbc_complete', (['point', 'box_vect'], {}), '(point, box_vect)\n', (3513, 3530), False, 'from polyply.src.random_walk import fulfill_geometrical_constraints, pbc_complete, not_exceeds_max_dimensions, _take_step, RandomWalk, is_restricted\n'), ((3153, 3179), 'numpy.array', 'np.array', (['[5.0, 5.0, 10.0]'], {}), '([5.0, 5.0, 10.0])\n', (3161, 3179), True, 'import numpy as np\n'), ((3183, 3204), 'numpy.array', 'np.array', (['[0, 0, 0.5]'], {}), '([0, 0, 0.5])\n', (3191, 3204), True, 'import numpy as np\n'), ((3211, 3232), 'numpy.array', 'np.array', (['[0, 0, 0.5]'], {}), '([0, 0, 0.5])\n', (3219, 3232), True, 'import numpy as np\n'), ((3246, 3272), 'numpy.array', 'np.array', (['[5.0, 5.0, 10.0]'], {}), '([5.0, 5.0, 10.0])\n', (3254, 3272), True, 'import numpy as np\n'), ((3276, 3301), 'numpy.array', 'np.array', (['[5.0, 6.0, 0.5]'], {}), '([5.0, 6.0, 0.5])\n', (3284, 3301), True, 'import numpy as np\n'), ((3306, 3331), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.5]'], {}), '([0.0, 1.0, 0.5])\n', (3314, 3331), True, 'import numpy as np\n'), ((3343, 3369), 'numpy.array', 'np.array', (['[5.0, 5.0, 10.0]'], {}), '([5.0, 5.0, 10.0])\n', (3351, 3369), True, 'import numpy as np\n'), ((3373, 3399), 'numpy.array', 'np.array', (['[5.0, -3.0, 0.5]'], {}), '([5.0, -3.0, 0.5])\n', (3381, 3399), True, 'import numpy as np\n'), ((3404, 3429), 'numpy.array', 'np.array', (['[0.0, 2.0, 0.5]'], {}), '([0.0, 2.0, 0.5])\n', (3412, 3429), True, 'import numpy as np\n'), ((3603, 3629), 'numpy.array', 'np.array', (['[5.0, 5.0, 10.0]'], {}), '([5.0, 5.0, 10.0])\n', (3611, 3629), True, 'import numpy as np\n'), ((3633, 3658), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.5]'], {}), '([1.0, 1.0, 0.5])\n', (3641, 3658), True, 'import numpy as np\n'), ((3681, 3707), 'numpy.array', 'np.array', (['[5.0, 5.0, 10.0]'], {}), '([5.0, 5.0, 10.0])\n', (3689, 3707), True, 'import numpy as np\n'), ((3711, 3736), 'numpy.array', 'np.array', (['[5.0, 6.0, 0.5]'], {}), 
'([5.0, 6.0, 0.5])\n', (3719, 3736), True, 'import numpy as np\n'), ((4557, 4585), 'numpy.array', 'np.array', (['[10.0, 10.0, 10.0]'], {}), '([10.0, 10.0, 10.0])\n', (4565, 4585), True, 'import numpy as np\n'), ((6133, 6159), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (6141, 6159), True, 'import numpy as np\n'), ((6183, 6208), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.3]'], {}), '([1.0, 1.0, 2.3])\n', (6191, 6208), True, 'import numpy as np\n'), ((6712, 6738), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (6720, 6738), True, 'import numpy as np\n'), ((6818, 6844), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (6826, 6844), True, 'import numpy as np\n'), ((6928, 6954), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (6936, 6954), True, 'import numpy as np\n'), ((7037, 7063), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (7045, 7063), True, 'import numpy as np\n'), ((7131, 7157), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (7139, 7157), True, 'import numpy as np\n'), ((7245, 7271), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (7253, 7271), True, 'import numpy as np\n'), ((7359, 7385), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.96]'], {}), '([1.0, 1.0, 2.96])\n', (7367, 7385), True, 'import numpy as np\n'), ((8755, 8783), 'numpy.array', 'np.array', (['[10.0, 10.0, 10.0]'], {}), '([10.0, 10.0, 10.0])\n', (8763, 8783), True, 'import numpy as np\n'), ((7879, 8020), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.37], [1.0, 1.0, 0.74], [1.0, 1.0, 1.11], [1.0, 1.0, 1.48], [\n 1.0, 1.0, 1.85], [1.0, 1.0, 2.22], [1.0, 1.0, 2.59]]'], {}), '([[1.0, 1.0, 0.37], [1.0, 1.0, 0.74], [1.0, 1.0, 1.11], [1.0, 1.0, \n 1.48], [1.0, 1.0, 1.85], [1.0, 1.0, 2.22], [1.0, 1.0, 2.59]])\n', (7887, 8020), True, 'import numpy as np\n'), ((8174, 8313), 'numpy.array', 
'np.array', (['[[1.0, 1.0, 0.67], [1.0, 1.0, 1.37], [1.0, 1.37, 1.0], [1.37, 1.0, 1.0], [\n 1.0, 0.63, 1.0], [0.63, 1.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 0.67], [1.0, 1.0, 1.37], [1.0, 1.37, 1.0], [1.37, 1.0,\n 1.0], [1.0, 0.63, 1.0], [0.63, 1.0, 1.0], [1.0, 1.0, 1.0]])\n', (8182, 8313), True, 'import numpy as np\n'), ((11157, 11185), 'numpy.array', 'np.array', (['[10.0, 10.0, 10.0]'], {}), '([10.0, 10.0, 10.0])\n', (11165, 11185), True, 'import numpy as np\n'), ((9933, 9979), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.67], [1.0, 1.0, 1.37]]'], {}), '([[1.0, 1.0, 0.67], [1.0, 1.0, 1.37]])\n', (9941, 9979), True, 'import numpy as np\n'), ((10189, 10260), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.67], [np.inf, np.inf, np.inf], [1.0, 1.0, 1.3]]'], {}), '([[1.0, 1.0, 0.67], [np.inf, np.inf, np.inf], [1.0, 1.0, 1.3]])\n', (10197, 10260), True, 'import numpy as np\n'), ((11754, 11779), 'numpy.array', 'np.array', (['[1.0, 1.0, 2.0]'], {}), '([1.0, 1.0, 2.0])\n', (11762, 11779), True, 'import numpy as np\n'), ((11783, 11808), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (11791, 11808), True, 'import numpy as np\n'), ((11910, 11935), 'numpy.array', 'np.array', (['[1.0, 1.0, 0.5]'], {}), '([1.0, 1.0, 0.5])\n', (11918, 11935), True, 'import numpy as np\n'), ((11940, 11965), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (11948, 11965), True, 'import numpy as np\n'), ((12081, 12106), 'numpy.array', 'np.array', (['[1.5, 1.5, 1.5]'], {}), '([1.5, 1.5, 1.5])\n', (12089, 12106), True, 'import numpy as np\n'), ((12113, 12138), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (12121, 12138), True, 'import numpy as np\n'), ((5463, 5497), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (5471, 5497), True, 'import numpy as np\n'), ((6043, 6077), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, 
np.inf])\n', (6051, 6077), True, 'import numpy as np\n'), ((9232, 9266), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (9240, 9266), True, 'import numpy as np\n'), ((9322, 9356), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (9330, 9356), True, 'import numpy as np\n'), ((11623, 11657), 'numpy.array', 'np.array', (['[np.inf, np.inf, np.inf]'], {}), '([np.inf, np.inf, np.inf])\n', (11631, 11657), True, 'import numpy as np\n'), ((1344, 1369), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1352, 1369), True, 'import numpy as np\n'), ((1464, 1489), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1472, 1489), True, 'import numpy as np\n'), ((1583, 1608), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1591, 1608), True, 'import numpy as np\n'), ((1704, 1729), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1712, 1729), True, 'import numpy as np\n'), ((1828, 1853), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1836, 1853), True, 'import numpy as np\n'), ((1957, 1982), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1965, 1982), True, 'import numpy as np\n'), ((2085, 2110), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2093, 2110), True, 'import numpy as np\n'), ((2215, 2240), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2223, 2240), True, 'import numpy as np\n'), ((2350, 2375), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2358, 2375), True, 'import numpy as np\n'), ((2485, 2510), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2493, 2510), True, 'import numpy as np\n'), ((2619, 2644), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2627, 
2644), True, 'import numpy as np\n'), ((2755, 2780), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (2763, 2780), True, 'import numpy as np\n'), ((11829, 11854), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (11837, 11854), True, 'import numpy as np\n'), ((11986, 12011), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (11994, 12011), True, 'import numpy as np\n'), ((12159, 12184), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0])\n', (12167, 12184), True, 'import numpy as np\n')] |
################################################################
# module to convert data files from Huygens mission into
# z-dependent functions
################################################################
import numpy as np
from scipy.interpolate import interp1d
import pandas as pd
import datetime
def getz2xfunctions():
    '''
    Process Huygens probe descent data from discrete data points into
    continuous functions of altitude z, using linear interpolation (scipy).

    Returns
    -------
    z2p, z2T, z2fCH4 : callables
        convert altitude z [m] to total pressure [Pa], temperature [K],
        and CH4 molar fraction [mol/mol], respectively; out-of-range
        altitudes clamp to the nearest measured value.
    '''
    ppi_f = 'data/Huygens_PPI.txt'
    # source: https://atmos.nmsu.edu/PDS/data/hphasi_0001/DATA/PPI/HASI_L4_PPI_PRESSURE_VEL.TAB
    # documentation: https://atmos.nmsu.edu/PDS/data/hphasi_0001/DATA/PPI/HASI_L4_PPI_PRESSURE_VEL.LBL
    tem_f = 'data/Huygens_TEM.txt'
    # source: https://atmos.nmsu.edu/PDS/data/hphasi_0001/DATA/TEM/HASI_L4_TEM_TEMPERATURE.TAB
    # documentation: https://atmos.nmsu.edu/PDS/data/hphasi_0001/DATA/TEM/HASI_L4_TEM_TEMPERATURE.LBL
    gcms_f = 'data/Huygens_GCMS.txt'
    # source: https://atmos.nmsu.edu/PDS/data/hpgcms_0001/DATA/DTWG_MOLE_FRACTION/GCMS_MOLE_FRACTION_STG2.TAB
    # documentation: https://atmos.nmsu.edu/PDS/data/hpgcms_0001/DATA/DTWG_MOLE_FRACTION/GCMS_MOLE_FRACTION_STG2.LBL
    # "Obviously the mole fraction for Nitrogen (N2) is [1. - SUM(MF(CH4)+MF(Ar)+MF(XX))]."
    # => molar concentrations
    ppi_data = np.genfromtxt(ppi_f, delimiter=';')
    tem_data = np.genfromtxt(tem_f, delimiter=';')
    # the GCMS table carries an absolute UTC timestamp column -> parse with
    # pandas (a redundant np.genfromtxt read of the same file was removed)
    gcms_data = pd.read_csv(gcms_f, header=0, parse_dates=[0])
    # convert UTC_ABS_TIME to the same elapsed-ms time base as the PPI/TEM
    # files, whose START_TIME = 2005-01-14T09:11:21.373
    t0 = np.datetime64('2005-01-14T09:11:21.373')
    t_elapsed_ms = (gcms_data['UTC_ABS_TIME'].values - t0)/np.timedelta64(1, 'ms')
    t_surface = 8878990.  # [ms] elapsed time at which the probe hit the surface
    gcms_data_ms = np.array([t_elapsed_ms, gcms_data['CH4'].values]).transpose()
    # column meanings of the read-in arrays
    # ppi_data[:,0]     # time [milliseconds]
    # ppi_data[:,2]     # total pressure [Pa]
    # ppi_data[:,4]     # z [m]
    # tem_data[:,0]     # time [milliseconds]
    # tem_data[:,1]     # z [m]
    # tem_data[:,2]     # T [K]
    # gcms_data_ms[:,0] # time [milliseconds]
    # gcms_data_ms[:,1] # f_CH4 [mol/mol]
    # only include data recorded before the probe hit the surface
    ppi_data = ppi_data[ppi_data[:, 0] <= t_surface]
    tem_data = tem_data[tem_data[:, 0] <= t_surface]
    gcms_data_ms = gcms_data_ms[gcms_data_ms[:, 0] <= t_surface]
    # interpolations to get functions in terms of z
    # (the unused t2z and z2t helpers from the original were removed)
    t2fCH4 = interp1d(gcms_data_ms[:, 0], gcms_data_ms[:, 1], bounds_error=False,
                      fill_value=(gcms_data_ms[0, 1], gcms_data_ms[-1, 1]))
    # GCMS has no altitude column of its own: map z -> time (via PPI) -> f_CH4
    z2fCH4 = interp1d(ppi_data[:, 4], t2fCH4(ppi_data[:, 0]), bounds_error=False,
                      fill_value=(gcms_data_ms[-1, 1], gcms_data_ms[0, 1]))
    z2T = interp1d(tem_data[:, 1], tem_data[:, 2], bounds_error=False,
                   fill_value=(tem_data[-1, 2], tem_data[0, 2]))
    z2p = interp1d(ppi_data[:, 4], ppi_data[:, 2], bounds_error=False,
                   fill_value=(ppi_data[-1, 2], ppi_data[0, 2]))
    return z2p, z2T, z2fCH4
| [
"pandas.read_csv",
"numpy.datetime64",
"numpy.genfromtxt",
"numpy.timedelta64",
"numpy.array",
"scipy.interpolate.interp1d"
] | [((1483, 1518), 'numpy.genfromtxt', 'np.genfromtxt', (['ppi_f'], {'delimiter': '""";"""'}), "(ppi_f, delimiter=';')\n", (1496, 1518), True, 'import numpy as np\n'), ((1533, 1568), 'numpy.genfromtxt', 'np.genfromtxt', (['tem_f'], {'delimiter': '""";"""'}), "(tem_f, delimiter=';')\n", (1546, 1568), True, 'import numpy as np\n'), ((1584, 1620), 'numpy.genfromtxt', 'np.genfromtxt', (['gcms_f'], {'skip_header': '(1)'}), '(gcms_f, skip_header=1)\n', (1597, 1620), True, 'import numpy as np\n'), ((1636, 1682), 'pandas.read_csv', 'pd.read_csv', (['gcms_f'], {'header': '(0)', 'parse_dates': '[0]'}), '(gcms_f, header=0, parse_dates=[0])\n', (1647, 1682), True, 'import pandas as pd\n'), ((1803, 1843), 'numpy.datetime64', 'np.datetime64', (['"""2005-01-14T09:11:21.373"""'], {}), "('2005-01-14T09:11:21.373')\n", (1816, 1843), True, 'import numpy as np\n'), ((1970, 2019), 'numpy.array', 'np.array', (["[t_elapsed_ms, gcms_data['CH4'].values]"], {}), "([t_elapsed_ms, gcms_data['CH4'].values])\n", (1978, 2019), True, 'import numpy as np\n'), ((2669, 2768), 'scipy.interpolate.interp1d', 'interp1d', (['ppi_data[:, 0]', 'ppi_data[:, 4]'], {'bounds_error': '(False)', 'fill_value': '(ppi_data[0, 0], 0.0)'}), '(ppi_data[:, 0], ppi_data[:, 4], bounds_error=False, fill_value=(\n ppi_data[0, 0], 0.0))\n', (2677, 2768), False, 'from scipy.interpolate import interp1d\n'), ((2770, 2896), 'scipy.interpolate.interp1d', 'interp1d', (['gcms_data_ms[:, 0]', 'gcms_data_ms[:, 1]'], {'bounds_error': '(False)', 'fill_value': '(gcms_data_ms[0, 1], gcms_data_ms[-1, 1])'}), '(gcms_data_ms[:, 0], gcms_data_ms[:, 1], bounds_error=False,\n fill_value=(gcms_data_ms[0, 1], gcms_data_ms[-1, 1]))\n', (2778, 2896), False, 'from scipy.interpolate import interp1d\n'), ((3024, 3135), 'scipy.interpolate.interp1d', 'interp1d', (['ppi_data[:, 4]', 'ppi_data[:, 0]'], {'bounds_error': '(False)', 'fill_value': '(ppi_data[-1, 0], ppi_data[0, 0])'}), '(ppi_data[:, 4], ppi_data[:, 0], bounds_error=False, fill_value=(\n 
ppi_data[-1, 0], ppi_data[0, 0]))\n', (3032, 3135), False, 'from scipy.interpolate import interp1d\n'), ((3133, 3244), 'scipy.interpolate.interp1d', 'interp1d', (['tem_data[:, 1]', 'tem_data[:, 2]'], {'bounds_error': '(False)', 'fill_value': '(tem_data[-1, 2], tem_data[0, 2])'}), '(tem_data[:, 1], tem_data[:, 2], bounds_error=False, fill_value=(\n tem_data[-1, 2], tem_data[0, 2]))\n', (3141, 3244), False, 'from scipy.interpolate import interp1d\n'), ((3243, 3354), 'scipy.interpolate.interp1d', 'interp1d', (['ppi_data[:, 4]', 'ppi_data[:, 2]'], {'bounds_error': '(False)', 'fill_value': '(ppi_data[-1, 2], ppi_data[0, 2])'}), '(ppi_data[:, 4], ppi_data[:, 2], bounds_error=False, fill_value=(\n ppi_data[-1, 2], ppi_data[0, 2]))\n', (3251, 3354), False, 'from scipy.interpolate import interp1d\n'), ((1901, 1924), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""ms"""'], {}), "(1, 'ms')\n", (1915, 1924), True, 'import numpy as np\n')] |
#!/usr/bin/python3
"""
A Psi4 input script to compute CIS energy from a SCF reference
Algorithms were developed for ilustrative purposes.
"""
__authors__ = "<NAME>"
__copyright__ = "(c) 2019"
__license__ = "BSD-3-Clause"
__date__ = "2019-07-23"
import time
import numpy
import psi4
from abc import ABC, abstractmethod
from ..psithon.util import two_index_transform, four_index_transform
#numpy.set_printoptions(precision=3, linewidth=200, suppress=True)
__all__ = [\
"CIS",
"RCIS",
"UCIS",]
class CIS(ABC):
    """
    Implementation of the CIS (configuration interaction singles) method.

    The Hamiltonian is built in the basis of the SCF reference determinant
    plus all single excitations (alpha block first, then beta block) and
    diagonalized exactly.  Concrete subclasses supply the SCF reference type
    and the beta-spin data: RCIS aliases beta quantities to alpha, UCIS
    computes them separately.

    Use the `create` factory classmethod rather than instantiating the
    subclasses directly.
    """
    # Supported SCF reference wavefunction types
    reference_types = ['rhf', 'uhf']
    def __init__(self, mol, verbose, save_states):
        ABC.__init__(self)
        self._common_init(mol, verbose, save_states)
    # ---> public interface <--- #
    @classmethod
    def create(cls, mol, verbose=True, save_states=None, reference='rhf'):
        "Create CIS instance"
        if reference.lower() not in cls.reference_types:
            raise ValueError("Incorrect reference wavefunction type chosen. Only RHF and UHF are available")
        assert(not (reference.lower()=='rhf' and mol.multiplicity() != 1)), "RHF reference cannot be set for open-shell system!"
        # UCIS is chosen for open-shell systems or when explicitly requested
        if mol.multiplicity()!=1 or reference.lower()=='uhf': return UCIS(mol, verbose, save_states)
        else: return RCIS(mol, verbose, save_states)
    def run(self):
        "Run CIS calculations"
        self._run_scf()
        self._prepare_for_cis()
        self._build_hamiltonian()
        self._diagonalize()
    # ---> protected interface <--- #
    def _common_init(self, mol, verbose, save_states):
        """Initialize all attributes; most are populated later by run()."""
        self.mol = mol
        self.verbose = verbose
        # If not None: keep only the ground state plus this many excited states
        self.save_states = save_states
        #
        self.ref_wfn = None   # SCF reference wavefunction
        self.scf_e = None     # total SCF energy
        self.e_0 = None       # electronic SCF energy (nuclear repulsion removed)
        self.nuclear_repulsion_energy = mol.nuclear_repulsion_energy()
        self.N = self.nuclear_repulsion_energy
        #
        self.hamiltonian = None   # CIS Hamiltonian (ndet x ndet)
        self.E = None             # eigenvalues
        self.W = None             # eigenvectors (CI coefficients)
        #
        self.nmo = None       # number of molecular orbitals
        self.naocc = None     # occupied alpha orbitals
        self.nbocc = None     # occupied beta orbitals
        self.navir = None     # virtual alpha orbitals
        self.nbvir = None     # virtual beta orbitals
        self.ndet = None      # size of the CI determinant basis
        #
        self.Ca_occ = None
        self.Cb_occ = None
        self.Ca_vir = None
        self.Cb_vir = None
        #
        self.eps_a_occ = None
        self.eps_a_vir = None
        self.eps_b_occ = None
        self.eps_b_vir = None
        #
        self.eri_OVOV = None
        self.eri_OOVV = None
        self.eri_OVov = None
        self.eri_oovv = None
        self.eri_ovov = None
        #self.Da = None
        #self.Db = None
        #self.jk = None
        #
        # True when the beta excitation block equals the alpha block (RHF case)
        self.same_ab = None
    def _run_scf(self):
        """Run the SCF and store the reference wavefunction and energies."""
        self._set_scf_reference()
        scf_e, wfn = psi4.energy('HF', molecule=self.mol, return_wfn=True)
        self.scf_e = scf_e
        self.e_0 = scf_e - self.nuclear_repulsion_energy
        self.ref_wfn = wfn
    def _prepare_for_cis(self):
        """Collect alpha-spin MO data and transform the two-electron
        integrals to the MO basis; delegate beta-spin setup to _set_beta."""
        self.Ca_occ = self.ref_wfn.Ca_subset("AO","OCC")
        self.Ca_vir = self.ref_wfn.Ca_subset("AO","VIR")
        #self.Da = self.ref_wfn.Da()
        self.nmo = self.ref_wfn.nmo()
        self.naocc = self.ref_wfn.nalpha()
        self.navir = self.nmo - self.naocc
        #
        #self.jk = psi4.core.JK.build(self.ref_wfn.basisset(), jk_type='direct')
        #self.jk.set_memory(int(5e8))
        #self.jk.initialize()
        #
        mints = psi4.core.MintsHelper(self.ref_wfn.basisset())
        eri = numpy.asarray(mints.ao_eri())   # AO two-electron integrals
        H = self.ref_wfn.H().to_array(dense=True)   # core Hamiltonian (AO basis)
        # Dense alpha MO coefficient matrices
        Oa = self.Ca_occ.to_array(dense=True)
        Va = self.Ca_vir.to_array(dense=True)
        #
        # AO->MO transform of the (ov|ov) and (oo|vv) type integrals
        #self.eri_OVOV = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Oa, Va, Oa, Va, eri)
        #self.eri_OOVV = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Oa, Oa, Va, Va, eri)
        self.eri_OVOV = four_index_transform(eri, Oa, Va, Oa, Va)
        self.eri_OOVV = four_index_transform(eri, Oa, Oa, Va, Va)
        #
        #self.jk.C_clear()
        #self.jk.C_left_add(psi4.core.Matrix.from_array(self.Da, ""))
        #I = numpy.identity(self.Da.shape[0], numpy.float64)
        #self.jk.C_right_add(psi4.core.Matrix.from_array(I, ""))
        #self.jk.compute()
        #Ja= self.jk.J()[0].to_array(dense=True)
        #Ka= self.jk.K()[0].to_array(dense=True)
        ##
        #Ga = H+2.0*Ja-Ka
        #Ga = self.ref_wfn.Fa().to_array(dense=True)
        #self.Fa_occ = numpy.einsum("ai,ab,bj->ij", Oa, Ga, Oa)
        #self.Fa_vir = numpy.einsum("ai,ab,bj->ij", Va, Ga, Va)
        #self.Fa_occ = two_index_transform(Ga, Oa, Oa)
        #self.Fa_vir = two_index_transform(Ga, Va, Va)
        # Alpha orbital energies (diagonal Fock elements in the MO basis)
        self.eps_a_occ = self.ref_wfn.epsilon_a_subset("MO","OCC").to_array()
        self.eps_a_vir = self.ref_wfn.epsilon_a_subset("MO","VIR").to_array()
        #
        self._set_beta(H, eri)
    @abstractmethod
    def _set_beta(self, H, eri): pass
    @abstractmethod
    def _set_scf_reference(self): pass
    def _build_hamiltonian(self):
        """Assemble the CIS Hamiltonian in the determinant basis.

        Layout: index 0 is the reference determinant, indices 1..off_a are
        alpha singles (compound index ia = navir*i + a) and the remaining
        indices are beta singles.
        """
        # OO block
        self.hamiltonian[0, 0] = self.e_0
        # OS and SO blocks are zero (Brillouin theorem); the bare `None`
        # below is a deliberate no-op placeholder
        None
        # SS block
        off_a = self.naocc*self.navir
        for i in range(self.naocc):
            for a in range(self.navir):
                ia = self.navir*i + a
                # block AA
                for j in range(self.naocc):
                    for b in range(self.navir):
                        jb = self.navir*j + b
                        v = 0.0
                        # diagonal: reference energy plus orbital energy gap
                        if ((i==j) and (a==b)): v+= self.e_0 + self.eps_a_vir[a] - self.eps_a_occ[i]
                        v += self.eri_OVOV[i,a,j,b] - self.eri_OOVV[i,j,a,b]
                        #
                        self.hamiltonian[1+ia,1+jb] = v
                # blocks AB and BA (coupling through Coulomb integrals only)
                for j in range(self.nbocc):
                    for b in range(self.nbvir):
                        jb = self.nbvir*j + b
                        v = self.eri_OVov[i,a,j,b]
                        self.hamiltonian[1+ia,1+jb+off_a] = v # AB
                        self.hamiltonian[1+jb+off_a,1+ia] = v # BA
        if not self.same_ab:
            for i in range(self.nbocc):
                for a in range(self.nbvir):
                    ia = self.nbvir*i + a
                    # block BB
                    for j in range(self.nbocc):
                        for b in range(self.nbvir):
                            jb = self.nbvir*j + b
                            v = 0.0
                            if ((i==j) and (a==b)): v+= self.e_0 + self.eps_b_vir[a] - self.eps_b_occ[i]
                            v += self.eri_ovov[i,a,j,b] - self.eri_oovv[i,j,a,b]
                            #
                            self.hamiltonian[1+ia+off_a,1+jb+off_a] = v
        else:
            # RHF reference: the beta block equals the alpha block
            self.hamiltonian[(1+off_a):,(1+off_a):] = self.hamiltonian[1:1+off_a,1:1+off_a]
        # Free the (large) transformed integral tensors once the matrix is built
        del self.eri_ovov, self.eri_OVOV, self.eri_OVov, self.eri_OOVV, self.eri_oovv
    def _diagonalize(self):
        """Diagonalize the Hamiltonian, optionally truncate to the requested
        number of states, and print the CIS excitation energies."""
        t = time.time()
        E, W = numpy.linalg.eigh(self.hamiltonian)
        if self.save_states is not None:
            # Keep the ground state plus `save_states` excited states
            E = E[ :(self.save_states+1)]
            W = W[:,:(self.save_states+1)]
        self.E = E
        self.W = W
        #
        e_cis_ground = E[0] + self.nuclear_repulsion_energy
        if self.verbose: print('..finished diagonalization in %.3f seconds.\n' % (time.time() - t))
        if self.verbose: print('No of Determinants: % 16d' % (self.ndet))
        if self.verbose: print('SCF energy: % 16.10f' % (self.scf_e))
        if self.verbose: print('CIS ground: % 16.10f' % (e_cis_ground))
        hartree2eV = 27.211
        if self.verbose: print('\nCIS Excitation Energies:')
        if self.verbose: print(' Hartree eV')
        if self.verbose: print('-- -------------------- --------------------')
        for i in range(1, len(E)):
            excit_e = E[i] + self.nuclear_repulsion_energy - e_cis_ground
            if self.verbose: print('%2d %20.10f %20.10f' % (i, excit_e, excit_e * hartree2eV))
class RCIS(CIS):
    """Spin-restricted CIS: every beta quantity aliases its alpha counterpart."""

    def __init__(self, mol, verbose, save_states):
        super().__init__(mol, verbose, save_states)
        # Alpha and beta excitation blocks of the Hamiltonian are identical.
        self.same_ab = True

    def _set_scf_reference(self):
        # Force a closed-shell (RHF) SCF reference.
        psi4.core.set_global_option('reference', 'rhf')

    def _set_beta(self, H, eri):
        """Mirror all alpha-spin data onto the beta-spin attributes."""
        # Occupations and orbitals are shared between the two spins.
        self.nbocc = self.naocc
        self.nbvir = self.navir
        self.Cb_occ = self.Ca_occ
        self.Cb_vir = self.Ca_vir
        # One reference determinant plus all singles of each spin.
        self.ndet = 1 + self.naocc * self.navir + self.nbocc * self.nbvir
        self.hamiltonian = numpy.zeros((self.ndet, self.ndet), numpy.float64)
        # The transformed two-electron integrals are spin-independent here.
        self.eri_ovov = self.eri_OVOV
        self.eri_OVov = self.eri_OVOV
        self.eri_oovv = self.eri_OOVV
        # Orbital energies likewise coincide.
        self.eps_b_occ = self.eps_a_occ
        self.eps_b_vir = self.eps_a_vir
class UCIS(CIS):
    """Spin-unrestricted CIS built on a UHF reference."""

    def __init__(self, mol, verbose, save_states):
        super().__init__(mol, verbose, save_states)
        # Alpha and beta excitation blocks must be constructed separately.
        self.same_ab = False

    def _set_scf_reference(self):
        # Force an open-shell (UHF) SCF reference.
        psi4.core.set_global_option('reference', 'uhf')

    def _set_beta(self, H, eri):
        """Fetch beta-spin orbitals and transform the beta-spin integrals."""
        self.Cb_occ = self.ref_wfn.Cb_subset("AO","OCC")
        self.Cb_vir = self.ref_wfn.Cb_subset("AO","VIR")
        self.nbocc = self.ref_wfn.nbeta()
        self.nbvir = self.nmo - self.nbocc
        # One reference determinant plus alpha and beta single excitations.
        self.ndet = 1 + self.naocc * self.navir + self.nbocc * self.nbvir
        self.hamiltonian = numpy.zeros((self.ndet, self.ndet), numpy.float64)
        # Dense MO coefficient matrices for both spins.
        co_a = self.Ca_occ.to_array(dense=True)
        cv_a = self.Ca_vir.to_array(dense=True)
        co_b = self.Cb_occ.to_array(dense=True)
        cv_b = self.Cb_vir.to_array(dense=True)
        # AO->MO transformation of the mixed- and beta-spin integral tensors.
        self.eri_ovov = four_index_transform(eri, co_b, cv_b, co_b, cv_b)
        self.eri_OVov = four_index_transform(eri, co_a, cv_a, co_b, cv_b)
        self.eri_oovv = four_index_transform(eri, co_b, co_b, cv_b, cv_b)
        # Beta orbital energies from the UHF wavefunction.
        self.eps_b_occ = self.ref_wfn.epsilon_b_subset("MO","OCC").to_array()
        self.eps_b_vir = self.ref_wfn.epsilon_b_subset("MO","VIR").to_array()
# - the same as above but in one class - #
class CIS_one_class:
    """Direct CIS Method.

    Legacy, self-contained variant of the CIS/RCIS/UCIS hierarchy above,
    kept in a single class.  It always treats alpha and beta spins
    separately (UHF-style data layout).
    """
    def __init__(self, mol, verbose=True, save_states=4):
        # NOTE(review): `verbose` is accepted but never stored or used.
        self.mol = mol
        self.save_states = save_states
        self.Ca_occ = None
        self.Cb_occ = None
        self.Ca_vir = None
        self.Cb_vir = None
        self.Da = None
        self.Db = None
        self.scf_e = None     # total SCF energy
        self.e_0 = None       # electronic SCF energy
        self.N = mol.nuclear_repulsion_energy()
        self.E = None         # eigenvalues
        self.W = None         # eigenvectors
        self.hamiltonian = None
        self.nmo = None
        self.naocc = None
        self.nbocc = None
        self.navir = None
        self.nbvir = None
        self.ndet = None
        self.jk = None
    def run(self):
        """Run the full CIS calculation pipeline."""
        self._run_scf()
        self._prepare_for_cis()
        self._build_hamiltonian()
        self._diagonalize()
    def _prepare_for_cis(self):
        """Collect MO data for both spins and transform integrals/Fock matrices."""
        self.Ca_occ = self.ref_wfn.Ca_subset("AO","OCC")
        self.Cb_occ = self.ref_wfn.Cb_subset("AO","OCC")
        self.Ca_vir = self.ref_wfn.Ca_subset("AO","VIR")
        self.Cb_vir = self.ref_wfn.Cb_subset("AO","VIR")
        self.Da = self.ref_wfn.Da()
        self.Db = self.ref_wfn.Db()
        self.nmo = self.ref_wfn.nmo()
        self.naocc = self.ref_wfn.nalpha()
        self.nbocc = self.ref_wfn.nbeta()
        self.navir = self.nmo - self.naocc
        self.nbvir = self.nmo - self.nbocc
        # Reference determinant plus alpha and beta single excitations
        self.ndet = 1 + self.naocc * self.navir + self.nbocc * self.nbvir
        self.hamiltonian = numpy.zeros((self.ndet, self.ndet),numpy.float64)
        #
        # NOTE(review): the JK object is built but only the commented-out
        # code below ever used it.
        self.jk = psi4.core.JK.build(self.ref_wfn.basisset(), jk_type='direct')
        self.jk.set_memory(int(5e8))
        self.jk.initialize()
        #
        mints = psi4.core.MintsHelper(self.ref_wfn.basisset())
        eri = numpy.asarray(mints.ao_eri())   # AO two-electron integrals
        # Dense MO coefficient matrices for both spins
        Oa = self.Ca_occ.to_array(dense=True)
        Ob = self.Cb_occ.to_array(dense=True)
        Va = self.Ca_vir.to_array(dense=True)
        Vb = self.Cb_vir.to_array(dense=True)
        #
        # AO->MO transformation of all required integral tensors
        self.eri_OVOV = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Oa, Va, Oa, Va, eri)
        self.eri_ovov = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Ob, Vb, Ob, Vb, eri)
        self.eri_OVov = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Oa, Va, Ob, Vb, eri)
        self.eri_OOVV = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Oa, Oa, Va, Va, eri)
        self.eri_oovv = numpy.einsum("ai,bj,ck,dl,abcd->ijkl", Ob, Ob, Vb, Vb, eri)
        #
        #H = self.ref_wfn.H().to_array(dense=True)
        #
        #self.jk.C_clear()
        #self.jk.C_left_add(psi4.core.Matrix.from_array(self.Da, ""))
        #I = numpy.identity(self.Da.shape[0], numpy.float64)
        #self.jk.C_right_add(psi4.core.Matrix.from_array(I, ""))
        #self.jk.compute()
        #Ja= self.jk.J()[0].to_array(dense=True)
        #Ka= self.jk.K()[0].to_array(dense=True)
        ##
        #self.jk.C_clear()
        #self.jk.C_left_add(psi4.core.Matrix.from_array(self.Db, ""))
        #self.jk.C_right_add(psi4.core.Matrix.from_array(I, ""))
        #self.jk.compute()
        #Jb= self.jk.J()[0].to_array(dense=True)
        #Kb= self.jk.K()[0].to_array(dense=True)
        #
        #Fa = H+Ja+Jb-Ka
        #Fb = Fa+Ka-Kb
        #print(Fa, self.ref_wfn.Fa().to_array(dense=True))
        # Fock matrices taken directly from the converged SCF wavefunction
        Fa = self.ref_wfn.Fa().to_array(dense=True)
        Fb = self.ref_wfn.Fb().to_array(dense=True)
        # Occupied-occupied and virtual-virtual MO blocks of the Fock matrices
        self.Fa_occ = numpy.einsum("ai,ab,bj->ij", Oa, Fa, Oa)
        self.Fa_vir = numpy.einsum("ai,ab,bj->ij", Va, Fa, Va)
        self.Fb_occ = numpy.einsum("ai,ab,bj->ij", Ob, Fb, Ob)
        self.Fb_vir = numpy.einsum("ai,ab,bj->ij", Vb, Fb, Vb)
    def _run_scf(self):
        """Run the SCF and store the reference wavefunction and energies."""
        scf_e, wfn = psi4.energy('HF', molecule=self.mol, return_wfn=True)
        self.scf_e = scf_e
        self.e_0 = scf_e - self.N
        self.ref_wfn = wfn
    def _build_hamiltonian(self):
        """Assemble the CIS Hamiltonian: reference determinant at index 0,
        alpha singles at 1..off_a, beta singles after them."""
        # OO block
        self.hamiltonian[0, 0] = self.e_0
        # OS and SO blocks are zero
        # SS block
        off_a = self.naocc*self.navir
        # NOTE(review): off_b is computed but never used below.
        off_b = self.nbocc*self.nbvir
        for i in range(self.naocc):
            for a in range(self.navir):
                ia = (self.navir)*i + a
                # Alpha-alpha block
                for j in range(self.naocc):
                    for b in range(self.navir):
                        jb = (self.navir)*j + b
                        v = 0.0
                        if (i==j) and (a==b): v+= self.e_0
                        # off-diagonal Fock contributions (general MO basis)
                        if (i==j): v+= self.Fa_vir[a,b]
                        if (a==b): v-= self.Fa_occ[i,j]
                        v += self.eri_OVOV[i,a,j,b] - self.eri_OOVV[i,j,a,b]
                        #
                        self.hamiltonian[1+ia,1+jb] = v
                # Alpha-beta and beta-alpha coupling blocks
                for j in range(self.nbocc):
                    for b in range(self.nbvir):
                        jb = (self.nbvir)*j + b
                        v = self.eri_OVov[i,a,j,b]
                        self.hamiltonian[1+ia,1+jb+off_a] = v
                        self.hamiltonian[1+jb+off_a,1+ia] = v
        for i in range(self.nbocc):
            for a in range(self.nbvir):
                ia = (self.nbvir)*i + a
                # Beta-beta block
                for j in range(self.nbocc):
                    for b in range(self.nbvir):
                        jb = (self.nbvir)*j + b
                        v = 0.0
                        if (i==j) and (a==b): v+= self.e_0
                        if (i==j): v+= self.Fb_vir[a,b]
                        if (a==b): v-= self.Fb_occ[i,j]
                        v += self.eri_ovov[i,a,j,b] - self.eri_oovv[i,j,a,b]
                        #
                        self.hamiltonian[1+ia+off_a,1+jb+off_a] = v
        # NOTE(review): unlike CIS._build_hamiltonian, eri_OOVV and eri_oovv
        # are not deleted here and stay in memory.
        del self.eri_ovov, self.eri_OVOV, self.eri_OVov
    def _diagonalize(self):
        """Diagonalize the Hamiltonian and store eigenpairs.
        NOTE(review): save_states is stored but not used to truncate here,
        unlike CIS._diagonalize."""
        E, W = numpy.linalg.eigh(self.hamiltonian)
        self.E = E
        self.W = W
| [
"numpy.zeros",
"numpy.einsum",
"psi4.energy",
"time.time",
"abc.ABC.__init__",
"numpy.linalg.eigh",
"psi4.core.set_global_option"
] | [((695, 713), 'abc.ABC.__init__', 'ABC.__init__', (['self'], {}), '(self)\n', (707, 713), False, 'from abc import ABC, abstractmethod\n'), ((2670, 2723), 'psi4.energy', 'psi4.energy', (['"""HF"""'], {'molecule': 'self.mol', 'return_wfn': '(True)'}), "('HF', molecule=self.mol, return_wfn=True)\n", (2681, 2723), False, 'import psi4\n'), ((6775, 6786), 'time.time', 'time.time', ([], {}), '()\n', (6784, 6786), False, 'import time\n'), ((6800, 6835), 'numpy.linalg.eigh', 'numpy.linalg.eigh', (['self.hamiltonian'], {}), '(self.hamiltonian)\n', (6817, 6835), False, 'import numpy\n'), ((8022, 8069), 'psi4.core.set_global_option', 'psi4.core.set_global_option', (['"""reference"""', '"""rhf"""'], {}), "('reference', 'rhf')\n", (8049, 8069), False, 'import psi4\n'), ((8348, 8398), 'numpy.zeros', 'numpy.zeros', (['(self.ndet, self.ndet)', 'numpy.float64'], {}), '((self.ndet, self.ndet), numpy.float64)\n', (8359, 8398), False, 'import numpy\n'), ((8841, 8888), 'psi4.core.set_global_option', 'psi4.core.set_global_option', (['"""reference"""', '"""uhf"""'], {}), "('reference', 'uhf')\n", (8868, 8888), False, 'import psi4\n'), ((9244, 9294), 'numpy.zeros', 'numpy.zeros', (['(self.ndet, self.ndet)', 'numpy.float64'], {}), '((self.ndet, self.ndet), numpy.float64)\n', (9255, 9294), False, 'import numpy\n'), ((12149, 12199), 'numpy.zeros', 'numpy.zeros', (['(self.ndet, self.ndet)', 'numpy.float64'], {}), '((self.ndet, self.ndet), numpy.float64)\n', (12160, 12199), False, 'import numpy\n'), ((12664, 12723), 'numpy.einsum', 'numpy.einsum', (['"""ai,bj,ck,dl,abcd->ijkl"""', 'Oa', 'Va', 'Oa', 'Va', 'eri'], {}), "('ai,bj,ck,dl,abcd->ijkl', Oa, Va, Oa, Va, eri)\n", (12676, 12723), False, 'import numpy\n'), ((12746, 12805), 'numpy.einsum', 'numpy.einsum', (['"""ai,bj,ck,dl,abcd->ijkl"""', 'Ob', 'Vb', 'Ob', 'Vb', 'eri'], {}), "('ai,bj,ck,dl,abcd->ijkl', Ob, Vb, Ob, Vb, eri)\n", (12758, 12805), False, 'import numpy\n'), ((12828, 12887), 'numpy.einsum', 'numpy.einsum', 
(['"""ai,bj,ck,dl,abcd->ijkl"""', 'Oa', 'Va', 'Ob', 'Vb', 'eri'], {}), "('ai,bj,ck,dl,abcd->ijkl', Oa, Va, Ob, Vb, eri)\n", (12840, 12887), False, 'import numpy\n'), ((12910, 12969), 'numpy.einsum', 'numpy.einsum', (['"""ai,bj,ck,dl,abcd->ijkl"""', 'Oa', 'Oa', 'Va', 'Va', 'eri'], {}), "('ai,bj,ck,dl,abcd->ijkl', Oa, Oa, Va, Va, eri)\n", (12922, 12969), False, 'import numpy\n'), ((12992, 13051), 'numpy.einsum', 'numpy.einsum', (['"""ai,bj,ck,dl,abcd->ijkl"""', 'Ob', 'Ob', 'Vb', 'Vb', 'eri'], {}), "('ai,bj,ck,dl,abcd->ijkl', Ob, Ob, Vb, Vb, eri)\n", (13004, 13051), False, 'import numpy\n'), ((13964, 14004), 'numpy.einsum', 'numpy.einsum', (['"""ai,ab,bj->ij"""', 'Oa', 'Fa', 'Oa'], {}), "('ai,ab,bj->ij', Oa, Fa, Oa)\n", (13976, 14004), False, 'import numpy\n'), ((14025, 14065), 'numpy.einsum', 'numpy.einsum', (['"""ai,ab,bj->ij"""', 'Va', 'Fa', 'Va'], {}), "('ai,ab,bj->ij', Va, Fa, Va)\n", (14037, 14065), False, 'import numpy\n'), ((14086, 14126), 'numpy.einsum', 'numpy.einsum', (['"""ai,ab,bj->ij"""', 'Ob', 'Fb', 'Ob'], {}), "('ai,ab,bj->ij', Ob, Fb, Ob)\n", (14098, 14126), False, 'import numpy\n'), ((14147, 14187), 'numpy.einsum', 'numpy.einsum', (['"""ai,ab,bj->ij"""', 'Vb', 'Fb', 'Vb'], {}), "('ai,ab,bj->ij', Vb, Fb, Vb)\n", (14159, 14187), False, 'import numpy\n'), ((14229, 14282), 'psi4.energy', 'psi4.energy', (['"""HF"""'], {'molecule': 'self.mol', 'return_wfn': '(True)'}), "('HF', molecule=self.mol, return_wfn=True)\n", (14240, 14282), False, 'import psi4\n'), ((16173, 16208), 'numpy.linalg.eigh', 'numpy.linalg.eigh', (['self.hamiltonian'], {}), '(self.hamiltonian)\n', (16190, 16208), False, 'import numpy\n'), ((7136, 7147), 'time.time', 'time.time', ([], {}), '()\n', (7145, 7147), False, 'import time\n')] |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
#
import numpy as np
from tf.transformations import (
quaternion_matrix,
quaternion_from_matrix,
euler_matrix,
# quaternion_from_euler,
# euler_from_quaternion,
# quaternion_multiply
)
# Number of consecutive identical classifications required before the
# detector trusts (and publishes) a new traffic-light state.
STATE_COUNT_THRESHOLD = 3
# Data collection
#--------------------------------#
# Toggle for dumping annotated camera frames to disk (for classifier
# training) instead of running classification.
# is_collecting_traffic_data = True
is_collecting_traffic_data = False
# Output directory and filename prefix for collected training images.
data_dir_str = "/capstone/traffic_light_data/"
file_prefix = "tl"
# Module-level image counter (the TLDetector instance keeps its own copy).
tl_data_count = 0
#--------------------------------#
class TLDetector(object):
    """ROS node that finds the closest upcoming traffic light and publishes
    the index of the waypoint nearest its stop line on /traffic_waypoint
    whenever the light is red.

    With the module flag `is_collecting_traffic_data` enabled, the node
    instead projects the light position into the camera frame and saves
    annotated images for classifier training.
    """
    def __init__(self):
        rospy.init_node('tl_detector')

        self.pose = None           # latest vehicle pose (PoseStamped)
        self.waypoints = None      # base waypoints (Lane)
        self.camera_image = None   # latest camera frame (Image)
        self.lights = []           # ground-truth lights from the simulator

        sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)

        '''
        /vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
        helps you acquire an accurate ground truth data source for the traffic light
        classifier by sending the current color state of all traffic lights in the
        simulator. When testing on the vehicle, the color state will not be available. You'll need to
        rely on the position of the light and the camera image to predict it.
        '''
        sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
        sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)

        config_string = rospy.get_param("/traffic_light_config")
        # BUGFIX: yaml.load() without an explicit Loader is unsafe and is
        # rejected by modern PyYAML; the config is plain data, so safe_load
        # is sufficient.
        self.config = yaml.safe_load(config_string)

        self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)

        self.bridge = CvBridge()
        self.light_classifier = TLClassifier()
        self.listener = tf.TransformListener()

        # Debounce state machine for published light states
        self.state = TrafficLight.UNKNOWN
        self.last_state = TrafficLight.UNKNOWN
        self.last_wp = -1
        self.state_count = 0

        # Variables
        self.base_waypoints = None
        self.waypoints_2d = None
        self.waypoint_tree = None

        # Data collection bookkeeping
        self.tl_data_collection_period = 0.5  # sec. between saved frames
        self.tl_data_count = 0
        self.tl_last_collect_stamp = rospy.get_rostime()

        # Camera intrinsic matrix (ground truth focal length, scaled per axis;
        # principal point at the center of an 800x600 image)
        f_camera = 1345.0 # 1.2
        # f_camera = 100
        #
        fx_camera = f_camera * 1.6
        fy_camera = f_camera * 1.35
        xo_camera = 800/2.0
        yo_camera = 600/2.0
        self.np_K_camera_est = np.array([[fx_camera, 0.0, xo_camera], [0.0, fy_camera, yo_camera], [0.0, 0.0, 1.0]]) # Estimated
        # print("np_K_camera_est = \n%s" % str(self.np_K_camera_est))
        #
        # Camera extrinsics: axis permutation from car convention to optical
        # convention, combined with a -10 degree pitch correction.
        self.R_camera_fixer_at_car = euler_matrix(0.0, np.deg2rad(-10.0), 0.0, 'rzyx')
        self.R_car_at_camera = np.array([[0., -1., 0.], [0., 0., -1.], [1., 0., 0.]]).dot(self.R_camera_fixer_at_car[0:3,0:3].T)

        rospy.spin()

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Cache the base waypoints and lazily build a KD-tree over their
        (x, y) positions for fast nearest-waypoint queries."""
        self.waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)
            print("len(self.waypoints_2d) = %d" % len(self.waypoints_2d))

    def traffic_cb(self, msg):
        """Cache the (simulator-provided) traffic light array."""
        self.lights = msg.lights

    def image_cb(self, msg):
        """Identifies red lights in the incoming camera image and publishes the index
        of the waypoint closest to the red light's stop line to /traffic_waypoint
        Args:
            msg (Image): image from car-mounted camera
        """
        self.has_image = True
        self.camera_image = msg
        light_wp, state = self.process_traffic_lights()

        '''
        Publish upcoming red lights at camera frequency.
        Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
        of times till we start using it. Otherwise the previous stable state is
        used.
        '''
        if self.state != state:
            # New candidate state: restart the debounce counter.
            self.state_count = 0
            self.state = state
        elif self.state_count >= STATE_COUNT_THRESHOLD:
            # Candidate state is stable: adopt and publish it.
            self.last_state = self.state
            light_wp = light_wp if state == TrafficLight.RED else -1
            self.last_wp = light_wp
            self.upcoming_red_light_pub.publish(Int32(light_wp))
        else:
            # Still debouncing: keep publishing the last stable waypoint.
            self.upcoming_red_light_pub.publish(Int32(self.last_wp))
        self.state_count += 1

    def get_closest_waypoint(self, x, y):
        """Identifies the closest path waypoint to the given position
        https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
        Args:
            x, y: map coordinates to match a waypoint to
        Returns:
            int: index of the closest waypoint in self.waypoints
        """
        closest_idx = self.waypoint_tree.query([x,y], 1)[1]
        return closest_idx

    def get_relative_pose(self, pose_obj, pose_ref):
        '''
        Calculate the transformation between the object and the reference frame.
        Specifically, the object pose represented in the reference frame.
        Returns (T_rel, R_rel, t_rel): 4x4 homogeneous transform, its 3x3
        rotation and 3x1 translation.
        '''
        point_obj = np.array( (pose_obj.position.x, pose_obj.position.y, pose_obj.position.z) ).reshape((3,1))
        point_ref = np.array( (pose_ref.position.x, pose_ref.position.y, pose_ref.position.z) ).reshape((3,1))
        q_obj = (pose_obj.orientation.x, pose_obj.orientation.y, pose_obj.orientation.z, pose_obj.orientation.w)
        q_ref = (pose_ref.orientation.x, pose_ref.orientation.y, pose_ref.orientation.z, pose_ref.orientation.w)
        #
        T_obj = quaternion_matrix(q_obj) # 4x4 matrix
        T_ref = quaternion_matrix(q_ref)
        # Insert the translations into the homogeneous transforms
        T_obj[0:3,3:] = point_obj
        T_ref[0:3,3:] = point_ref
        # T_rel = T_world_at_ref * T_obj_at_world --> T_rel = T_ref^-1 * T_obj
        T_rel = (np.linalg.inv(T_ref)).dot(T_obj)
        R_rel = T_rel[0:3,0:3]
        t_rel = T_rel[0:3,3:4]
        return (T_rel, R_rel, t_rel)

    def perspective_projection(self, R_tl_at_car, t_tl_at_car, point_at_tl_list):
        '''
        Project 3D points represented in the traffic light's local frame onto
        the image, using the car->camera extrinsics and the estimated
        intrinsic matrix.
        '''
        _R_tl_at_camera = self.R_car_at_camera.dot(R_tl_at_car)
        _t_tl_at_camera = self.R_car_at_camera.dot(t_tl_at_car)
        projection_list = list()
        for _p_3D_at_tl in point_at_tl_list:
            _point_3D_at_tl = np.array(_p_3D_at_tl).reshape((3,1))
            _point_3D_at_camera = _R_tl_at_camera.dot(_point_3D_at_tl) + _t_tl_at_camera
            _ray = self.np_K_camera_est.dot( _point_3D_at_camera )
            # Normalize by depth to obtain pixel coordinates
            _projection = (_ray / abs(_ray[2,0]))[:2,0]
            print("_projection = \n%s" % _projection)
            projection_list.append(_projection)
        return projection_list

    def get_light_state(self, light):
        """Determines the current color of the traffic light
        Args:
            light (TrafficLight): light to classify
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        '''
        Since the process is quite similar for collecting traffic light data
        (i.e. find the closest light, its location, and its state),
        the code is reused for data collecting.
        '''
        if not is_collecting_traffic_data:
            # For testing, simply return the light state (for simulation only)
            return light.state
            # # The Following codes are for classification
            # if(not self.has_image):
            #     self.prev_light_loc = None
            #     return False
            # cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            # #Get classification
            # return self.light_classifier.get_classification(cv_image)
        else:
            # Collect traffic image, location (bounding box), and state
            if(not self.has_image):
                self.prev_light_loc = None
                return False
            cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
            # Jump through some images
            #--------------------------#
            _current_stamp = rospy.get_rostime()
            if (_current_stamp - self.tl_last_collect_stamp).to_sec() < self.tl_data_collection_period:
                return light.state
            # else
            self.tl_last_collect_stamp = _current_stamp
            #--------------------------#
            # Count the image
            self.tl_data_count += 1
            print("--- tl_data_count = %d ---" % self.tl_data_count)
            # Try to get the bounding box
            print("light.pose = \n%s" % light.pose)
            print("self.pose = \n%s" % self.pose)
            # Calculate relative pose
            #---------------------------------#
            # Calculate the relative pose of light at car
            T_rel, R_tl_at_car, t_tl_at_car = self.get_relative_pose(light.pose.pose, self.pose.pose)
            print("R_tl_at_car = \n%s" % R_tl_at_car)
            print("t_tl_at_car = \n%s" % t_tl_at_car)
            # If it's too far (> 100 m ahead), ignore
            if t_tl_at_car[0] > 100:
                return light.state
            # Perspective projection
            #---------------------------------#
            # Points outlining the light in its local frame: the origin plus
            # a ~0.3 m wide, 0.5 m tall box.
            point_at_tl_list = list()
            point_at_tl_list.append([0., 0., 0.])
            #
            point_at_tl_list.append([0., 0.15, 0.0])
            point_at_tl_list.append([0., 0.15, 0.5])
            # BUGFIX: these two offsets were written as "0.-15", which
            # evaluates to -15.0 m; the intended value is -0.15 m so the box
            # mirrors the +0.15 m points above.
            point_at_tl_list.append([0., -0.15, 0.5])
            point_at_tl_list.append([0., -0.15, 0.0])
            #
            projection_list = self.perspective_projection(R_tl_at_car, t_tl_at_car, point_at_tl_list)
            #---------------------------------#
            # Draw the bounding-box corner points on the image
            for _p in projection_list:
                _center_pixel = tuple( _p.astype('int') )
                _radius = 10
                _color = (255, 0, 0) # BGR
                cv2.circle(cv_image, _center_pixel, _radius, _color, -1)
            # Store the image, encoding the light state in the file name
            _file_name = file_prefix + ("_%.4d_%d" % (self.tl_data_count, light.state)) + ".png"
            data_path_str = data_dir_str + _file_name
            cv2.imwrite(data_path_str, cv_image )
            #
            return light.state

    def process_traffic_lights(self):
        """Finds closest visible traffic light, if one exists, and determines its
        location and color
        Returns:
            int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        closest_light = None
        line_wp_idx = None

        # List of positions that correspond to the line to stop in front of for a given intersection
        stop_line_positions = self.config['stop_line_positions']
        if(self.pose):
            car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)

            # Find the closest visible traffic light (if one exists)
            diff = len(self.waypoints.waypoints)
            for i, light in enumerate(self.lights):
                # Get stop line waypoint index
                line = stop_line_positions[i] # Note: this is loaded from config
                tmp_wp_idx = self.get_closest_waypoint(line[0], line[1])
                # Keep the stop line with the smallest non-negative (ahead-of-
                # car) waypoint distance
                d = tmp_wp_idx - car_wp_idx
                if d >= 0 and d < diff:
                    # Found a closer frontal light (stop line)
                    diff = d
                    closest_light = light
                    line_wp_idx = tmp_wp_idx

        if closest_light:
            state = self.get_light_state(closest_light)
            return line_wp_idx, state
        # self.waypoints = None
        return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
    try:
        # Constructing the node blocks in rospy.spin() until shutdown.
        TLDetector()
    except rospy.ROSInterruptException:
        # rospy.spin() was interrupted (e.g. Ctrl-C / node shutdown).
        rospy.logerr('Could not start traffic node.')
| [
"yaml.load",
"rospy.logerr",
"rospy.Subscriber",
"light_classification.tl_classifier.TLClassifier",
"tf.transformations.quaternion_matrix",
"cv2.imwrite",
"rospy.init_node",
"cv2.circle",
"std_msgs.msg.Int32",
"numpy.linalg.inv",
"tf.TransformListener",
"cv_bridge.CvBridge",
"numpy.deg2rad",... | [((938, 968), 'rospy.init_node', 'rospy.init_node', (['"""tl_detector"""'], {}), "('tl_detector')\n", (953, 968), False, 'import rospy\n'), ((1099, 1159), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1115, 1159), False, 'import rospy\n'), ((1175, 1235), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1191, 1235), False, 'import rospy\n'), ((1728, 1807), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/vehicle/traffic_lights"""', 'TrafficLightArray', 'self.traffic_cb'], {}), "('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n", (1744, 1807), False, 'import rospy\n'), ((1823, 1877), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/image_color"""', 'Image', 'self.image_cb'], {}), "('/image_color', Image, self.image_cb)\n", (1839, 1877), False, 'import rospy\n'), ((1903, 1943), 'rospy.get_param', 'rospy.get_param', (['"""/traffic_light_config"""'], {}), "('/traffic_light_config')\n", (1918, 1943), False, 'import rospy\n'), ((1966, 1990), 'yaml.load', 'yaml.load', (['config_string'], {}), '(config_string)\n', (1975, 1990), False, 'import yaml\n'), ((2030, 2087), 'rospy.Publisher', 'rospy.Publisher', (['"""/traffic_waypoint"""', 'Int32'], {'queue_size': '(1)'}), "('/traffic_waypoint', Int32, queue_size=1)\n", (2045, 2087), False, 'import rospy\n'), ((2111, 2121), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (2119, 2121), False, 'from cv_bridge import CvBridge\n'), ((2154, 2168), 'light_classification.tl_classifier.TLClassifier', 'TLClassifier', ([], {}), '()\n', (2166, 2168), False, 'from light_classification.tl_classifier import TLClassifier\n'), ((2193, 2215), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (2213, 2215), False, 'import tf\n'), ((2634, 2653), 'rospy.get_rostime', 'rospy.get_rostime', 
([], {}), '()\n', (2651, 2653), False, 'import rospy\n'), ((2928, 3018), 'numpy.array', 'np.array', (['[[fx_camera, 0.0, xo_camera], [0.0, fy_camera, yo_camera], [0.0, 0.0, 1.0]]'], {}), '([[fx_camera, 0.0, xo_camera], [0.0, fy_camera, yo_camera], [0.0, \n 0.0, 1.0]])\n', (2936, 3018), True, 'import numpy as np\n'), ((3331, 3343), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (3341, 3343), False, 'import rospy\n'), ((6130, 6154), 'tf.transformations.quaternion_matrix', 'quaternion_matrix', (['q_obj'], {}), '(q_obj)\n', (6147, 6154), False, 'from tf.transformations import quaternion_matrix, quaternion_from_matrix, euler_matrix\n'), ((6184, 6208), 'tf.transformations.quaternion_matrix', 'quaternion_matrix', (['q_ref'], {}), '(q_ref)\n', (6201, 6208), False, 'from tf.transformations import quaternion_matrix, quaternion_from_matrix, euler_matrix\n'), ((3161, 3178), 'numpy.deg2rad', 'np.deg2rad', (['(-10.0)'], {}), '(-10.0)\n', (3171, 3178), True, 'import numpy as np\n'), ((3672, 3697), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (3678, 3697), False, 'from scipy.spatial import KDTree\n'), ((9090, 9109), 'rospy.get_rostime', 'rospy.get_rostime', ([], {}), '()\n', (9107, 9109), False, 'import rospy\n'), ((11236, 11272), 'cv2.imwrite', 'cv2.imwrite', (['data_path_str', 'cv_image'], {}), '(data_path_str, cv_image)\n', (11247, 11272), False, 'import cv2\n'), ((13037, 13082), 'rospy.logerr', 'rospy.logerr', (['"""Could not start traffic node."""'], {}), "('Could not start traffic node.')\n", (13049, 13082), False, 'import rospy\n'), ((3224, 3287), 'numpy.array', 'np.array', (['[[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]]'], {}), '([[0.0, -1.0, 0.0], [0.0, 0.0, -1.0], [1.0, 0.0, 0.0]])\n', (3232, 3287), True, 'import numpy as np\n'), ((5676, 5749), 'numpy.array', 'np.array', (['(pose_obj.position.x, pose_obj.position.y, pose_obj.position.z)'], {}), '((pose_obj.position.x, pose_obj.position.y, pose_obj.position.z))\n', (5684, 
5749), True, 'import numpy as np\n'), ((5787, 5860), 'numpy.array', 'np.array', (['(pose_ref.position.x, pose_ref.position.y, pose_ref.position.z)'], {}), '((pose_ref.position.x, pose_ref.position.y, pose_ref.position.z))\n', (5795, 5860), True, 'import numpy as np\n'), ((6557, 6577), 'numpy.linalg.inv', 'np.linalg.inv', (['T_ref'], {}), '(T_ref)\n', (6570, 6577), True, 'import numpy as np\n'), ((10985, 11041), 'cv2.circle', 'cv2.circle', (['cv_image', '_center_pixel', '_radius', '_color', '(-1)'], {}), '(cv_image, _center_pixel, _radius, _color, -1)\n', (10995, 11041), False, 'import cv2\n'), ((4835, 4850), 'std_msgs.msg.Int32', 'Int32', (['light_wp'], {}), '(light_wp)\n', (4840, 4850), False, 'from std_msgs.msg import Int32\n'), ((4914, 4933), 'std_msgs.msg.Int32', 'Int32', (['self.last_wp'], {}), '(self.last_wp)\n', (4919, 4933), False, 'from std_msgs.msg import Int32\n'), ((7318, 7339), 'numpy.array', 'np.array', (['_p_3D_at_tl'], {}), '(_p_3D_at_tl)\n', (7326, 7339), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `crossdispersion` module."""
import unittest
import numpy as np
from hotsoss import crossdispersion as xd
class TestCrossDiespersion(unittest.TestCase):
"""Test crossdispersion.py functions"""
def setUp(self):
"""Test instance setup"""
# Make x axis for testing
self.n = 100
self.x = np.linspace(0, 100, self.n)
self.mu0 = 50.
self.sigma0 = 1.
self.A0 = 10.
self.sigma1 = 0.5
self.A1 = 50.
self.sep = 5.
def test_batman(self):
"""Check batman function works"""
result = xd.batman(self.x, self.mu0, self.sigma0, self.A0, self.sigma1, self.A1, self.sep)
self.assertEqual(self.n, len(result))
def test_batmen(self):
"""Check batmen function works"""
result = xd.batmen(self.x, self.mu0, self.sigma0, self.A0, self.sigma1, self.A1, self.sep, self.mu0+10., self.sigma0, self.A0/2., self.sigma1, self.A1/2., self.sep)
self.assertEqual(self.n, len(result))
def test_bimodal(self):
"""Check bimodal function works"""
result = xd.bimodal(self.x, self.mu0, self.sigma0, self.A0, self.mu0+10., self.sigma1, self.A1)
self.assertEqual(self.n, len(result))
def test_gaussian(self):
"""Check gaussian function works"""
result = xd.gaussian(self.x, self.mu0, self.sigma0, self.A0)
self.assertEqual(self.n, len(result))
def test_trimodal(self):
"""Check trimodal function works"""
result = xd.trimodal(self.x, self.mu0, self.sigma0, self.A0, self.mu0+10., self.sigma0, self.A0, self.mu0-10., self.sigma0, self.A0)
self.assertEqual(self.n, len(result))
| [
"hotsoss.crossdispersion.batman",
"hotsoss.crossdispersion.batmen",
"hotsoss.crossdispersion.gaussian",
"hotsoss.crossdispersion.bimodal",
"numpy.linspace",
"hotsoss.crossdispersion.trimodal"
] | [((389, 416), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', 'self.n'], {}), '(0, 100, self.n)\n', (400, 416), True, 'import numpy as np\n'), ((644, 729), 'hotsoss.crossdispersion.batman', 'xd.batman', (['self.x', 'self.mu0', 'self.sigma0', 'self.A0', 'self.sigma1', 'self.A1', 'self.sep'], {}), '(self.x, self.mu0, self.sigma0, self.A0, self.sigma1, self.A1,\n self.sep)\n', (653, 729), True, 'from hotsoss import crossdispersion as xd\n'), ((859, 1032), 'hotsoss.crossdispersion.batmen', 'xd.batmen', (['self.x', 'self.mu0', 'self.sigma0', 'self.A0', 'self.sigma1', 'self.A1', 'self.sep', '(self.mu0 + 10.0)', 'self.sigma0', '(self.A0 / 2.0)', 'self.sigma1', '(self.A1 / 2.0)', 'self.sep'], {}), '(self.x, self.mu0, self.sigma0, self.A0, self.sigma1, self.A1,\n self.sep, self.mu0 + 10.0, self.sigma0, self.A0 / 2.0, self.sigma1, \n self.A1 / 2.0, self.sep)\n', (868, 1032), True, 'from hotsoss import crossdispersion as xd\n'), ((1150, 1244), 'hotsoss.crossdispersion.bimodal', 'xd.bimodal', (['self.x', 'self.mu0', 'self.sigma0', 'self.A0', '(self.mu0 + 10.0)', 'self.sigma1', 'self.A1'], {}), '(self.x, self.mu0, self.sigma0, self.A0, self.mu0 + 10.0, self.\n sigma1, self.A1)\n', (1160, 1244), True, 'from hotsoss import crossdispersion as xd\n'), ((1374, 1425), 'hotsoss.crossdispersion.gaussian', 'xd.gaussian', (['self.x', 'self.mu0', 'self.sigma0', 'self.A0'], {}), '(self.x, self.mu0, self.sigma0, self.A0)\n', (1385, 1425), True, 'from hotsoss import crossdispersion as xd\n'), ((1563, 1697), 'hotsoss.crossdispersion.trimodal', 'xd.trimodal', (['self.x', 'self.mu0', 'self.sigma0', 'self.A0', '(self.mu0 + 10.0)', 'self.sigma0', 'self.A0', '(self.mu0 - 10.0)', 'self.sigma0', 'self.A0'], {}), '(self.x, self.mu0, self.sigma0, self.A0, self.mu0 + 10.0, self.\n sigma0, self.A0, self.mu0 - 10.0, self.sigma0, self.A0)\n', (1574, 1697), True, 'from hotsoss import crossdispersion as xd\n')] |
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, <NAME>, based on code from <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
from model.test import test_net
from model.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import argparse
import pprint
import time, os, sys
import pickle
from nets.vgg16 import vgg16
from nets.resnet_v1 import resnetv1
from nets.mobilenet_v1 import mobilenetv1
import cv2
import numpy as np
import torch
def get_all_boxes():
label_dir = '/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab/deeplab_result_40epoch/outLabel_lip/'
names = open('/media/rgh/rgh-data/Dataset/Lip_320/val.txt','r')
class_nums = 20
img_nums = 1914
all_boxes = [[[] for _ in range(img_nums)]
for _ in range(class_nums)]
#print(len(names))
for index, name in enumerate(names):
name = name.strip()
print(index, ' ', name)
label = cv2.imread(label_dir + name+'.png', cv2.IMREAD_GRAYSCALE)
label = cv2.resize(label, (320, 320), interpolation=cv2.INTER_NEAREST)
cv2.imwrite('/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab/deeplab_result_40epoch/320/'+ name+'.png'
,label)
# 1 - 19
for i in range(1, class_nums):
label_part = label == i
label_part = label_part - 0
if not 1 in label_part:
continue
h = label_part.shape[0]
w = label_part.shape[1]
for j in range(h):
if 1 in label_part[j]:
y1 = j
break
for j in range(h):
if 1 in label_part[h - 1 - j]:
y2 = h - 1 - j
break
for j in range(w):
if 1 in label_part[:, j]:
x1 = j
break
for j in range(w):
if 1 in label_part[:, w - 1 - j]:
x2 = w - 1 - j
break
all_boxes[i][index] = np.array([[x1, y1, x2, y2, 1]])
return all_boxes
if __name__ == '__main__':
#all_boxes = get_all_boxes()
det_file = os.path.join('/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab', 'detections.pkl')
# with open(det_file, 'wb') as f:
# pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
all_boxes = pickle.load(open(det_file, 'rb'))
print(all_boxes[:][1])
print(len(all_boxes))
print(len(all_boxes[1]))
print(all_boxes[2][1])
# imdb_name = 'Lip_320_val'
# imdb = get_imdb(imdb_name)
# print('Evaluating detections')
# imdb.evaluate_detections(all_boxes, '/media/rgh/rgh-data/PycharmProjects/cvpr2018/temp/')
| [
"cv2.imwrite",
"cv2.imread",
"numpy.array",
"os.path.join",
"cv2.resize"
] | [((2211, 2301), 'os.path.join', 'os.path.join', (['"""/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab"""', '"""detections.pkl"""'], {}), "('/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab',\n 'detections.pkl')\n", (2223, 2301), False, 'import time, os, sys\n'), ((1174, 1233), 'cv2.imread', 'cv2.imread', (["(label_dir + name + '.png')", 'cv2.IMREAD_GRAYSCALE'], {}), "(label_dir + name + '.png', cv2.IMREAD_GRAYSCALE)\n", (1184, 1233), False, 'import cv2\n'), ((1244, 1306), 'cv2.resize', 'cv2.resize', (['label', '(320, 320)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(label, (320, 320), interpolation=cv2.INTER_NEAREST)\n', (1254, 1306), False, 'import cv2\n'), ((1311, 1439), 'cv2.imwrite', 'cv2.imwrite', (["(\n '/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab/deeplab_result_40epoch/320/'\n + name + '.png')", 'label'], {}), "(\n '/media/rgh/rgh-data/PycharmProjects/cvpr2018/deeplab/deeplab_result_40epoch/320/'\n + name + '.png', label)\n", (1322, 1439), False, 'import cv2\n'), ((2084, 2115), 'numpy.array', 'np.array', (['[[x1, y1, x2, y2, 1]]'], {}), '([[x1, y1, x2, y2, 1]])\n', (2092, 2115), True, 'import numpy as np\n')] |
""" Module for Magellan/MAGE specific codes
"""
import numpy as np
from pypeit import msgs
from pypeit import telescopes
from pypeit.core import framematch
from pypeit.core import parse
from pypeit.par import pypeitpar
from pypeit.spectrographs import spectrograph
from pypeit.core import pixels
from pypeit import debugger
class MagellanMAGESpectrograph(spectrograph.Spectrograph):
"""
Child to handle Magellan/MAGE specific code
"""
def __init__(self):
# Get it started
super(MagellanMAGESpectrograph, self).__init__()
self.spectrograph = 'magellan_mage'
self.telescope = telescopes.MagellanTelescopePar()
self.camera = 'MAGE'
self.numhead = 1
self.detector = [
# Detector 1
pypeitpar.DetectorPar(
specaxis = 0,
specflip = False,
xgap = 0.,
ygap = 0.,
ysize = 1.,
# plate scale in arcsec/pixel
platescale = 0.3,
# electrons/pixel/hour. From: http://www.lco.cl/telescopes-information/magellan/instruments/mage/the-mage-spectrograph-user-manual
darkcurr = 1.00,
saturation = 65535.,
# CCD is linear to better than 0.5 per cent up to digital saturation (65,536 DN including bias) in the Fast readout mode.
nonlinear = 0.99,
numamplifiers = 1,
gain = 1.02, # depends on the readout
ronoise = 2.9, # depends on the readout
datasec = '[1:2048,1:1024]', # complementary to oscansec
oscansec = '[2049:2176,1025:1152]' # as taken from the header
)]
# Taken from the MASE paper: https://arxiv.org/pdf/0910.1834.pdf
self.norders = 15
# Uses default timeunit
# Uses default primary_hdrext
# self.sky_file = ?
@property
def pypeline(self):
return 'Echelle'
def default_pypeit_par(self):
"""
Set default parameters for magellan MagE reduction.
"""
par = pypeitpar.PypeItPar()
par['rdx']['spectrograph'] = 'magellan_mage'
# Frame numbers
par['calibrations']['standardframe']['number'] = 1
par['calibrations']['biasframe']['number'] = 0
par['calibrations']['pixelflatframe']['number'] = 3
par['calibrations']['traceframe']['number'] = 3
par['calibrations']['arcframe']['number'] = 1
# Bias
par['calibrations']['biasframe']['useframe'] = 'overscan'
# Wavelengths
# 1D wavelength solution
par['calibrations']['wavelengths']['rms_threshold'] = 0.20 # Might be grating dependent..
par['calibrations']['wavelengths']['sigdetect']=5.0
par['calibrations']['wavelengths']['lamps'] = ['ThAr']
par['calibrations']['wavelengths']['nonlinear_counts'] = self.detector[0]['nonlinear'] * self.detector[0]['saturation']
#par['calibrations']['wavelengths']['method'] = 'reidentify'
# Reidentification parameters
#par['calibrations']['wavelengths']['reid_arxiv'] = 'magellan_thar.json'
par['calibrations']['wavelengths']['ech_fix_format'] = True
# Echelle parameters
par['calibrations']['wavelengths']['echelle'] = True
par['calibrations']['wavelengths']['ech_nspec_coeff'] = 4
par['calibrations']['wavelengths']['ech_norder_coeff'] = 4
par['calibrations']['wavelengths']['ech_sigrej'] = 3.0
# Always correct for flexure, starting with default parameters
par['flexure'] = pypeitpar.FlexurePar()
par['scienceframe']['process']['sigclip'] = 20.0
par['scienceframe']['process']['satpix'] ='nothing'
# Set slits and tilts parameters
# par['calibrations']['tilts']['order'] = 2
par['calibrations']['tilts']['tracethresh'] = [10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
par['calibrations']['slits']['trace_npoly'] = 5
par['calibrations']['slits']['maxshift'] = 3.
# par['calibrations']['slits']['pcatype'] = 'order'
# Scienceimage default parameters
par['scienceimage'] = pypeitpar.ScienceImagePar()
# Always flux calibrate, starting with default parameters
par['fluxcalib'] = pypeitpar.FluxCalibrationPar()
# Do not correct for flexure
par['flexure'] = pypeitpar.FlexurePar()
par['flexure']['method'] = 'skip'
# Set the default exposure time ranges for the frame typing
par['calibrations']['standardframe']['exprng'] = [None, 20]
par['calibrations']['arcframe']['exprng'] = [20, None]
par['calibrations']['darkframe']['exprng'] = [20, None]
par['scienceframe']['exprng'] = [20, None]
return par
def init_meta(self):
"""
Generate the meta data dict
Note that the children can add to this
Returns:
self.meta: dict (generated in place)
"""
self.meta = {}
# Required (core)
self.meta['ra'] = dict(ext=0, card='RA')
self.meta['dec'] = dict(ext=0, card='DEC')
self.meta['target'] = dict(ext=0, card='OBJECT')
#TODO: Check decker is correct
self.meta['decker'] = dict(ext=0, card='SLITENC')
self.meta['binning'] = dict(card=None, compound=True)
# self.meta['binning'] = dict(ext=0, card='BINNING')
self.meta['mjd'] = dict(ext=0, card='MJD-OBS')
self.meta['exptime'] = dict(ext=0, card='EXPTIME')
self.meta['airmass'] = dict(ext=0, card='AIRMASS')
# Extras for config and frametyping
self.meta['dispname'] = dict(ext=0, card='INSTR')
def compound_meta(self, headarr, meta_key):
"""
Args:
headarr: list
meta_key: str
Returns:
value
"""
if meta_key == 'binning':
binspatial, binspec = parse.parse_binning(headarr[0]['BINNING'])
return parse.binning2string(binspec, binspatial)
def check_frame_type(self, ftype, fitstbl, exprng=None):
"""
Check for frames of the provided type.
"""
# TODO: arcs, tilts, darks?
if ftype in ['pinhole', 'bias']:
# No pinhole or bias frames
return np.zeros(len(fitstbl), dtype=bool)
if ftype in ['pixelflat', 'trace']:
return fitstbl['idname'] == 'domeflat'
return (fitstbl['idname'] == 'object') \
& framematch.check_frame_exptime(fitstbl['exptime'], exprng)
def bpm(self, shape=None, filename=None, det=None, **null_kwargs):
"""
Override parent bpm function with BPM specific to X-Shooter VIS.
.. todo::
Allow for binning changes.
Parameters
----------
det : int, REQUIRED
**null_kwargs:
Captured and never used
Returns
-------
bpix : ndarray
0 = ok; 1 = Mask
"""
msgs.info("Custom bad pixel mask for MAGE")
self.empty_bpm(shape=shape, filename=filename, det=det)
if det == 1:
self.bpm_img[:, :20] = 1.
self.bpm_img[:, 1000:] = 1.
return self.bpm_img
@staticmethod
def slitmask(tslits_dict, pad=None, binning=None):
"""
Generic routine ton construct a slitmask image from a tslits_dict. Children of this class can
overload this function to implement instrument specific slitmask behavior, for example setting
where the orders on an echelle spectrograph end
Parameters
-----------
tslits_dict: dict
Trace slits dictionary with slit boundary information
Optional Parameters
pad: int or float
Padding of the slit boundaries
binning: tuple
Spectrograph binning in spectral and spatial directions
Returns
-------
slitmask: ndarray int
Image with -1 where there are no slits/orders, and an integer where there are slits/order with the integer
indicating the slit number going from 0 to nslit-1 from left to right.
"""
# These lines are always the same
pad = tslits_dict['pad'] if pad is None else pad
slitmask = pixels.slit_pixels(tslits_dict['lcen'], tslits_dict['rcen'], tslits_dict['nspat'], pad=pad)
spec_img = np.outer(np.arange(tslits_dict['nspec'], dtype=int), np.ones(tslits_dict['nspat'], dtype=int)) # spectral position everywhere along image
order7bad = (slitmask == 0) & (spec_img < tslits_dict['nspec']/2)
slitmask[order7bad] = -1
return slitmask
@staticmethod
def slit2order(islit):
"""
Parameters
----------
islit: int, float, or string, slit number
Returns
-------
order: int
"""
if isinstance(islit, str):
islit = int(islit)
elif isinstance(islit, np.ndarray):
islit = islit.astype(int)
elif isinstance(islit, float):
islit = int(islit)
elif isinstance(islit, int):
pass
else:
msgs.error('Unrecognized type for islit')
orders = np.arange(7, 2, -1, dtype=int)
return orders[islit]
@staticmethod
def order_platescale(binning = None):
"""
Returns the plate scale in arcseconds for each order
Parameters
----------
None
Optional Parameters
--------------------
binning: str
Returns
-------
order_platescale: ndarray, float
"""
# MAGE has no binning, but for an instrument with binning we would do this
#binspatial, binspectral = parse.parse_binning(binning)
return np.full(5, 0.15)
def bpm(self, shape=None, filename=None, det=None, **null_kwargs):
"""
Override parent bpm function with BPM specific to X-ShooterNIR.
.. todo::
Allow for binning changes.
Parameters
----------
det : int, REQUIRED
**null_kwargs:
Captured and never used
Returns
-------
bpix : ndarray
0 = ok; 1 = Mask
"""
self.empty_bpm(shape=shape, filename=filename, det=det)
return self.bpm_img
@staticmethod
def slit2order(islit):
"""
Parameters
----------
islit: int, float, or string, slit number
Returns
-------
order: int
"""
if isinstance(islit,str):
islit = int(islit)
elif isinstance(islit,np.ndarray):
islit = islit.astype(int)
elif isinstance(islit,float):
islit = int(islit)
elif isinstance(islit, int):
pass
else:
msgs.error('Unrecognized type for islit')
orders = np.arange(26,10,-1, dtype=int)
return orders[islit]
@staticmethod
def order_platescale(self, binning = None):
"""
Returns the plate scale in arcseconds for each order
Parameters
----------
None
Optional Parameters
--------------------
binning: str
Returns
-------
order_platescale: ndarray, float
"""
# NIR has no binning, but for an instrument with binning we would do this
#binspatial, binspectral = parse.parse_binning(binning)
# ToDO Either assume a linear trend or measure this
# X-shooter manual says, but gives no exact numbers per order.
# NIR: 52.4 pixels (0.210”/pix) at order 11 to 59.9 pixels (0.184”/pix) at order 26.
# Right now I just took the average
return np.full(16, 0.197)
@staticmethod
def slitmask(tslits_dict, pad=None, binning=None):
"""
Generic routine ton construct a slitmask image from a tslits_dict. Children of this class can
overload this function to implement instrument specific slitmask behavior, for example setting
where the orders on an echelle spectrograph end
Parameters
-----------
tslits_dict: dict
Trace slits dictionary with slit boundary information
Optional Parameters
pad: int or float
Padding of the slit boundaries
binning: tuple
Spectrograph binning in spectral and spatial directions
Returns
-------
slitmask: ndarray int
Image with -1 where there are no slits/orders, and an integer where there are slits/order with the integer
indicating the slit number going from 0 to nslit-1 from left to right.
"""
# These lines are always the same
pad = tslits_dict['pad'] if pad is None else pad
slitmask = pixels.slit_pixels(tslits_dict['lcen'], tslits_dict['rcen'], tslits_dict['nspat'], pad=pad)
spec_img = np.outer(np.arange(tslits_dict['nspec'], dtype=int), np.ones(tslits_dict['nspat'], dtype=int)) # spectral position everywhere along image
nslits = tslits_dict['lcen'].shape[1]
# These are the order boundaries determined by eye by JFH. 2025 is used as the maximum as the upper bit is not illuminated
order_max = [1476,1513,1551, 1592,1687,1741,1801, 1864,1935,2007, 2025, 2025,2025,2025,2025,2025]
order_min = [418 ,385 , 362, 334, 303, 268, 230, 187, 140, 85, 26, 0, 0, 0, 0, 0]
# TODO add binning adjustments to these
for islit in range(nslits):
orderbad = (slitmask == islit) & ((spec_img < order_min[islit]) | (spec_img > order_max[islit]))
slitmask[orderbad] = -1
return slitmask
| [
"numpy.full",
"pypeit.par.pypeitpar.ScienceImagePar",
"pypeit.core.pixels.slit_pixels",
"pypeit.telescopes.MagellanTelescopePar",
"pypeit.par.pypeitpar.DetectorPar",
"pypeit.core.parse.binning2string",
"pypeit.core.framematch.check_frame_exptime",
"pypeit.core.parse.parse_binning",
"numpy.ones",
"... | [((624, 657), 'pypeit.telescopes.MagellanTelescopePar', 'telescopes.MagellanTelescopePar', ([], {}), '()\n', (655, 657), False, 'from pypeit import telescopes\n'), ((2457, 2478), 'pypeit.par.pypeitpar.PypeItPar', 'pypeitpar.PypeItPar', ([], {}), '()\n', (2476, 2478), False, 'from pypeit.par import pypeitpar\n'), ((3967, 3989), 'pypeit.par.pypeitpar.FlexurePar', 'pypeitpar.FlexurePar', ([], {}), '()\n', (3987, 3989), False, 'from pypeit.par import pypeitpar\n'), ((4557, 4584), 'pypeit.par.pypeitpar.ScienceImagePar', 'pypeitpar.ScienceImagePar', ([], {}), '()\n', (4582, 4584), False, 'from pypeit.par import pypeitpar\n'), ((4678, 4708), 'pypeit.par.pypeitpar.FluxCalibrationPar', 'pypeitpar.FluxCalibrationPar', ([], {}), '()\n', (4706, 4708), False, 'from pypeit.par import pypeitpar\n'), ((4771, 4793), 'pypeit.par.pypeitpar.FlexurePar', 'pypeitpar.FlexurePar', ([], {}), '()\n', (4791, 4793), False, 'from pypeit.par import pypeitpar\n'), ((7406, 7449), 'pypeit.msgs.info', 'msgs.info', (['"""Custom bad pixel mask for MAGE"""'], {}), "('Custom bad pixel mask for MAGE')\n", (7415, 7449), False, 'from pypeit import msgs\n'), ((8720, 8816), 'pypeit.core.pixels.slit_pixels', 'pixels.slit_pixels', (["tslits_dict['lcen']", "tslits_dict['rcen']", "tslits_dict['nspat']"], {'pad': 'pad'}), "(tslits_dict['lcen'], tslits_dict['rcen'], tslits_dict[\n 'nspat'], pad=pad)\n", (8738, 8816), False, 'from pypeit.core import pixels\n'), ((9673, 9703), 'numpy.arange', 'np.arange', (['(7)', '(2)', '(-1)'], {'dtype': 'int'}), '(7, 2, -1, dtype=int)\n', (9682, 9703), True, 'import numpy as np\n'), ((10250, 10266), 'numpy.full', 'np.full', (['(5)', '(0.15)'], {}), '(5, 0.15)\n', (10257, 10266), True, 'import numpy as np\n'), ((11364, 11396), 'numpy.arange', 'np.arange', (['(26)', '(10)', '(-1)'], {'dtype': 'int'}), '(26, 10, -1, dtype=int)\n', (11373, 11396), True, 'import numpy as np\n'), ((12216, 12234), 'numpy.full', 'np.full', (['(16)', '(0.197)'], {}), '(16, 0.197)\n', (12223, 
12234), True, 'import numpy as np\n'), ((13316, 13412), 'pypeit.core.pixels.slit_pixels', 'pixels.slit_pixels', (["tslits_dict['lcen']", "tslits_dict['rcen']", "tslits_dict['nspat']"], {'pad': 'pad'}), "(tslits_dict['lcen'], tslits_dict['rcen'], tslits_dict[\n 'nspat'], pad=pad)\n", (13334, 13412), False, 'from pypeit.core import pixels\n'), ((783, 1044), 'pypeit.par.pypeitpar.DetectorPar', 'pypeitpar.DetectorPar', ([], {'specaxis': '(0)', 'specflip': '(False)', 'xgap': '(0.0)', 'ygap': '(0.0)', 'ysize': '(1.0)', 'platescale': '(0.3)', 'darkcurr': '(1.0)', 'saturation': '(65535.0)', 'nonlinear': '(0.99)', 'numamplifiers': '(1)', 'gain': '(1.02)', 'ronoise': '(2.9)', 'datasec': '"""[1:2048,1:1024]"""', 'oscansec': '"""[2049:2176,1025:1152]"""'}), "(specaxis=0, specflip=False, xgap=0.0, ygap=0.0, ysize\n =1.0, platescale=0.3, darkcurr=1.0, saturation=65535.0, nonlinear=0.99,\n numamplifiers=1, gain=1.02, ronoise=2.9, datasec='[1:2048,1:1024]',\n oscansec='[2049:2176,1025:1152]')\n", (804, 1044), False, 'from pypeit.par import pypeitpar\n'), ((6315, 6357), 'pypeit.core.parse.parse_binning', 'parse.parse_binning', (["headarr[0]['BINNING']"], {}), "(headarr[0]['BINNING'])\n", (6334, 6357), False, 'from pypeit.core import parse\n'), ((6377, 6418), 'pypeit.core.parse.binning2string', 'parse.binning2string', (['binspec', 'binspatial'], {}), '(binspec, binspatial)\n', (6397, 6418), False, 'from pypeit.core import parse\n'), ((6902, 6960), 'pypeit.core.framematch.check_frame_exptime', 'framematch.check_frame_exptime', (["fitstbl['exptime']", 'exprng'], {}), "(fitstbl['exptime'], exprng)\n", (6932, 6960), False, 'from pypeit.core import framematch\n'), ((8841, 8883), 'numpy.arange', 'np.arange', (["tslits_dict['nspec']"], {'dtype': 'int'}), "(tslits_dict['nspec'], dtype=int)\n", (8850, 8883), True, 'import numpy as np\n'), ((8885, 8925), 'numpy.ones', 'np.ones', (["tslits_dict['nspat']"], {'dtype': 'int'}), "(tslits_dict['nspat'], dtype=int)\n", (8892, 8925), True, 'import 
numpy as np\n'), ((13437, 13479), 'numpy.arange', 'np.arange', (["tslits_dict['nspec']"], {'dtype': 'int'}), "(tslits_dict['nspec'], dtype=int)\n", (13446, 13479), True, 'import numpy as np\n'), ((13481, 13521), 'numpy.ones', 'np.ones', (["tslits_dict['nspat']"], {'dtype': 'int'}), "(tslits_dict['nspat'], dtype=int)\n", (13488, 13521), True, 'import numpy as np\n'), ((9613, 9654), 'pypeit.msgs.error', 'msgs.error', (['"""Unrecognized type for islit"""'], {}), "('Unrecognized type for islit')\n", (9623, 9654), False, 'from pypeit import msgs\n'), ((11304, 11345), 'pypeit.msgs.error', 'msgs.error', (['"""Unrecognized type for islit"""'], {}), "('Unrecognized type for islit')\n", (11314, 11345), False, 'from pypeit import msgs\n')] |
import os
import os.path as osp
from glob import glob
import numpy as np
import pickle
METType = {
'MET_CHAR': np.char,
'MET_SHORT': np.int16,
'MET_LONG': np.int32,
'MET_INT': np.int32,
'MET_UCHAR': np.uint8,
'MET_USHORT': np.uint16,
'MET_ULONG': np.uint32,
'MET_UINT': np.uint32,
'MET_FLOAT': np.float32,
'MET_FLOAT': np.float64
}
def extract_data(SrcDir, mode, DstDir_o, DstDir_m, worker, origin, mask, name=None):
""" Convert vol data into mhd data
Params
------
`SrcDir`: source directory
`mode`: 0 or 1, extract 3D volume or 2D slices
`DstDir_o`: destination directory to store origin image
`DstDir_m`: destination directory to store mask image
`worker`: converter path
"""
SrcDirs = os.listdir(SrcDir)
for i, src in enumerate(SrcDirs):
if name:
dst = "{:s}{:03d}".format(name, i)
else:
dst = src
src_liver = osp.join(SrcDir, src, origin)
src_liver_mask = osp.join(SrcDir, src, mask)
dst_liver = osp.join(DstDir_o, (dst + "_o") if mode == 1 else dst)
dst_liver_mask = osp.join(DstDir_m, dst + "_m")
if mode == 0:
os.system(worker + " 0 1 " + src_liver + " " + dst_liver)
os.system(worker + " 0 0 " + src_liver_mask + " " + dst_liver_mask)
elif mode == 1:
os.system(worker + " 1 1 " + src_liver + " " + dst_liver)
os.system(worker + " 1 0 " + src_liver_mask + " " + dst_liver_mask)
else:
raise ValueError("Wrong mode.")
def mhd_reader(mhdpath, only_meta=False):
""" Implementation of `.mhd` file reader
Params
------
`mhdpath`: file path to a mhd file
`only_meta`: if True, raw image will not be loaded and the second return is None
Returns
-------
`meta_info`: a dictonary contains all the information in mhd file
`raw_image`: raw data of this image. If `only_meta` is True, this return will be None.
Note: the returned `raw_image` is read-only.
"""
meta_info = {}
# read .mhd file
with open(mhdpath, 'r') as fmhd:
for line in fmhd.readlines():
parts = line.split()
meta_info[parts[0]] = ' '.join(parts[2:])
PrimaryKeys = ['NDims', 'DimSize', 'ElementType', 'ElementSpacing', 'ElementDataFile']
for key in PrimaryKeys:
if not key in meta_info:
raise KeyError("Missing key `{}` in meta data of the mhd file".format(key))
meta_info['NDims'] = int(meta_info['NDims'])
meta_info['DimSize'] = [eval(ele) for ele in meta_info['DimSize'].split()]
meta_info['ElementSpacing'] = [eval(ele) for ele in meta_info['ElementSpacing'].split()]
#meta_info['ElementByteOrderMSB'] = eval(meta_info['ElementByteOrderMSB'])
raw_image = None
if not only_meta:
rawpath = osp.join(osp.dirname(mhdpath), meta_info['ElementDataFile'])
# read .raw file
with open(rawpath, 'rb') as fraw:
buffer = fraw.read()
raw_image = np.frombuffer(buffer, dtype=METType[meta_info['ElementType']])
raw_image = np.reshape(raw_image, list(reversed(meta_info['DimSize'])))
return meta_info, raw_image
def mhd_writer(mhdpath, image:np.ndarray):
""" Implementation of `.mhd` file writer
Params
------
`mhdpath`: file path to write at
`image`: image to write
"""
image = np.squeeze(image)
meta_info = {}
meta_info["NDims"] = image.ndim
meta_info["DimSize"] = reversed(image.shape)
raise NotImplementedError
def bbox_from_mask(mask, bk_value=None):
""" Calculate bounding box from a mask image
"""
if bk_value is None:
bk_value = mask[0, 0]
mask_pixels = np.where(mask > bk_value)
if mask_pixels[0].size == 0:
return None
bbox = [
np.min(mask_pixels[1]),
np.min(mask_pixels[0]),
np.max(mask_pixels[1]),
np.max(mask_pixels[0])
]
return bbox
def get_mhd_list(SrcDir):
if not osp.exists(SrcDir):
raise FileNotFoundError("{} can not found!".format(SrcDir))
mhd_list = glob(osp.join(SrcDir, "*.mhd"))
return mhd_list
def get_mhd_list_with_liver(SrcDir, verbose=False):
""" Get mhd files list in a specific directory and remove the slices
which does not contain liver.
Note: SrcDir should be a mask dir
"""
cache_file = osp.join(SrcDir, "liver_slices.pkl")
if osp.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
keep_mhd_list = pickle.load(fid)
except:
keep_mhd_list = pickle.load(fid, encoding='bytes')
print("mhd list loaded from {}".format(cache_file))
return keep_mhd_list
all_mhd_list = get_mhd_list(SrcDir)
keep_mhd_list = []
for mhdfile in all_mhd_list:
if verbose:
print(mhdfile)
_, raw = mhd_reader(mhdfile)
bbox = bbox_from_mask(raw)
if bbox:
keep_mhd_list.append(mhdfile)
with open(cache_file, 'wb') as fid:
pickle.dump(keep_mhd_list, fid, pickle.HIGHEST_PROTOCOL)
print("Write mhd list to {}".format(cache_file))
return keep_mhd_list
if __name__ == '__main__':
if False:
SrcDir = "D:/DataSet/LiverQL/Liver-Ref/"
SrcDir_o = "D:/DataSet/LiverQL/Liver_slices_train/liver/"
SrcDir_m = "D:/DataSet/LiverQL/Liver_slices_train/mask/"
extract_slices(SrcDir, SrcDir_o, SrcDir_m)
if False:
SrcDir_m = "D:/DataSet/LiverQL/3Dircadb1_slices_train/mask/"
print(len(get_mhd_list_with_liver(SrcDir_m, False)))
if False:
SrcDir = "D:/DataSet/LiverQL/Target-New-Training"
worker = "D:/DataSet/LiverQL/VolConverter.exe"
DstDir_o = "D:/DataSet/LiverQL/Liver_2018_train_3D/liver"
DstDir_m = "D:/DataSet/LiverQL/Liver_2018_train_3D/mask"
origin = "Study_Phase2.vol"
mask = "Study_Phase2_Label.vol"
extract_data(SrcDir, 0, DstDir_o, DstDir_m, worker, origin, mask, name="R")
if True:
SrcDir = "D:/DataSet/LiverQL/Target-New-Training"
worker = "D:/DataSet/LiverQL/VolConverter.exe"
DstDir_o = "D:/DataSet/LiverQL/Liver_2018_train/liver"
DstDir_m = "D:/DataSet/LiverQL/Liver_2018_train/mask"
origin = "Study_Phase2.vol"
mask = "Study_Phase2_Label.vol"
extract_data(SrcDir, 1, DstDir_o, DstDir_m, worker, origin, mask, name="R")
| [
"pickle.dump",
"numpy.frombuffer",
"os.path.dirname",
"os.path.exists",
"os.system",
"numpy.min",
"numpy.where",
"numpy.max",
"pickle.load",
"numpy.squeeze",
"os.path.join",
"os.listdir"
] | [((772, 790), 'os.listdir', 'os.listdir', (['SrcDir'], {}), '(SrcDir)\n', (782, 790), False, 'import os\n'), ((3432, 3449), 'numpy.squeeze', 'np.squeeze', (['image'], {}), '(image)\n', (3442, 3449), True, 'import numpy as np\n'), ((3758, 3783), 'numpy.where', 'np.where', (['(mask > bk_value)'], {}), '(mask > bk_value)\n', (3766, 3783), True, 'import numpy as np\n'), ((4432, 4468), 'os.path.join', 'osp.join', (['SrcDir', '"""liver_slices.pkl"""'], {}), "(SrcDir, 'liver_slices.pkl')\n", (4440, 4468), True, 'import os.path as osp\n'), ((4476, 4498), 'os.path.exists', 'osp.exists', (['cache_file'], {}), '(cache_file)\n', (4486, 4498), True, 'import os.path as osp\n'), ((950, 979), 'os.path.join', 'osp.join', (['SrcDir', 'src', 'origin'], {}), '(SrcDir, src, origin)\n', (958, 979), True, 'import os.path as osp\n'), ((1005, 1032), 'os.path.join', 'osp.join', (['SrcDir', 'src', 'mask'], {}), '(SrcDir, src, mask)\n', (1013, 1032), True, 'import os.path as osp\n'), ((1054, 1106), 'os.path.join', 'osp.join', (['DstDir_o', "(dst + '_o' if mode == 1 else dst)"], {}), "(DstDir_o, dst + '_o' if mode == 1 else dst)\n", (1062, 1106), True, 'import os.path as osp\n'), ((1134, 1164), 'os.path.join', 'osp.join', (['DstDir_m', "(dst + '_m')"], {}), "(DstDir_m, dst + '_m')\n", (1142, 1164), True, 'import os.path as osp\n'), ((3050, 3112), 'numpy.frombuffer', 'np.frombuffer', (['buffer'], {'dtype': "METType[meta_info['ElementType']]"}), "(buffer, dtype=METType[meta_info['ElementType']])\n", (3063, 3112), True, 'import numpy as np\n'), ((3863, 3885), 'numpy.min', 'np.min', (['mask_pixels[1]'], {}), '(mask_pixels[1])\n', (3869, 3885), True, 'import numpy as np\n'), ((3895, 3917), 'numpy.min', 'np.min', (['mask_pixels[0]'], {}), '(mask_pixels[0])\n', (3901, 3917), True, 'import numpy as np\n'), ((3927, 3949), 'numpy.max', 'np.max', (['mask_pixels[1]'], {}), '(mask_pixels[1])\n', (3933, 3949), True, 'import numpy as np\n'), ((3959, 3981), 'numpy.max', 'np.max', (['mask_pixels[0]'], {}), 
'(mask_pixels[0])\n', (3965, 3981), True, 'import numpy as np\n'), ((4043, 4061), 'os.path.exists', 'osp.exists', (['SrcDir'], {}), '(SrcDir)\n', (4053, 4061), True, 'import os.path as osp\n'), ((4160, 4185), 'os.path.join', 'osp.join', (['SrcDir', '"""*.mhd"""'], {}), "(SrcDir, '*.mhd')\n", (4168, 4185), True, 'import os.path as osp\n'), ((5118, 5174), 'pickle.dump', 'pickle.dump', (['keep_mhd_list', 'fid', 'pickle.HIGHEST_PROTOCOL'], {}), '(keep_mhd_list, fid, pickle.HIGHEST_PROTOCOL)\n', (5129, 5174), False, 'import pickle\n'), ((1199, 1256), 'os.system', 'os.system', (["(worker + ' 0 1 ' + src_liver + ' ' + dst_liver)"], {}), "(worker + ' 0 1 ' + src_liver + ' ' + dst_liver)\n", (1208, 1256), False, 'import os\n'), ((1269, 1336), 'os.system', 'os.system', (["(worker + ' 0 0 ' + src_liver_mask + ' ' + dst_liver_mask)"], {}), "(worker + ' 0 0 ' + src_liver_mask + ' ' + dst_liver_mask)\n", (1278, 1336), False, 'import os\n'), ((2872, 2892), 'os.path.dirname', 'osp.dirname', (['mhdpath'], {}), '(mhdpath)\n', (2883, 2892), True, 'import os.path as osp\n'), ((1374, 1431), 'os.system', 'os.system', (["(worker + ' 1 1 ' + src_liver + ' ' + dst_liver)"], {}), "(worker + ' 1 1 ' + src_liver + ' ' + dst_liver)\n", (1383, 1431), False, 'import os\n'), ((1444, 1511), 'os.system', 'os.system', (["(worker + ' 1 0 ' + src_liver_mask + ' ' + dst_liver_mask)"], {}), "(worker + ' 1 0 ' + src_liver_mask + ' ' + dst_liver_mask)\n", (1453, 1511), False, 'import os\n'), ((4593, 4609), 'pickle.load', 'pickle.load', (['fid'], {}), '(fid)\n', (4604, 4609), False, 'import pickle\n'), ((4662, 4696), 'pickle.load', 'pickle.load', (['fid'], {'encoding': '"""bytes"""'}), "(fid, encoding='bytes')\n", (4673, 4696), False, 'import pickle\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Global matplotlib defaults for every figure produced by this module:
# larger axis labels/ticks and the Arial sans-serif face used in the paper figures.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Arial'
# Where to save the figures
PROJECT_ROOT_DIR = "."
PROJECT_SAVE_DIR = "Figure_PDFs"
import os
if not (os.path.isdir(PROJECT_ROOT_DIR+'/'+PROJECT_SAVE_DIR)):
    print('Figure directory didn\'t exist, creating now.')
    # BUG FIX: os.mkdir() has no exist_ok keyword (that's os.makedirs) and the
    # original call raised TypeError whenever the directory was missing.
    os.makedirs(PROJECT_ROOT_DIR+'/'+PROJECT_SAVE_DIR, exist_ok=True)
else:
    print('Figure directory exists.')
# Ignore warnings
# NOTE: these filters are process-wide — they also hide deprecation/user
# warnings raised by library code (sklearn, matplotlib, ...). Remove when debugging.
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=UserWarning)
from scipy import stats
import copy
import pickle as pkl
from tqdm import tqdm
import joblib
import pandas as pd
import seaborn as sns
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.ensemble import RandomForestRegressor
import torch
from torchvision import transforms
from torch.utils.data import Dataset, DataLoader
# Define a function to save future figures to PDFs
def savepdf(fig, name):
    """Save *fig* as ``<PROJECT_ROOT_DIR>/<PROJECT_SAVE_DIR>/<name>.pdf``."""
    out_path = PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR + '/' + name + '.pdf'
    fig.savefig(out_path)
# To evaluate the statistics between predicted and true PM2.5
def eval_stat(y_train_pred, y_train):
    """Correlation statistics between predicted and true PM2.5.

    Parameters
    ----------
    y_train_pred : array_like
        Predicted values.
    y_train : ndarray
        True values; flattened with ``ravel()`` before comparison.

    Returns
    -------
    (spearman_r, spearman_p, pearson_r, pearson_p) : tuple of floats
    """
    y_true_flat = y_train.ravel()
    # Compute each correlation once; the original called spearmanr and
    # pearsonr twice each (once per component), doubling the work.
    Rsquared, pvalue = stats.spearmanr(y_train_pred, y_true_flat)
    Rsquared_pearson, pvalue_pearson = stats.pearsonr(y_train_pred, y_true_flat)
    return Rsquared, pvalue, Rsquared_pearson, pvalue_pearson
# To plot the predicted and true PM2.5 along with the calculated statistics
def plot_result(y_pred, y_true, Rsquared, pvalue, Rsquared_pearson, pvalue_pearson, plot_label="train", save=True,
                fig_name="", lower_bound=0, upper_bound=100, spatial_R=-1):
    """Plot a 2-D histogram of predicted vs. true PM2.5 annotated with statistics.

    Parameters
    ----------
    y_pred, y_true : array_like
        Predicted and true PM2.5 values.
    Rsquared, pvalue, Rsquared_pearson, pvalue_pearson : float
        Precomputed statistics (see ``eval_stat``).
    plot_label : str
        'train' or anything else ('test'); only changes the legend box text.
    save : bool
        If True, write the figure to the project figure directory.
    fig_name : str
        Filename stem for the saved PDF.
    lower_bound, upper_bound : float
        Range of the 1:1 reference line.
    spatial_R : float
        Spatial Pearson r to annotate; -1 (default) hides the annotation.
    """
    plt.clf()
    fig, ax = plt.subplots(figsize=(12, 10))
    data = pd.DataFrame(data={'y_true': y_true, 'y_pred': y_pred})
    ax = sns.histplot(data, x='y_true', y='y_pred', cbar=True, color='orange')
    # 1:1 reference line
    ax.plot([lower_bound, upper_bound], [lower_bound, upper_bound], 'r--', lw=4)
    # Raw strings: '\m' is an invalid string escape (SyntaxWarning on Python
    # >= 3.12); the rendered mathtext label is byte-identical to the original.
    ax.set_xlabel(r'True $PM_{2.5}$ ($\mu $g m$^{-3}$)', size = 20)
    ax.set_ylabel(r'Predicted $PM_{2.5}$ ($\mu $g m$^{-3}$)', size = 20)
    ax.tick_params(labelsize = 15)
    ax.text(0.02, 0.98, 'Spearman r = '+ str(round(Rsquared,2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.94, 'Spearman p-value = '+ str(round(pvalue,2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.90, 'Pearson r = '+ str(round(Rsquared_pearson,2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.86, 'Pearson p-value = '+ str(round(pvalue_pearson,3)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.82, 'RMSE = '+ str(round(np.sqrt(metrics.mean_squared_error(y_true, y_pred)),2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.78, 'MAE = '+ str(round(metrics.mean_absolute_error(y_true, y_pred),2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    ax.text(0.02, 0.74, '% error = '+ str(round(metrics.mean_absolute_error(y_true, y_pred)/np.mean(y_true)*100,1))+'%', ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    if spatial_R != -1:
        ax.text(0.02, 0.70, 'Spatial Pearson r = ' + str(round(spatial_R, 2)), ha='left', va='top', color='black', weight='roman', fontsize=16, transform=ax.transAxes)
    if plot_label == "train":
        ax.text(0.65, 0.10, 'training dataset', bbox=dict(facecolor='grey', alpha=0.9), ha="left", va="top", color='black', weight='roman', fontsize=20, transform=ax.transAxes)
    else:
        ax.text(0.65, 0.10, 'test dataset', bbox=dict(facecolor='grey', alpha=0.9), ha="left", va="top", color='black', weight='roman', fontsize=20, transform=ax.transAxes)
    # plt.gca().set_aspect('equal', adjustable='box')
    if save:
        plt.savefig(PROJECT_ROOT_DIR+'/'+PROJECT_SAVE_DIR+'/'+fig_name+'.pdf', dpi=300)
    plt.show()
    del data, ax
    return
# To plot the spatial R and RMSE for the predicted and true PM2.5 along with the calculated statistics
def spatialRPlot(color, y_test_ref, y_test_ref_pred_raw, plot_label = 'test', save=False, fig_name="", line_range=[50, 150]):
    """Scatter plot of (station-averaged) predicted vs. true PM2.5 with spatial statistics.

    Parameters
    ----------
    color : matplotlib color for the scatter points.
    y_test_ref : array_like of true values.
    y_test_ref_pred_raw : array_like of predicted values.
    plot_label : 'train' or anything else ('test'); only changes the label box.
    save : if True, save via ``savepdf`` under *fig_name*.
    line_range : [lo, hi] endpoints of the 1:1 reference line.
        NOTE: a mutable default is kept for signature compatibility; it is
        never mutated here.
    """
    plt.clf()
    Rsquared, pvalue, Rsquared_pearson, pvalue_pearson = eval_stat(y_test_ref_pred_raw, y_test_ref)
    # (Removed a redundant tuple assignment that merely aliased the inputs
    # and statistics back to themselves.)
    plt.rcParams.update({'mathtext.default': 'regular' })
    fig, ax = plt.subplots(figsize = (8,8))
    ax.scatter(y_test_ref, y_test_ref_pred_raw, color = color,alpha =1, edgecolors='navy', s = 100)
    ax.plot(line_range, line_range, 'k--', lw=4)
    # Raw strings avoid the invalid '\m' escape (SyntaxWarning on Python >= 3.12);
    # the rendered label text is unchanged.
    ax.set_xlabel(r'True $PM_{2.5}$ ($\mu $g m$^{-3}$)', size = 25)
    ax.set_ylabel(r'Predicted $PM_{2.5}$ ($\mu $g m$^{-3}$)', size = 25)
    ax.tick_params(labelsize = 25)
    horozontal_ax = 0.05
    vertical_offset = 0.2
    ax.text(horozontal_ax, 0.72+vertical_offset, 'Spatial Pearson r = '+ str(round(Rsquared_pearson,2)), color='black', weight='roman',
            fontsize=25,transform=ax.transAxes)
    ax.text(horozontal_ax, 0.65+vertical_offset, 'p-value = '+ str(round(pvalue_pearson,3)), color='black', weight='roman',
            fontsize=25,transform=ax.transAxes)
    ax.text(horozontal_ax, 0.58+vertical_offset, 'RMSE = '+ str(round(np.sqrt(metrics.mean_squared_error(y_test_ref, y_test_ref_pred_raw)),2)),
            color='black', weight='roman', fontsize=25, transform=ax.transAxes)
    if plot_label == "train":
        ax.text(0.575, 0.014, 'training dataset', bbox=dict(facecolor='grey', alpha=0.9),color='black', weight='roman',
                fontsize=25,transform=ax.transAxes)
    else:
        ax.text(0.665, 0.0190, 'test dataset', bbox=dict(facecolor='grey', alpha=0.9),color='black', weight='roman',
                fontsize=25,transform=ax.transAxes)
    plt.tight_layout()
    if save:
        savepdf(fig, fig_name)
    del fig, ax
    return
# Image dataset used for downstream supervised task with regular MSE loss
class MyPM25Dataset(Dataset):
    # Image dataset for the downstream supervised PM2.5 task (regular MSE loss).
    # Each item is (img, target) or, with meteo features,
    # (img, meteo, target, target_pred).
    def __init__(self, root_dir, holdout, crop_dim=0, img_transform=None, mode='train', train_stations=-1,
                 requires_meteo=False, meteo_model=None, rf_train=None, rf_test=None, normalized=False):
        """
        Args:
            root_dir (string): Path to the pickled PM2.5 data (a list of dicts
                with keys 'PM25', 'Station_index', 'Image' and, optionally, 'Meteo')
            holdout (list of station index): Stations reserved for testing;
                must be specified if mode is 'test'
            crop_dim (int, optional): Dimension for center-cropping the image
                in __getitem__ (0 disables cropping)
            mode ('train' or 'test', optional): Whether the dataset is for
                training or testing
            img_transform (callable, optional): Optional transform to be applied
                on an image.
            train_stations (integer): Number of stations to be used for training
                (-1 keeps all non-holdout stations)
            requires_meteo (boolean): Whether to use meteorological features or not
            meteo_model (optional): RandomTreesEmbedding model
            rf_train (optional): Train predictions using Random Forest model
            rf_test (optional): Test predictions using Random Forest model
            normalized (optional): Boolean, if True, then all PM2.5 values are
                normalized by mean and std (the fitted StandardScaler is kept
                in self.scaler for later inverse transformation)
        """
        if mode not in ['train', 'test']:
            raise Exception('Mode must be either \'train\' or \'test\'.')
        if requires_meteo and not meteo_model:
            raise Exception('If meteo features are required, you must pass in a model to transform the meteo features.')
        if requires_meteo:
            if mode == 'train' and rf_train is None:
                raise Exception('Please pass in training predictions from Random Forest model')
            elif mode == 'test' and rf_test is None:
                raise Exception('Please pass in test predictions from Random Forest model')
        # Pass in parameters
        self.crop_dim = crop_dim
        self.img_transform = img_transform
        self.mode = mode
        self.holdout = holdout
        self.train_stations = train_stations
        self.requires_meteo = requires_meteo
        self.y_train_pred_rf = rf_train
        self.y_test_pred_rf = rf_test
        self.normalized = normalized
        # Private variables
        self.img_train_PM25, self.img_test_PM25 = [], []
        self.PM25, self.PM25_train, self.PM25_test = [], [], []
        self.train_set = set()
        self.scaler = None
        self.meteo_raw = []
        self.meteo_raw_train, self.meteo_raw_test = [], []
        self.meteo_transformed_train, self.meteo_transformed_test = [], []
        # Load images, meteo features and targets for PM2.5 data
        # NOTE: pickle.load executes arbitrary code — only load trusted files.
        with open(root_dir, "rb") as fp:
            images = pkl.load(fp)
            # First pass: collect all PM2.5 values and the set of training stations
            for data_point in images:
                self.PM25.append(data_point['PM25'])
                if data_point['Station_index'] not in self.holdout:
                    self.train_set.add(data_point['Station_index'])
            self.train_set = sorted(list(self.train_set))
            if self.normalized:
                # Standardize the targets in place (images[i]['PM25'] is rewritten
                # so that the second pass below picks up the normalized values)
                self.scaler = StandardScaler()
                self.PM25 = np.squeeze(self.scaler.fit_transform(np.array(self.PM25).reshape(-1, 1)))
                for i in range(len(images)):
                    images[i]['PM25'] = self.PM25[i]
            if self.train_stations != -1:
                # Optionally restrict training to the first train_stations stations
                self.train_set = self.train_set[:train_stations]
            # Second pass: split records into train/test by station
            for data_point in tqdm(images, position=0, leave=True):
                if data_point['Station_index'] in self.train_set:
                    self.img_train_PM25.append(data_point['Image'])
                    self.PM25_train.append(data_point['PM25'])
                    if self.requires_meteo:
                        self.meteo_raw_train.append(data_point['Meteo'].values)
                elif data_point['Station_index'] in self.holdout:
                    self.img_test_PM25.append(data_point['Image'])
                    self.PM25_test.append(data_point['PM25'])
                    if self.requires_meteo:
                        self.meteo_raw_test.append(data_point['Meteo'].values)
            if self.requires_meteo:
                # Transform the meteo features to increase the representation power
                self.meteo_transformed_train = meteo_model.transform(self.meteo_raw_train).toarray()
                self.meteo_transformed_test = meteo_model.transform(self.meteo_raw_test).toarray()
            # Remove unnecessary data (keep only the split this instance serves)
            if self.mode == 'train':
                del self.img_test_PM25, self.PM25_test, self.meteo_transformed_test
            else:
                del self.img_train_PM25, self.PM25_train, self.meteo_transformed_train
            del self.meteo_raw, self.meteo_raw_train, self.meteo_raw_test
    def __len__(self):
        """Number of samples in the split this instance serves."""
        if self.mode == 'train':
            return len(self.img_train_PM25)
        else:
            return len(self.img_test_PM25)
    def __getitem__(self, idx):
        """Return (img, target) or (img, meteo, target, target_pred) for sample idx."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Get images, transformed meteo features and targets
        if self.mode == 'train':
            img = self.img_train_PM25[idx]
            target = self.PM25_train[idx]
            if self.requires_meteo:
                meteo = self.meteo_transformed_train[idx]
                target_pred = self.y_train_pred_rf[idx]
        else:
            img = self.img_test_PM25[idx]
            target = self.PM25_test[idx]
            if self.requires_meteo:
                meteo = self.meteo_transformed_test[idx]
                target_pred = self.y_test_pred_rf[idx]
        # Crop the image if crop_dim is specified
        if self.crop_dim != 0:
            crop = transforms.Compose([transforms.ToPILImage(),
                                        transforms.CenterCrop((self.crop_dim, self.crop_dim)),
                                        transforms.ToTensor()])
            img = crop(img)
        # Perform data augmentation if transform function is specified
        if self.img_transform:
            img = self.img_transform(img)
        if self.requires_meteo:
            return img, meteo, target, target_pred
        else:
            return img, target
# Temporally sorted image dataset used for downstream supervised task with weighted MSE loss
class MyPM25DatasetSorted(Dataset):
    # Temporally sorted image dataset for the downstream supervised task with a
    # weighted MSE loss. Unlike MyPM25Dataset, every item is a *list* of
    # samples sharing one timestamp (one "day"), there is no crop_dim support,
    # and img_transform is applied eagerly in __init__ rather than per access.
    def __init__(self, root_dir, holdout, img_transform=None, mode='train', train_stations=-1,
                 requires_meteo=False, meteo_model=None, rf_train=None, rf_test=None, normalized=False):
        """
        Args:
            root_dir (string): Directory of PM2.5 data
            holdout (list of station index): Must be specified if mode is 'test'
            mode ('train' or 'test', optional): Whether the dataset is for
                training or testing
            img_transform (callable, optional): Optional transform to be applied
                on an image (applied once, at construction time).
            target_transform (boolean, optional): If true, then normalize y
            train_stations (integer): Number of stations to be used for training
            requires_meteo (boolean): Whether to use meteorological features or not
            meteo_model (optional): RandomTreesEmbedding model
            rf_train (optional): Train predictions using Random Forest model,
                grouped per timestamp in the same order as the data
                (presumably one array per day — TODO confirm against caller)
            rf_test (optional): Test predictions using Random Forest model
            normalized (optional): Boolean, if True, then all PM2.5 values are normalized by mean and std
        """
        if mode not in ['train', 'test']:
            raise Exception('Mode must be either \'train\' or \'test\'.')
        if requires_meteo and not meteo_model:
            raise Exception('If meteo features are required, you must pass in a model to transform the meteo features.')
        if requires_meteo:
            if mode == 'train' and rf_train is None:
                raise Exception('Please pass in training predictions from Random Forest model')
            elif mode == 'test' and rf_test is None:
                raise Exception('Please pass in test predictions from Random Forest model')
        # Pass in parameters
        self.img_transform = img_transform
        self.mode = mode
        self.holdout = holdout
        self.train_stations = train_stations
        self.requires_meteo = requires_meteo
        self.y_train_pred_rf = rf_train
        self.y_test_pred_rf = rf_test
        self.normalized = normalized
        # Private variables
        self.img_train_PM25, self.img_test_PM25 = [], []
        self.PM25, self.PM25_train, self.PM25_test = [], [], []
        self.train_set = set()
        self.scaler = None
        self.meteo_raw = []
        self.meteo_raw_train, self.meteo_raw_test = [], []
        self.meteo_transformed_train, self.meteo_transformed_test = [], []
        # Load images, meteo features and targets for PM2.5 data
        with open(root_dir, "rb") as fp:
            # Sort the images based on their timestamp
            images = pkl.load(fp)
            # First pass: collect targets and the set of training stations
            for data_point in tqdm(images, position=0, leave=True):
                self.PM25.append(data_point['PM25'])
                if data_point['Station_index'] not in self.holdout:
                    self.train_set.add(data_point['Station_index'])
            self.train_set = sorted(list(self.train_set))
            if self.normalized:
                # Standardize targets in place so the grouping pass below sees them
                self.scaler = StandardScaler()
                self.PM25 = np.squeeze(self.scaler.fit_transform(np.array(self.PM25).reshape(-1, 1)))
                for i in range(len(images)):
                    images[i]['PM25'] = self.PM25[i]
            # Timestamp is taken from the Meteo Series' .name attribute
            images.sort(key=lambda x: x['Meteo'].name)
            cur_timestamp_train, cur_timestamp_test = None, None
            if self.train_stations != -1:
                self.train_set = self.train_set[:train_stations]
            # Second pass: group consecutive same-timestamp records into one
            # sub-list per day, separately for the train and test streams
            for data_point in tqdm(images, position=0, leave=True):
                if data_point['Station_index'] in self.train_set:
                    if data_point['Meteo'].name != cur_timestamp_train:
                        # New day encountered: open a fresh group
                        cur_timestamp_train = data_point['Meteo'].name
                        self.img_train_PM25.append([])
                        self.PM25_train.append([])
                        if self.requires_meteo:
                            self.meteo_raw_train.append([])
                    self.img_train_PM25[-1].append(data_point['Image'])
                    self.PM25_train[-1].append(data_point['PM25'])
                    if self.requires_meteo:
                        self.meteo_raw_train[-1].append(data_point['Meteo'].values)
                elif data_point['Station_index'] in self.holdout:
                    if data_point['Meteo'].name != cur_timestamp_test:
                        cur_timestamp_test = data_point['Meteo'].name
                        self.img_test_PM25.append([])
                        self.PM25_test.append([])
                        if self.requires_meteo:
                            self.meteo_raw_test.append([])
                    self.img_test_PM25[-1].append(data_point['Image'])
                    self.PM25_test[-1].append(data_point['PM25'])
                    if self.requires_meteo:
                        self.meteo_raw_test[-1].append(data_point['Meteo'].values)
            # Perform data augmentation if transform function is specified
            # (eager, unlike MyPM25Dataset which transforms in __getitem__)
            if self.img_transform:
                for i in range(len(self.img_train_PM25)):
                    for j in range(len(self.img_train_PM25[i])):
                        self.img_train_PM25[i][j] = self.img_transform(self.img_train_PM25[i][j])
                for i in range(len(self.img_test_PM25)):
                    for j in range(len(self.img_test_PM25[i])):
                        self.img_test_PM25[i][j] = self.img_transform(self.img_test_PM25[i][j])
            if self.requires_meteo:
                # Transform the meteo features to increase the representation power
                for meteo_day in tqdm(self.meteo_raw_train, position=0, leave=True):
                    self.meteo_transformed_train.append(meteo_model.transform(meteo_day).toarray())
                for meteo_day in tqdm(self.meteo_raw_test, position=0, leave=True):
                    self.meteo_transformed_test.append(meteo_model.transform(meteo_day).toarray())
                self.meteo_transformed_train = np.array(self.meteo_transformed_train)
                self.meteo_transformed_test = np.array(self.meteo_transformed_test)
            # Remove unnecessary data (keep only the split this instance serves)
            if self.mode == 'train':
                del self.img_test_PM25, self.PM25_test, self.meteo_transformed_test
            else:
                del self.img_train_PM25, self.PM25_train, self.meteo_transformed_train
            del self.meteo_raw, self.meteo_raw_train, self.meteo_raw_test
    def __len__(self):
        """Number of *days* (timestamp groups), not individual samples."""
        if self.mode == 'train':
            return len(self.img_train_PM25)
        else:
            return len(self.img_test_PM25)
    def __getitem__(self, idx):
        """Return the idx-th day's lists: (imgs, targets) or (imgs, meteo, targets, target_preds)."""
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Get images, transformed meteo features and targets
        if self.mode == 'train':
            img = self.img_train_PM25[idx]
            target = self.PM25_train[idx]
            if self.requires_meteo:
                meteo = self.meteo_transformed_train[idx]
                target_pred = self.y_train_pred_rf[idx]
        else:
            img = self.img_test_PM25[idx]
            target = self.PM25_test[idx]
            if self.requires_meteo:
                meteo = self.meteo_transformed_test[idx]
                target_pred = self.y_test_pred_rf[idx]
        if self.requires_meteo:
            return img, meteo, target, target_pred
        else:
            return img, target
# Load Random Trees Embedding and Random Forest model for regular MSE loss
def loadRTandRFModel(root_dir, rt_dir, rf_dir, holdout):
    """Load the Random Trees Embedding and Random Forest models and build the
    train/test meteo feature matrices (regular, un-grouped layout).

    Args:
        root_dir: Path to the pickled PM2.5 records (dicts with 'Meteo',
            'PM25' and 'Station_index').
        rt_dir: Path to the saved RandomTreesEmbedding model (joblib).
        rf_dir: Path to the saved RandomForest model (joblib).
        holdout: Station indices reserved for testing.

    Returns:
        (rt_model, rf_model, meteo_transformed_train, y_train,
         meteo_transformed_test, y_test)
    """
    meteo_raw_train, y_train, meteo_raw_test, y_test = [], [], [], []
    # NOTE: the original also accumulated every record into an unused
    # `meteo_raw` list (converted to an array, then deleted) — removed.
    with open(root_dir, "rb") as fp:
        for data_point in pkl.load(fp):
            if data_point['Station_index'] not in holdout:
                meteo_raw_train.append(data_point['Meteo'].values)
                y_train.append(data_point['PM25'])
            else:
                meteo_raw_test.append(data_point['Meteo'].values)
                y_test.append(data_point['PM25'])
    meteo_raw_train, y_train = np.array(meteo_raw_train), np.array(y_train)
    meteo_raw_test, y_test = np.array(meteo_raw_test), np.array(y_test)
    # Load Random Trees Embedding Model
    rt_model = joblib.load(rt_dir)
    # Load Random Forest Model
    print("Loading Random Forest Model...")
    rf_model = joblib.load(rf_dir)
    # Transform the meteo features
    meteo_transformed_train = rt_model.transform(meteo_raw_train).toarray()
    meteo_transformed_test = rt_model.transform(meteo_raw_test).toarray()
    return rt_model, rf_model, meteo_transformed_train, y_train, meteo_transformed_test, y_test
# Load Random Trees Embedding and Random Forest model with temporally sorted images for weighted MSE loss
def loadRTandRFModelSorted(root_dir, rt_dir, rf_dir, holdout):
    """Load the RT-embedding and RF models and build meteo features grouped by
    timestamp (one sub-list per day) for the weighted-MSE pipeline.

    Returns (rt_model, rf_model, meteo_transformed_train, y_train,
    meteo_transformed_test, y_test), where the train/test arrays are grouped
    per day in temporal order.
    """
    with open(root_dir, "rb") as fp:
        # Sort the images based on their timestamp
        images = pkl.load(fp)
        images.sort(key=lambda x: x['Meteo'].name)
        meteo_raw_train, y_train, meteo_raw_test, y_test = [], [], [], []
        cur_timestamp_train, cur_timestamp_test = None, None
        # Group consecutive same-timestamp records into one sub-list per day,
        # separately for the train stream and the holdout (test) stream
        for data_point in images:
            if data_point['Station_index'] not in holdout:
                if data_point['Meteo'].name != cur_timestamp_train:
                    # New day encountered: open a fresh group
                    cur_timestamp_train = data_point['Meteo'].name
                    meteo_raw_train.append([])
                    y_train.append([])
                meteo_raw_train[-1].append(data_point['Meteo'].values)
                y_train[-1].append(data_point['PM25'])
            else:
                if data_point['Meteo'].name != cur_timestamp_test:
                    cur_timestamp_test = data_point['Meteo'].name
                    meteo_raw_test.append([])
                    y_test.append([])
                meteo_raw_test[-1].append(data_point['Meteo'].values)
                y_test[-1].append(data_point['PM25'])
        # NOTE(review): days may have unequal sample counts, so these arrays are
        # presumably ragged (dtype=object under recent numpy) — confirm.
        meteo_raw_train, y_train = np.array(meteo_raw_train), np.array(y_train)
        meteo_raw_test, y_test = np.array(meteo_raw_test), np.array(y_test)
    # Load Random Trees Embedding Model
    rt_model = joblib.load(rt_dir)
    # Load Random Forest Model
    print("Loading Random Forest Model...")
    rf_model = joblib.load(rf_dir)
    # Transform the meteo features
    meteo_transformed_train, meteo_transformed_test = [], []
    for meteo_day in tqdm(meteo_raw_train, position=0, leave=True):
        meteo_transformed_train.append(rt_model.transform(meteo_day).toarray())
    for meteo_day in tqdm(meteo_raw_test, position=0, leave=True):
        meteo_transformed_test.append(rt_model.transform(meteo_day).toarray())
    meteo_transformed_train = np.array(meteo_transformed_train)
    meteo_transformed_test = np.array(meteo_transformed_test)
    return rt_model, rf_model, meteo_transformed_train, y_train, meteo_transformed_test, y_test
# Make predictions with the loaded Random Forest model for regular MSE loss
def predictWithRF(rf_model, meteo_transformed_train, meteo_transformed_test):
    """Run the fitted Random Forest on the train and test feature matrices.

    Returns (train_predictions, test_predictions).
    """
    return (rf_model.predict(meteo_transformed_train),
            rf_model.predict(meteo_transformed_test))
# Make predictions with the loaded Random Forest model and temporally sorted images for weighted MSE loss
def predictWithRFSorted(rf_model, meteo_transformed_train, meteo_transformed_test):
    """Run the Random Forest day-by-day on temporally grouped feature arrays.

    Returns (train_predictions, test_predictions), each an array with one row
    of predictions per day group.
    """
    print("Predicting with Random Forest Model...")
    train_preds = np.array([rf_model.predict(day_features)
                            for day_features in tqdm(meteo_transformed_train, position=0, leave=True)])
    test_preds = np.array([rf_model.predict(day_features)
                           for day_features in tqdm(meteo_transformed_test, position=0, leave=True)])
    return train_preds, test_preds
# Initialize the data loader for CNN models with regular MSE loss
def initializeCNNdata(root_dir, img_transform, batch_size, crop_dim=0, holdout=None, train_stations=-1, requires_meteo=False, rt_model=None, rf_train=None, rf_test=None, normalized=False):
    """Build the train/test datasets and data loaders for the regular-MSE CNN.

    Returns (train_loader, train_loader_for_eval, test_loader, extra) where
    *extra* is the meteo feature dimension when requires_meteo, else the
    fitted target scaler (or None).
    """
    shared_kwargs = dict(root_dir=root_dir, holdout=holdout, img_transform=img_transform,
                         crop_dim=crop_dim, train_stations=train_stations,
                         requires_meteo=requires_meteo, normalized=normalized)
    if requires_meteo:
        if (rt_model is None) or (rf_train is None) or (rf_test is None):
            raise Exception("Must specify rt_model, rf_train and rf_test.")
        train_dataset_PM25 = MyPM25Dataset(mode='train', meteo_model=rt_model, rf_train=rf_train, **shared_kwargs)
        test_dataset_PM25 = MyPM25Dataset(mode='test', meteo_model=rt_model, rf_test=rf_test, **shared_kwargs)
    else:
        train_dataset_PM25 = MyPM25Dataset(mode='train', **shared_kwargs)
        test_dataset_PM25 = MyPM25Dataset(mode='test', **shared_kwargs)
    # NOTE(review): np.random.seed(2020) runs immediately and passes None as
    # worker_init_fn (which expects a callable) — preserved as in the original.
    train_dataloader_PM25 = DataLoader(train_dataset_PM25, batch_size=batch_size, shuffle=True, num_workers=2, worker_init_fn=np.random.seed(2020))
    train_dataloader_PM25_for_test = DataLoader(train_dataset_PM25, batch_size=128, shuffle=False)
    test_dataloader_PM25 = DataLoader(test_dataset_PM25, batch_size=128, shuffle=False)
    print(len(train_dataset_PM25), len(test_dataset_PM25))
    if requires_meteo:
        return train_dataloader_PM25, train_dataloader_PM25_for_test, test_dataloader_PM25, train_dataset_PM25[0][1].shape[0]
    else:
        return train_dataloader_PM25, train_dataloader_PM25_for_test, test_dataloader_PM25, train_dataset_PM25.scaler
# Initialize the data loader for CNN models with weighted MSE loss
def initializeSortedCNNdata(root_dir, img_transform, batch_size, crop_dim=0, holdout=None, train_stations=-1, requires_meteo=False, rt_model=None, rf_train=None, rf_test=None, normalized=False):
    """Build the train/test datasets and data loaders for the weighted-MSE CNN
    (temporally sorted, per-day grouped data).

    Note:
        crop_dim is accepted only for signature compatibility with
        initializeCNNdata and is NOT forwarded: MyPM25DatasetSorted.__init__
        has no crop_dim parameter, so forwarding it (as the original did)
        raised ``TypeError: unexpected keyword argument 'crop_dim'``.

    Returns (train_loader, train_loader_for_eval, test_loader, extra) where
    *extra* is the meteo feature dimension when requires_meteo, else the
    fitted target scaler (or None).
    """
    shared_kwargs = dict(root_dir=root_dir, holdout=holdout, img_transform=img_transform,
                         train_stations=train_stations, requires_meteo=requires_meteo,
                         normalized=normalized)
    if requires_meteo:
        if (rt_model is None) or (rf_train is None) or (rf_test is None):
            raise Exception("Must specify rt_model, rf_train and rf_test.")
        train_dataset_PM25 = MyPM25DatasetSorted(mode='train', meteo_model=rt_model, rf_train=rf_train, **shared_kwargs)
        test_dataset_PM25 = MyPM25DatasetSorted(mode='test', meteo_model=rt_model, rf_test=rf_test, **shared_kwargs)
    else:
        train_dataset_PM25 = MyPM25DatasetSorted(mode='train', **shared_kwargs)
        test_dataset_PM25 = MyPM25DatasetSorted(mode='test', **shared_kwargs)
    # NOTE(review): np.random.seed(2020) runs immediately and passes None as
    # worker_init_fn (which expects a callable) — preserved as in the original.
    train_dataloader_PM25 = DataLoader(train_dataset_PM25, batch_size=batch_size, shuffle=True, num_workers=2, worker_init_fn=np.random.seed(2020))
    train_dataloader_PM25_for_test = DataLoader(train_dataset_PM25, batch_size=128, shuffle=False)
    test_dataloader_PM25 = DataLoader(test_dataset_PM25, batch_size=128, shuffle=False)
    print(len(train_dataset_PM25), len(test_dataset_PM25))
    if requires_meteo:
        return train_dataloader_PM25, train_dataloader_PM25_for_test, test_dataloader_PM25, train_dataset_PM25[0][1][0].shape[0]
    else:
        return train_dataloader_PM25, train_dataloader_PM25_for_test, test_dataloader_PM25, train_dataset_PM25.scaler
# Get the all the station names for testing only
def getTestStations(root_dir, holdout, sort=False):
    """Return the station index of every record belonging to a holdout station,
    in record order (optionally after sorting records by timestamp)."""
    with open(root_dir, "rb") as fp:
        records = pkl.load(fp)
    if sort:
        records.sort(key=lambda rec: rec['Meteo'].name)
    return [rec['Station_index'] for rec in records if rec['Station_index'] in holdout]
# Get all the station names
def getAllStations(root_dir):
    """Return every distinct station index, preserving first-seen order."""
    with open(root_dir, "rb") as fp:
        records = pkl.load(fp)
    # dict.fromkeys keeps insertion order while dropping duplicates
    return list(dict.fromkeys(rec['Station_index'] for rec in records))
# Calculate spatial Pearson R and RMSE of all stations for testing
def calculateSpatial(y_test_pred, y_test, test_stations):
    """Station-averaged (spatial) Pearson r and RMSE between predictions and truth.

    Returns (pearson_r, rmse, station_avg_pred, station_avg).
    """
    frame = pd.DataFrame({'y_test_pred': y_test_pred, 'y_test': y_test,
                          'test_stations': test_stations})
    per_station = frame.groupby(['test_stations']).mean()
    test_station_avg_pred = np.array(per_station.y_test_pred)
    test_station_avg = np.array(per_station.y_test)
    _, _, Rsquared_pearson, _ = eval_stat(test_station_avg_pred, test_station_avg)
    rmse = np.sqrt(metrics.mean_squared_error(test_station_avg, test_station_avg_pred))
    return Rsquared_pearson, rmse, test_station_avg_pred, test_station_avg
# Create all applicable plots
def plot_all(current_epochs, encoder_name, fig_size, loss_train, loss_test, y_train_pred, y_train, y_test_pred, y_test,
             station_avg_pred, station_avg, spatial_R, spatial_R_test=None, spatial_rmse_test=None, train_stations=-1,
             line_range=[50, 150]):
    """Create and save every evaluation figure for one trained model: the
    train/test loss curves, the train and test prediction-vs-truth plots
    (via plot_result) and the spatial station-average plot (via spatialRPlot).
    File names are derived from encoder_name and, when train_stations > 0,
    suffixed with the number of training stations.

    NOTE(review): line_range=[50, 150] is a mutable default argument; it is
    only passed through to spatialRPlot, never mutated here.
    """
    # Plot and save the train and test loss over epochs
    plt.clf()
    fig, ax = plt.subplots(figsize=(12, 10))
    epochs = range(current_epochs)
    ax.plot(epochs, loss_train, color='b', linewidth=0.5, label='Train loss')
    ax.plot(epochs, loss_test, color='r', linewidth=0.5, label='Test loss')
    ax.set_xlabel('Epochs')
    ax.set_ylabel('Loss')
    ax.set_title('Train and test loss of predicting ground-level PM2.5')
    ax.legend()
    if train_stations > 0:
        savepdf(ax.figure, 'PM2.5_train_test_loss_' + encoder_name + '_train_stations_' + str(train_stations))
    else:
        savepdf(ax.figure, 'PM2.5_train_test_loss_' + encoder_name)
    plt.show()
    del fig, ax
    # Plot the spatial R and RMSE if applicable
    # if spatial_R_test:
    #     plt.clf()
    #     fig = plt.plot(figsize=(16, 16))
    #     ax = plt.gca()
    #     epochs = range(current_epochs)
    #     ax.plot(epochs, spatial_R_test, color='r', linewidth=0.5, label='Test spatial R')
    #     ax.set_xlabel('Epochs')
    #     ax.set_ylabel('Spatial R')
    #     ax.set_title('Test spatial R of predicting ground-level PM2.5')
    #     ax.legend()
    #     if train_stations > 0:
    #         savepdf(ax.figure, 'PM2.5_test_spatial_R_' + encoder_name + '_self_supervision_train_stations_' + str(train_stations))
    #     else:
    #         savepdf(ax.figure, 'PM2.5_test_spatial_R_' + encoder_name + '_self_supervision')
    #     del fig, ax
    # if spatial_rmse_test:
    #     plt.clf()
    #     fig = plt.plot(figsize=(16, 16))
    #     ax = plt.gca()
    #     epochs = range(current_epochs)
    #     ax.plot(epochs, spatial_rmse_test, color='r', linewidth=0.5, label='Test spatial RMSE')
    #     ax.set_xlabel('Epochs')
    #     ax.set_ylabel('Spatial RMSE')
    #     ax.set_title('Test spatial RMSE of predicting ground-level PM2.5')
    #     ax.legend()
    #     if train_stations > 0:
    #         savepdf(ax.figure, 'PM2.5_test_spatial_RMSE_' + encoder_name + '_self_supervision_train_stations_' + str(train_stations))
    #     else:
    #         savepdf(ax.figure, 'PM2.5_test_spatial_RMSE_' + encoder_name + '_self_supervision')
    #     del fig, ax
    # Plot and save the train set predictions
    y_train_pred, y_train = np.squeeze(y_train_pred), np.squeeze(y_train)
    Rsquared, pvalue, Rsquared_pearson, pvalue_pearson = eval_stat(y_train_pred, y_train)
    if train_stations > 0:
        plot_result(y_train_pred, y_train, Rsquared, pvalue, Rsquared_pearson, pvalue_pearson, plot_label='train', save=True,
                    fig_name='PM2.5_train_' + encoder_name + '_train_stations_' + str(train_stations), lower_bound=0, upper_bound=fig_size)
    else:
        plot_result(y_train_pred, y_train, Rsquared, pvalue, Rsquared_pearson, pvalue_pearson, plot_label='train', save=True,
                    fig_name='PM2.5_train_' + encoder_name, lower_bound=0, upper_bound=fig_size)
    # Plot and save the test set predictions
    y_test_pred, y_test = np.squeeze(y_test_pred), np.squeeze(y_test)
    Rsquared, pvalue, Rsquared_pearson, pvalue_pearson = eval_stat(y_test_pred, y_test)
    if train_stations > 0:
        plot_result(y_test_pred, y_test, Rsquared, pvalue, Rsquared_pearson, pvalue_pearson, plot_label='test', save=True,
                    fig_name='PM2.5_test_' + encoder_name + '_train_stations_' + str(train_stations), lower_bound=0, upper_bound=fig_size, spatial_R=spatial_R)
    else:
        plot_result(y_test_pred, y_test, Rsquared, pvalue, Rsquared_pearson, pvalue_pearson, plot_label='test', save=True,
                    fig_name='PM2.5_test_' + encoder_name, lower_bound=0, upper_bound=fig_size, spatial_R=spatial_R)
    # Plot and save the spatial predictions
    if train_stations > 0:
        spatialRPlot('dodgerblue', station_avg, station_avg_pred, plot_label='test', save=True,
                     fig_name='PM2.5_test_spatial_' + encoder_name + '_train_stations_' + str(train_stations), line_range=line_range)
    else:
        spatialRPlot('dodgerblue', station_avg, station_avg_pred, plot_label='test', save=True,
                     fig_name='PM2.5_test_spatial_' + encoder_name, line_range=line_range)
# ########################################################################################
# MIT License
# Copyright (c) 2022 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to the following
# conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
# ######################################################################################## | [
"os.mkdir",
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.clf",
"sklearn.metrics.mean_absolute_error",
"pickle.load",
"numpy.mean",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToPILImage",
"matpl... | [((628, 690), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (651, 690), False, 'import warnings\n'), ((691, 746), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (714, 746), False, 'import warnings\n'), ((368, 424), 'os.path.isdir', 'os.path.isdir', (["(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR)"], {}), "(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR)\n", (381, 424), False, 'import os\n'), ((486, 552), 'os.mkdir', 'os.mkdir', (["(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR)"], {'exist_ok': '(True)'}), "(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR, exist_ok=True)\n", (494, 552), False, 'import os\n'), ((2011, 2020), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2018, 2020), True, 'import matplotlib.pyplot as plt\n'), ((2035, 2065), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (2047, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2077, 2132), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "{'y_true': y_true, 'y_pred': y_pred}"}), "(data={'y_true': y_true, 'y_pred': y_pred})\n", (2089, 2132), True, 'import pandas as pd\n'), ((2142, 2211), 'seaborn.histplot', 'sns.histplot', (['data'], {'x': '"""y_true"""', 'y': '"""y_pred"""', 'cbar': '(True)', 'color': '"""orange"""'}), "(data, x='y_true', y='y_pred', cbar=True, color='orange')\n", (2154, 2211), True, 'import seaborn as sns\n'), ((4447, 4457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4455, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4730), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4728, 4730), True, 'import matplotlib.pyplot as plt\n'), ((5012, 5064), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'mathtext.default': 'regular'}"], {}), "({'mathtext.default': 'regular'})\n", (5031, 
5064), True, 'import matplotlib.pyplot as plt\n'), ((5119, 5147), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5131, 5147), True, 'import matplotlib.pyplot as plt\n'), ((6437, 6455), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6453, 6455), True, 'import matplotlib.pyplot as plt\n'), ((21374, 21393), 'joblib.load', 'joblib.load', (['rt_dir'], {}), '(rt_dir)\n', (21385, 21393), False, 'import joblib\n'), ((21489, 21508), 'joblib.load', 'joblib.load', (['rf_dir'], {}), '(rf_dir)\n', (21500, 21508), False, 'import joblib\n'), ((23307, 23326), 'joblib.load', 'joblib.load', (['rt_dir'], {}), '(rt_dir)\n', (23318, 23326), False, 'import joblib\n'), ((23422, 23441), 'joblib.load', 'joblib.load', (['rf_dir'], {}), '(rf_dir)\n', (23433, 23441), False, 'import joblib\n'), ((23564, 23609), 'tqdm.tqdm', 'tqdm', (['meteo_raw_train'], {'position': '(0)', 'leave': '(True)'}), '(meteo_raw_train, position=0, leave=True)\n', (23568, 23609), False, 'from tqdm import tqdm\n'), ((23712, 23756), 'tqdm.tqdm', 'tqdm', (['meteo_raw_test'], {'position': '(0)', 'leave': '(True)'}), '(meteo_raw_test, position=0, leave=True)\n', (23716, 23756), False, 'from tqdm import tqdm\n'), ((23872, 23905), 'numpy.array', 'np.array', (['meteo_transformed_train'], {}), '(meteo_transformed_train)\n', (23880, 23905), True, 'import numpy as np\n'), ((23935, 23967), 'numpy.array', 'np.array', (['meteo_transformed_test'], {}), '(meteo_transformed_test)\n', (23943, 23967), True, 'import numpy as np\n'), ((24702, 24755), 'tqdm.tqdm', 'tqdm', (['meteo_transformed_train'], {'position': '(0)', 'leave': '(True)'}), '(meteo_transformed_train, position=0, leave=True)\n', (24706, 24755), False, 'from tqdm import tqdm\n'), ((24838, 24890), 'tqdm.tqdm', 'tqdm', (['meteo_transformed_test'], {'position': '(0)', 'leave': '(True)'}), '(meteo_transformed_test, position=0, leave=True)\n', (24842, 24890), False, 'from tqdm import tqdm\n'), ((24982, 
25007), 'numpy.array', 'np.array', (['y_train_pred_rf'], {}), '(y_train_pred_rf)\n', (24990, 25007), True, 'import numpy as np\n'), ((25029, 25053), 'numpy.array', 'np.array', (['y_test_pred_rf'], {}), '(y_test_pred_rf)\n', (25037, 25053), True, 'import numpy as np\n'), ((26881, 26942), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset_PM25'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(train_dataset_PM25, batch_size=128, shuffle=False)\n', (26891, 26942), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((26970, 27030), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset_PM25'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(test_dataset_PM25, batch_size=128, shuffle=False)\n', (26980, 27030), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((29200, 29261), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset_PM25'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(train_dataset_PM25, batch_size=128, shuffle=False)\n', (29210, 29261), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((29289, 29349), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset_PM25'], {'batch_size': '(128)', 'shuffle': '(False)'}), '(test_dataset_PM25, batch_size=128, shuffle=False)\n', (29299, 29349), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((30726, 30750), 'numpy.array', 'np.array', (['df.y_test_pred'], {}), '(df.y_test_pred)\n', (30734, 30750), True, 'import numpy as np\n'), ((30774, 30793), 'numpy.array', 'np.array', (['df.y_test'], {}), '(df.y_test)\n', (30782, 30793), True, 'import numpy as np\n'), ((31408, 31417), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (31415, 31417), True, 'import matplotlib.pyplot as plt\n'), ((31432, 31462), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (31444, 31462), True, 'import matplotlib.pyplot as plt\n'), ((32015, 32025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(32023, 32025), True, 'import matplotlib.pyplot as plt\n'), ((4354, 4447), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR + '/' + fig_name + '.pdf')"], {'dpi': '(300)'}), "(PROJECT_ROOT_DIR + '/' + PROJECT_SAVE_DIR + '/' + fig_name +\n '.pdf', dpi=300)\n", (4365, 4447), True, 'import matplotlib.pyplot as plt\n'), ((11596, 11616), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (11611, 11616), False, 'import torch\n'), ((19686, 19706), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (19701, 19706), False, 'import torch\n'), ((20736, 20748), 'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (20744, 20748), True, 'import pickle as pkl\n'), ((21138, 21157), 'numpy.array', 'np.array', (['meteo_raw'], {}), '(meteo_raw)\n', (21146, 21157), True, 'import numpy as np\n'), ((22093, 22105), 'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (22101, 22105), True, 'import pickle as pkl\n'), ((29845, 29857), 'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (29853, 29857), True, 'import pickle as pkl\n'), ((30281, 30293), 'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (30289, 30293), True, 'import pickle as pkl\n'), ((30896, 30963), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['test_station_avg', 'test_station_avg_pred'], {}), '(test_station_avg, test_station_avg_pred)\n', (30922, 30963), False, 'from sklearn import metrics\n'), ((33624, 33648), 'numpy.squeeze', 'np.squeeze', (['y_train_pred'], {}), '(y_train_pred)\n', (33634, 33648), True, 'import numpy as np\n'), ((33650, 33669), 'numpy.squeeze', 'np.squeeze', (['y_train'], {}), '(y_train)\n', (33660, 33669), True, 'import numpy as np\n'), ((34364, 34387), 'numpy.squeeze', 'np.squeeze', (['y_test_pred'], {}), '(y_test_pred)\n', (34374, 34387), True, 'import numpy as np\n'), ((34389, 34407), 'numpy.squeeze', 'np.squeeze', (['y_test'], {}), '(y_test)\n', (34399, 34407), True, 'import numpy as np\n'), ((9306, 9318), 
'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (9314, 9318), True, 'import pickle as pkl\n'), ((10047, 10083), 'tqdm.tqdm', 'tqdm', (['images'], {'position': '(0)', 'leave': '(True)'}), '(images, position=0, leave=True)\n', (10051, 10083), False, 'from tqdm import tqdm\n'), ((15650, 15662), 'pickle.load', 'pkl.load', (['fp'], {}), '(fp)\n', (15658, 15662), True, 'import pickle as pkl\n'), ((15693, 15729), 'tqdm.tqdm', 'tqdm', (['images'], {'position': '(0)', 'leave': '(True)'}), '(images, position=0, leave=True)\n', (15697, 15729), False, 'from tqdm import tqdm\n'), ((16557, 16593), 'tqdm.tqdm', 'tqdm', (['images'], {'position': '(0)', 'leave': '(True)'}), '(images, position=0, leave=True)\n', (16561, 16593), False, 'from tqdm import tqdm\n'), ((18642, 18692), 'tqdm.tqdm', 'tqdm', (['self.meteo_raw_train'], {'position': '(0)', 'leave': '(True)'}), '(self.meteo_raw_train, position=0, leave=True)\n', (18646, 18692), False, 'from tqdm import tqdm\n'), ((18819, 18868), 'tqdm.tqdm', 'tqdm', (['self.meteo_raw_test'], {'position': '(0)', 'leave': '(True)'}), '(self.meteo_raw_test, position=0, leave=True)\n', (18823, 18868), False, 'from tqdm import tqdm\n'), ((19008, 19046), 'numpy.array', 'np.array', (['self.meteo_transformed_train'], {}), '(self.meteo_transformed_train)\n', (19016, 19046), True, 'import numpy as np\n'), ((19089, 19126), 'numpy.array', 'np.array', (['self.meteo_transformed_test'], {}), '(self.meteo_transformed_test)\n', (19097, 19126), True, 'import numpy as np\n'), ((21193, 21218), 'numpy.array', 'np.array', (['meteo_raw_train'], {}), '(meteo_raw_train)\n', (21201, 21218), True, 'import numpy as np\n'), ((21220, 21237), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (21228, 21237), True, 'import numpy as np\n'), ((21271, 21295), 'numpy.array', 'np.array', (['meteo_raw_test'], {}), '(meteo_raw_test)\n', (21279, 21295), True, 'import numpy as np\n'), ((21297, 21313), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (21305, 
21313), True, 'import numpy as np\n'), ((23126, 23151), 'numpy.array', 'np.array', (['meteo_raw_train'], {}), '(meteo_raw_train)\n', (23134, 23151), True, 'import numpy as np\n'), ((23153, 23170), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (23161, 23170), True, 'import numpy as np\n'), ((23204, 23228), 'numpy.array', 'np.array', (['meteo_raw_test'], {}), '(meteo_raw_test)\n', (23212, 23228), True, 'import numpy as np\n'), ((23230, 23246), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (23238, 23246), True, 'import numpy as np\n'), ((26822, 26842), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (26836, 26842), True, 'import numpy as np\n'), ((29141, 29161), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (29155, 29161), True, 'import numpy as np\n'), ((9679, 9695), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (9693, 9695), False, 'from sklearn.preprocessing import StandardScaler\n'), ((16053, 16069), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (16067, 16069), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3340, 3383), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3367, 3383), False, 'from sklearn import metrics\n'), ((12344, 12367), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (12365, 12367), False, 'from torchvision import transforms\n'), ((12409, 12462), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(self.crop_dim, self.crop_dim)'], {}), '((self.crop_dim, self.crop_dim))\n', (12430, 12462), False, 'from torchvision import transforms\n'), ((12503, 12524), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (12522, 12524), False, 'from torchvision import transforms\n'), ((30571, 30667), 'pandas.DataFrame', 'pd.DataFrame', (["{'y_test_pred': y_test_pred, 'y_test': 
y_test, 'test_stations': test_stations}"], {}), "({'y_test_pred': y_test_pred, 'y_test': y_test, 'test_stations':\n test_stations})\n", (30583, 30667), True, 'import pandas as pd\n'), ((3158, 3200), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3184, 3200), False, 'from sklearn import metrics\n'), ((5933, 5983), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_train', 'my_prediction'], {}), '(y_train, my_prediction)\n', (5959, 5983), False, 'from sklearn import metrics\n'), ((3526, 3569), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3553, 3569), False, 'from sklearn import metrics\n'), ((3570, 3585), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (3577, 3585), True, 'import numpy as np\n'), ((9761, 9780), 'numpy.array', 'np.array', (['self.PM25'], {}), '(self.PM25)\n', (9769, 9780), True, 'import numpy as np\n'), ((16135, 16154), 'numpy.array', 'np.array', (['self.PM25'], {}), '(self.PM25)\n', (16143, 16154), True, 'import numpy as np\n')] |
import math
import numpy as np
def inverse_quaternion(quaternion):
    """Return the conjugate of a (w, x, y, z) quaternion.

    For unit quaternions the conjugate equals the inverse.
    """
    conjugate = np.copy(quaternion)
    # Negate the vector (x, y, z) part; the scalar part w is unchanged.
    conjugate[1] = -conjugate[1]
    conjugate[2] = -conjugate[2]
    conjugate[3] = -conjugate[3]
    return conjugate
def quaternion_product(q1, q2):
    """Return the Hamilton product ``q1 * q2`` of two (w, x, y, z) quaternions."""
    w1, x1, y1, z1 = q1[0], q1[1], q1[2], q1[3]
    w2, x2, y2, z2 = q2[0], q2[1], q2[2], q2[3]
    # dtype=float matches the original float64 accumulator (np.zeros(4)).
    return np.array(
        [
            w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
            w1 * x2 + w2 * x1 + y1 * z2 - z1 * y2,
            w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
            w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
        ],
        dtype=float,
    )
def rotate_by_quaternion(vector, quaternion):
    """Rotate a 3-vector by a (w, x, y, z) quaternion via q * v * q^-1.

    The vector is embedded as a pure quaternion (0, vx, vy, vz) and the
    vector part of the product is returned.
    """
    pure = np.zeros(4)
    pure[1:4] = np.copy(vector)
    # Right-multiply by the conjugate first, then left-multiply by q.
    product = quaternion_product(pure, inverse_quaternion(quaternion))
    product = quaternion_product(np.copy(quaternion), product)
    return product[1:4]
def quaternion2euler(quaternion):
    """Convert a (w, x, y, z) quaternion to Euler angles in radians.

    Parameters
    ----------
    quaternion : sequence of 4 floats
        Quaternion in (w, x, y, z) order.

    Returns
    -------
    ndarray of shape (3,)
        Angles (X, Y, Z) in radians.

    Notes
    -----
    The original implementation converted each ``atan2``/``asin`` result to
    degrees and then multiplied by pi/180 to get radians back; the round trip
    was redundant and lost a few ULPs of precision, so the angles are now
    computed in radians directly.
    """
    w = quaternion[0]
    x = quaternion[1]
    y = quaternion[2]
    z = quaternion[3]
    ysqr = y * y

    # Rotation about the x-axis.
    t0 = +2.0 * (w * x + y * z)
    t1 = +1.0 - 2.0 * (x * x + ysqr)
    angle_x = math.atan2(t0, t1)

    # Rotation about the y-axis; clamp to [-1, 1] so numerical drift cannot
    # push the argument outside asin's domain.
    t2 = +2.0 * (w * y - z * x)
    t2 = +1.0 if t2 > +1.0 else t2
    t2 = -1.0 if t2 < -1.0 else t2
    angle_y = math.asin(t2)

    # Rotation about the z-axis.
    t3 = +2.0 * (w * z + x * y)
    t4 = +1.0 - 2.0 * (ysqr + z * z)
    angle_z = math.atan2(t3, t4)

    return np.array([angle_x, angle_y, angle_z])
def euler2quat(z=0, y=0, x=0):
    """Convert Euler angles (radians) to a (w, x, y, z) quaternion.

    The sign is normalized so the scalar component is non-negative.
    """
    half_z, half_y, half_x = z / 2.0, y / 2.0, x / 2.0
    cz, sz = math.cos(half_z), math.sin(half_z)
    cy, sy = math.cos(half_y), math.sin(half_y)
    cx, sx = math.cos(half_x), math.sin(half_x)
    quat = np.array(
        [
            cx * cy * cz - sx * sy * sz,
            cx * sy * sz + cy * cz * sx,
            cx * cz * sy - sx * cy * sz,
            cx * cy * sz + sx * cz * sy,
        ]
    )
    return -quat if quat[0] < 0 else quat
"math.asin",
"numpy.copy",
"math.atan2",
"numpy.zeros",
"math.sin",
"numpy.array",
"math.cos"
] | [((78, 97), 'numpy.copy', 'np.copy', (['quaternion'], {}), '(quaternion)\n', (85, 97), True, 'import numpy as np\n'), ((184, 195), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (192, 195), True, 'import numpy as np\n'), ((508, 527), 'numpy.copy', 'np.copy', (['quaternion'], {}), '(quaternion)\n', (515, 527), True, 'import numpy as np\n'), ((534, 545), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (542, 545), True, 'import numpy as np\n'), ((557, 572), 'numpy.copy', 'np.copy', (['vector'], {}), '(vector)\n', (564, 572), True, 'import numpy as np\n'), ((1175, 1186), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1183, 1186), True, 'import numpy as np\n'), ((1375, 1386), 'math.cos', 'math.cos', (['z'], {}), '(z)\n', (1383, 1386), False, 'import math\n'), ((1396, 1407), 'math.sin', 'math.sin', (['z'], {}), '(z)\n', (1404, 1407), False, 'import math\n'), ((1417, 1428), 'math.cos', 'math.cos', (['y'], {}), '(y)\n', (1425, 1428), False, 'import math\n'), ((1438, 1449), 'math.sin', 'math.sin', (['y'], {}), '(y)\n', (1446, 1449), False, 'import math\n'), ((1459, 1470), 'math.cos', 'math.cos', (['x'], {}), '(x)\n', (1467, 1470), False, 'import math\n'), ((1480, 1491), 'math.sin', 'math.sin', (['x'], {}), '(x)\n', (1488, 1491), False, 'import math\n'), ((1506, 1636), 'numpy.array', 'np.array', (['[cx * cy * cz - sx * sy * sz, cx * sy * sz + cy * cz * sx, cx * cz * sy - \n sx * cy * sz, cx * cy * sz + sx * cz * sy]'], {}), '([cx * cy * cz - sx * sy * sz, cx * sy * sz + cy * cz * sx, cx * cz *\n sy - sx * cy * sz, cx * cy * sz + sx * cz * sy])\n', (1514, 1636), True, 'import numpy as np\n'), ((913, 931), 'math.atan2', 'math.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (923, 931), False, 'import math\n'), ((1046, 1059), 'math.asin', 'math.asin', (['t2'], {}), '(t2)\n', (1055, 1059), False, 'import math\n'), ((1144, 1162), 'math.atan2', 'math.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (1154, 1162), False, 'import math\n')] |
from numpy.testing._private.utils import assert_array_almost_equal
from quara.protocol.qtomography.standard.standard_qpt import StandardQpt
from quara.protocol.qtomography.standard.standard_povmt import StandardPovmt
from quara.interface.qiskit.api import (
estimate_standard_povmt_from_qiskit,
estimate_standard_qpt_from_qiskit,
estimate_standard_qst_from_qiskit,
generate_empi_dists_from_quara,
)
from quara.protocol.qtomography.standard.standard_qst import StandardQst
import numpy as np
import numpy.testing as npt
import pytest
from quara.interface.qiskit.conversion import (
convert_empi_dists_quara_to_qiskit,
convert_empi_dists_quara_to_qiskit_shots,
convert_state_quara_to_qiskit,
convert_povm_quara_to_qiskit,
convert_gate_quara_to_qiskit,
)
from quara.objects.composite_system_typical import generate_composite_system
from quara.objects.state_typical import generate_state_from_name
from quara.objects.povm_typical import generate_povm_from_name
from quara.objects.gate_typical import generate_gate_from_gate_name
def get_tester_state_names_1qubit():
    """Names of the tester states used for 1-qubit tomography."""
    names = ["x0", "y0", "z0", "z1"]
    return names
def get_tester_povm_names_1qubit():
    """Names of the tester POVMs used for 1-qubit tomography."""
    names = ["x", "y", "z"]
    return names
@pytest.mark.qiskit
@pytest.mark.parametrize(
    ("mode", "num", "true_state_name", "decimal"), [("qubit", 1, "z0", 4)]
)
def test_estimate_standard_qst_from_qiskit(mode, num, true_state_name, decimal):
    """End-to-end QST: quara-generated dists estimated through the qiskit API."""
    c_sys = generate_composite_system(mode, num)
    true_state = generate_state_from_name(c_sys, true_state_name)
    true_state_qiskit = convert_state_quara_to_qiskit(true_state)

    # Resolve the per-system tester-name helper by name; a globals() lookup
    # keeps the naming convention without eval() on a constructed string.
    get_tester_povm_names_method_name = f"get_tester_povm_names_{int(num)}{mode}"
    get_tester_povm_names = globals()[get_tester_povm_names_method_name]()
    tester_povms = []
    tester_povms_qiskit = []
    for tester_povm_name in get_tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qiskit.append(convert_povm_quara_to_qiskit(tester_povm))

    seed = 7896
    qst = StandardQst(
        tester_povms, on_para_eq_constraint=True, schedules="all", seed_data=seed
    )
    # Use the exact probability distributions (weight 1) as empirical dists.
    prob_dists = [(1, np.array(dist)) for dist in qst.calc_prob_dists(true_state)]
    empi_dists_qiskit = convert_empi_dists_quara_to_qiskit(prob_dists)
    shots = convert_empi_dists_quara_to_qiskit_shots(prob_dists)
    label = [2, 2, 2]
    for estimator_name in ["linear", "least_squares"]:
        estimated_state_qiskit = estimate_standard_qst_from_qiskit(
            mode,
            num,
            tester_povms=tester_povms_qiskit,
            empi_dists=empi_dists_qiskit,
            shots=shots,
            label=label,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_state_qiskit,
            true_state_qiskit,
            decimal=decimal,
        )
@pytest.mark.qiskit
@pytest.mark.parametrize(
    ("mode", "num", "true_povm_name", "decimal"), [("qubit", 1, "z", 4)]
)
def test_estimate_standard_povmt_from_qiskit(mode, num, true_povm_name, decimal):
    """End-to-end POVMT: quara-generated dists estimated through the qiskit API."""
    c_sys = generate_composite_system(mode, num)
    true_povm = generate_povm_from_name(true_povm_name, c_sys)
    true_povm_qiskit = convert_povm_quara_to_qiskit(true_povm)

    # Resolve the per-system tester-name helper by name; a globals() lookup
    # keeps the naming convention without eval() on a constructed string.
    get_tester_state_names_method_name = f"get_tester_state_names_{int(num)}{mode}"
    get_tester_state_names = globals()[get_tester_state_names_method_name]()
    tester_states = []
    tester_states_qiskit = []
    for tester_state_name in get_tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qiskit.append(convert_state_quara_to_qiskit(tester_state))

    seed = 7896
    povmt = StandardPovmt(
        tester_states,
        true_povm.num_outcomes,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Use the exact probability distributions (weight 1) as empirical dists.
    prob_dists = [(1, np.array(dist)) for dist in povmt.calc_prob_dists(true_povm)]
    empi_dists_qiskit = convert_empi_dists_quara_to_qiskit(prob_dists)
    shots = convert_empi_dists_quara_to_qiskit_shots(prob_dists)
    label = [2, 2, 2, 2]
    for estimator_name in ["linear", "least_squares"]:
        estimated_povm_qiskit = estimate_standard_povmt_from_qiskit(
            mode,
            num,
            tester_states=tester_states_qiskit,
            empi_dists=empi_dists_qiskit,
            shots=shots,
            label=label,
            num_outcomes=true_povm.num_outcomes,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_povm_qiskit,
            true_povm_qiskit,
            decimal=decimal,
        )
@pytest.mark.qiskit
@pytest.mark.parametrize(
    ("mode", "num", "true_gate_name", "decimal"), [("qubit", 1, "identity", 4)]
)
def test_estimate_standard_qpt_from_qiskit(mode, num, true_gate_name, decimal):
    """End-to-end QPT: quara-generated dists estimated through the qiskit API."""
    dim = 2 ** num
    c_sys = generate_composite_system(mode, num)
    true_gate = generate_gate_from_gate_name(true_gate_name, c_sys)
    true_gate_qiskit = convert_gate_quara_to_qiskit(true_gate, dim)

    # Resolve the per-system tester-name helpers by name; globals() lookups
    # keep the naming convention without eval() on constructed strings.
    get_tester_state_names_method_name = f"get_tester_state_names_{int(num)}{mode}"
    get_tester_state_names = globals()[get_tester_state_names_method_name]()
    tester_states = []
    tester_states_qiskit = []
    for tester_state_name in get_tester_state_names:
        tester_state = generate_state_from_name(c_sys, tester_state_name)
        tester_states.append(tester_state)
        tester_states_qiskit.append(convert_state_quara_to_qiskit(tester_state))

    get_tester_povm_names_method_name = f"get_tester_povm_names_{int(num)}{mode}"
    get_tester_povm_names = globals()[get_tester_povm_names_method_name]()
    tester_povms = []
    tester_povms_qiskit = []
    for tester_povm_name in get_tester_povm_names:
        tester_povm = generate_povm_from_name(tester_povm_name, c_sys)
        tester_povms.append(tester_povm)
        tester_povms_qiskit.append(convert_povm_quara_to_qiskit(tester_povm))

    seed = 7896
    qpt = StandardQpt(
        tester_states,
        tester_povms,
        on_para_eq_constraint=True,
        schedules="all",
        seed_data=seed,
    )
    # Use the exact probability distributions (weight 1) as empirical dists.
    prob_dists = [(1, np.array(dist)) for dist in qpt.calc_prob_dists(true_gate)]
    empi_dists_qiskit = convert_empi_dists_quara_to_qiskit(prob_dists)
    shots = convert_empi_dists_quara_to_qiskit_shots(prob_dists)
    label = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
    for estimator_name in ["linear", "least_squares"]:
        estimated_gate_qiskit = estimate_standard_qpt_from_qiskit(
            mode,
            num,
            tester_states=tester_states_qiskit,
            tester_povms=tester_povms_qiskit,
            empi_dists=empi_dists_qiskit,
            shots=shots,
            label=label,
            estimator_name=estimator_name,
            schedules="all",
        )
        npt.assert_array_almost_equal(
            estimated_gate_qiskit,
            true_gate_qiskit,
            decimal=decimal,
        )
@pytest.mark.qiskit
def test_generate_empi_dists_from_quara_label():
    """The converted label list matches the expected per-schedule outcome counts."""
    expected_label = [2, 2, 2]
    true_empi_dists = [
        expected_label,
        np.array([0.864, 0.136, 0.844, 0.156, 0.49, 0.51]),
    ]
    quara_dists = [
        (1000, np.array([0.864, 0.136])),
        (1000, np.array([0.844, 0.156])),
        (1000, np.array([0.49, 0.51])),
    ]
    actual = generate_empi_dists_from_quara(quara_dists)
    assert actual[0] == true_empi_dists[0]
@pytest.mark.qiskit
def test_generate_empi_dists_from_quara_dists():
    """The flattened distribution values match the concatenated quara dists."""
    expected_dists = np.array([0.864, 0.136, 0.844, 0.156, 0.49, 0.51])
    true_empi_dists = [[2, 2, 2], expected_dists]
    quara_dists = [
        (1000, np.array([0.864, 0.136])),
        (1000, np.array([0.844, 0.156])),
        (1000, np.array([0.49, 0.51])),
    ]
    actual = generate_empi_dists_from_quara(quara_dists)
    npt.assert_array_almost_equal(true_empi_dists[1], actual[1])
| [
"quara.protocol.qtomography.standard.standard_qst.StandardQst",
"quara.protocol.qtomography.standard.standard_qpt.StandardQpt",
"pytest.mark.parametrize",
"numpy.testing.assert_array_almost_equal",
"quara.interface.qiskit.conversion.convert_povm_quara_to_qiskit",
"quara.interface.qiskit.api.estimate_stand... | [((1261, 1361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('mode', 'num', 'true_state_name', 'decimal')", "[('qubit', 1, 'z0', 4)]"], {}), "(('mode', 'num', 'true_state_name', 'decimal'), [(\n 'qubit', 1, 'z0', 4)])\n", (1284, 1361), False, 'import pytest\n'), ((3186, 3284), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('mode', 'num', 'true_povm_name', 'decimal')", "[('qubit', 1, 'z', 4)]"], {}), "(('mode', 'num', 'true_povm_name', 'decimal'), [(\n 'qubit', 1, 'z', 4)])\n", (3209, 3284), False, 'import pytest\n'), ((5243, 5348), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('mode', 'num', 'true_gate_name', 'decimal')", "[('qubit', 1, 'identity', 4)]"], {}), "(('mode', 'num', 'true_gate_name', 'decimal'), [(\n 'qubit', 1, 'identity', 4)])\n", (5266, 5348), False, 'import pytest\n'), ((1460, 1496), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', (['mode', 'num'], {}), '(mode, num)\n', (1485, 1496), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((1515, 1563), 'quara.objects.state_typical.generate_state_from_name', 'generate_state_from_name', (['c_sys', 'true_state_name'], {}), '(c_sys, true_state_name)\n', (1539, 1563), False, 'from quara.objects.state_typical import generate_state_from_name\n'), ((1589, 1630), 'quara.interface.qiskit.conversion.convert_state_quara_to_qiskit', 'convert_state_quara_to_qiskit', (['true_state'], {}), '(true_state)\n', (1618, 1630), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((2180, 2270), 'quara.protocol.qtomography.standard.standard_qst.StandardQst', 'StandardQst', (['tester_povms'], {'on_para_eq_constraint': '(True)', 'schedules': '"""all"""', 'seed_data': 'seed'}), 
"(tester_povms, on_para_eq_constraint=True, schedules='all',\n seed_data=seed)\n", (2191, 2270), False, 'from quara.protocol.qtomography.standard.standard_qst import StandardQst\n'), ((2482, 2528), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit', 'convert_empi_dists_quara_to_qiskit', (['prob_dists'], {}), '(prob_dists)\n', (2516, 2528), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((2542, 2594), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit_shots', 'convert_empi_dists_quara_to_qiskit_shots', (['prob_dists'], {}), '(prob_dists)\n', (2582, 2594), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((3384, 3420), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', (['mode', 'num'], {}), '(mode, num)\n', (3409, 3420), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((3438, 3484), 'quara.objects.povm_typical.generate_povm_from_name', 'generate_povm_from_name', (['true_povm_name', 'c_sys'], {}), '(true_povm_name, c_sys)\n', (3461, 3484), False, 'from quara.objects.povm_typical import generate_povm_from_name\n'), ((3509, 3548), 'quara.interface.qiskit.conversion.convert_povm_quara_to_qiskit', 'convert_povm_quara_to_qiskit', (['true_povm'], {}), '(true_povm)\n', (3537, 3548), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((4118, 4236), 'quara.protocol.qtomography.standard.standard_povmt.StandardPovmt', 
'StandardPovmt', (['tester_states', 'true_povm.num_outcomes'], {'on_para_eq_constraint': '(True)', 'schedules': '"""all"""', 'seed_data': 'seed'}), "(tester_states, true_povm.num_outcomes, on_para_eq_constraint=\n True, schedules='all', seed_data=seed)\n", (4131, 4236), False, 'from quara.protocol.qtomography.standard.standard_povmt import StandardPovmt\n'), ((4485, 4531), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit', 'convert_empi_dists_quara_to_qiskit', (['prob_dists'], {}), '(prob_dists)\n', (4519, 4531), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((4545, 4597), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit_shots', 'convert_empi_dists_quara_to_qiskit_shots', (['prob_dists'], {}), '(prob_dists)\n', (4585, 4597), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((5466, 5502), 'quara.objects.composite_system_typical.generate_composite_system', 'generate_composite_system', (['mode', 'num'], {}), '(mode, num)\n', (5491, 5502), False, 'from quara.objects.composite_system_typical import generate_composite_system\n'), ((5520, 5571), 'quara.objects.gate_typical.generate_gate_from_gate_name', 'generate_gate_from_gate_name', (['true_gate_name', 'c_sys'], {}), '(true_gate_name, c_sys)\n', (5548, 5571), False, 'from quara.objects.gate_typical import generate_gate_from_gate_name\n'), ((5596, 5640), 'quara.interface.qiskit.conversion.convert_gate_quara_to_qiskit', 'convert_gate_quara_to_qiskit', (['true_gate', 'dim'], {}), '(true_gate, dim)\n', (5624, 5640), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, 
convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((6727, 6832), 'quara.protocol.qtomography.standard.standard_qpt.StandardQpt', 'StandardQpt', (['tester_states', 'tester_povms'], {'on_para_eq_constraint': '(True)', 'schedules': '"""all"""', 'seed_data': 'seed'}), "(tester_states, tester_povms, on_para_eq_constraint=True,\n schedules='all', seed_data=seed)\n", (6738, 6832), False, 'from quara.protocol.qtomography.standard.standard_qpt import StandardQpt\n'), ((7080, 7126), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit', 'convert_empi_dists_quara_to_qiskit', (['prob_dists'], {}), '(prob_dists)\n', (7114, 7126), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((7140, 7192), 'quara.interface.qiskit.conversion.convert_empi_dists_quara_to_qiskit_shots', 'convert_empi_dists_quara_to_qiskit_shots', (['prob_dists'], {}), '(prob_dists)\n', (7180, 7192), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((8156, 8194), 'quara.interface.qiskit.api.generate_empi_dists_from_quara', 'generate_empi_dists_from_quara', (['source'], {}), '(source)\n', (8186, 8194), False, 'from quara.interface.qiskit.api import estimate_standard_povmt_from_qiskit, estimate_standard_qpt_from_qiskit, estimate_standard_qst_from_qiskit, generate_empi_dists_from_quara\n'), ((8565, 8603), 'quara.interface.qiskit.api.generate_empi_dists_from_quara', 'generate_empi_dists_from_quara', (['source'], {}), '(source)\n', (8595, 8603), False, 'from quara.interface.qiskit.api import estimate_standard_povmt_from_qiskit, estimate_standard_qpt_from_qiskit, 
estimate_standard_qst_from_qiskit, generate_empi_dists_from_quara\n'), ((8609, 8669), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['true_empi_dists[1]', 'actual[1]'], {}), '(true_empi_dists[1], actual[1])\n', (8638, 8669), True, 'import numpy.testing as npt\n'), ((1980, 2028), 'quara.objects.povm_typical.generate_povm_from_name', 'generate_povm_from_name', (['tester_povm_name', 'c_sys'], {}), '(tester_povm_name, c_sys)\n', (2003, 2028), False, 'from quara.objects.povm_typical import generate_povm_from_name\n'), ((2710, 2902), 'quara.interface.qiskit.api.estimate_standard_qst_from_qiskit', 'estimate_standard_qst_from_qiskit', (['mode', 'num'], {'tester_povms': 'tester_povms_qiskit', 'empi_dists': 'empi_dists_qiskit', 'shots': 'shots', 'label': 'label', 'estimator_name': 'estimator_name', 'schedules': '"""all"""'}), "(mode, num, tester_povms=\n tester_povms_qiskit, empi_dists=empi_dists_qiskit, shots=shots, label=\n label, estimator_name=estimator_name, schedules='all')\n", (2743, 2902), False, 'from quara.interface.qiskit.api import estimate_standard_povmt_from_qiskit, estimate_standard_qpt_from_qiskit, estimate_standard_qst_from_qiskit, generate_empi_dists_from_quara\n'), ((3018, 3111), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['estimated_state_qiskit', 'true_state_qiskit'], {'decimal': 'decimal'}), '(estimated_state_qiskit, true_state_qiskit,\n decimal=decimal)\n', (3047, 3111), True, 'import numpy.testing as npt\n'), ((3909, 3959), 'quara.objects.state_typical.generate_state_from_name', 'generate_state_from_name', (['c_sys', 'tester_state_name'], {}), '(c_sys, tester_state_name)\n', (3933, 3959), False, 'from quara.objects.state_typical import generate_state_from_name\n'), ((4715, 4953), 'quara.interface.qiskit.api.estimate_standard_povmt_from_qiskit', 'estimate_standard_povmt_from_qiskit', (['mode', 'num'], {'tester_states': 'tester_states_qiskit', 'empi_dists': 'empi_dists_qiskit', 'shots': 
'shots', 'label': 'label', 'num_outcomes': 'true_povm.num_outcomes', 'estimator_name': 'estimator_name', 'schedules': '"""all"""'}), "(mode, num, tester_states=\n tester_states_qiskit, empi_dists=empi_dists_qiskit, shots=shots, label=\n label, num_outcomes=true_povm.num_outcomes, estimator_name=\n estimator_name, schedules='all')\n", (4750, 4953), False, 'from quara.interface.qiskit.api import estimate_standard_povmt_from_qiskit, estimate_standard_qpt_from_qiskit, estimate_standard_qst_from_qiskit, generate_empi_dists_from_quara\n'), ((5077, 5168), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['estimated_povm_qiskit', 'true_povm_qiskit'], {'decimal': 'decimal'}), '(estimated_povm_qiskit, true_povm_qiskit,\n decimal=decimal)\n', (5106, 5168), True, 'import numpy.testing as npt\n'), ((6001, 6051), 'quara.objects.state_typical.generate_state_from_name', 'generate_state_from_name', (['c_sys', 'tester_state_name'], {}), '(c_sys, tester_state_name)\n', (6025, 6051), False, 'from quara.objects.state_typical import generate_state_from_name\n'), ((6527, 6575), 'quara.objects.povm_typical.generate_povm_from_name', 'generate_povm_from_name', (['tester_povm_name', 'c_sys'], {}), '(tester_povm_name, c_sys)\n', (6550, 6575), False, 'from quara.objects.povm_typical import generate_povm_from_name\n'), ((7334, 7567), 'quara.interface.qiskit.api.estimate_standard_qpt_from_qiskit', 'estimate_standard_qpt_from_qiskit', (['mode', 'num'], {'tester_states': 'tester_states_qiskit', 'tester_povms': 'tester_povms_qiskit', 'empi_dists': 'empi_dists_qiskit', 'shots': 'shots', 'label': 'label', 'estimator_name': 'estimator_name', 'schedules': '"""all"""'}), "(mode, num, tester_states=\n tester_states_qiskit, tester_povms=tester_povms_qiskit, empi_dists=\n empi_dists_qiskit, shots=shots, label=label, estimator_name=\n estimator_name, schedules='all')\n", (7367, 7567), False, 'from quara.interface.qiskit.api import estimate_standard_povmt_from_qiskit, 
estimate_standard_qpt_from_qiskit, estimate_standard_qst_from_qiskit, generate_empi_dists_from_quara\n'), ((7691, 7782), 'numpy.testing.assert_array_almost_equal', 'npt.assert_array_almost_equal', (['estimated_gate_qiskit', 'true_gate_qiskit'], {'decimal': 'decimal'}), '(estimated_gate_qiskit, true_gate_qiskit,\n decimal=decimal)\n', (7720, 7782), True, 'import numpy.testing as npt\n'), ((7940, 7990), 'numpy.array', 'np.array', (['[0.864, 0.136, 0.844, 0.156, 0.49, 0.51]'], {}), '([0.864, 0.136, 0.844, 0.156, 0.49, 0.51])\n', (7948, 7990), True, 'import numpy as np\n'), ((8349, 8399), 'numpy.array', 'np.array', (['[0.864, 0.136, 0.844, 0.156, 0.49, 0.51]'], {}), '([0.864, 0.136, 0.844, 0.156, 0.49, 0.51])\n', (8357, 8399), True, 'import numpy as np\n'), ((2107, 2148), 'quara.interface.qiskit.conversion.convert_povm_quara_to_qiskit', 'convert_povm_quara_to_qiskit', (['tester_povm'], {}), '(tester_povm)\n', (2135, 2148), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((4041, 4084), 'quara.interface.qiskit.conversion.convert_state_quara_to_qiskit', 'convert_state_quara_to_qiskit', (['tester_state'], {}), '(tester_state)\n', (4070, 4084), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((6133, 6176), 'quara.interface.qiskit.conversion.convert_state_quara_to_qiskit', 'convert_state_quara_to_qiskit', (['tester_state'], {}), '(tester_state)\n', (6162, 6176), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((6654, 6695), 
'quara.interface.qiskit.conversion.convert_povm_quara_to_qiskit', 'convert_povm_quara_to_qiskit', (['tester_povm'], {}), '(tester_povm)\n', (6682, 6695), False, 'from quara.interface.qiskit.conversion import convert_empi_dists_quara_to_qiskit, convert_empi_dists_quara_to_qiskit_shots, convert_state_quara_to_qiskit, convert_povm_quara_to_qiskit, convert_gate_quara_to_qiskit\n'), ((8024, 8048), 'numpy.array', 'np.array', (['[0.864, 0.136]'], {}), '([0.864, 0.136])\n', (8032, 8048), True, 'import numpy as np\n'), ((8067, 8091), 'numpy.array', 'np.array', (['[0.844, 0.156]'], {}), '([0.844, 0.156])\n', (8075, 8091), True, 'import numpy as np\n'), ((8110, 8132), 'numpy.array', 'np.array', (['[0.49, 0.51]'], {}), '([0.49, 0.51])\n', (8118, 8132), True, 'import numpy as np\n'), ((8433, 8457), 'numpy.array', 'np.array', (['[0.864, 0.136]'], {}), '([0.864, 0.136])\n', (8441, 8457), True, 'import numpy as np\n'), ((8476, 8500), 'numpy.array', 'np.array', (['[0.844, 0.156]'], {}), '([0.844, 0.156])\n', (8484, 8500), True, 'import numpy as np\n'), ((8519, 8541), 'numpy.array', 'np.array', (['[0.49, 0.51]'], {}), '([0.49, 0.51])\n', (8527, 8541), True, 'import numpy as np\n'), ((2433, 2452), 'numpy.array', 'np.array', (['prob_dist'], {}), '(prob_dist)\n', (2441, 2452), True, 'import numpy as np\n'), ((4436, 4455), 'numpy.array', 'np.array', (['prob_dist'], {}), '(prob_dist)\n', (4444, 4455), True, 'import numpy as np\n'), ((7031, 7050), 'numpy.array', 'np.array', (['prob_dist'], {}), '(prob_dist)\n', (7039, 7050), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from ionotomo.settings import TFSettings
from ionotomo.tomography.interpolation import RegularGridInterpolator
from ionotomo.tomography.integrate import simps
class RayOp(object):
    r"""Linear operator that performs for any v(x)
    h[i1,...,ir] = \int_R[i1,...,ir] ds M(x) v(x)
    grid : tuple of ndim Tensors specifying grid coordinates used for interpolation
    M : the function over V to integrate, defined on the *grid*
    rays : Tensor with *r* ray index dimensions and last dim is size ndim
        Defines the ray trajectories over which to integrate.
        Shape (i1,...,ir, ndim, N)
    transpose : bool
        If True then Av represents \sum_R \Delta_R(x) v_R M(x)
    """
    def __init__(self,grid,M,rays,dx = None,
            weight = None, transpose = False,
            dtype=TFSettings.tf_float):
        self.dtype = dtype
        self.grid = grid
        self.rays = tf.cast(rays,TFSettings.tf_float)
        if dx is None:
            # No step sizes supplied: derive them from the ray geometry.
            # Segment lengths = Euclidean distance between consecutive ray
            # samples (reduce over axis=-2, the ndim coordinate axis).
            self.dx = tf.sqrt(tf.reduce_sum(tf.square(self.rays[...,1:] - self.rays[...,:-1]),axis=-2))
            # Convert segment lengths to cumulative arc length along each
            # ray, prepending a zero so there is one entry per sample.
            self.dx = tf.cumsum(tf.concat([tf.zeros_like(self.dx[...,0:1]),self.dx],axis=-1),axis=-1)
        else:
            # dx given explicitly: reshape to (1,...,1,N) so it broadcasts
            # against the per-ray sample axis.
            nd = tf.size(tf.shape(rays))
            dxshape = tf.concat([tf.ones_like(tf.shape(rays)[0:-2]),
                    tf.shape(rays)[nd-1:nd]],axis=0)
            self.dx = tf.reshape(dx,dxshape)
        if weight is not None:
            # Optional per-ray weights, shaped to the ray-index (range) shape.
            self.weight = tf.reshape(tf.cast(weight,self.dtype),self.range_shape())
        else:
            self.weight = None
        self.M = tf.cast(M,self.dtype)
        self.transpose = transpose
    def domain_shape(self):
        """Shape of the gridded field M that the operator acts on."""
        return tf.shape(self.M)
    def range_shape(self):
        """Shape of the ray-index dimensions (i1,...,ir)."""
        return tf.shape(self.rays)[:-2]
    def shape(self):
        """Concatenation of range shape and domain shape."""
        return tf.concat([self.range_shape(),self.domain_shape()],axis=0)
    def matmul(self,x,adjoint=False,adjoint_arg=False):
        '''Transform [batch] matrix x with left multiplication: x --> Ax.
        x: Tensor with compatible shape and same dtype as self.
        See class docstring for definition of compatibility.
        adjoint: Python bool. If True, left multiply by the adjoint: A^H x.
        adjoint_arg: Python bool.
        If True, compute A x^H where x^H is the hermitian transpose
        (transposition and complex conjugation).
        name: A name for this `Op.
        Returns:
        A Tensor with shape [..., M, R] and same dtype as self.
        NOTE(review): adjoint and adjoint_arg are currently ignored by this
        implementation.
        '''
        x = tf.cast(x,self.dtype)
        # Pointwise product M(x)*v(x) on the grid.
        Ax = self.M * x
        # Build a linear interpolator over the grid; calling it samples
        # M*v along the ray trajectories.
        Ax = RegularGridInterpolator(self.grid,Ax,method='linear')
        if self.weight is None:
            Ax = Ax(tf.unstack(self.rays,axis=-2))
        else:
            Ax = self.weight*Ax(tf.unstack(self.rays,axis=-2))
        # Integrate along each ray. self.dx is presumably interpreted by
        # simps as the sample coordinates along the ray (it is cumulative
        # arc length when dx was derived) -- TODO confirm against
        # ionotomo.tomography.integrate.simps.
        Ax = simps(Ax, self.dx,axis = -1)
        return Ax
class TECForwardEquation(RayOp):
    """Ray-integration operator whose output is referenced to ray ``i0``.

    Computes the same per-ray integrals as :class:`RayOp`, then subtracts
    the integral obtained for a designated reference ray, giving the usual
    differential form of the forward equation.
    """
    def __init__(self, i0, grid, M, rays, dx=None,
                 weight=None, transpose=False,
                 dtype=TFSettings.tf_float):
        super(TECForwardEquation, self).__init__(grid, M, rays, dx,
                                                 weight, transpose, dtype)
        # Index (along the leading ray dimension) of the reference ray.
        self.i0 = tf.cast(i0, TFSettings.tf_int)
    def matmul(self, x, adjoint=False, adjoint_arg=False):
        """Apply the operator: integrate x along every ray, then subtract
        the value of the reference ray ``i0``.

        adjoint and adjoint_arg are accepted for interface compatibility
        and are ignored, exactly as in the parent class.
        """
        integrals = super(TECForwardEquation, self).matmul(x)
        # Slicing with i0:i0+1 keeps the axis, so broadcasting subtracts
        # the reference value from every entry of the leading dimension.
        return integrals - integrals[self.i0:self.i0 + 1, ...]
if __name__ == '__main__':
    # Smoke test: build a small operator from random data and apply it once.
    rays = np.sort(np.random.uniform(size=[2,2,3,6]),axis=-1)
    M = np.random.normal(size=(100,100,100))
    grid = (np.linspace(0,1,100),)*3
    op = TECForwardEquation(0,grid, M, rays)
    x = np.random.normal(size=(100,100,100))
    # Use the session as a context manager so it is always released, even
    # if graph evaluation raises (the original leaked the session on error).
    with tf.Session() as sess:
        print(sess.run(op.matmul(x)))
| [
"numpy.random.uniform",
"tensorflow.square",
"ionotomo.tomography.interpolation.RegularGridInterpolator",
"tensorflow.reshape",
"tensorflow.Session",
"tensorflow.zeros_like",
"tensorflow.cast",
"tensorflow.shape",
"ionotomo.tomography.integrate.simps",
"numpy.random.normal",
"numpy.linspace",
... | [((4016, 4054), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 100, 100)'}), '(size=(100, 100, 100))\n', (4032, 4054), True, 'import numpy as np\n'), ((4143, 4181), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(100, 100, 100)'}), '(size=(100, 100, 100))\n', (4159, 4181), True, 'import numpy as np\n'), ((4191, 4203), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4201, 4203), True, 'import tensorflow as tf\n'), ((949, 983), 'tensorflow.cast', 'tf.cast', (['rays', 'TFSettings.tf_float'], {}), '(rays, TFSettings.tf_float)\n', (956, 983), True, 'import tensorflow as tf\n'), ((1607, 1629), 'tensorflow.cast', 'tf.cast', (['M', 'self.dtype'], {}), '(M, self.dtype)\n', (1614, 1629), True, 'import tensorflow as tf\n'), ((1708, 1724), 'tensorflow.shape', 'tf.shape', (['self.M'], {}), '(self.M)\n', (1716, 1724), True, 'import tensorflow as tf\n'), ((2526, 2548), 'tensorflow.cast', 'tf.cast', (['x', 'self.dtype'], {}), '(x, self.dtype)\n', (2533, 2548), True, 'import tensorflow as tf\n'), ((2585, 2640), 'ionotomo.tomography.interpolation.RegularGridInterpolator', 'RegularGridInterpolator', (['self.grid', 'Ax'], {'method': '"""linear"""'}), "(self.grid, Ax, method='linear')\n", (2608, 2640), False, 'from ionotomo.tomography.interpolation import RegularGridInterpolator\n'), ((2812, 2839), 'ionotomo.tomography.integrate.simps', 'simps', (['Ax', 'self.dx'], {'axis': '(-1)'}), '(Ax, self.dx, axis=-1)\n', (2817, 2839), False, 'from ionotomo.tomography.integrate import simps\n'), ((3148, 3178), 'tensorflow.cast', 'tf.cast', (['i0', 'TFSettings.tf_int'], {}), '(i0, TFSettings.tf_int)\n', (3155, 3178), True, 'import tensorflow as tf\n'), ((3965, 4001), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '[2, 2, 3, 6]'}), '(size=[2, 2, 3, 6])\n', (3982, 4001), True, 'import numpy as np\n'), ((1407, 1430), 'tensorflow.reshape', 'tf.reshape', (['dx', 'dxshape'], {}), '(dx, dxshape)\n', (1417, 1430), True, 'import tensorflow as tf\n'), 
((1768, 1787), 'tensorflow.shape', 'tf.shape', (['self.rays'], {}), '(self.rays)\n', (1776, 1787), True, 'import tensorflow as tf\n'), ((4065, 4087), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (4076, 4087), True, 'import numpy as np\n'), ((1251, 1265), 'tensorflow.shape', 'tf.shape', (['rays'], {}), '(rays)\n', (1259, 1265), True, 'import tensorflow as tf\n'), ((1498, 1525), 'tensorflow.cast', 'tf.cast', (['weight', 'self.dtype'], {}), '(weight, self.dtype)\n', (1505, 1525), True, 'import tensorflow as tf\n'), ((2691, 2721), 'tensorflow.unstack', 'tf.unstack', (['self.rays'], {'axis': '(-2)'}), '(self.rays, axis=-2)\n', (2701, 2721), True, 'import tensorflow as tf\n'), ((1050, 1101), 'tensorflow.square', 'tf.square', (['(self.rays[..., 1:] - self.rays[..., :-1])'], {}), '(self.rays[..., 1:] - self.rays[..., :-1])\n', (1059, 1101), True, 'import tensorflow as tf\n'), ((2768, 2798), 'tensorflow.unstack', 'tf.unstack', (['self.rays'], {'axis': '(-2)'}), '(self.rays, axis=-2)\n', (2778, 2798), True, 'import tensorflow as tf\n'), ((1153, 1185), 'tensorflow.zeros_like', 'tf.zeros_like', (['self.dx[..., 0:1]'], {}), '(self.dx[..., 0:1])\n', (1166, 1185), True, 'import tensorflow as tf\n'), ((1352, 1366), 'tensorflow.shape', 'tf.shape', (['rays'], {}), '(rays)\n', (1360, 1366), True, 'import tensorflow as tf\n'), ((1313, 1327), 'tensorflow.shape', 'tf.shape', (['rays'], {}), '(rays)\n', (1321, 1327), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 11:46:31 2017
@author: lansford
"""
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from jl_spectra_2_structure.cross_validation import LOAD_CROSS_VALIDATION
from jl_spectra_2_structure.plotting_tools import set_figure_settings
#wasserstein_loss
#kl_div_loss
#Use 3350 max for C2H4, 2200 for CO, and 2000 for NO. Use 750 points for C2H4 and 500 for CO and 450 for NO.
#coverage is 'low', 'high' or a float <= 1
#assert TARGET in ['binding_type','GCN','combine_hollow_sites']
# Local input/output locations for the cross-validation artifacts.
Downloads_folder = os.path.join(os.path.expanduser("~"),'Downloads')
cv_folder = r'C:\Users\lansf\Documents\Data\IR_Materials_Gap\cv_BW\cv_files'
#cv_folder = r'C:\Users\lansf\Documents\Data\IR_Materials_Gap\cv_cv5'
cv_indices = r'C:\Users\lansf\Documents\Data\IR_Materials_Gap\cv_BW\cv_indices'
# Publication-style matplotlib defaults.
set_figure_settings('paper')
CV_class = LOAD_CROSS_VALIDATION(cv_indices_path=cv_indices,cross_validation_path=cv_folder)
#CV_class.load_CV_class(78)
#print(CV_class.NUM_TRAIN)
#print(CV_class.TRAINING_ERROR)
#print(CV_class.NN_PROPERTIES)
# Select best models; arguments are presumably a count and a criterion
# index -- TODO confirm against get_best_models documentation.
BEST_MODELS = CV_class.get_best_models(3, 2)
keys = CV_class.get_keys(BEST_MODELS)
print(keys)
ADSORBATE_LIST = []
TARGET_LIST = []
COVERAGE_LIST = []
SCORE_LIST = []
BATCH_LIST = []
EPSILON_LIST = []
ALPHA_LIST = []
NUM_TRAIN_LIST = []
TRAIN_SETS_LIST = []
REG_LIST = []
TRAIN_ERROR_LIST = []
LAYERS_LIST = []
LEARN_RATE = []
# Walk every (adsorbate, target, coverage) combination among the best
# models and record the hyperparameters of each associated CV run.
for ADSORBATE in BEST_MODELS:
    for TARGET in BEST_MODELS[ADSORBATE]:
        for COVERAGE in BEST_MODELS[ADSORBATE][TARGET]:
            model_entry = BEST_MODELS[ADSORBATE][TARGET][COVERAGE]
            SCORE_LIST.extend(model_entry['SCORES'].tolist())
            for CV_INDEX in model_entry['CV_FILES_INDEX']:
                CV_class.load_CV_class(CV_INDEX)
                nn_props = CV_class.NN_PROPERTIES
                ADSORBATE_LIST.append(ADSORBATE)
                TARGET_LIST.append(TARGET)
                COVERAGE_LIST.append(COVERAGE)
                BATCH_LIST.append(nn_props['batch_size'])
                EPSILON_LIST.append(nn_props['epsilon'])
                ALPHA_LIST.append(nn_props['alpha'])
                NUM_TRAIN_LIST.append(CV_class.NUM_TRAIN)
                TRAIN_SETS_LIST.append(nn_props['training_sets'])
                REG_LIST.append(nn_props['regularization'])
                TRAIN_ERROR_LIST.append(CV_class.TRAINING_ERROR)
                LAYERS_LIST.append(nn_props['hidden_layer_sizes'])
                LEARN_RATE.append(nn_props['learning_rate_init'])
# Freeze everything as numpy arrays for boolean-mask selection below.
ADSORBATE_LIST = np.array(ADSORBATE_LIST)
TARGET_LIST = np.array(TARGET_LIST)
COVERAGE_LIST = np.array(COVERAGE_LIST)
SCORE_LIST = np.array(SCORE_LIST)
BATCH_LIST = np.array(BATCH_LIST)
EPSILON_LIST = np.array(EPSILON_LIST)
ALPHA_LIST = np.array(ALPHA_LIST)
NUM_TRAIN_LIST = np.array(NUM_TRAIN_LIST)
TRAIN_SETS_LIST = np.array(TRAIN_SETS_LIST)
REG_LIST = np.array(REG_LIST)
TRAIN_ERROR_LIST = np.array(TRAIN_ERROR_LIST)
LAYERS_LIST = np.array(LAYERS_LIST)
LEARN_RATE = np.array(LEARN_RATE)
# Hyperparameter arrays to plot, paired one-to-one with their axis titles.
PARAMETER_SETS = [ALPHA_LIST, EPSILON_LIST, NUM_TRAIN_LIST, TRAIN_SETS_LIST,
                  LEARN_RATE, BATCH_LIST]
PARAMETER_TITLES = ['Alpha', 'Epsilon', 'Data per Training Set',
                    'Number of Training Sets', 'Initial Learning Rate',
                    'Batch Size']
color_dict = {'CO': 'green', 'NO': 'blue', 'C2H4': 'red'}
marker_dict = {'high': 'o', 'low': 's', '1': '>'}
# Pair each run's adsorbate with its coverage, both as 2-column rows (for
# row-wise uniqueness) and as display strings (for legend labels).
ADSORBATE_COVERAGE = np.array(list(zip(ADSORBATE_LIST, COVERAGE_LIST)))
ADSORBATE_STRING = np.array([ads + ', ' + cov
                            for ads, cov in zip(ADSORBATE_LIST, COVERAGE_LIST)])
# One figure per (target, hyperparameter) pair; points are colored by
# adsorbate and marked by coverage.
for TARGET in ['GCN', 'binding_type', 'combine_hollow_sites']:
    target_mask = TARGET_LIST == TARGET
    unique_sets = np.unique(ADSORBATE_COVERAGE[target_mask], axis=0)
    unique_string = np.unique(ADSORBATE_STRING[target_mask])
    for parameter_values, parameter_title in zip(PARAMETER_SETS,
                                                 PARAMETER_TITLES):
        plt.figure()
        plt.title('Target: ' + TARGET + ', Parameter: ' + parameter_title)
        for adsorbate, coverage in unique_sets:
            indices = np.all((TARGET_LIST == TARGET,
                              ADSORBATE_LIST == adsorbate,
                              COVERAGE_LIST == coverage), axis=0)
            plt.plot(parameter_values[indices], SCORE_LIST[indices],
                     marker=marker_dict[coverage],
                     color=color_dict[adsorbate], linewidth=0)
        plt.legend(unique_string)
        # Alpha and epsilon span orders of magnitude; show them on a log axis.
        if parameter_title in ['Alpha', 'Epsilon']:
            plt.xscale('log')
            plt.xlabel('log(' + parameter_title + ')')
        else:
            plt.xlabel(parameter_title)
        plt.ylabel('score')
        plt.show()
#CV_class.plot_models(BEST_MODELS)
#CV_class.plot_models(BEST_MODELS,figure_directory=Downloads_folder)
#CV_class.plot_models(CV_class.CV_RESULTS,figure_directory=Downloads_folder,model_list=[6,7,9,10])
#CV_class.plot_parity_plots(figure_directory=Downloads_folder,model_list=[6,7,9,10]) | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"jl_spectra_2_structure.cross_validation.LOAD_CROSS_VALIDATION",
"matplotlib.pyplot.legend",
"numpy.all",
"matplotlib.pyplot.figure",
"numpy.array",
"jl_spectra_2_structure.plotting_tools.set... | [((901, 929), 'jl_spectra_2_structure.plotting_tools.set_figure_settings', 'set_figure_settings', (['"""paper"""'], {}), "('paper')\n", (920, 929), False, 'from jl_spectra_2_structure.plotting_tools import set_figure_settings\n'), ((941, 1028), 'jl_spectra_2_structure.cross_validation.LOAD_CROSS_VALIDATION', 'LOAD_CROSS_VALIDATION', ([], {'cv_indices_path': 'cv_indices', 'cross_validation_path': 'cv_folder'}), '(cv_indices_path=cv_indices, cross_validation_path=\n cv_folder)\n', (962, 1028), False, 'from jl_spectra_2_structure.cross_validation import LOAD_CROSS_VALIDATION\n'), ((2646, 2670), 'numpy.array', 'np.array', (['ADSORBATE_LIST'], {}), '(ADSORBATE_LIST)\n', (2654, 2670), True, 'import numpy as np\n'), ((2685, 2706), 'numpy.array', 'np.array', (['TARGET_LIST'], {}), '(TARGET_LIST)\n', (2693, 2706), True, 'import numpy as np\n'), ((2723, 2746), 'numpy.array', 'np.array', (['COVERAGE_LIST'], {}), '(COVERAGE_LIST)\n', (2731, 2746), True, 'import numpy as np\n'), ((2760, 2780), 'numpy.array', 'np.array', (['SCORE_LIST'], {}), '(SCORE_LIST)\n', (2768, 2780), True, 'import numpy as np\n'), ((2794, 2814), 'numpy.array', 'np.array', (['BATCH_LIST'], {}), '(BATCH_LIST)\n', (2802, 2814), True, 'import numpy as np\n'), ((2830, 2852), 'numpy.array', 'np.array', (['EPSILON_LIST'], {}), '(EPSILON_LIST)\n', (2838, 2852), True, 'import numpy as np\n'), ((2866, 2886), 'numpy.array', 'np.array', (['ALPHA_LIST'], {}), '(ALPHA_LIST)\n', (2874, 2886), True, 'import numpy as np\n'), ((2904, 2928), 'numpy.array', 'np.array', (['NUM_TRAIN_LIST'], {}), '(NUM_TRAIN_LIST)\n', (2912, 2928), True, 'import numpy as np\n'), ((2947, 2972), 'numpy.array', 'np.array', (['TRAIN_SETS_LIST'], {}), '(TRAIN_SETS_LIST)\n', (2955, 2972), True, 'import numpy as np\n'), ((2984, 3002), 'numpy.array', 'np.array', (['REG_LIST'], {}), '(REG_LIST)\n', (2992, 3002), True, 'import numpy as np\n'), ((3022, 3048), 'numpy.array', 'np.array', 
(['TRAIN_ERROR_LIST'], {}), '(TRAIN_ERROR_LIST)\n', (3030, 3048), True, 'import numpy as np\n'), ((3063, 3084), 'numpy.array', 'np.array', (['LAYERS_LIST'], {}), '(LAYERS_LIST)\n', (3071, 3084), True, 'import numpy as np\n'), ((3098, 3118), 'numpy.array', 'np.array', (['LEARN_RATE'], {}), '(LEARN_RATE)\n', (3106, 3118), True, 'import numpy as np\n'), ((3649, 3677), 'numpy.array', 'np.array', (['ADSORBATE_COVERAGE'], {}), '(ADSORBATE_COVERAGE)\n', (3657, 3677), True, 'import numpy as np\n'), ((3697, 3723), 'numpy.array', 'np.array', (['ADSORBATE_STRING'], {}), '(ADSORBATE_STRING)\n', (3705, 3723), True, 'import numpy as np\n'), ((637, 660), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (655, 660), False, 'import os\n'), ((3803, 3863), 'numpy.unique', 'np.unique', (['ADSORBATE_COVERAGE[TARGET_LIST == TARGET]'], {'axis': '(0)'}), '(ADSORBATE_COVERAGE[TARGET_LIST == TARGET], axis=0)\n', (3812, 3863), True, 'import numpy as np\n'), ((3881, 3931), 'numpy.unique', 'np.unique', (['ADSORBATE_STRING[TARGET_LIST == TARGET]'], {}), '(ADSORBATE_STRING[TARGET_LIST == TARGET])\n', (3890, 3931), True, 'import numpy as np\n'), ((4001, 4013), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4011, 4013), True, 'import matplotlib.pyplot as plt\n'), ((4022, 4088), 'matplotlib.pyplot.title', 'plt.title', (["('Target: ' + TARGET + ', Parameter: ' + parameter_title)"], {}), "('Target: ' + TARGET + ', Parameter: ' + parameter_title)\n", (4031, 4088), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4402), 'matplotlib.pyplot.legend', 'plt.legend', (['unique_string'], {}), '(unique_string)\n', (4387, 4402), True, 'import matplotlib.pyplot as plt\n'), ((4598, 4617), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""score"""'], {}), "('score')\n", (4608, 4617), True, 'import matplotlib.pyplot as plt\n'), ((4626, 4636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4634, 4636), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4239), 
'numpy.all', 'np.all', (['(TARGET_LIST == TARGET, ADSORBATE_LIST == pair[0], COVERAGE_LIST == pair[1])'], {'axis': '(0)'}), '((TARGET_LIST == TARGET, ADSORBATE_LIST == pair[0], COVERAGE_LIST ==\n pair[1]), axis=0)\n', (4149, 4239), True, 'import numpy as np\n'), ((4241, 4376), 'matplotlib.pyplot.plot', 'plt.plot', (['PARAMETER_SETS[count][indices]', 'SCORE_LIST[indices]'], {'marker': 'marker_dict[pair[1]]', 'color': 'color_dict[pair[0]]', 'linewidth': '(0)'}), '(PARAMETER_SETS[count][indices], SCORE_LIST[indices], marker=\n marker_dict[pair[1]], color=color_dict[pair[0]], linewidth=0)\n', (4249, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4484), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (4477, 4484), True, 'import matplotlib.pyplot as plt\n'), ((4497, 4539), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('log(' + parameter_title + ')')"], {}), "('log(' + parameter_title + ')')\n", (4507, 4539), True, 'import matplotlib.pyplot as plt\n'), ((4562, 4589), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['parameter_title'], {}), '(parameter_title)\n', (4572, 4589), True, 'import matplotlib.pyplot as plt\n')] |
#######################################################################################
#
# Copyright (c) 2018, Wiphoo (Terng) Methachawalit, All rights reserved.
#
#######################################################################################
#######################################################################################
#
# STANDARD IMPORTS
#
import numpy
#######################################################################################
#
# LOCAL IMPORTS
#
# activation function
from ActivationFuncs import ActivationFuncs
#######################################################################################
#
# GLOBAL VARIABLES
#
#######################################################################################
#
# HELPER FUNCTIONS
#
#######################################################################################
#
# CLASS DEFINITIONS
#
class BackwardPropagator:
    """Computes backpropagation error terms and weight deltas.

    After :meth:`propagate` runs, the error matrices, delta-error matrices
    and delta-weight matrices for both the hidden layer and the output
    layer are available as attributes.
    """
    def __init__( self ):
        # Raw errors per layer (target minus prediction at the output layer).
        self.hiddenLayerErrorsMatrix = None
        self.outputErrorsMatrix = None
        # Errors scaled by the activation-function derivative.
        self.hiddenLayerDeltaErrorsMatrix = None
        self.outputDeltaErrorsMatrix = None
        # Weight updates (already multiplied by the learning rate).
        self.hiddenLayerDeltaWeightsMatrix = None
        self.outputDeltaWeightsMatrix = None
    def propagate( self, perceptron, forwardPropagator, targetOutputsMatrix, learningRate ):
        """Run one backward pass and store all error/delta-weight matrices.

        perceptron -- supplies outputLayerWeightsMatrix.
        forwardPropagator -- supplies the cached forward-pass activations.
        targetOutputsMatrix -- desired network outputs.
        learningRate -- scalar step size applied to the weight deltas.
        """
        outputActivations = forwardPropagator.outputsMatrixOutputsLayer
        hiddenActivations = forwardPropagator.outputsMatrixHiddenLayerNeurons
        # Output layer: raw error, then scale by the activation derivative.
        self.outputErrorsMatrix = targetOutputsMatrix - outputActivations
        self.outputDeltaErrorsMatrix = self.outputErrorsMatrix \
                * ActivationFuncs.activationFuncDerivative( outputActivations )
        # Hidden layer: backpropagate the output deltas through the output
        # weights, then scale by the hidden activation derivative.
        self.hiddenLayerErrorsMatrix = self.outputDeltaErrorsMatrix.dot(
                perceptron.outputLayerWeightsMatrix.T )
        self.hiddenLayerDeltaErrorsMatrix = self.hiddenLayerErrorsMatrix \
                * ActivationFuncs.activationFuncDerivative( hiddenActivations )
        # Weight deltas: (activations)^T . (delta errors), scaled by the
        # learning rate.
        self.outputDeltaWeightsMatrix = learningRate \
                * hiddenActivations.T.dot( self.outputDeltaErrorsMatrix )
        self.hiddenLayerDeltaWeightsMatrix = learningRate \
                * forwardPropagator.inputsMatrix.T.dot( self.hiddenLayerDeltaErrorsMatrix )
    def calculateRmsError( self ):
        """Return the root-mean-square of the output-layer errors."""
        return numpy.sqrt( numpy.mean( numpy.square( self.outputErrorsMatrix ) ) )
| [
"numpy.power",
"ActivationFuncs.ActivationFuncs.activationFuncDerivative"
] | [((2666, 2756), 'ActivationFuncs.ActivationFuncs.activationFuncDerivative', 'ActivationFuncs.activationFuncDerivative', (['forwardPropagator.outputsMatrixOutputsLayer'], {}), '(forwardPropagator.\n outputsMatrixOutputsLayer)\n', (2706, 2756), False, 'from ActivationFuncs import ActivationFuncs\n'), ((3381, 3477), 'ActivationFuncs.ActivationFuncs.activationFuncDerivative', 'ActivationFuncs.activationFuncDerivative', (['forwardPropagator.outputsMatrixHiddenLayerNeurons'], {}), '(forwardPropagator.\n outputsMatrixHiddenLayerNeurons)\n', (3421, 3477), False, 'from ActivationFuncs import ActivationFuncs\n'), ((4633, 4672), 'numpy.power', 'numpy.power', (['self.outputErrorsMatrix', '(2)'], {}), '(self.outputErrorsMatrix, 2)\n', (4644, 4672), False, 'import numpy\n')] |
import h5py
import numpy as np
# Read the Galaxy10 images and class labels from the HDF5 archive.
with h5py.File('./Galaxy10.h5', 'r') as archive:
    images = np.array(archive['images'])
    labels = np.array(archive['ans'])
# Work in float32; scale pixel values into [0, 1].
labels = labels.astype(np.float32)
images = images.astype(np.float32) / 255.0
# Keep only class 6 and reorder axes from NHWC to NCHW.
class_six = images[labels == 6.0]
data = np.transpose(class_six, (0, 3, 1, 2))
# save data
np.save('./data_all.npy', data)
print('saved data: mean(x)={:.3e}'.format(np.mean(data)))
print(data.shape)
print(data.shape)
| [
"h5py.File",
"numpy.save",
"numpy.mean",
"numpy.array",
"numpy.where"
] | [((296, 327), 'numpy.save', 'np.save', (['"""./data_all.npy"""', 'data'], {}), "('./data_all.npy', data)\n", (303, 327), True, 'import numpy as np\n'), ((37, 68), 'h5py.File', 'h5py.File', (['"""./Galaxy10.h5"""', '"""r"""'], {}), "('./Galaxy10.h5', 'r')\n", (46, 68), False, 'import h5py\n'), ((88, 109), 'numpy.array', 'np.array', (["F['images']"], {}), "(F['images'])\n", (96, 109), True, 'import numpy as np\n'), ((123, 141), 'numpy.array', 'np.array', (["F['ans']"], {}), "(F['ans'])\n", (131, 141), True, 'import numpy as np\n'), ((248, 271), 'numpy.where', 'np.where', (['(labels == 6.0)'], {}), '(labels == 6.0)\n', (256, 271), True, 'import numpy as np\n'), ((370, 383), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (377, 383), True, 'import numpy as np\n')] |
import os
import numpy as np
from scipy import interpolate
import pylab as pl
from perimysium.dataman import TRCFile, storage2numpy, dict2storage
from perimysium.postprocessing import filter_critically_damped
from collections import OrderedDict
# Create motion capture data.
# ===========================
# Load in forward simulation results.
# -----------------------------------
def create_array(column_prefix, fname):
    """Read a point-kinematics storage file; return (time, Nx3 positions)."""
    pointkin = storage2numpy(fname)
    positions = np.empty((pointkin.shape[0], 3))
    # Columns are the X/Y/Z position fields for the named point.
    for axis, suffix in enumerate(('_X', '_Y', '_Z')):
        positions[:, axis] = pointkin[column_prefix + suffix]
    return pointkin['time'], positions
# Extract marker trajectories (time + XYZ positions) for each body from the
# forward-simulation point-kinematics results.
t, pelvis = create_array('pelvis', 'dynhop_pelviskin_pelvis_pos.sto')
t, knee = create_array('knee', 'dynhop_kneekin_knee_pos.sto')
t, foot = create_array('foot', 'dynhop_footkin_foot_pos.sto')
# Interpolate.
# ------------
def interp(x, xp, fp):
    """Linearly interpolate each column of the 2-D array fp at points x.

    x -- 1-D array of query points.
    xp -- 1-D array of sample positions (assumed increasing).
    fp -- 2-D array of sample values, one series per column.
    Returns an array of shape (len(x), fp.shape[1]).
    """
    result = np.empty((len(x), fp.shape[1]))
    for col, series in enumerate(fp.T):
        result[:, col] = np.interp(x, xp, series)
    return result
t_max = 1
trc_rate = 100.0
# np.linspace requires an integer sample count: passing a float was
# deprecated in numpy 1.12 and raises TypeError in modern releases, so
# cast explicitly.
trc_time = np.linspace(0, t_max, int(t_max * trc_rate))
# Resample each marker trajectory onto the uniform mocap time grid.
trc_pelvis = interp(trc_time, t, pelvis)
trc_knee = interp(trc_time, t, knee)
trc_foot = interp(trc_time, t, foot)
# Filter.
# -------
# TODO
# Write out TRC file.
# -------------------
# Build the TRC container; markers are added below, then the file is written.
trc = TRCFile(
    data_rate=trc_rate,
    camera_rate=trc_rate,
    num_frames=len(trc_time),
    num_markers=0,
    units='mm',
    orig_data_rate=trc_rate,
    orig_data_start_frame=1,
    orig_num_frames=len(trc_time),
    time=trc_time,
    )
# TRC files store positions in millimeters; simulation output is in meters.
meters_to_mm = 1000.0
def add_marker(trc, name, data):
    """Add marker *name* to *trc*, converting Nx3 positions from m to mm."""
    scaled = meters_to_mm * data
    trc.add_marker(name, scaled[:, 0], scaled[:, 1], scaled[:, 2])
# Register each marker track and write the motion-capture file.
add_marker(trc, "Pelvis", trc_pelvis)
add_marker(trc, "LKnee", trc_knee)
add_marker(trc, "LFoot", trc_foot)
trc.write("dynhop.trc")
# Create external loads files.
# ============================
forces = storage2numpy('dynhop_forces_forces.sto')
grf = OrderedDict()
# Negate: the contact model reports forces on the foot; ground reaction
# forces act in the opposite direction.
grf_rate = 2000.
# np.linspace requires an integer sample count (float counts raise
# TypeError in modern numpy), so cast explicitly.
grf['time'] = np.linspace(0, t_max, int(t_max * grf_rate))
grf['ground_force_vx'] = -forces['LFootContactPlatformforceX']
grf['ground_force_vy'] = -forces['LFootContactPlatformforceY']
grf['ground_force_vz'] = -forces['LFootContactPlatformforceZ']
grf['ground_torque_x'] = -forces['LFootContactPlatformtorqueX']
grf['ground_torque_y'] = -forces['LFootContactPlatformtorqueY']
grf['ground_torque_z'] = -forces['LFootContactPlatformtorqueZ']
# Resample at a constant frequency.
# ---------------------------------
def resample(entry):
    """Linearly resample grf[entry] from forces['time'] onto grf['time']."""
    channel = grf[entry]
    grf[entry] = np.interp(grf['time'], forces['time'], channel)
# Resample every force and torque channel onto the uniform GRF time grid.
resample('ground_force_vx')
resample('ground_force_vy')
resample('ground_force_vz')
resample('ground_torque_x')
resample('ground_torque_y')
resample('ground_torque_z')
# Filter.
# -------
def filt(entry):
    """Low-pass the named GRF channel with a 2nd-order critically damped
    filter at a 50 Hz cutoff (currently unused; see commented calls below)."""
    grf[entry] = filter_critically_damped(grf[entry], grf_rate, 50, 2)
# Filtering is currently disabled; re-enable per channel if needed.
#filt('ground_force_vx')
#filt('ground_torque_z')
# Compute center of pressure.
# ---------------------------
# The center of pressure is taken to follow the foot marker position,
# interpolated onto the GRF time grid with cubic splines.
# TODO choose a better interpolater; maybe a spline.
# s = 0 means no smoothing.
copx_spline = interpolate.splrep(t, foot[:, 0], s=0)
# der is order of derivative.
copx = interpolate.splev(grf['time'], copx_spline, der=0)
copy_spline = interpolate.splrep(t, foot[:, 1], s=0)
# NOTE(review): `copy` shadows the stdlib module name if `copy` is imported
# above; harmless here but worth renaming.
copy = interpolate.splev(grf['time'], copy_spline, der=0)
copz_spline = interpolate.splrep(t, foot[:, 2], s=0)
copz = interpolate.splev(grf['time'], copz_spline, der=0)
grf['ground_force_px'] = copx
grf['ground_force_py'] = copy
grf['ground_force_pz'] = copz
# Convert to ndarray.
# -------------------
dict2storage(grf, 'ground_reaction.mot')
# Run OpenSim inverse kinematics and inverse dynamics on the generated data.
os.system('%s/bin/ik -S ik_setup.xml' % os.environ['OPENSIM_HOME'])
os.system('%s/bin/id -S id_setup.xml' % os.environ['OPENSIM_HOME'])
#os.system('%s/bin/rra -S rra_setup.xml' % os.environ['OPENSIM_HOME'])
#os.system('./invdyn invdyn_setup.xml')
| [
"perimysium.dataman.dict2storage",
"perimysium.postprocessing.filter_critically_damped",
"numpy.empty",
"numpy.interp",
"scipy.interpolate.splev",
"os.system",
"numpy.linspace",
"collections.OrderedDict",
"perimysium.dataman.storage2numpy",
"scipy.interpolate.splrep"
] | [((1146, 1185), 'numpy.linspace', 'np.linspace', (['(0)', 't_max', '(t_max * trc_rate)'], {}), '(0, t_max, t_max * trc_rate)\n', (1157, 1185), True, 'import numpy as np\n'), ((2081, 2122), 'perimysium.dataman.storage2numpy', 'storage2numpy', (['"""dynhop_forces_forces.sto"""'], {}), "('dynhop_forces_forces.sto')\n", (2094, 2122), False, 'from perimysium.dataman import TRCFile, storage2numpy, dict2storage\n'), ((2129, 2142), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2140, 2142), False, 'from collections import OrderedDict\n'), ((2195, 2234), 'numpy.linspace', 'np.linspace', (['(0)', 't_max', '(t_max * grf_rate)'], {}), '(0, t_max, t_max * grf_rate)\n', (2206, 2234), True, 'import numpy as np\n'), ((3335, 3373), 'scipy.interpolate.splrep', 'interpolate.splrep', (['t', 'foot[:, 0]'], {'s': '(0)'}), '(t, foot[:, 0], s=0)\n', (3353, 3373), False, 'from scipy import interpolate\n'), ((3412, 3462), 'scipy.interpolate.splev', 'interpolate.splev', (["grf['time']", 'copx_spline'], {'der': '(0)'}), "(grf['time'], copx_spline, der=0)\n", (3429, 3462), False, 'from scipy import interpolate\n'), ((3478, 3516), 'scipy.interpolate.splrep', 'interpolate.splrep', (['t', 'foot[:, 1]'], {'s': '(0)'}), '(t, foot[:, 1], s=0)\n', (3496, 3516), False, 'from scipy import interpolate\n'), ((3524, 3574), 'scipy.interpolate.splev', 'interpolate.splev', (["grf['time']", 'copy_spline'], {'der': '(0)'}), "(grf['time'], copy_spline, der=0)\n", (3541, 3574), False, 'from scipy import interpolate\n'), ((3589, 3627), 'scipy.interpolate.splrep', 'interpolate.splrep', (['t', 'foot[:, 2]'], {'s': '(0)'}), '(t, foot[:, 2], s=0)\n', (3607, 3627), False, 'from scipy import interpolate\n'), ((3635, 3685), 'scipy.interpolate.splev', 'interpolate.splev', (["grf['time']", 'copz_spline'], {'der': '(0)'}), "(grf['time'], copz_spline, der=0)\n", (3652, 3685), False, 'from scipy import interpolate\n'), ((3822, 3862), 'perimysium.dataman.dict2storage', 'dict2storage', (['grf', 
'"""ground_reaction.mot"""'], {}), "(grf, 'ground_reaction.mot')\n", (3834, 3862), False, 'from perimysium.dataman import TRCFile, storage2numpy, dict2storage\n'), ((3864, 3931), 'os.system', 'os.system', (["('%s/bin/ik -S ik_setup.xml' % os.environ['OPENSIM_HOME'])"], {}), "('%s/bin/ik -S ik_setup.xml' % os.environ['OPENSIM_HOME'])\n", (3873, 3931), False, 'import os\n'), ((3932, 3999), 'os.system', 'os.system', (["('%s/bin/id -S id_setup.xml' % os.environ['OPENSIM_HOME'])"], {}), "('%s/bin/id -S id_setup.xml' % os.environ['OPENSIM_HOME'])\n", (3941, 3999), False, 'import os\n'), ((438, 458), 'perimysium.dataman.storage2numpy', 'storage2numpy', (['fname'], {}), '(fname)\n', (451, 458), False, 'from perimysium.dataman import TRCFile, storage2numpy, dict2storage\n'), ((471, 503), 'numpy.empty', 'np.empty', (['(pointkin.shape[0], 3)'], {}), '((pointkin.shape[0], 3))\n', (479, 503), True, 'import numpy as np\n'), ((2727, 2777), 'numpy.interp', 'np.interp', (["grf['time']", "forces['time']", 'grf[entry]'], {}), "(grf['time'], forces['time'], grf[entry])\n", (2736, 2777), True, 'import numpy as np\n'), ((3043, 3114), 'perimysium.postprocessing.filter_critically_damped', 'filter_critically_damped', (['grf[entry]', 'grf_rate', 'cutoff_frequency', 'order'], {}), '(grf[entry], grf_rate, cutoff_frequency, order)\n', (3067, 3114), False, 'from perimysium.postprocessing import filter_critically_damped\n'), ((1064, 1090), 'numpy.interp', 'np.interp', (['x', 'xp', 'fp[:, i]'], {}), '(x, xp, fp[:, i])\n', (1073, 1090), True, 'import numpy as np\n')] |
import logging
import os
import pickle
import time
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import scipy.io
import tensorflow as tf
def int64_feature(value):
    """Wrap a single int in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int_list)
def int64_list_feature(value):
    """Wrap an iterable of ints in a tf.train.Feature holding an Int64List."""
    int_list = tf.train.Int64List(value=value)
    return tf.train.Feature(int64_list=int_list)
def bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature holding a BytesList."""
    byte_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=byte_list)
def bytes_list_feature(value):
    """Wrap an iterable of bytes in a tf.train.Feature holding a BytesList."""
    byte_list = tf.train.BytesList(value=value)
    return tf.train.Feature(bytes_list=byte_list)
def float_list_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature holding a FloatList."""
    float_list = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=float_list)
class ExampleCreator:
    """Converts annotated CityPersons images into tf.train.Example protos.

    A single TF Session (with a graph containing PNG decode/encode ops) is
    created once and reused for every image coding call.
    """
    def __init__(self, out_dir, dataset_name, label_to_text=None):
        """
        Args:
            out_dir: directory the tfrecord shards are written to.
            dataset_name: prefix used in the shard file names.
            label_to_text: optional list mapping class index -> label text;
                defaults to the CityPersons classes below.
        """
        self._out_dir = out_dir
        self._dataset_name = dataset_name
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 1}))
        # Initializes function that decodes RGB PNG data.
        self._decode_data = tf.placeholder(dtype=tf.string)
        self._decoded = tf.image.decode_png(self._decode_data, channels=3)
        # Encoder graph: uint8 image tensor -> PNG-encoded bytes.
        self._encode_data = tf.placeholder(dtype=tf.uint8)
        self._encoded = tf.image.encode_png(self._encode_data)
        self.label_to_text = label_to_text or [
            'ignore',
            'pedestrian',
            'rider',
            'sitting',
            'unusual',
            'group',
        ]
    def get_shard_filename(self, shard, num_shards, split):
        """Return the full path for shard `shard` of `num_shards` in `split`."""
        shard_name = '{}-{}-{:05d}-of-{:05d}'.format(self._dataset_name, split, shard, num_shards)
        return os.path.join(self._out_dir, shard_name)
    def decode_png(self, img_data):
        """Decode PNG bytes into an HxWx3 uint8 ndarray."""
        img = self._sess.run(self._decoded, feed_dict={self._decode_data: img_data})
        assert len(img.shape) == 3
        assert img.shape[2] == 3
        return img
    def encode_png(self, img):
        """Encode an HxWx3 ndarray into PNG bytes."""
        assert len(img.shape) == 3
        assert img.shape[2] == 3
        return self._sess.run(self._encoded, feed_dict={self._encode_data: img})
    def load_img(self, path):
        """Load an image; returns (ndarray, format string without the dot).

        Only PNG is supported; other extensions raise NotImplementedError.
        """
        ext = os.path.splitext(path)[1]
        if path.endswith('.pgm'):
            raise NotImplementedError('pgm not supported')
        if path.endswith('.png'):
            with tf.gfile.FastGFile(path, 'rb') as f:
                img_data = f.read()
            # seems a little bit stupid to first decode and then encode the image, but so what...
            return self.decode_png(img_data), ext[1:]
        else:
            raise NotImplementedError('unknown file format: {}'.format(ext))
    def create_example(self, img_path, annotations):
        """Build a tf.train.Example for one image and its annotations.

        Ignore/group annotations are dropped; 'rider' keeps class 2 and all
        remaining pedestrian-like classes collapse to class 1 (class 0 is
        reserved for the implicit background class of the TF OD API).

        Returns:
            (example, num_skipped_annotations, box_sizes, (height, width))
        """
        img, format = self.load_img(img_path)
        img_height, img_width = img.shape[:2]
        # CityPersons images are all 2048x1024.
        assert img_height == 1024
        assert img_width == 2048
        encoded = self.encode_png(img)
        ymin, xmin, ymax, xmax, label, text, inst_id = [], [], [], [], [], [], []
        skipped_annotations = 0
        box_cnt = 0
        box_sizes = []
        for anno in annotations:
            anno = anno.astype(np.int64) # this is important, otherwise overflows can occur
            class_label, x1, y1, w, h, instance_id, x1_vis, y1_vis, w_vis, h_vis = anno
            # we conform to the tf object detection API where 0 is reserved for the implicit background class
            # this ensures that tfrecord files which work with the object detection API also work with this framework
            if class_label == 2:
                # rider
                class_label = 2
            elif class_label in [0, 5]:
                # skip: ignore and group
                skipped_annotations += 1
                continue
            else:
                # pedestrian, sitting, unusual
                class_label = 1
            box_cnt += 1
            label_text = self.label_to_text[class_label]
            # Normalized [0, 1] corner coordinates, as the TF OD API expects.
            ymin.append(float(y1) / img_height)
            xmin.append(float(x1) / img_width)
            ymax.append(float(y1 + h) / img_height)
            xmax.append(float(x1 + w) / img_width)
            label.append(class_label)
            text.append(label_text.encode('utf8'))
            inst_id.append(instance_id)
            if 'group' not in label_text and 'ignore' not in label_text:
                # do not add group ore ignore boxes, we do not want these to affect the prior box calculation
                box_sizes.append((h, w))
        if skipped_annotations > 0:
            logging.debug(
                'Skipped {}/{} annotations for img {}'.format(skipped_annotations, len(annotations), img_path))
        feature_dict = {
            'image/height': int64_feature(img_height),
            'image/width': int64_feature(img_width),
            'image/filename': bytes_feature(img_path.encode('utf8')),
            'image/source_id': bytes_feature(img_path.encode('utf8')),
            'image/encoded': bytes_feature(encoded),
            'image/format': bytes_feature('png'.encode('utf8')),
            'image/object/bbox/xmin': float_list_feature(xmin),
            'image/object/bbox/xmax': float_list_feature(xmax),
            'image/object/bbox/ymin': float_list_feature(ymin),
            'image/object/bbox/ymax': float_list_feature(ymax),
            'image/object/class/text': bytes_list_feature(text),
            'image/object/class/label': int64_list_feature(label),
            'image/object/instance/id': int64_list_feature(inst_id),
            'image/object/cnt': int64_feature(box_cnt),
        }
        example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
        return example, skipped_annotations, box_sizes, (img_height, img_width)
def write_shard(args):
    """Write one tfrecord shard and return statistics for aggregation.

    Args:
        args: (shard, num_shards, split, data, img_dir, example_creator)
            as produced by create_jobs; `data` is a slice of the matlab
            annotation array.

    Returns:
        (box_sizes, split, img_sizes): per-shard box sizes (used for prior
        boxes) and the set of image sizes encountered.
    """
    shard, num_shards, split, data, img_dir, example_creator = args
    out_file = example_creator.get_shard_filename(shard, num_shards, split)
    # NOTE(review): the TFRecordWriter is never closed/flushed explicitly.
    writer = tf.python_io.TFRecordWriter(out_file)
    logging.info('Creating shard {}-{}/{}'.format(split, shard, num_shards))
    skipped_annotations = 0
    box_sizes = []
    img_sizes = set()
    cnt = 0
    for cnt, datum in enumerate(data, start=1):
        datum = datum[0][0] # strange matlab file format
        city = str(datum[0][0])
        img_name = str(datum[1][0])
        annotations = datum[2]
        img_path = os.path.join(img_dir, city, img_name)
        example, skipped, sizes, img_size = example_creator.create_example(img_path, annotations)
        skipped_annotations += skipped
        box_sizes.extend(sizes)
        img_sizes.add(img_size)
        writer.write(example.SerializeToString())
        if cnt % 10 == 0:
            logging.info('Written {} examples for shard {}-{}/{}'.format(cnt, split, shard, num_shards))
    if skipped_annotations > 0:
        # NOTE(review): this logs the generic progress message rather than the
        # number of skipped annotations -- possibly a copy/paste slip.
        logging.info('Written {} examples for shard {}-{}/{}'.format(cnt, split, shard, num_shards))
    logging.info(
        'Finished shard {}-{}/{}: {} examples written and {} annotations skipped'.format(split, shard, num_shards, cnt,
                                                                                         skipped_annotations))
    return box_sizes, split, img_sizes
def create_jobs(split, shuffle, annotations, img_dir, num_shards, example_creator):
    """Partition `annotations` into `num_shards` near-equal pieces and build
    one job tuple per shard for write_shard.

    Shard ids are 1-based. When `shuffle` is set, `annotations` is shuffled
    in place beforehand. Each returned tuple is
    (shard_id, num_shards, split, piece, img_dir, example_creator).
    """
    if shuffle:
        np.random.shuffle(annotations)
    # Distribute len(annotations) over num_shards: the first `extra` shards
    # get one additional element.
    base, extra = divmod(len(annotations), num_shards)
    pieces = []
    start = 0
    for i in range(num_shards):
        stop = start + base + (1 if i < extra else 0)
        pieces.append(annotations[start:stop])
        start = stop
    # Sanity check: the pieces must cover every annotation exactly once.
    assert sum(piece.shape[0] for piece in pieces) == len(annotations)
    return [(idx + 1, num_shards, split, piece, img_dir, example_creator)
            for idx, piece in enumerate(pieces)]
def create_dirs(dirs):
    """Create every directory in `dirs` (including parents) if missing.

    Raises FileExistsError if a path already exists but is not a directory.
    The original caught OSError and then used a bare `assert isdir`, which
    is stripped under `python -O`; `exist_ok=True` performs the same
    is-a-directory check reliably and without a create/check race.
    """
    for path in dirs:
        # exist_ok=True: no error for an existing directory, but still
        # raises FileExistsError when the path exists as a non-directory.
        os.makedirs(path, exist_ok=True)
def process_dataset(out_dir, dataset_name, anno_dir, img_dir, train_shards, val_shards, shuffle):
    """Convert the CityPersons train/val annotations into tfrecord shards.

    Writes the shards, the train box sizes (for prior-box computation) and
    the image size to `out_dir`. When `shuffle` is set, the numpy RNG state
    is pickled first so the shuffle can be reproduced.
    """
    out_dir = os.path.expandvars(out_dir)
    img_dir = os.path.expandvars(img_dir)
    anno_dir = os.path.expandvars(anno_dir)
    create_dirs([out_dir])
    if shuffle:
        # Save the RNG state so the exact shuffle can be reproduced later.
        with open(os.path.join(out_dir, '{}-np_random_state'.format(dataset_name)), 'wb') as f:
            pickle.dump(np.random.get_state(), f)
    # prepare train and val splits
    train_anno_path = os.path.join(anno_dir, 'annotations', 'anno_train.mat')
    val_anno_path = os.path.join(anno_dir, 'annotations', 'anno_val.mat')
    train_img_dir_ = os.path.join(img_dir, 'leftImg8bit_trainvaltest', 'leftImg8bit', 'train')
    val_img_dir = os.path.join(img_dir, 'leftImg8bit_trainvaltest', 'leftImg8bit', 'val')
    train_anno = scipy.io.loadmat(train_anno_path)['anno_train_aligned'][0] # citypersons data format
    val_anno = scipy.io.loadmat(val_anno_path)['anno_val_aligned'][0] # citypersons data format
    # object which does all the hard work
    example_creator = ExampleCreator(out_dir, dataset_name)
    # Process each split in a different thread
    train_jobs = create_jobs('train', shuffle, train_anno, train_img_dir_, train_shards, example_creator)
    val_jobs = create_jobs('val', shuffle, val_anno, val_img_dir, val_shards, example_creator)
    jobs = train_jobs + val_jobs
    with ThreadPoolExecutor() as executor:
        result = executor.map(write_shard, jobs,
                              chunksize=1) # chunksize=1 is important, since our jobs are long running
        box_sizes = []
        img_sizes = set()
        for sizes, split, img_sizes_ in result:
            img_sizes.update(img_sizes_)
            # Only train boxes feed the prior-box statistics.
            if split == 'train':
                box_sizes.extend(sizes)
    if len(img_sizes) > 1:
        logging.error('Different image sizes detected: {}'.format(img_sizes))
    box_sizes = np.array(box_sizes, np.float64)
    np.save(os.path.join(out_dir, '{}-train-box_sizes'.format(dataset_name)), box_sizes)
    np.save(os.path.join(out_dir, '{}-img_size_height_width'.format(dataset_name)), list(img_sizes)[0])
def main():
    """Entry point: configure paths/shard counts, run the conversion and
    report the total wall-clock duration."""
    config = {
        # Place to search for the created files.
        'out_dir': '$HOME/data/citypersons/tfrecords_test',
        # Name of the dataset, used to create the tfrecord files.
        'dataset_name': 'citypersons',
        # Base directory which contains the citypersons annotations.
        'anno_dir': '$HOME/data/citypersons', # edit
        # Base directory which contains the cityscapes images.
        'img_dir': '$HOME/data/cityscapes',
        # Number of training and validation shards.
        'train_shards': 3,
        'val_shards': 1,
        # Shuffle the data before writing it to tfrecord files.
        'shuffle': True,
    }
    logging.info('Saving results to {}'.format(config['out_dir']))
    logging.info('----- START -----')
    start = time.time()
    process_dataset(**config)
    end = time.time()
    elapsed = int(end - start)
    # Format the elapsed time as HH:MM:SS.
    logging.info('----- FINISHED in {:02d}:{:02d}:{:02d} -----'.format(elapsed // 3600,
                                                                       (elapsed // 60) % 60,
                                                                       elapsed % 60))
# Script entry point: configure logging, then run the conversion.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, # edit change to DEBUG for more detailed output
                        format='%(asctime)s, %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        )
    main()
| [
"tensorflow.train.Int64List",
"tensorflow.image.decode_png",
"tensorflow.ConfigProto",
"tensorflow.train.FloatList",
"os.path.join",
"tensorflow.placeholder",
"concurrent.futures.ThreadPoolExecutor",
"numpy.random.shuffle",
"tensorflow.train.BytesList",
"tensorflow.gfile.FastGFile",
"tensorflow.... | [((5905, 5942), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['out_file'], {}), '(out_file)\n', (5932, 5942), True, 'import tensorflow as tf\n'), ((8150, 8177), 'os.path.expandvars', 'os.path.expandvars', (['out_dir'], {}), '(out_dir)\n', (8168, 8177), False, 'import os\n'), ((8192, 8219), 'os.path.expandvars', 'os.path.expandvars', (['img_dir'], {}), '(img_dir)\n', (8210, 8219), False, 'import os\n'), ((8235, 8263), 'os.path.expandvars', 'os.path.expandvars', (['anno_dir'], {}), '(anno_dir)\n', (8253, 8263), False, 'import os\n'), ((8513, 8568), 'os.path.join', 'os.path.join', (['anno_dir', '"""annotations"""', '"""anno_train.mat"""'], {}), "(anno_dir, 'annotations', 'anno_train.mat')\n", (8525, 8568), False, 'import os\n'), ((8589, 8642), 'os.path.join', 'os.path.join', (['anno_dir', '"""annotations"""', '"""anno_val.mat"""'], {}), "(anno_dir, 'annotations', 'anno_val.mat')\n", (8601, 8642), False, 'import os\n'), ((8665, 8738), 'os.path.join', 'os.path.join', (['img_dir', '"""leftImg8bit_trainvaltest"""', '"""leftImg8bit"""', '"""train"""'], {}), "(img_dir, 'leftImg8bit_trainvaltest', 'leftImg8bit', 'train')\n", (8677, 8738), False, 'import os\n'), ((8757, 8828), 'os.path.join', 'os.path.join', (['img_dir', '"""leftImg8bit_trainvaltest"""', '"""leftImg8bit"""', '"""val"""'], {}), "(img_dir, 'leftImg8bit_trainvaltest', 'leftImg8bit', 'val')\n", (8769, 8828), False, 'import os\n'), ((9924, 9955), 'numpy.array', 'np.array', (['box_sizes', 'np.float64'], {}), '(box_sizes, np.float64)\n', (9932, 9955), True, 'import numpy as np\n'), ((10898, 10931), 'logging.info', 'logging.info', (['"""----- START -----"""'], {}), "('----- START -----')\n", (10910, 10931), False, 'import logging\n'), ((10944, 10955), 'time.time', 'time.time', ([], {}), '()\n', (10953, 10955), False, 'import time\n'), ((10998, 11009), 'time.time', 'time.time', ([], {}), '()\n', (11007, 11009), False, 'import time\n'), ((11341, 11474), 'logging.basicConfig', 
'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s, %(levelname)-8s %(message)s"""', 'datefmt': '"""%a, %d %b %Y %H:%M:%S"""'}), "(level=logging.INFO, format=\n '%(asctime)s, %(levelname)-8s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S'\n )\n", (11360, 11474), False, 'import logging\n'), ((1078, 1109), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.string'}), '(dtype=tf.string)\n', (1092, 1109), True, 'import tensorflow as tf\n'), ((1134, 1184), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['self._decode_data'], {'channels': '(3)'}), '(self._decode_data, channels=3)\n', (1153, 1184), True, 'import tensorflow as tf\n'), ((1214, 1244), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.uint8'}), '(dtype=tf.uint8)\n', (1228, 1244), True, 'import tensorflow as tf\n'), ((1269, 1307), 'tensorflow.image.encode_png', 'tf.image.encode_png', (['self._encode_data'], {}), '(self._encode_data)\n', (1288, 1307), True, 'import tensorflow as tf\n'), ((1678, 1717), 'os.path.join', 'os.path.join', (['self._out_dir', 'shard_name'], {}), '(self._out_dir, shard_name)\n', (1690, 1717), False, 'import os\n'), ((6327, 6364), 'os.path.join', 'os.path.join', (['img_dir', 'city', 'img_name'], {}), '(img_dir, city, img_name)\n', (6339, 6364), False, 'import os\n'), ((7282, 7312), 'numpy.random.shuffle', 'np.random.shuffle', (['annotations'], {}), '(annotations)\n', (7299, 7312), True, 'import numpy as np\n'), ((9426, 9446), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {}), '()\n', (9444, 9446), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((228, 261), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (246, 261), True, 'import tensorflow as tf\n'), ((335, 366), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': 'value'}), '(value=value)\n', (353, 366), True, 'import tensorflow as tf\n'), ((435, 468), 
'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (453, 468), True, 'import tensorflow as tf\n'), ((542, 573), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': 'value'}), '(value=value)\n', (560, 573), True, 'import tensorflow as tf\n'), ((647, 678), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (665, 678), True, 'import tensorflow as tf\n'), ((2153, 2175), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (2169, 2175), False, 'import os\n'), ((7906, 7923), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (7917, 7923), False, 'import os\n'), ((950, 989), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 1}"}), "(device_count={'GPU': 1})\n", (964, 989), True, 'import tensorflow as tf\n'), ((2323, 2353), 'tensorflow.gfile.FastGFile', 'tf.gfile.FastGFile', (['path', '"""rb"""'], {}), "(path, 'rb')\n", (2341, 2353), True, 'import tensorflow as tf\n'), ((5600, 5639), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature_dict'}), '(feature=feature_dict)\n', (5617, 5639), True, 'import tensorflow as tf\n'), ((7967, 7986), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (7980, 7986), False, 'import os\n'), ((8429, 8450), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (8448, 8450), True, 'import numpy as np\n')] |
# coding: utf-8
import numpy as np
import torchvision
import time
import os
import copy
import pdb
import time
import argparse
import sys
import cv2
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, models, transforms
from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \
UnNormalizer, Normalizer
# Fail fast on an incompatible (pre-1.0) torch installation, then report
# whether CUDA is usable.
assert torch.__version__.split('.')[0] == '1'
print('CUDA available: {}'.format(torch.cuda.is_available()))
def main(args=None):
    '''
    Unlike visualize.py, which reports bounding boxes in the coordinates of
    the resized/padded image, this script maps detections back to the
    original image coordinates. It also supports saving the detection
    results (annotated images and a CSV file).
    '''
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--model', help='Path to model (.pt) file.')
    parser.add_argument('--resultsavepath', help='path to save detection images')
    parser.add_argument('--thresh_score', help="thresh score", type=float, default=.5)
    # NOTE: rebinds `parser` to the parsed Namespace.
    parser = parser.parse_args(args)
    # Create the directory where result images and the CSV will be saved.
    if parser.resultsavepath:
        os.makedirs(parser.resultsavepath, exist_ok=True)
    if parser.dataset == 'coco':
        dataset_val = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        #dataset_val = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) # raises an error
        dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')
    sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
    dataloader_val = DataLoader(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=sampler_val)
    retinanet = torch.load(parser.model)
    use_gpu = True
    if use_gpu:
        if torch.cuda.is_available():
            retinanet = retinanet.cuda()
    if torch.cuda.is_available():
        retinanet = torch.nn.DataParallel(retinanet).cuda() # enable multi-GPU data-parallel mode
    else:
        retinanet = torch.nn.DataParallel(retinanet)
    retinanet.eval()# switch to evaluation mode
    def draw_caption(image, box, caption):
        # Draw the label twice (black outline + white text) for readability.
        b = np.array(box).astype(int)
        # the b[1]-20 > 0 check keeps the label from going past the top edge
        cv2.putText(image, caption, (b[0], b[1]-10 if b[1]-20>0 else 30), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
        cv2.putText(image, caption, (b[0], b[1]-10 if b[1]-20>0 else 30), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
    # NOTE(review): result_csv_fd only exists when --resultsavepath is set,
    # yet the no-detections branch below writes to it unconditionally --
    # NameError when running without --resultsavepath on an empty image.
    if parser.resultsavepath:
        result_csv = "{}_result.csv".format(os.path.splitext(os.path.basename(parser.csv_val))[0])
        result_csv = os.path.join(parser.resultsavepath, result_csv)
        print(result_csv)
        result_csv_fd = open(result_csv, 'w')
    for idx, data in enumerate(dataloader_val):
        #print("data shape:", data.shape)
        print(data['image_path'])
        with torch.no_grad():
            st = time.time()
            if torch.cuda.is_available():
                the_result = retinanet(data['img'].cuda().float())
            else:
                the_result = retinanet(data['img'].float())
            print('Elapsed time: {}'.format(time.time()-st))
            for image_index, (scores, classification, transformed_anchors) in enumerate(the_result):
                # Keep only detections above the score threshold.
                idxs = np.where(scores.cpu()>parser.thresh_score)
                image_path = data['image_path'][image_index]
                scale = data['scale'][image_index]
                img = cv2.imread(image_path)
                if idxs[0].shape[0]==0:
                    # No detections: record an empty row for this image.
                    result_csv_fd.write("{},,,,,\n".format(image_path))
                else:
                    for j in range(idxs[0].shape[0]):
                        bbox = transformed_anchors[idxs[0][j], :]
                        # Undo the resize scaling to get original-image coordinates.
                        x1 = int(bbox[0]/scale)
                        y1 = int(bbox[1]/scale)
                        x2 = int(bbox[2]/scale)
                        y2 = int(bbox[3]/scale)
                        label_name = dataset_val.labels[int(classification[idxs[0][j]])]
                        txt_draw = "%s %.2f" %(label_name, scores[j])
                        draw_caption(img, (x1, y1, x2, y2), txt_draw)
                        cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)
                        if parser.resultsavepath:
                            result_csv_fd.write("{},{},{},{},{},{}\n".format(image_path,x1,y1,x2,y2,label_name))
                        print(label_name)
                if parser.resultsavepath:
                    # Mirror the input directory layout under the save path.
                    new_dir = os.path.join(parser.resultsavepath, os.path.dirname(image_path))
                    if not os.path.exists(new_dir):
                        os.makedirs(new_dir)
                    new_path = os.path.join(parser.resultsavepath, image_path)
                    cv2.imwrite(new_path, img)
                    #new_path = os.path.join(parser.resultsavepath, os.path.basename(image_path))
                    #cv2.imwrite(new_path, img)
                    #print("create result image:{} ".format(new_path))
                else:
                    # No save path: display each annotated image interactively.
                    cv2.imshow('img', img)
                    cv2.waitKey(0)
    if parser.resultsavepath:
        result_csv_fd.close()
# Script entry point.
if __name__ == '__main__':
    main()
"argparse.ArgumentParser",
"cv2.rectangle",
"cv2.imshow",
"torch.no_grad",
"os.path.join",
"retinanet.dataloader.Normalizer",
"torch.utils.data.DataLoader",
"torch.__version__.split",
"cv2.imwrite",
"torch.load",
"retinanet.dataloader.Resizer",
"os.path.dirname",
"os.path.exists",
"retinan... | [((692, 792), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Simple training script for training a RetinaNet network."""'}), "(description=\n 'Simple training script for training a RetinaNet network.')\n", (715, 792), False, 'import argparse\n'), ((2118, 2185), 'retinanet.dataloader.AspectRatioBasedSampler', 'AspectRatioBasedSampler', (['dataset_val'], {'batch_size': '(1)', 'drop_last': '(False)'}), '(dataset_val, batch_size=1, drop_last=False)\n', (2141, 2185), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\n'), ((2205, 2296), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_val'], {'num_workers': '(1)', 'collate_fn': 'collater', 'batch_sampler': 'sampler_val'}), '(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=\n sampler_val)\n', (2215, 2296), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2308, 2332), 'torch.load', 'torch.load', (['parser.model'], {}), '(parser.model)\n', (2318, 2332), False, 'import torch\n'), ((2444, 2469), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2467, 2469), False, 'import torch\n'), ((438, 466), 'torch.__version__.split', 'torch.__version__.split', (['"""."""'], {}), "('.')\n", (461, 466), False, 'import torch\n'), ((514, 539), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (537, 539), False, 'import torch\n'), ((1472, 1521), 'os.makedirs', 'os.makedirs', (['parser.resultsavepath'], {'exist_ok': '(True)'}), '(parser.resultsavepath, exist_ok=True)\n', (1483, 1521), False, 'import os\n'), ((2374, 2399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2397, 2399), False, 'import torch\n'), ((2564, 2596), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['retinanet'], {}), '(retinanet)\n', (2585, 2596), False, 'import torch\n'), ((2733, 2850), 'cv2.putText', 'cv2.putText', 
(['image', 'caption', '(b[0], b[1] - 10 if b[1] - 20 > 0 else 30)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 0)', '(2)'], {}), '(image, caption, (b[0], b[1] - 10 if b[1] - 20 > 0 else 30), cv2\n .FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)\n', (2744, 2850), False, 'import cv2\n'), ((2843, 2966), 'cv2.putText', 'cv2.putText', (['image', 'caption', '(b[0], b[1] - 10 if b[1] - 20 > 0 else 30)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 255, 255)', '(1)'], {}), '(image, caption, (b[0], b[1] - 10 if b[1] - 20 > 0 else 30), cv2\n .FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)\n', (2854, 2966), False, 'import cv2\n'), ((3097, 3144), 'os.path.join', 'os.path.join', (['parser.resultsavepath', 'result_csv'], {}), '(parser.resultsavepath, result_csv)\n', (3109, 3144), False, 'import os\n'), ((3332, 3347), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3345, 3347), False, 'import torch\n'), ((3358, 3369), 'time.time', 'time.time', ([], {}), '()\n', (3367, 3369), False, 'import time\n'), ((3377, 3402), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3400, 3402), False, 'import torch\n'), ((2486, 2518), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['retinanet'], {}), '(retinanet)\n', (2507, 2518), False, 'import torch\n'), ((2679, 2692), 'numpy.array', 'np.array', (['box'], {}), '(box)\n', (2687, 2692), True, 'import numpy as np\n'), ((3842, 3864), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (3852, 3864), False, 'import cv2\n'), ((3043, 3075), 'os.path.basename', 'os.path.basename', (['parser.csv_val'], {}), '(parser.csv_val)\n', (3059, 3075), False, 'import os\n'), ((4792, 4839), 'os.path.join', 'os.path.join', (['parser.resultsavepath', 'image_path'], {}), '(parser.resultsavepath, image_path)\n', (4804, 4839), False, 'import os\n'), ((4846, 4872), 'cv2.imwrite', 'cv2.imwrite', (['new_path', 'img'], {}), '(new_path, img)\n', (4857, 4872), False, 'import cv2\n'), ((5081, 5103), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], 
{}), "('img', img)\n", (5091, 5103), False, 'import cv2\n'), ((5110, 5124), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5121, 5124), False, 'import cv2\n'), ((1654, 1666), 'retinanet.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (1664, 1666), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\n'), ((1668, 1677), 'retinanet.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (1675, 1677), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\n'), ((3557, 3568), 'time.time', 'time.time', ([], {}), '()\n', (3566, 3568), False, 'import time\n'), ((4375, 4445), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)\n', (4388, 4445), False, 'import cv2\n'), ((4680, 4707), 'os.path.dirname', 'os.path.dirname', (['image_path'], {}), '(image_path)\n', (4695, 4707), False, 'import os\n'), ((4722, 4745), 'os.path.exists', 'os.path.exists', (['new_dir'], {}), '(new_dir)\n', (4736, 4745), False, 'import os\n'), ((4754, 4774), 'os.makedirs', 'os.makedirs', (['new_dir'], {}), '(new_dir)\n', (4765, 4774), False, 'import os\n'), ((1982, 1994), 'retinanet.dataloader.Normalizer', 'Normalizer', ([], {}), '()\n', (1992, 1994), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\n'), ((1996, 2005), 'retinanet.dataloader.Resizer', 'Resizer', ([], {}), '()\n', (2003, 2005), False, 'from retinanet.dataloader import CocoDataset, CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, UnNormalizer, Normalizer\n')] |
import os
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.externals import joblib
from sklearn.preprocessing import MinMaxScaler
from sklearn.feature_selection import chi2, SelectKBest
import seaborn as sns
# Use seaborn's plain white theme for all subsequent plots.
sns.set_style("white")
def load_model_from_pkl(pkl):
    """Load a joblib-pickled scikit-learn model from the given path."""
    model_path = os.path.abspath(pkl)
    return joblib.load(model_path)
def load_data_from_csv(csv, use_cols=None):
    """Read a CSV file into a DataFrame, optionally keeping only `use_cols`."""
    csv_path = os.path.abspath(csv)
    return pd.read_csv(csv_path, usecols=use_cols, low_memory=False)
def perform_feature_scaling(features):
    """Min-max scale every column of `features` into [0, 1].

    Returns a new DataFrame with the original column labels. Swap in a
    different scaler here (e.g. StandardScaler) to change the scheme.
    """
    scaler = MinMaxScaler()
    scaler.fit(features)
    scaled_values = scaler.transform(features)
    return pd.DataFrame(scaled_values, columns=features.columns)
def split_data(data, y_col):
    """Drop rows with missing values, scale the features, and return (X, y).

    `y_col` names the target column; it is excluded from the scaled
    feature matrix and returned unscaled.
    """
    cleaned = clear_missing_data(data)
    labels = cleaned[y_col]
    features = perform_feature_scaling(cleaned.drop([y_col], axis=1))
    return features, labels
def get_sub_sequences(sessions, seq_len):
    """Return length-`seq_len` sliding windows over the cleaned, scaled data.

    NOTE(review): range(len - seq_len) omits the final possible window
    (starting at len - seq_len); this matches the original behavior --
    confirm whether that is intentional.
    """
    prepared = perform_feature_scaling(clear_missing_data(sessions))
    windows = []
    for start in range(len(prepared) - seq_len):
        windows.append(prepared[start:start + seq_len])
    return windows
def clear_missing_data(df):
    """Treat '?' placeholders as missing values and drop every affected row.

    Returns a copy of *df* without rows that contained a '?'.
    """
    # np.nan (lowercase): the np.NaN alias was removed in NumPy 2.0.
    df_with_nan = df.replace("?", np.nan)
    # Explicit keyword: positional dropna(0) is deprecated in pandas.
    return df_with_nan.dropna(axis=0)
def take_n_of_each(df, n):
    """Keep at most *n* rows per 'device_category' group, preserving order."""
    grouped = df.groupby('device_category')
    return grouped.head(n)
def perform_feature_selection(X_train, y_train, k_val):
    """Select the best *k_val* features of X_train by the chi2 criterion.

    Prints the indices of the chosen columns and returns a new matrix
    restricted to those columns.
    """
    print("**********FEATURE SELECTION**********")
    # Create and fit selector
    selector = SelectKBest(chi2, k=k_val)
    selector.fit(X_train, y_train)
    # Get idxs of columns to keep
    idxs_selected = selector.get_support(indices=True)
    print(idxs_selected)
    # Reuse the already-fitted selector; the original built and fitted a
    # second SelectKBest via fit_transform, running the chi2 scoring twice.
    x_new = selector.transform(X_train)
    return x_new
def roc_auc_plot(y_true, y_proba, ax, label=' ', l='-', lw=1.0):
    """Draw the ROC curve for (y_true, y_proba) on *ax* and return its AUC."""
    false_pos, true_pos, _ = metrics.roc_curve(y_true, y_proba)
    auc = metrics.roc_auc_score(y_true, y_proba)
    ax.plot(false_pos, true_pos, linestyle=l, linewidth=lw,
            label="%s (area=%.3f)" % (label, auc))
    return auc
def eval_predictions(y_true, y_pred):
    """Compute classification metrics for (y_true, y_pred).

    Prints the sklearn classification report, then returns
    (metrics_dict, confusion_matrix) where metrics_dict has one entry per
    class: 'class', 'accuracy_score', 'precision', 'recall', 'fscore',
    'support', and 'roc_auc_score' (the scalar scores are broadcast to one
    value per class via np.full).
    """
    # Accuracy classification score.
    accuracy_score = metrics.accuracy_score(y_true, y_pred)
    # Build a text report showing the main classification metrics
    classification_report = metrics.classification_report(y_true, y_pred)
    # Compute confusion matrix to evaluate the accuracy of a classification
    confusion_matrix = metrics.confusion_matrix(y_true, y_pred)
    # Compute precision, recall, F-measure and support for each class
    precision, recall, fscore, support = metrics.precision_recall_fscore_support(y_true, y_pred)
    # Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) from prediction scores.
    print(classification_report)
    classes = np.unique(np.array(y_true))
    if classes.size > 1:
        roc_auc_score = metrics.roc_auc_score(y_true, y_pred)
    else:
        # Only one class present in y_true: ROC AUC is undefined, so it is
        # reported as 0. When per-class metrics still cover two classes,
        # pad the class axis so the dict columns line up.
        if precision.size > 1:
            if classes[0] == 0 or classes[0] == 1:
                classes = np.array([0, 1])
            else:
                classes = np.array([0, classes[0]])
        roc_auc_score = 0
    metrics_dict = {
        'class': classes,
        'accuracy_score': np.full(classes.shape, accuracy_score),
        'precision': precision,
        'recall': recall,
        'fscore': fscore,
        'support': support,
        'roc_auc_score': np.full(classes.shape, roc_auc_score),
    }
    return metrics_dict, confusion_matrix
| [
"numpy.full",
"seaborn.set_style",
"os.path.abspath",
"sklearn.metrics.roc_curve",
"sklearn.metrics.accuracy_score",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.metrics.classification_report",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.precision_recall_fscore_support",
"numpy.array",
"... | [((240, 262), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (253, 262), True, 'import seaborn as sns\n'), ((707, 721), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (719, 721), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1847, 1873), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['chi2'], {'k': 'k_val'}), '(chi2, k=k_val)\n', (1858, 1873), False, 'from sklearn.feature_selection import chi2, SelectKBest\n'), ((2196, 2230), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_true', 'y_proba'], {}), '(y_true, y_proba)\n', (2213, 2230), False, 'from sklearn import metrics\n'), ((2243, 2281), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_proba'], {}), '(y_true, y_proba)\n', (2264, 2281), False, 'from sklearn import metrics\n'), ((2499, 2537), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2521, 2537), False, 'from sklearn import metrics\n'), ((2632, 2677), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2661, 2677), False, 'from sklearn import metrics\n'), ((2777, 2817), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2801, 2817), False, 'from sklearn import metrics\n'), ((2929, 2984), 'sklearn.metrics.precision_recall_fscore_support', 'metrics.precision_recall_fscore_support', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2968, 2984), False, 'from sklearn import metrics\n'), ((317, 337), 'os.path.abspath', 'os.path.abspath', (['pkl'], {}), '(pkl)\n', (332, 337), False, 'import os\n'), ((408, 428), 'os.path.abspath', 'os.path.abspath', (['csv'], {}), '(csv)\n', (423, 428), False, 'import os\n'), ((3146, 3162), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (3154, 3162), True, 'import numpy as np\n'), ((3213, 3250), 
'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (3234, 3250), False, 'from sklearn import metrics\n'), ((3555, 3593), 'numpy.full', 'np.full', (['classes.shape', 'accuracy_score'], {}), '(classes.shape, accuracy_score)\n', (3562, 3593), True, 'import numpy as np\n'), ((3732, 3769), 'numpy.full', 'np.full', (['classes.shape', 'roc_auc_score'], {}), '(classes.shape, roc_auc_score)\n', (3739, 3769), True, 'import numpy as np\n'), ((2035, 2061), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', (['chi2'], {'k': 'k_val'}), '(chi2, k=k_val)\n', (2046, 2061), False, 'from sklearn.feature_selection import chi2, SelectKBest\n'), ((3369, 3385), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3377, 3385), True, 'import numpy as np\n'), ((3430, 3455), 'numpy.array', 'np.array', (['[0, classes[0]]'], {}), '([0, classes[0]])\n', (3438, 3455), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 31 22:11:31 2022
Author: <NAME>
Course: CSE 598: Bio-Inspired AI and Optimization
Date: 2/10/21
"""
import numpy as np
import matplotlib.pyplot as plt
import math
import pandas as pd
"""
Assignment:
Genetic Algorithmn implementation:
- optimizaiton function: f = x*sin(10*x*x) +1
- x = [-.5, 1]
"""
# GA hyper-parameters
NUM_GENES = 5  # genes per chromosome: one sign gene plus four digit genes
POP_SIZE = 8  # individuals per generation; crossover pairs parents, so keep even
x = np.arange(-.5,1,.01)  # plotting grid over the search domain [-0.5, 1)
"""
Helper Functions
"""
def function(x):
    """Objective f(x) = x*sin(10*pi*x) + 1, evaluated element-wise."""
    oscillation = np.sin(10 * math.pi * x)
    return x * oscillation + 1
def create_parent(num_genes):
    """Sample one random encoded individual.

    Gene 0 is a sign carrier drawn uniformly from [-1, 1); genes
    1..num_genes-1 are decimal digits drawn from {0, ..., 8} (note: 9 is
    never drawn by randint(0, 9)).

    Returns a (1, num_genes) array.
    """
    genes = np.empty((1, num_genes))
    genes[0][0] = np.random.uniform(-1, 1)
    for gene_idx in range(1, num_genes):
        genes[0][gene_idx] = np.random.randint(0, 9)
    return genes
def decode_parent(parent):
    """Decode an encoded individual into its numeric value.

    Genes 1-4 are read as the decimal digits 0.d1 d2 d3 d4; gene 0 only
    supplies the sign (negative iff it is < 0).
    """
    genes = parent[0]
    magnitude = genes[1] / 10 + genes[2] / 100 + genes[3] / 1000 + genes[4] / 10000
    return -magnitude if genes[0] < 0 else magnitude
def fit_parent(parent):
    """Fitness of an encoded individual: f(decoded value), minus a 5-point
    penalty whenever the decoded value falls below 0.5."""
    value = decode_parent(parent)
    if value < .5:
        return function(value) - 5
    return function(value)
"""
Functions
"""
def generate_population(num_genes, pop_size):
    """Create a population of random encoded individuals.

    An odd *pop_size* is rounded up to the next even number so the
    population can always be paired off for crossover.

    Parameters
    ----------
    num_genes : number of genes per individual
    pop_size : requested number of individuals

    Returns
    -------
    population : (pop_size, num_genes) array, one individual per row
    """
    if pop_size % 2 != 0:  # have to have an even population size
        # The original `pop_size + 1` was a no-op expression; the guard
        # comment shows `+= 1` was intended.
        pop_size += 1
    population = np.empty((pop_size, num_genes))
    for i in range(0, pop_size):
        parent = create_parent(num_genes)
        population[i] = parent[0]
    return population
def fit_population(population, debug=True):
    """Evaluate every individual in *population* and collect the results.

    Parameters
    ----------
    population : 2-D array, one encoded individual per row
    debug : unused flag (kept for call-site compatibility)

    Returns
    -------
    fitness_chart : list of fitness scores, one per individual
    fitness_dict : pandas DataFrame with columns 'parent',
        'decoded parent' and 'fitness_score'.
        NOTE(review): the 'decoded parent' column actually stores the
        still-encoded parent vector, not its decoded value.
    """
    fitness_chart = []
    fitness_dict = {
        "parent": [],
        "decoded parent": [],
        "fitness_score": []}
    for parent in population: #for each parent, 1) decode 2) eval fitness 3) append to dict
        parent = [parent]  # wrap the row so fit_parent can index parent[0]
        fitness_score = fit_parent(parent)
        fitness_chart.append(fitness_score)
        fitness_dict["parent"].extend(parent)
        fitness_dict["decoded parent"].extend([(parent)])
        fitness_dict["fitness_score"].extend([fitness_score])
    fitness_dict = pd.DataFrame.from_dict(fitness_dict)
    return fitness_chart, fitness_dict
def selection_operator(fit_dict, sel_pressure=3):
    """Pick two parents by tournament selection.

    Runs two tournaments over the current generation; each draws
    `sel_pressure` random candidates (with replacement) and keeps the one
    with the highest fitness.

    Parameters
    ----------
    fit_dict : DataFrame from fit_population ('parent', 'fitness_score')
    sel_pressure : tournament size. Higher values mean more participants
        and thus higher SELECTIVE PRESSURE; lower values preserve more
        GENETIC DIVERSITY.

    Returns
    -------
    parents : (2, NUM_GENES) array holding the two tournament winners
    """
    parents = np.empty((2, NUM_GENES))
    for num in range(0,2):
        population, fitnesses = fit_dict["parent"], fit_dict["fitness_score"]
        sel_idxs = []
        for i in range(sel_pressure): #chose n numbers of indexes to identify tournamnet players
            if sel_pressure > len(population):
                print("ERROR: SELECTIVE PRESSURE for SELECTION OPERATOR greater than number of candidates. This will create uneven distribution of parent participants in tournament")
                break
            idx = np.random.randint(0, len(population))
            sel_idxs.append(idx)
        players = [population[sel_idxs[i]] for i in range(sel_pressure)] #identify tournament players
        p_fitnesses = [fitnesses[sel_idxs[j]] for j in range(sel_pressure)] #identify tournament players' fitnesses
        winner_fitness = np.max(p_fitnesses) #find winning fitness
        parent_idx = fit_dict["fitness_score"] == winner_fitness #boolean Series marking row(s) with the winning fitness
        idx = population[parent_idx]
        idx = idx.keys()[0]  # pandas index label of the (first) winning row
        parent = population[idx]
        try:
            parents[num] = parent
        except KeyError:
            print('KeyError: Key is not in parent dictionary.')
            continue
    return parents
def one_point_crossover(parent1, parent2, cross_pt=3):
    """One-point crossover of two parent vectors.

    Parameters
    ----------
    parent1, parent2 : parent gene vectors
    cross_pt : index at which the gene material is swapped

    Returns
    -------
    (child1, child2) : offspring; child1 takes parent1's head and
    parent2's tail, child2 the reverse.
    """
    child1 = np.concatenate((parent1[:cross_pt], parent2[cross_pt:]))
    child2 = np.concatenate((parent2[:cross_pt], parent1[cross_pt:]))
    return child1, child2
def mutation(child, P_m=.97):
    """Uniform mutation of an offspring vector, in place.

    Each gene after the sign gene (index 0) is re-sampled from {0, ..., 8}
    when a uniform draw exceeds *P_m* — i.e. with probability 1 - P_m.

    Returns the (possibly mutated) child.
    """
    for gene_idx in range(1, len(child)):
        if np.random.uniform(0, 1) > P_m:
            child[gene_idx] = np.random.randint(0, 9)
    return child
def gen_childs(parents):
    """Produce two offspring from a parent pair.

    Applies one-point crossover between parents[0] and parents[1], then
    uniform mutation (probability 1 - P_m per gene) to each child.
    """
    offspring_a, offspring_b = one_point_crossover(parents[0], parents[1])
    return mutation(offspring_a), mutation(offspring_b)
def gen_next_gen(population):
    """Generate generation N+1 from generation N.

    Runs three selection/crossover/mutation sessions, collecting two
    children per session, then stacks the six children with the last
    session's two winning parents (elitism) into the new population.

    Parameters
    ----------
    population : population array

    Returns
    -------
    next_gen : new generation array (6 children + 2 parents rows)
    """
    next_gen = np.empty((population.shape))  # NOTE(review): unused preallocation — overwritten by np.vstack below
    children = []
    num_sessions = 3
    for i in range(num_sessions):
        fitness_chart, fitness_dict = fit_population(population) #fit population
        parents = selection_operator(fitness_dict) #select parent x 2
        child1, child2 = gen_childs(parents)
        children.append(child1)
        children.append(child2)
    # Only the parents from the final session survive into the next generation.
    next_gen = np.vstack([np.array(children), parents[0], parents[1]])
    print(parents, "\n")
    return next_gen
def plot_elites(elites, evolutions=0, save=True):
    """Plot f(x) over the module-level grid *x* and scatter the decoded
    elite individuals on top of it (figure 2).

    NOTE(review): the buffers are sized by the global POP_SIZE while the
    loop runs over len(elites) — confirm the two always match.
    """
    decoded_elites_x = np.empty((POP_SIZE,))
    decoded_elites_y = np.empty((POP_SIZE,))
    #colors = ['r','g','b','m','y','cyan','magenta', 'k']
    for idx in range(0, len(elites)):
        decoded_elite = decode_parent([elites[idx]])
        decoded_elites_x[idx] = decoded_elite
        decoded_elites_y[idx] = function(decoded_elite)
    y = function(x)
    plt.figure(2)
    plt.plot(x,y)
    plt.scatter(decoded_elites_x, decoded_elites_y, c='r')
    plt.title(f'Elite Plot for {evolutions} evolutions')
    if save:
        # Assumes a 'figures/' directory exists — TODO confirm
        plt.savefig(f'figures/eli_plot_{evolutions}.png', bbox_inches='tight')
    plt.show()
    plt.close()
    return
def plot_function():
    """Draw the objective f(x) over the module-level grid x (figure 1)."""
    objective_values = function(x)
    plt.figure(1)
    plt.plot(x, objective_values)
    plt.title('Optimization function')
    plt.show()
def evolve(init_population, num_evolutions, debug = True):
    """Simulate evolution for a fixed number of generations.

    Parameters
    ----------
    init_population : initial population array
    num_evolutions : number of generations to run
    debug : when True, plot the intermediate population every generation

    Returns
    -------
    elites : the population after num_evolutions generations
    """
    population = init_population #initial population
    plot_function()
    for i in range(0, num_evolutions):
        print(f"Parents for Generation: {i}")
        new_population = gen_next_gen(population)
        population = new_population
        if debug:
            plot_elites(population, i, save=False)
    elites = population #final population
    plot_elites(elites, num_evolutions)
    return elites
# Driver: build a random initial population and evolve it for 100 generations.
init_population = generate_population(NUM_GENES, POP_SIZE)
elites = evolve(init_population, 100, True)
| [
"matplotlib.pyplot.title",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.plot",
"numpy.empty",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"numpy.arange",
"numpy.max",
"numpy.sin",
"numpy.array",
"mat... | [((401, 425), 'numpy.arange', 'np.arange', (['(-0.5)', '(1)', '(0.01)'], {}), '(-0.5, 1, 0.01)\n', (410, 425), True, 'import numpy as np\n'), ((892, 916), 'numpy.empty', 'np.empty', (['(1, num_genes)'], {}), '((1, num_genes))\n', (900, 916), True, 'import numpy as np\n'), ((930, 954), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {}), '(-1, 1)\n', (947, 954), True, 'import numpy as np\n'), ((2416, 2447), 'numpy.empty', 'np.empty', (['(pop_size, num_genes)'], {}), '((pop_size, num_genes))\n', (2424, 2447), True, 'import numpy as np\n'), ((3499, 3535), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['fitness_dict'], {}), '(fitness_dict)\n', (3521, 3535), True, 'import pandas as pd\n'), ((4236, 4260), 'numpy.empty', 'np.empty', (['(2, NUM_GENES)'], {}), '((2, NUM_GENES))\n', (4244, 4260), True, 'import numpy as np\n'), ((5922, 5945), 'numpy.empty', 'np.empty', (['parent1.shape'], {}), '(parent1.shape)\n', (5930, 5945), True, 'import numpy as np\n'), ((5961, 5984), 'numpy.empty', 'np.empty', (['parent1.shape'], {}), '(parent1.shape)\n', (5969, 5984), True, 'import numpy as np\n'), ((7493, 7519), 'numpy.empty', 'np.empty', (['population.shape'], {}), '(population.shape)\n', (7501, 7519), True, 'import numpy as np\n'), ((8047, 8068), 'numpy.empty', 'np.empty', (['(POP_SIZE,)'], {}), '((POP_SIZE,))\n', (8055, 8068), True, 'import numpy as np\n'), ((8092, 8113), 'numpy.empty', 'np.empty', (['(POP_SIZE,)'], {}), '((POP_SIZE,))\n', (8100, 8113), True, 'import numpy as np\n'), ((8398, 8411), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (8408, 8411), True, 'import matplotlib.pyplot as plt\n'), ((8416, 8430), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (8424, 8430), True, 'import matplotlib.pyplot as plt\n'), ((8434, 8488), 'matplotlib.pyplot.scatter', 'plt.scatter', (['decoded_elites_x', 'decoded_elites_y'], {'c': '"""r"""'}), "(decoded_elites_x, decoded_elites_y, c='r')\n", (8445, 8488), True, 
'import matplotlib.pyplot as plt\n'), ((8493, 8545), 'matplotlib.pyplot.title', 'plt.title', (['f"""Elite Plot for {evolutions} evolutions"""'], {}), "(f'Elite Plot for {evolutions} evolutions')\n", (8502, 8545), True, 'import matplotlib.pyplot as plt\n'), ((8642, 8652), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8650, 8652), True, 'import matplotlib.pyplot as plt\n'), ((8700, 8713), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (8710, 8713), True, 'import matplotlib.pyplot as plt\n'), ((8718, 8732), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (8726, 8732), True, 'import matplotlib.pyplot as plt\n'), ((8736, 8770), 'matplotlib.pyplot.title', 'plt.title', (['"""Optimization function"""'], {}), "('Optimization function')\n", (8745, 8770), True, 'import matplotlib.pyplot as plt\n'), ((8775, 8785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8783, 8785), True, 'import matplotlib.pyplot as plt\n'), ((1005, 1028), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1022, 1028), True, 'import numpy as np\n'), ((5070, 5089), 'numpy.max', 'np.max', (['p_fitnesses'], {}), '(p_fitnesses)\n', (5076, 5089), True, 'import numpy as np\n'), ((8567, 8637), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""figures/eli_plot_{evolutions}.png"""'], {'bbox_inches': '"""tight"""'}), "(f'figures/eli_plot_{evolutions}.png', bbox_inches='tight')\n", (8578, 8637), True, 'import matplotlib.pyplot as plt\n'), ((552, 576), 'numpy.sin', 'np.sin', (['(10 * math.pi * x)'], {}), '(10 * math.pi * x)\n', (558, 576), True, 'import numpy as np\n'), ((6566, 6589), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6583, 6589), True, 'import numpy as np\n'), ((6619, 6642), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (6636, 6642), True, 'import numpy as np\n'), ((7881, 7899), 'numpy.array', 'np.array', (['children'], {}), '(children)\n', (7889, 
7899), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import multiprocessing
import numpy as np
import os
import re
import subprocess
import sys
class colors:
    """ANSI escape sequences for colored/styled terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all colors/attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def run_program(optimizer, problem_file):
    """Run the optimizer script once on *problem_file* and return the
    makespan it prints, parsed as a float.

    Exits the whole process with status -1 if the subprocess cannot be
    started.
    """
    script = os.path.join(os.path.dirname(__file__), '../program/program.py')
    try:
        result = subprocess.run(
            ['python3', script, '--optimizer', optimizer,
             '--problem', problem_file, '--script'],
            stdout=subprocess.PIPE)
    except OSError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        print("failed to run optimizer '{}' with problem '{}'".format(
            optimizer, problem_file), file=sys.stderr)
        sys.exit(-1)
    return float(result.stdout.decode('utf-8').strip())
# Location of the benchmark problem instances.
problem_file_path = '../assignment/Test Data/'
# Reference makespans each optimizer run is judged against.
makespan_baseline = {
    '1.txt': 55.0,
    '2.txt': 930.0,
    '3.txt': 1165.0,
    '4.txt': 1005.0,
    '5.txt': 1235.0,
    '6.txt': 943.0
}
# Collect the numbered problem files. The pattern is a raw string so the
# regex escapes survive (a bare '\d' is a SyntaxWarning in Python 3.12+).
problem_files = sorted(
    filter(lambda filename: re.match(r'\d+\.txt', filename),
           os.listdir(problem_file_path)))
# Worker pool; a single worker for now (swap in multiprocessing.cpu_count()
# to parallelise across cores).
pool = multiprocessing.Pool(1)#multiprocessing.cpu_count())
run_count = 5  # repetitions per (optimizer, problem) pair
optimizers = ['aco', 'ba', 'pso']
# makespan_values[optimizer, problem, run] holds one makespan per run.
makespan_values = np.zeros((len(optimizers), len(problem_files), run_count))
# Submit every (optimizer, problem, run) combination asynchronously.
evaluations = [
    (optimizer_index, problem_index, run_index, pool.apply_async(run_program, (optimizer, os.path.join(problem_file_path, problem_file))))
    for problem_index, problem_file in enumerate(problem_files)
    for optimizer_index, optimizer in enumerate(optimizers)
    for run_index in range(run_count)]
# Collect the results in submission order, reporting overall progress.
for evaluation_index, evaluation in enumerate(evaluations):
    optimizer_index, problem_index, run_index, result = evaluation
    makespan = result.get()
    makespan_values[optimizer_index, problem_index, run_index] = makespan
    print('{:.2f}%'.format(100 * (evaluation_index + 1) / len(evaluations)))
pool.close()
pool.join()
def format_makespan(name, value, baseline):
    """Format one makespan figure with its percentage of *baseline*.

    Colored green when within 10% of the baseline, red otherwise; no color
    when baseline is falsy.
    """
    if not baseline:
        color = ''
    elif value <= (baseline * 1.1):
        color = colors.OKGREEN
    else:
        color = colors.FAIL
    return '{}{:>6.1f} {:>5.1f}% ({}){}'.format(
        color, value, 100 * value / baseline, name, colors.ENDC)
# Summary: for each optimizer and problem, compare the min and mean
# makespan over all runs against the baseline value.
for optimizer_index in range(len(optimizers)):
    print(optimizers[optimizer_index])
    for problem_index, problem_file in enumerate(problem_files):
        baseline = makespan_baseline[problem_file]
        min_makespan = np.min(makespan_values[optimizer_index, problem_index])
        mean_makespan = np.mean(makespan_values[optimizer_index, problem_index])
        print('{}: {} {} {:>6.1f} (baseline)'.format(
            problem_file,
            format_makespan('min', min_makespan, baseline),
            format_makespan('mean', mean_makespan, baseline),
            baseline))
| [
"os.path.dirname",
"re.match",
"numpy.min",
"numpy.mean",
"multiprocessing.Pool",
"os.path.join",
"os.listdir",
"sys.exit"
] | [((1171, 1194), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(1)'], {}), '(1)\n', (1191, 1194), False, 'import multiprocessing\n'), ((1131, 1160), 'os.listdir', 'os.listdir', (['problem_file_path'], {}), '(problem_file_path)\n', (1141, 1160), False, 'import os\n'), ((2493, 2548), 'numpy.min', 'np.min', (['makespan_values[optimizer_index, problem_index]'], {}), '(makespan_values[optimizer_index, problem_index])\n', (2499, 2548), True, 'import numpy as np\n'), ((2573, 2629), 'numpy.mean', 'np.mean', (['makespan_values[optimizer_index, problem_index]'], {}), '(makespan_values[optimizer_index, problem_index])\n', (2580, 2629), True, 'import numpy as np\n'), ((771, 783), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (779, 783), False, 'import sys\n'), ((1088, 1120), 're.match', 're.match', (['"""\\\\d+\\\\.txt"""', 'filename'], {}), "('\\\\d+\\\\.txt', filename)\n", (1096, 1120), False, 'import re\n'), ((1459, 1504), 'os.path.join', 'os.path.join', (['problem_file_path', 'problem_file'], {}), '(problem_file_path, problem_file)\n', (1471, 1504), False, 'import os\n'), ((469, 494), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (484, 494), False, 'import os\n')] |
#Made by <NAME>
#Made by following the exercise 5 from MolStat
# ":" is used to generate subplots, so data after ":" is in same subplot
#------------Package---------------------------
import numpy as np
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
#------------Parameters---------------------------
#Wavelength interval with set values: (from, to, number of points) in nm
wavelength_interval_set=np.linspace(200,700,1002)
#Search word marking excitation lines in the input file
search="Excited State"
#Full width half maximum of the Gaussian broadening [cm^-1]
sigma=3226.00
#Title (if only "" then the name of the first file)
title="test"
#Filetype of plot
end=".pdf"
#Save plot to file (y/n)
save_file="y"
#Overlay oscillator strengths on the plot (y/n)
osc_act="y"
#------------Constants in SI-units----------------
#Avogadros Number
NA=6.022140857e23
#Elemental charge
e=1.6021766208e-19
#Electron mass
me=9.10938356e-31
#Vacuum permittivity
eps0=8.8541878176e-12
#Speed of light
c=299792458
#Prefactor k of the Gaussian band-shape expression for epsilon
k=NA*e**2*np.sqrt(np.log(2)/np.pi)/(2*np.log(10)*me*eps0*c**2)
#print(k)
k2=k/sigma
#------------Functions------------------------------
#Extract excitation data from file
def extract(filename, search):
    """Parse excitation data from a quantum-chemistry output file.

    Collects, from every line containing *search*, the transition
    wavelength (field 7, in nm) and the oscillator strength (field 9,
    with the leading "f=" stripped).

    Returns (exi_state, wave_trans, osc_trans): the raw split fields, the
    wavelengths, and the oscillator strengths.

    Raises OSError if the file cannot be opened (the original bare except
    printed a message and then crashed with a NameError on `content`).
    """
    try:
        with open(filename) as thefile:
            content = thefile.readlines()
    except OSError:
        print("No file given!")
        raise
    # Find the wavelength & oscillator strength of the transitions
    exi_state = []
    wave_trans = []
    osc_trans = []
    for line in content:
        if search in line:
            state = list(filter(None, line.split(" ")))
            exi_state.append(state)
            wave_trans.append(float(state[6]))     # wavelength in nm
            osc_trans.append(float(state[8][2:]))  # strip the leading "f="
    return exi_state, wave_trans, osc_trans
#Calculate the molar absorption coefficients
def epsilon(wavelength_interval, wave_trans, osc_trans, k2, sigma):
    """Gaussian-broadened molar absorption coefficient.

    For each wavelength in *wavelength_interval* (nm), sums the
    contribution of every transition (wave_trans[j], osc_trans[j]) using a
    Gaussian of FWHM *sigma* (cm^-1) in wavenumber space, scaled by *k2*.

    Returns a list of epsilon values, one per wavelength.
    """
    spectrum = []
    for wl in wavelength_interval:
        total = sum(
            strength * np.exp(-4 * np.log(2) * ((1 / wl - 1 / center) / (sigma * 1e-7)) ** 2)
            for center, strength in zip(wave_trans, osc_trans))
        spectrum.append(k2 * total)
    return spectrum
#Show Uv-Vis plot with oscillator strength
def plot_osc(wavelength_interval,epsi,wave_trans,osc_trans,end):
    """Plot one broadened UV-Vis spectrum, optionally with oscillator
    strength sticks on a twin y-axis.

    Reads the module-level flags osc_act (overlay sticks), save_file and
    title (write the figure to disk). The *end* parameter is unused here.
    """
    fig,ax1=plt.subplots(figsize=(8,6))
    ax2=ax1.twinx()  # second y-axis for the oscillator strengths
    ax1.set_xlabel("Wavelength / [nm]")
    ax1.set_ylabel(r"$\epsilon$ / [M$^{-1}$ cm$^{-1}$]",color="b")
    ax1.plot(wavelength_interval,epsi,"b-")
    if osc_act.lower()=="y" or osc_act.lower()=="yes":
        ax2.set_ylabel("Oscillator strength",color="r")
        ax2.bar(wave_trans,osc_trans,width=1,color="r")
    ax1.set_xlim(wavelength_interval[0],wavelength_interval[-1])
    ax1.set_ylim(0,max(epsi))
    # Scientific notation on the epsilon axis
    ax1.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter(useMathText=True, useOffset=False))
    ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax2.set_ylim(0,1)
    if save_file.lower()=="y" or save_file.lower()=="yes":
        plt.savefig(title,bbox_inches='tight')
    plt.show()
    plt.close()
    return
#Show Uv-Vis plot with oscillator strength for only one data set
def plot_osc_1data(filename, search, end):
    """Extract one file's excitations and plot its broadened UV-Vis
    spectrum together with the oscillator-strength sticks."""
    # Extract excitation data from the file
    exi_state, wave_trans, osc_trans = extract(filename, search)
    # Wavelength interval spanning the transitions. np.linspace requires an
    # integer sample count; the original passed a float expression, which
    # raises a TypeError on modern NumPy.
    num_points = int((wave_trans[0] - wave_trans[-1] + 1) * 3)
    wavelength_interval = np.linspace(wave_trans[-1], wave_trans[0] + 100, num_points)
    # Broadened molar absorption coefficients
    epsi = epsilon(wavelength_interval, wave_trans, osc_trans, k2, sigma)
    # Plot the spectrum with oscillator strengths overlaid
    plot_osc(wavelength_interval, epsi, wave_trans, osc_trans, end)
#Show Uv-Vis plot of multiply data set
def plot_multi(search,files,wavelength_interval_set,end):
    """Plot UV-Vis spectra for every file named on the command line.

    Consecutive filenames in sys.argv share a subplot; a ":" argument
    starts a new subplot. Each spectrum is broadened with the module-level
    k2/sigma; oscillator-strength bars go on a per-subplot twin y-axis
    when osc_act is enabled.

    NOTE(review): the `files` parameter is unused — the function reads
    sys.argv directly. `wave_list` is also never used.
    """
    num_sub=sys.argv.count(":")  # every ":" separator opens one more subplot
    fig,ax=plt.subplots(num_sub+1,sharex=True,sharey=True,figsize=(8,6+2*num_sub))
    wave_list=[]
    epsi_list=[]
    num_sub_now=0
    if num_sub==0:
        ax=[ax]  # normalise: plt.subplots returns a bare Axes when there is only one
    ax_y_copy={}  # twin (oscillator-strength) axis per subplot index
    if osc_act.lower()=="y" or osc_act.lower()=="yes":
        ax_y_copy[num_sub_now]=ax[num_sub_now].twinx()
        ax_y_copy[num_sub_now].set_ylabel("Oscillator strength")
    for i in range(1,len(sys.argv)):
        if sys.argv[i]!=":":
            # One data file: extract, broaden and draw into the current subplot
            exi_state1,wave_trans1,osc_trans1=extract(sys.argv[i],search)
            epsi1=epsilon(wavelength_interval_set,wave_trans1,osc_trans1,k2,sigma)
            epsi_list+=epsi1  # accumulated across all files for a shared y-limit
            ax[num_sub_now].plot(wavelength_interval_set,epsi1,label=sys.argv[i])
            ax[num_sub_now].legend(loc=0)
            ax[num_sub_now].set_ylim(0,max(epsi_list)*1.05)
            ax[num_sub_now].set_xlabel("Wavelength / [nm]")
            ax[num_sub_now].grid(True)
            if osc_act.lower()=="y" or osc_act.lower()=="yes":
                ax_y_copy[num_sub_now].bar(wave_trans1,osc_trans1,width=1)
        elif sys.argv[i]==":":
            # Separator: move on to the next subplot
            num_sub_now+=1
            if osc_act.lower()=="y" or osc_act.lower()=="yes":
                ax_y_copy[num_sub_now]=ax[num_sub_now].twinx()
                ax_y_copy[num_sub_now].set_ylabel("Oscillator strength")
    fig.subplots_adjust(hspace=0.2)
    # Shared epsilon label for all subplots
    fig.text(0.04, 0.5, r"$\epsilon$ / [M$^{-1}$ cm$^{-1}$]", va='center', rotation='vertical')
    ax[0].set_xlim(wavelength_interval_set[0],wavelength_interval_set[-1])
    ax[0].ticklabel_format(style='sci', axis='y', scilimits=(0,0),useMathText=True)
    if save_file.lower()=="y" or save_file.lower()=="yes":
        plt.savefig(title,bbox_inches='tight')
    plt.show()
    plt.close()
    return
#------------Program------------------------------
#Only execute if this is the main script
if __name__ == "__main__":
    #Name of plot: default to the first input file's name, else the set title
    #(note: rebinds the module-level `title` read by the plotting functions)
    if len(title)==0:
        title=sys.argv[1][:-4]+end
    else:
        title=title+end
    #Plot either one uv-vis with oscillator strength or multi uv-vis without
    if len(sys.argv)<=2:
        #Single data set: Uv-Vis plot with oscillator strengths
        plot_osc_1data(sys.argv[1],search,end)
    else:
        #Several data sets (":" arguments in argv separate subplots)
        plot_multi(search,sys.argv,wavelength_interval_set,end)
| [
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.close",
"sys.argv.count",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.ScalarFormatter"
] | [((413, 440), 'numpy.linspace', 'np.linspace', (['(200)', '(700)', '(1002)'], {}), '(200, 700, 1002)\n', (424, 440), True, 'import numpy as np\n'), ((2179, 2207), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (2191, 2207), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2944), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2942, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2960), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2958, 2960), True, 'import matplotlib.pyplot as plt\n'), ((3251, 3345), 'numpy.linspace', 'np.linspace', (['wave_trans[-1]', '(wave_trans[0] + 100)', '((wave_trans[0] - wave_trans[-1] + 1) * 3)'], {}), '(wave_trans[-1], wave_trans[0] + 100, (wave_trans[0] -\n wave_trans[-1] + 1) * 3)\n', (3262, 3345), True, 'import numpy as np\n'), ((3677, 3696), 'sys.argv.count', 'sys.argv.count', (['""":"""'], {}), "(':')\n", (3691, 3696), False, 'import sys\n'), ((3708, 3793), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(num_sub + 1)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(8, 6 + 2 * num_sub)'}), '(num_sub + 1, sharex=True, sharey=True, figsize=(8, 6 + 2 *\n num_sub))\n', (3720, 3793), True, 'import matplotlib.pyplot as plt\n'), ((5390, 5400), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5398, 5400), True, 'import matplotlib.pyplot as plt\n'), ((5405, 5416), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5414, 5416), True, 'import matplotlib.pyplot as plt\n'), ((2674, 2735), 'matplotlib.ticker.ScalarFormatter', 'mpl.ticker.ScalarFormatter', ([], {'useMathText': '(True)', 'useOffset': '(False)'}), '(useMathText=True, useOffset=False)\n', (2700, 2735), True, 'import matplotlib as mpl\n'), ((2891, 2930), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {'bbox_inches': '"""tight"""'}), "(title, bbox_inches='tight')\n", (2902, 2930), True, 'import matplotlib.pyplot as plt\n'), ((5347, 5386), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {'bbox_inches': '"""tight"""'}), "(title, bbox_inches='tight')\n", (5358, 5386), True, 'import matplotlib.pyplot as plt\n'), ((972, 981), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (978, 981), True, 'import numpy as np\n'), ((992, 1002), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (998, 1002), True, 'import numpy as np\n'), ((1956, 1965), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1962, 1965), True, 'import numpy as np\n')] |
import cdutil
import numpy as np
import MV2 as MV
###############################################################################################
def bony_sorting_part1(w500,binedges):
    """Partition w500 ([month, lat, lon]) into dynamical-regime (wap) bins.

    Returns OKwaps of shape [month, lat, lon, len(binedges)+2]: slot 0
    keeps values below binedges[0]; slot k (k = 1..len(binedges)) keeps
    values in [binedges[k-1], binedges[k-1]+dx); the last slot keeps
    values at or above binedges[-1]+dx. Everything outside a slot's range
    is masked. Assumes binedges is uniformly spaced (dx is taken from the
    first gap).
    """
    A,B,C = w500.shape
    dx=np.diff(binedges)[0]  # bin width, from the first edge gap
    # Compute composite:
    OKwaps=nanarray((A,B,C,2+len(binedges))) # add 2 for the exceedances
    xx=0
    for x in binedges:
        xx+=1
        w500_bin=MV.masked_less(w500,x)  # mask everything below this edge
        OKwaps[...,xx]=MV.masked_greater_equal(w500_bin,x+dx)  # ...and at/above the next edge
    # do the first wap bin:
    OKwaps[...,0]=MV.masked_greater_equal(w500,binedges[0])
    # do the last wap bin:
    OKwaps[...,-1]=MV.masked_less(w500,binedges[-1]+dx)
    return OKwaps # [month,lat,lon,wapbin]
###############################################################################################
def bony_sorting_part2(OKwaps,data,OKland,WTS,binedges):
    """Collapse *data* from [time, lat, lon] to per-wap-bin values
    [time, wapbin].

    Parameters
    ----------
    OKwaps : output of bony_sorting_part1 ([time, lat, lon, wapbin])
    data   : field to composite ([time, lat, lon])
    OKland : field masked everywhere except over land
    WTS    : area weights; assumed to sum to 0.5 over the tropics
    binedges : wap bin edges (only its length is used here)

    Returns
    -------
    DATA : area-weighted averages per bin; the last column is the
        land-only average
    CNTS : fractional area represented by each bin (zeros where masked)

    Raises
    ------
    RuntimeError : if the fractional areas do not sum to the tropical
        fraction (0.5) at every time step.
    """
    A,B,C = data.shape
    DATA = nanarray((A,3+len(binedges))) # add 2 for the exceedances, 1 for land
    CNTS = MV.zeros((A,3+len(binedges)))
    for xx in range(2+len(binedges)):
        A1 = MV.masked_where(OKwaps[...,xx].mask,WTS)  # weights restricted to this bin
        A2 = MV.masked_where(data.mask,A1)             # ...and to where data is defined
        denom = np.ma.sum(np.ma.sum(A2,-1),-1)
        DATA[...,xx] = np.sum(np.sum(data*A2,-1),-1)/denom # bin-avg data is computed where both data and wap are defined
        CNTS[...,xx] = np.sum(np.sum(A1,-1),-1) # fractional area of this bin includes regions where data is undefined
    # now do the land-only average:
    xx+=1
    A1 = MV.masked_where(OKland.mask,WTS)
    A2 = MV.masked_where(data.mask,A1)
    denom = np.ma.sum(np.ma.sum(A2,-1),-1)
    DATA[...,xx] = np.sum(np.sum(data*A2,-1),-1)/denom # bin-avg data is computed where both data and wap are defined
    CNTS[...,xx] = np.sum(np.sum(A1,-1),-1) # fractional area of this bin includes regions where data is undefined
    # Ensure that the area matrix has zeros rather than masked points
    CNTS[CNTS.mask]=0
    if np.allclose(0.5,np.sum(CNTS,-1))==False:
        # Was a bare undefined name (`moot`) used to force a NameError;
        # raise an explicit error instead.
        raise RuntimeError(
            'sum of fractional counts over all wapbins does not equal 0.5 (tropical fraction)')
    # DATA contains area-weighted averages within each bin
    # CNTS contains fractional areas represented by each bin
    # so summing (DATA*CNTS) over all regimes should recover the tropical contribution to the global mean
    v1 = np.sum(DATA*CNTS,1)
    v2a = 0.5*cdutil.averager(data, axis='xy', weights='weighted')
    v2b = np.ma.sum(np.ma.sum(WTS*data,1),1)
    if np.allclose(v1,v2a)==False or np.allclose(v1,v2b)==False:
        print('Cannot reconstruct tropical average via summing regimes')
    return DATA,CNTS #[time,wapbin]
###########################################################################
def nanarray(vector):
    """Return a fully masked MV2 array with the shape given by ``vector``.

    Example: vector = (90, 144, 28).
    Analogous to this = NaN*ones(x, y, z) in matlab: every element starts
    out masked/undefined until explicitly assigned.
    """
    blank = MV.zeros(vector)
    return MV.masked_where(blank == 0, blank)
| [
"MV2.masked_where",
"MV2.zeros",
"numpy.ma.sum",
"numpy.sum",
"cdutil.averager",
"numpy.allclose",
"MV2.masked_less",
"numpy.diff",
"MV2.masked_greater_equal"
] | [((535, 577), 'MV2.masked_greater_equal', 'MV.masked_greater_equal', (['w500', 'binedges[0]'], {}), '(w500, binedges[0])\n', (558, 577), True, 'import MV2 as MV\n'), ((623, 662), 'MV2.masked_less', 'MV.masked_less', (['w500', '(binedges[-1] + dx)'], {}), '(w500, binedges[-1] + dx)\n', (637, 662), True, 'import MV2 as MV\n'), ((1574, 1607), 'MV2.masked_where', 'MV.masked_where', (['OKland.mask', 'WTS'], {}), '(OKland.mask, WTS)\n', (1589, 1607), True, 'import MV2 as MV\n'), ((1616, 1646), 'MV2.masked_where', 'MV.masked_where', (['data.mask', 'A1'], {}), '(data.mask, A1)\n', (1631, 1646), True, 'import MV2 as MV\n'), ((2423, 2445), 'numpy.sum', 'np.sum', (['(DATA * CNTS)', '(1)'], {}), '(DATA * CNTS, 1)\n', (2429, 2445), True, 'import numpy as np\n'), ((3018, 3034), 'MV2.zeros', 'MV.zeros', (['vector'], {}), '(vector)\n', (3026, 3034), True, 'import MV2 as MV\n'), ((3044, 3076), 'MV2.masked_where', 'MV.masked_where', (['(this == 0)', 'this'], {}), '(this == 0, this)\n', (3059, 3076), True, 'import MV2 as MV\n'), ((217, 234), 'numpy.diff', 'np.diff', (['binedges'], {}), '(binedges)\n', (224, 234), True, 'import numpy as np\n'), ((403, 426), 'MV2.masked_less', 'MV.masked_less', (['w500', 'x'], {}), '(w500, x)\n', (417, 426), True, 'import MV2 as MV\n'), ((449, 490), 'MV2.masked_greater_equal', 'MV.masked_greater_equal', (['w500_bin', '(x + dx)'], {}), '(w500_bin, x + dx)\n', (472, 490), True, 'import MV2 as MV\n'), ((1134, 1176), 'MV2.masked_where', 'MV.masked_where', (['OKwaps[..., xx].mask', 'WTS'], {}), '(OKwaps[..., xx].mask, WTS)\n', (1149, 1176), True, 'import MV2 as MV\n'), ((1188, 1218), 'MV2.masked_where', 'MV.masked_where', (['data.mask', 'A1'], {}), '(data.mask, A1)\n', (1203, 1218), True, 'import MV2 as MV\n'), ((1668, 1685), 'numpy.ma.sum', 'np.ma.sum', (['A2', '(-1)'], {}), '(A2, -1)\n', (1677, 1685), True, 'import numpy as np\n'), ((1833, 1847), 'numpy.sum', 'np.sum', (['A1', '(-1)'], {}), '(A1, -1)\n', (1839, 1847), True, 'import numpy as np\n'), 
((2457, 2509), 'cdutil.averager', 'cdutil.averager', (['data'], {'axis': '"""xy"""', 'weights': '"""weighted"""'}), "(data, axis='xy', weights='weighted')\n", (2472, 2509), False, 'import cdutil\n'), ((2530, 2554), 'numpy.ma.sum', 'np.ma.sum', (['(WTS * data)', '(1)'], {}), '(WTS * data, 1)\n', (2539, 2554), True, 'import numpy as np\n'), ((1244, 1261), 'numpy.ma.sum', 'np.ma.sum', (['A2', '(-1)'], {}), '(A2, -1)\n', (1253, 1261), True, 'import numpy as np\n'), ((1417, 1431), 'numpy.sum', 'np.sum', (['A1', '(-1)'], {}), '(A1, -1)\n', (1423, 1431), True, 'import numpy as np\n'), ((1715, 1736), 'numpy.sum', 'np.sum', (['(data * A2)', '(-1)'], {}), '(data * A2, -1)\n', (1721, 1736), True, 'import numpy as np\n'), ((2043, 2059), 'numpy.sum', 'np.sum', (['CNTS', '(-1)'], {}), '(CNTS, -1)\n', (2049, 2059), True, 'import numpy as np\n'), ((2567, 2587), 'numpy.allclose', 'np.allclose', (['v1', 'v2a'], {}), '(v1, v2a)\n', (2578, 2587), True, 'import numpy as np\n'), ((2597, 2617), 'numpy.allclose', 'np.allclose', (['v1', 'v2b'], {}), '(v1, v2b)\n', (2608, 2617), True, 'import numpy as np\n'), ((1295, 1316), 'numpy.sum', 'np.sum', (['(data * A2)', '(-1)'], {}), '(data * A2, -1)\n', (1301, 1316), True, 'import numpy as np\n')] |
"""Plot thesis grade against final grade from output.npz; save as PNG and PDF."""
import matplotlib.pyplot as plt
import numpy as np

# np.load on an .npz returns a lazy NpzFile that keeps the archive open;
# use it as a context manager so the file handle is released once the
# arrays have been read.
# NOTE(review): allow_pickle=True is only safe for locally produced files —
# never load untrusted .npz archives with pickling enabled.
with np.load("output.npz", allow_pickle=True) as f:
    x = f["x"]
    y = f["y"]

plt.plot(x, y)
plt.xlabel("thesis grade")
plt.ylabel("final grade")
plt.savefig("grades.png", format="png")
plt.savefig("grades.pdf", format="pdf")
plt.show()
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((55, 95), 'numpy.load', 'np.load', (['"""output.npz"""'], {'allow_pickle': '(True)'}), "('output.npz', allow_pickle=True)\n", (62, 95), True, 'import numpy as np\n'), ((115, 129), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (123, 129), True, 'import matplotlib.pyplot as plt\n'), ((129, 155), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""thesis grade"""'], {}), "('thesis grade')\n", (139, 155), True, 'import matplotlib.pyplot as plt\n'), ((156, 181), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""final grade"""'], {}), "('final grade')\n", (166, 181), True, 'import matplotlib.pyplot as plt\n'), ((183, 222), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grades.png"""'], {'format': '"""png"""'}), "('grades.png', format='png')\n", (194, 222), True, 'import matplotlib.pyplot as plt\n'), ((222, 261), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grades.pdf"""'], {'format': '"""pdf"""'}), "('grades.pdf', format='pdf')\n", (233, 261), True, 'import matplotlib.pyplot as plt\n'), ((263, 273), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (271, 273), True, 'import matplotlib.pyplot as plt\n')] |
"""Code for parameter and function fitting of risk based approximate model to simulation data."""
import pdb
import copy
import itertools
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
import simulation
from simulation import Risk
import risk_model
import control_tools
import fitting
import visualisation
class RiskFitter:
    """Class for fitting risk based approximate model, handling likelihood and SSE fits.

    Wraps a parent (spatial) fitter and aggregates its spatially resolved
    event data into risk-group-only form before fitting, optimising and
    assessing the risk-structured approximate model.
    """
    def __init__(self, parent):
        # data: results cache (input events, optimised control, DPC data, ...)
        self.data = {}
        # linear_fits: name -> fitted linear model (e.g. 'likelihood')
        self.linear_fits = {}
        # parent_fitter: the spatial fitter supplying simulation data/params
        self.parent_fitter = parent
    def __repr__(self):
        repr_str = "\nRisk Model Fitter\n" + "-"*20 + "\n\n"
        if self.linear_fits:
            for _, fit in self.linear_fits.items():
                repr_str += repr(fit)
        else:
            repr_str += "No linear fits to show\n" + "-"*20 + "\n\n"
        return repr_str
    def fit(self, bounds, start):
        """Fit risk model to generated data.

        bounds/start are dictionaries holding 'beta' parameter bounds and
        starting values, passed through to RiskFitterLikelihood.fit.
        """
        input_events = self.parent_fitter.data['input_events']
        risk_input_events = []
        # Convert form of input_events to be risk based only (no spatial info):
        # the first 12 columns are spatially resolved states; summing every
        # 4th column (stride 4) collapses them to 4 risk-level state columns,
        # and columns 12:15 (time delta / event info) are carried over as-is.
        for run in input_events:
            risk_run = np.zeros((len(run), 7))
            for i in range(4):
                risk_run[:, i] = np.sum(run[:, i:12:4], axis=1)
            risk_run[:, 4:] = run[:, 12:15]
            risk_input_events.append(risk_run)
        self.data['input_events'] = risk_input_events
        self.linear_fits['likelihood'] = RiskFitterLikelihood(self)
        self.linear_fits['likelihood'].fit(bounds, start)
        print("{0} fit complete.".format(self.linear_fits['likelihood'].name))
    def assess(self, fitter, save_folder=None, control=None, likelihood_map=False,
               initial_nodes=None, max_control_rate=100):
        """Assess fit quality.

        Runs the approximate ODE model (with the given control policy, or no
        control), plots its DPC trajectories against stored simulation data,
        and computes SSE/AIC selection metrics for `fitter`.
        """
        # Make sure initial conditions / assessment data exist first
        if "init_conds" not in self.data:
            self.optimise(fitter, initial_nodes)
        print("Assessing {0} risk model...".format(fitter.name))
        # Generate model run data
        if control is None:
            controller = risk_model.no_control_policy
        else:
            controller = control
        state_init = self.data['init_conds']
        model_params = {
            'birth_rate': self.parent_fitter.sim_params['birth_rate'],
            'death_rate': self.parent_fitter.sim_params['death_rate'],
            'removal_rate': self.parent_fitter.sim_params['removal_rate'],
            'recov_rate': self.parent_fitter.sim_params['recov_rate'],
            'state_init': state_init,
            'times': self.parent_fitter.data['dpc_times'],
            'max_control_rate': max_control_rate,
            'high_alloc_cost': 0,
            'low_alloc_cost': 0
        }
        model = risk_model.RiskModel(model_params, fitter)
        model_run = model.run_policy(controller)
        # Sample the model state at each DPC observation time
        run_data = np.array([model_run.state(t) for t in model_params['times']])
        self._assess_dpc(run_data, control=control, save_folder=save_folder)
        self._calc_metrics(fitter, run_data, control=control)
        if likelihood_map:
            self._likelihood_map(fitter, save_folder=save_folder)
    def optimise(self, fitter, initial_nodes=None, max_control_rate=100, run_sims=True):
        """Optimise control on given fit and run associated simulations."""
        print("Optimising risk based control...")
        if initial_nodes is None:
            nodes_init = fitting.randomise_infection(self.parent_fitter.nodes, nrand=5)
        else:
            nodes_init = copy.deepcopy(initial_nodes)
        # Aggregate node states into a length-6 risk-model state vector,
        # ordered by the sorted union of high/low live states.
        state_init = np.zeros(6)
        for node in nodes_init:
            states = sorted(simulation.HIGH_LIVE_STATES | simulation.LOW_LIVE_STATES)
            for i, state_val in enumerate(states):
                state_init[i] += node.state[state_val]
        model_params = {
            'birth_rate': self.parent_fitter.sim_params['birth_rate'],
            'death_rate': self.parent_fitter.sim_params['death_rate'],
            'removal_rate': self.parent_fitter.sim_params['removal_rate'],
            'recov_rate': self.parent_fitter.sim_params['recov_rate'],
            'state_init': state_init,
            'times': self.parent_fitter.data['dpc_times'],
            'max_control_rate': max_control_rate,
            'high_alloc_cost': 0,
            'low_alloc_cost': 0
        }
        model = risk_model.RiskModel(model_params, fitter)
        # Solve the optimal control problem via BOCOP, seeded with an even policy
        bocop_run = model.run_bocop(verbose=False, init_policy=risk_model.even_control_policy)
        opt_control = bocop_run.control
        self.data["opt_control"] = opt_control
        self.data["init_conds"] = state_init
        self.data["dpc_times"] = model_params['times']
        if run_sims:
            self._get_sim_dpcs(model, opt_control, initial_nodes)
    def _get_sim_dpcs(self, model, opt_control, initial_nodes=None):
        """Run simulations with and without control for assessment DPCs.

        Populates self.data with uncontrolled DPCs/events, and with
        "controlled_"-prefixed entries for the controlled runs.
        """
        sim_params = copy.deepcopy(self.parent_fitter.sim_params)
        # Run no control sims: 10 initialisations x 10 runs each
        print("Generating risk testing set...")
        stored_data = {}
        input_events = []
        for _ in range(10):
            if initial_nodes is None:
                nodes_init = fitting.randomise_infection(
                    self.parent_fitter.nodes, nhigh=self.data['init_conds'][1],
                    nlow=self.data['init_conds'][4])
            else:
                nodes_init = copy.deepcopy(initial_nodes)
            simulator = simulation.Simulator(nodes_init, self.parent_fitter.risk_coupling,
                                             self.parent_fitter.dist_coupling, sim_params,
                                             controller=None)
            all_runs = simulator.run_simulation(nruns=10, verbose=False)
            self.parent_fitter._store_dpc_data(all_runs, data_store=stored_data, include_vac=False)
            input_events += self.parent_fitter._extract_event_data(all_runs)
        # Convert to risk based format (stride-4 sum over the 12 spatial
        # state columns, as in fit())
        risk_events = []
        for run in input_events:
            risk_run = np.zeros((len(run), 7))
            for i in range(4):
                risk_run[:, i] = np.sum(run[:, i:12:4], axis=1)
            risk_run[:, 4:] = run[:, 12:15]
            risk_events.append(risk_run)
        for key, val in stored_data.items():
            self.data[key] = val
        self.data["assessment_events"] = risk_events
        # Switch the simulator to apply the optimised risk policy
        sim_params['update_control_on_all_events'] = True
        sim_params['vac_rate'] = 1.0
        sim_params["controller_args"] = {
            "oc_model": model,
            "policy": opt_control
        }
        # Run controlled simulations
        print("Generating controlled risk testing set...")
        stored_data = {}
        input_events = []
        for i in range(10):
            if initial_nodes is None:
                nodes_init = fitting.randomise_infection(
                    self.parent_fitter.nodes, nhigh=self.data['init_conds'][1],
                    nlow=self.data['init_conds'][4])
            else:
                nodes_init = copy.deepcopy(initial_nodes)
            simulator = simulation.Simulator(nodes_init, self.parent_fitter.risk_coupling,
                                             self.parent_fitter.dist_coupling, sim_params,
                                             controller=control_tools.RiskPolicyController)
            all_runs = simulator.run_simulation(nruns=10, verbose=False)
            self.parent_fitter._store_dpc_data(all_runs, data_store=stored_data, include_vac=True)
            input_events += self.parent_fitter._extract_event_data(all_runs)
        # Convert to risk based format
        risk_events = []
        for run in input_events:
            risk_run = np.zeros((len(run), 7))
            for i in range(4):
                risk_run[:, i] = np.sum(run[:, i:12:4], axis=1)
            risk_run[:, 4:] = run[:, 12:15]
            risk_events.append(risk_run)
        for key, val in stored_data.items():
            self.data["controlled_" + key] = val
        self.data["controlled_assessment_events"] = risk_events
    def _assess_dpc(self, run_data, control=None, save_folder=None, save_name="Linear_Likelihood"):
        """Plot DPC data fit against simulation data.

        run_data columns 3*risk, 1+3*risk, 2+3*risk are plotted against the
        susceptible, infected and vaccinated simulation DPCs respectively
        (green / red / purple).  Saves two figures: full DPCs and medians.
        """
        data = self.data
        fig, axes = plt.subplots(1, 2)
        for risk in range(2):
            if control is None:
                axes[risk].plot(data['dpc_times'], np.sum(data['dpc_sus'][:, risk, :, :], axis=0),
                                'g--', alpha=0.1)
                axes[risk].plot(data['dpc_times'], np.sum(data['dpc_inf'][:, risk, :, :], axis=0),
                                'r--', alpha=0.1)
            else:
                axes[risk].plot(data['dpc_times'],
                                np.sum(data['controlled_dpc_sus'][:, risk, :, :], axis=0),
                                'g--', alpha=0.1)
                axes[risk].plot(data['dpc_times'],
                                np.sum(data['controlled_dpc_inf'][:, risk, :, :], axis=0),
                                'r--', alpha=0.1)
                axes[risk].plot(data['dpc_times'],
                                np.sum(data['controlled_dpc_vac'][:, risk, :, :], axis=0),
                                '--', color="purple", alpha=0.1)
            # Overlay the approximate model trajectories (solid lines)
            axes[risk].plot(data['dpc_times'], run_data[:, 3*risk], 'g-', lw=2)
            axes[risk].plot(data['dpc_times'], run_data[:, 1+3*risk], 'r-', lw=2)
            if control is not None:
                axes[risk].plot(data['dpc_times'], run_data[:, 2+3*risk], '-', color="purple", lw=2)
        if control is None:
            fig_name = save_name + "_DPCQuality.pdf"
        else:
            fig_name = save_name + "_Controlled_DPCQuality.pdf"
        fig.tight_layout()
        if save_folder is None:
            fig.savefig(fig_name)
        else:
            fig.savefig(os.path.join(save_folder, fig_name))
        plt.close(fig)
        # Second figure: infected DPC percentile bands vs model trajectory
        fig, axes = plt.subplots(1, 2)
        for risk in range(2):
            if control is None:
                visualisation.err_bands(np.sum(data['dpc_inf'][:, risk, :, :], axis=0),
                                        axes[risk], data['dpc_times'], col="red",
                                        alpha_range=[0.05, 0.35],
                                        lower_percentiles=np.linspace(0, 50, 21, endpoint=False))
            else:
                visualisation.err_bands(
                    np.sum(data['controlled_dpc_inf'][:, risk, :, :], axis=0), axes[risk],
                    data['dpc_times'], col="red", alpha_range=[0.05, 0.35],
                    lower_percentiles=np.linspace(0, 50, 21, endpoint=False))
            axes[risk].plot(data['dpc_times'], run_data[:, 1+3*risk], 'r-', lw=2)
        if control is None:
            fig_name = save_name + "_median.pdf"
        else:
            fig_name = save_name + "_Controlled_median.pdf"
        fig.tight_layout()
        if save_folder is None:
            fig.savefig(fig_name)
        else:
            fig.savefig(os.path.join(save_folder, fig_name))
        plt.close(fig)
    def _calc_metrics(self, fitter, run_data, control=None):
        """Calculate model selection metrics (SSE and AIC) into fitter.assessment."""
        # Calculate SSE between model infected trajectories and summed sim DPCs
        sse = 0.0
        if control is None:
            for risk in range(2):
                sse += np.sum(np.square(run_data[:, 1+3*risk:2+3*risk] -
                                        np.sum(self.data['dpc_inf'][:, risk, :, :], axis=0)))
            fitter.assessment['sse'] = sse
        else:
            for risk in range(2):
                sse += np.sum(np.square(run_data[:, 1+3*risk:2+3*risk] - np.sum(
                    self.data['controlled_dpc_inf'][:, risk, :, :], axis=0)))
            fitter.assessment['controlled_sse'] = sse
        # Calculate AIC: 2k - 2*logL with k = 4 beta parameters
        if control is None:
            aic = 2*4
            aic -= 2*self.linear_fits['likelihood']._calc_likelihood(
                params=fitter.data, events=self.data['assessment_events'])
            fitter.assessment['aic'] = aic
        else:
            aic = 2*4
            aic -= 2*self.linear_fits['likelihood']._calc_likelihood(
                params=fitter.data, events=self.data['controlled_assessment_events'])
            fitter.assessment['controlled_aic'] = aic
    def _likelihood_map(self, fitter, save_folder=None):
        """Generate heat map of likelihood surface.

        For each risk group, scans the (within-group, cross-group) beta pair
        over 0.1x-10x the fitted values while holding the other group's betas
        fixed, and plots the log-likelihood with the fitted point marked.
        """
        fig, axes = plt.subplots(1, 2)
        for risk_1 in Risk:
            risk_2 = int(not bool(risk_1))
            beta = fitter.data['beta']
            x_vals = np.linspace(0.1*beta[risk_1, risk_1], 10*beta[risk_1, risk_1], 51)
            y_vals = np.linspace(0.1*beta[risk_1, risk_2], 10*beta[risk_1, risk_2], 51)
            xx, yy = np.meshgrid(x_vals, y_vals)
            zz = np.zeros_like(xx)
            test_beta = np.zeros((2, 2))
            test_beta[risk_2] = beta[risk_2]
            test_params = {'beta': test_beta}
            for i, j in itertools.product(range(xx.shape[0]), range(xx.shape[1])):
                test_beta[risk_1, risk_1] = xx[i, j]
                test_beta[risk_1, risk_2] = yy[i, j]
                zz[i, j] = self.linear_fits['likelihood']._calc_likelihood(
                    params=test_params, events=self.data['assessment_events'])
            cmap = plt.get_cmap("inferno")
            vmax = np.max(zz)
            # Clip the colour scale to the top 2% so the maximum stands out
            im = axes[risk_1].pcolormesh(xx, yy, zz, cmap=cmap, vmin=0.98*vmax, vmax=vmax)
            divider = make_axes_locatable(axes[risk_1])
            cax = divider.append_axes("right", size="5%", pad=0.05)
            fig.colorbar(im, ax=axes[risk_1], cax=cax)
            axes[risk_1].plot(beta[risk_1, risk_1], beta[risk_1, risk_2], "rx", ms=3)
            # str(Risk.X)[5] extracts the first letter of the member name
            # ("Risk." is 5 characters)
            axes[risk_1].set_xlabel(r"$\beta_{{{0}}}$".format(str(risk_1)[5] + str(risk_1)[5]))
            axes[risk_1].set_ylabel(r"$\beta_{{{0}}}$".format(str(risk_1)[5] +
                                                              str(simulation.Risk(risk_2))[5]))
        fig.tight_layout()
        fig_name = fitter.name + "_LikelihoodSurface.pdf"
        if save_folder is None:
            fig.savefig(fig_name)
        else:
            fig.savefig(os.path.join(save_folder, fig_name))
        plt.close(fig)
class RiskFitterLikelihood:
    """Class for fitting risk structured OCT model to simulations using maximum likelihood."""
    def __init__(self, parent_fitter):
        # parent_fitter: the RiskFitter that owns the aggregated event data
        self.parent_fitter = parent_fitter
        self.name = "Linear_Likelihood"
        # data: holds the fitted 2x2 'beta' matrix after fit()
        self.data = {}
        # assessment: SSE/AIC metrics filled in by RiskFitter._calc_metrics
        self.assessment = {}
    def __repr__(self):
        repr_str = " ".join(self.name.split("_")) + "Fit\n"
        if self.data:
            repr_str += "Beta: {0}\n".format(self.data['beta'])
        if self.assessment:
            for key, val in sorted(self.assessment.items()):
                repr_str += "{0}: {1}\n".format(key, str(val))
        repr_str += "\n" + "-"*20 + "\n\n"
        return repr_str
    def fit(self, bounds=None, start=None):
        """Fit incidence function parameters.

        bounds['beta'] and start['beta'] are indexed per risk group; fitted
        values are accumulated into self.data['beta'] (2x2).
        """
        # Fit parameters associated with each risk group separately since log likelihood separates
        for risk in Risk:
            start_vals = start['beta'][risk]
            bound_vals = bounds['beta'][risk]
            param_dict_tmp = {
                'beta': np.zeros((2, 2))
            }
            # Logit-transform so the optimiser works in unbounded space
            start_transformed = fitting.logit_transform(start_vals, bound_vals)
            def neg_loglik(params, risk, bounds, param_dict):
                """Calculate negative log likelihood from transformed optimisation parameters."""
                # First reverse logit transform parameters
                _params_rev_trans = fitting.reverse_logit_transform(params, bounds)
                # NOTE: param_dict is mutated in place on every optimiser call
                param_dict['beta'][risk] = _params_rev_trans
                val = self._calc_likelihood(param_dict, [risk])
                # nan_to_num guards against -inf/nan likelihoods breaking L-BFGS-B
                return np.nan_to_num(-val)
            # Minimise negative log likelihood; risk/bounds are passed via args
            # so the closure does not late-bind the loop variables
            param_fit_transformed = minimize(
                neg_loglik, start_transformed, method="L-BFGS-B", options={'ftol': 1e-12},
                args=(risk, bound_vals, param_dict_tmp))
            param_fit = fitting.reverse_logit_transform(param_fit_transformed.x, bound_vals)
            if 'beta' not in self.data:
                self.data['beta'] = np.zeros((2, 2))
            self.data['beta'][risk] = param_fit
    def predict_rates(self, states, params=None):
        """Calculate predicted infection rates.

        Arguments:
            states: Array of SH, IH, SL, IL (with multiple rows if calculating multiple rates
            params: Dictionary to find beta values. If None uses self.data

        Returns one rate column per (target risk, source risk) pair:
        rate[i] = beta[i//2, i%2] * S_{i//2} * I_{i%2}.  Masked input gives a
        masked result; otherwise masked entries are filled with 0.
        """
        masked = np.ma.is_masked(states)
        # Clip negatives (and absurd values) before forming products
        states = np.clip(states, 0, 2e10).reshape((-1, 4))
        if params is None:
            params = self.data
        beta = params['beta']
        # Columns 0/2 of states are susceptibles, 1/3 are infecteds
        rates = np.ma.array([
            beta[i//2, i%2] *
            np.prod(states[:, [2*(i//2), 2*(i%2)+1]], axis=1) for i in range(4)]).T
        rates.set_fill_value(0)
        if not masked:
            rates = np.ma.filled(rates)
        return rates
    def _calc_likelihood(self, params=None, risks=None, events=None):
        """Calculate log likelihood value for given parameters and given list of risks.

        Each event row is [SH, IH, SL, IL, delta_t, inf_multi, inf_risk].
        """
        if "input_events" not in self.parent_fitter.parent_fitter.data:
            raise ValueError("Run data intialisation first!")
        if risks is None:
            risks = [Risk.HIGH, Risk.LOW]
        if events is None:
            events = self.parent_fitter.data['input_events']
        log_lik = 0
        for run in events:
            states = run[:, 0:4]
            delta_t = run[:, 4]
            inf_multi = run[:, 5]
            inf_risk = run[:, 6]
            all_rates = self.predict_rates(states, params)
            # Poisson-process log likelihood: -sum(lambda_i * delta_t) survival
            # term plus log(lambda_k) for each realised infection event
            for risk in risks:
                rates = np.sum(all_rates[:, 2*risk:(2*risk+2)], axis=1)
                log_lik -= np.sum(rates * delta_t)
                idcs = np.where((inf_multi > 0) & (inf_risk == risk))[0]
                log_lik += np.sum(np.log(rates[idcs]))
        return log_lik
class RiskFitterSSE:
    """Class for fitting risk structured OCT model to sims by minimising sum squared errors."""
    def __init__(self, parent_fitter):
        # parent_fitter: the RiskFitter that owns the simulation DPC data
        self.parent_fitter = parent_fitter
        self.name = "Linear_SSE"
        # data: holds the fitted 2x2 'beta' matrix after fit()
        self.data = {}
        # assessment: SSE/AIC metrics filled in by RiskFitter._calc_metrics
        self.assessment = {}
    def __repr__(self):
        repr_str = " ".join(self.name.split("_")) + "Fit\n"
        if self.data:
            repr_str += "Beta: {0}\n".format(self.data['beta'])
        if self.assessment:
            for key, val in sorted(self.assessment.items()):
                repr_str += "{0}: {1}\n".format(key, str(val))
        repr_str += "\n" + "-"*20 + "\n\n"
        return repr_str
    def sse(self, params, bounds, model):
        """Calculate sum squared errors from transformed optimisation parameters.

        NOTE: mutates self.data['beta'] on every call so that `model` (which
        reads beta through this object) runs with the candidate parameters.
        """
        # First reverse logit transform parameters
        _params_rev_trans = fitting.reverse_logit_transform(params, bounds)
        self.data['beta'] = np.array(_params_rev_trans).reshape((2, 2))
        # Run ODE model with parameters
        ode_run = model.run_policy(risk_model.no_control_policy)
        dpc_data = self.parent_fitter.parent_fitter.data['dpc_inf']
        dpc_times = self.parent_fitter.parent_fitter.data['dpc_times']
        # Calculate SSE against each stored DPC realisation; model state
        # indices 1 and 4 are compared with the high/low risk infected DPCs
        sse = 0
        for i in range(dpc_data.shape[3]):
            sse += np.sum(np.square(np.sum(
                dpc_data[:, 0, :, i], axis=0) - np.array([ode_run.state(t)[1] for t in dpc_times])))
            sse += np.sum(np.square(np.sum(
                dpc_data[:, 1, :, i], axis=0) - np.array([ode_run.state(t)[4] for t in dpc_times])))
        return sse
    def fit(self, bounds=None, start=None):
        """Fit incidence function parameters.

        All four beta entries are fitted jointly by minimising self.sse.
        """
        start_vals = start['beta'].flatten()
        bound_vals = bounds['beta'].flatten().reshape((4, 2))
        # Logit-transform so the optimiser works in unbounded space
        start_transformed = fitting.logit_transform(start_vals, bound_vals)
        init_state = np.sum(self.parent_fitter.parent_fitter.data['init_state'][0].reshape((3, 6)),
                            axis=0)
        model_params = {
            'birth_rate': self.parent_fitter.parent_fitter.sim_params['birth_rate'],
            'death_rate': self.parent_fitter.parent_fitter.sim_params['death_rate'],
            'removal_rate': self.parent_fitter.parent_fitter.sim_params['removal_rate'],
            'recov_rate': self.parent_fitter.parent_fitter.sim_params['recov_rate'],
            'state_init': init_state,
            'times': self.parent_fitter.parent_fitter.data['dpc_times'],
            'max_control_rate': 0,
            'high_alloc_cost': 0,
            'low_alloc_cost': 0
        }
        model = risk_model.RiskModel(model_params, self)
        # Minimise SSE
        param_fit_transformed = minimize(
            self.sse, start_transformed, method="L-BFGS-B", options={'ftol': 1e-12},
            args=(bound_vals, model))
        param_fit = fitting.reverse_logit_transform(param_fit_transformed.x, bound_vals)
        self.data['beta'] = np.array(param_fit).reshape((2, 2))
    def predict_rates(self, states, params=None):
        """Calculate predicted infection rates.

        Same contract as RiskFitterLikelihood.predict_rates:
        rate[i] = beta[i//2, i%2] * S_{i//2} * I_{i%2} per row of states.
        """
        masked = np.ma.is_masked(states)
        # Clip negatives (and absurd values) before forming products
        states = np.clip(states, 0, 2e10).reshape((-1, 4))
        if params is None:
            params = self.data
        beta = params['beta']
        rates = np.ma.array([
            beta[i//2, i%2] *
            np.prod(states[:, [2*(i//2), 2*(i%2)+1]], axis=1) for i in range(4)]).T
        rates.set_fill_value(0)
        if not masked:
            rates = np.ma.filled(rates)
        return rates
| [
"simulation.Simulator",
"numpy.sum",
"numpy.nan_to_num",
"numpy.clip",
"os.path.join",
"numpy.prod",
"scipy.optimize.minimize",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"numpy.max",
"fitting.randomise_infection",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"si... | [((2893, 2935), 'risk_model.RiskModel', 'risk_model.RiskModel', (['model_params', 'fitter'], {}), '(model_params, fitter)\n', (2913, 2935), False, 'import risk_model\n'), ((3731, 3742), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (3739, 3742), True, 'import numpy as np\n'), ((4521, 4563), 'risk_model.RiskModel', 'risk_model.RiskModel', (['model_params', 'fitter'], {}), '(model_params, fitter)\n', (4541, 4563), False, 'import risk_model\n'), ((5102, 5146), 'copy.deepcopy', 'copy.deepcopy', (['self.parent_fitter.sim_params'], {}), '(self.parent_fitter.sim_params)\n', (5115, 5146), False, 'import copy\n'), ((8469, 8487), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (8481, 8487), True, 'import matplotlib.pyplot as plt\n'), ((10096, 10110), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10105, 10110), True, 'import matplotlib.pyplot as plt\n'), ((10132, 10150), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (10144, 10150), True, 'import matplotlib.pyplot as plt\n'), ((11265, 11279), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11274, 11279), True, 'import matplotlib.pyplot as plt\n'), ((12610, 12628), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (12622, 12628), True, 'import matplotlib.pyplot as plt\n'), ((14420, 14434), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14429, 14434), True, 'import matplotlib.pyplot as plt\n'), ((16893, 16916), 'numpy.ma.is_masked', 'np.ma.is_masked', (['states'], {}), '(states)\n', (16908, 16916), True, 'import numpy as np\n'), ((19292, 19339), 'fitting.reverse_logit_transform', 'fitting.reverse_logit_transform', (['params', 'bounds'], {}), '(params, bounds)\n', (19323, 19339), False, 'import fitting\n'), ((20282, 20329), 'fitting.logit_transform', 'fitting.logit_transform', (['start_vals', 'bound_vals'], {}), '(start_vals, bound_vals)\n', (20305, 20329), 
False, 'import fitting\n'), ((21075, 21115), 'risk_model.RiskModel', 'risk_model.RiskModel', (['model_params', 'self'], {}), '(model_params, self)\n', (21095, 21115), False, 'import risk_model\n'), ((21172, 21284), 'scipy.optimize.minimize', 'minimize', (['self.sse', 'start_transformed'], {'method': '"""L-BFGS-B"""', 'options': "{'ftol': 1e-12}", 'args': '(bound_vals, model)'}), "(self.sse, start_transformed, method='L-BFGS-B', options={'ftol': \n 1e-12}, args=(bound_vals, model))\n", (21180, 21284), False, 'from scipy.optimize import minimize\n'), ((21326, 21394), 'fitting.reverse_logit_transform', 'fitting.reverse_logit_transform', (['param_fit_transformed.x', 'bound_vals'], {}), '(param_fit_transformed.x, bound_vals)\n', (21357, 21394), False, 'import fitting\n'), ((21580, 21603), 'numpy.ma.is_masked', 'np.ma.is_masked', (['states'], {}), '(states)\n', (21595, 21603), True, 'import numpy as np\n'), ((3578, 3640), 'fitting.randomise_infection', 'fitting.randomise_infection', (['self.parent_fitter.nodes'], {'nrand': '(5)'}), '(self.parent_fitter.nodes, nrand=5)\n', (3605, 3640), False, 'import fitting\n'), ((3680, 3708), 'copy.deepcopy', 'copy.deepcopy', (['initial_nodes'], {}), '(initial_nodes)\n', (3693, 3708), False, 'import copy\n'), ((5635, 5769), 'simulation.Simulator', 'simulation.Simulator', (['nodes_init', 'self.parent_fitter.risk_coupling', 'self.parent_fitter.dist_coupling', 'sim_params'], {'controller': 'None'}), '(nodes_init, self.parent_fitter.risk_coupling, self.\n parent_fitter.dist_coupling, sim_params, controller=None)\n', (5655, 5769), False, 'import simulation\n'), ((7281, 7450), 'simulation.Simulator', 'simulation.Simulator', (['nodes_init', 'self.parent_fitter.risk_coupling', 'self.parent_fitter.dist_coupling', 'sim_params'], {'controller': 'control_tools.RiskPolicyController'}), '(nodes_init, self.parent_fitter.risk_coupling, self.\n parent_fitter.dist_coupling, sim_params, controller=control_tools.\n RiskPolicyController)\n', (7301, 7450), 
False, 'import simulation\n'), ((12762, 12832), 'numpy.linspace', 'np.linspace', (['(0.1 * beta[risk_1, risk_1])', '(10 * beta[risk_1, risk_1])', '(51)'], {}), '(0.1 * beta[risk_1, risk_1], 10 * beta[risk_1, risk_1], 51)\n', (12773, 12832), True, 'import numpy as np\n'), ((12850, 12920), 'numpy.linspace', 'np.linspace', (['(0.1 * beta[risk_1, risk_2])', '(10 * beta[risk_1, risk_2])', '(51)'], {}), '(0.1 * beta[risk_1, risk_2], 10 * beta[risk_1, risk_2], 51)\n', (12861, 12920), True, 'import numpy as np\n'), ((12939, 12966), 'numpy.meshgrid', 'np.meshgrid', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (12950, 12966), True, 'import numpy as np\n'), ((12984, 13001), 'numpy.zeros_like', 'np.zeros_like', (['xx'], {}), '(xx)\n', (12997, 13001), True, 'import numpy as np\n'), ((13027, 13043), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (13035, 13043), True, 'import numpy as np\n'), ((13500, 13523), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""inferno"""'], {}), "('inferno')\n", (13512, 13523), True, 'import matplotlib.pyplot as plt\n'), ((13543, 13553), 'numpy.max', 'np.max', (['zz'], {}), '(zz)\n', (13549, 13553), True, 'import numpy as np\n'), ((13668, 13701), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['axes[risk_1]'], {}), '(axes[risk_1])\n', (13687, 13701), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((15559, 15606), 'fitting.logit_transform', 'fitting.logit_transform', (['start_vals', 'bound_vals'], {}), '(start_vals, bound_vals)\n', (15582, 15606), False, 'import fitting\n'), ((16164, 16292), 'scipy.optimize.minimize', 'minimize', (['neg_loglik', 'start_transformed'], {'method': '"""L-BFGS-B"""', 'options': "{'ftol': 1e-12}", 'args': '(risk, bound_vals, param_dict_tmp)'}), "(neg_loglik, start_transformed, method='L-BFGS-B', options={'ftol':\n 1e-12}, args=(risk, bound_vals, param_dict_tmp))\n", (16172, 16292), False, 'from scipy.optimize import minimize\n'), ((16347, 16415), 
'fitting.reverse_logit_transform', 'fitting.reverse_logit_transform', (['param_fit_transformed.x', 'bound_vals'], {}), '(param_fit_transformed.x, bound_vals)\n', (16378, 16415), False, 'import fitting\n'), ((17286, 17305), 'numpy.ma.filled', 'np.ma.filled', (['rates'], {}), '(rates)\n', (17298, 17305), True, 'import numpy as np\n'), ((21973, 21992), 'numpy.ma.filled', 'np.ma.filled', (['rates'], {}), '(rates)\n', (21985, 21992), True, 'import numpy as np\n'), ((1412, 1442), 'numpy.sum', 'np.sum', (['run[:, i:12:4]'], {'axis': '(1)'}), '(run[:, i:12:4], axis=1)\n', (1418, 1442), True, 'import numpy as np\n'), ((5372, 5497), 'fitting.randomise_infection', 'fitting.randomise_infection', (['self.parent_fitter.nodes'], {'nhigh': "self.data['init_conds'][1]", 'nlow': "self.data['init_conds'][4]"}), "(self.parent_fitter.nodes, nhigh=self.data[\n 'init_conds'][1], nlow=self.data['init_conds'][4])\n", (5399, 5497), False, 'import fitting\n'), ((5581, 5609), 'copy.deepcopy', 'copy.deepcopy', (['initial_nodes'], {}), '(initial_nodes)\n', (5594, 5609), False, 'import copy\n'), ((6314, 6344), 'numpy.sum', 'np.sum', (['run[:, i:12:4]'], {'axis': '(1)'}), '(run[:, i:12:4], axis=1)\n', (6320, 6344), True, 'import numpy as np\n'), ((7018, 7143), 'fitting.randomise_infection', 'fitting.randomise_infection', (['self.parent_fitter.nodes'], {'nhigh': "self.data['init_conds'][1]", 'nlow': "self.data['init_conds'][4]"}), "(self.parent_fitter.nodes, nhigh=self.data[\n 'init_conds'][1], nlow=self.data['init_conds'][4])\n", (7045, 7143), False, 'import fitting\n'), ((7227, 7255), 'copy.deepcopy', 'copy.deepcopy', (['initial_nodes'], {}), '(initial_nodes)\n', (7240, 7255), False, 'import copy\n'), ((7989, 8019), 'numpy.sum', 'np.sum', (['run[:, i:12:4]'], {'axis': '(1)'}), '(run[:, i:12:4], axis=1)\n', (7995, 8019), True, 'import numpy as np\n'), ((10051, 10086), 'os.path.join', 'os.path.join', (['save_folder', 'fig_name'], {}), '(save_folder, fig_name)\n', (10063, 10086), False, 'import 
os\n'), ((11220, 11255), 'os.path.join', 'os.path.join', (['save_folder', 'fig_name'], {}), '(save_folder, fig_name)\n', (11232, 11255), False, 'import os\n'), ((14375, 14410), 'os.path.join', 'os.path.join', (['save_folder', 'fig_name'], {}), '(save_folder, fig_name)\n', (14387, 14410), False, 'import os\n'), ((15495, 15511), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (15503, 15511), True, 'import numpy as np\n'), ((15863, 15910), 'fitting.reverse_logit_transform', 'fitting.reverse_logit_transform', (['params', 'bounds'], {}), '(params, bounds)\n', (15894, 15910), False, 'import fitting\n'), ((16060, 16079), 'numpy.nan_to_num', 'np.nan_to_num', (['(-val)'], {}), '(-val)\n', (16073, 16079), True, 'import numpy as np\n'), ((16493, 16509), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (16501, 16509), True, 'import numpy as np\n'), ((16934, 16967), 'numpy.clip', 'np.clip', (['states', '(0)', '(20000000000.0)'], {}), '(states, 0, 20000000000.0)\n', (16941, 16967), True, 'import numpy as np\n'), ((18154, 18205), 'numpy.sum', 'np.sum', (['all_rates[:, 2 * risk:2 * risk + 2]'], {'axis': '(1)'}), '(all_rates[:, 2 * risk:2 * risk + 2], axis=1)\n', (18160, 18205), True, 'import numpy as np\n'), ((18229, 18252), 'numpy.sum', 'np.sum', (['(rates * delta_t)'], {}), '(rates * delta_t)\n', (18235, 18252), True, 'import numpy as np\n'), ((19368, 19395), 'numpy.array', 'np.array', (['_params_rev_trans'], {}), '(_params_rev_trans)\n', (19376, 19395), True, 'import numpy as np\n'), ((21424, 21443), 'numpy.array', 'np.array', (['param_fit'], {}), '(param_fit)\n', (21432, 21443), True, 'import numpy as np\n'), ((21621, 21654), 'numpy.clip', 'np.clip', (['states', '(0)', '(20000000000.0)'], {}), '(states, 0, 20000000000.0)\n', (21628, 21654), True, 'import numpy as np\n'), ((8601, 8647), 'numpy.sum', 'np.sum', (["data['dpc_sus'][:, risk, :, :]"], {'axis': '(0)'}), "(data['dpc_sus'][:, risk, :, :], axis=0)\n", (8607, 8647), True, 'import numpy as np\n'), 
((8750, 8796), 'numpy.sum', 'np.sum', (["data['dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(data['dpc_inf'][:, risk, :, :], axis=0)\n", (8756, 8796), True, 'import numpy as np\n'), ((8950, 9007), 'numpy.sum', 'np.sum', (["data['controlled_dpc_sus'][:, risk, :, :]"], {'axis': '(0)'}), "(data['controlled_dpc_sus'][:, risk, :, :], axis=0)\n", (8956, 9007), True, 'import numpy as np\n'), ((9142, 9199), 'numpy.sum', 'np.sum', (["data['controlled_dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(data['controlled_dpc_inf'][:, risk, :, :], axis=0)\n", (9148, 9199), True, 'import numpy as np\n'), ((9334, 9391), 'numpy.sum', 'np.sum', (["data['controlled_dpc_vac'][:, risk, :, :]"], {'axis': '(0)'}), "(data['controlled_dpc_vac'][:, risk, :, :], axis=0)\n", (9340, 9391), True, 'import numpy as np\n'), ((10253, 10299), 'numpy.sum', 'np.sum', (["data['dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(data['dpc_inf'][:, risk, :, :], axis=0)\n", (10259, 10299), True, 'import numpy as np\n'), ((10627, 10684), 'numpy.sum', 'np.sum', (["data['controlled_dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(data['controlled_dpc_inf'][:, risk, :, :], axis=0)\n", (10633, 10684), True, 'import numpy as np\n'), ((18276, 18322), 'numpy.where', 'np.where', (['((inf_multi > 0) & (inf_risk == risk))'], {}), '((inf_multi > 0) & (inf_risk == risk))\n', (18284, 18322), True, 'import numpy as np\n'), ((18360, 18379), 'numpy.log', 'np.log', (['rates[idcs]'], {}), '(rates[idcs])\n', (18366, 18379), True, 'import numpy as np\n'), ((10507, 10545), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(21)'], {'endpoint': '(False)'}), '(0, 50, 21, endpoint=False)\n', (10518, 10545), True, 'import numpy as np\n'), ((10812, 10850), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(21)'], {'endpoint': '(False)'}), '(0, 50, 21, endpoint=False)\n', (10823, 10850), True, 'import numpy as np\n'), ((17138, 17197), 'numpy.prod', 'np.prod', (['states[:, [2 * (i // 2), 2 * (i % 2) + 1]]'], {'axis': '(1)'}), '(states[:, 
[2 * (i // 2), 2 * (i % 2) + 1]], axis=1)\n', (17145, 17197), True, 'import numpy as np\n'), ((19778, 19814), 'numpy.sum', 'np.sum', (['dpc_data[:, 0, :, i]'], {'axis': '(0)'}), '(dpc_data[:, 0, :, i], axis=0)\n', (19784, 19814), True, 'import numpy as np\n'), ((19923, 19959), 'numpy.sum', 'np.sum', (['dpc_data[:, 1, :, i]'], {'axis': '(0)'}), '(dpc_data[:, 1, :, i], axis=0)\n', (19929, 19959), True, 'import numpy as np\n'), ((21825, 21884), 'numpy.prod', 'np.prod', (['states[:, [2 * (i // 2), 2 * (i % 2) + 1]]'], {'axis': '(1)'}), '(states[:, [2 * (i // 2), 2 * (i % 2) + 1]], axis=1)\n', (21832, 21884), True, 'import numpy as np\n'), ((11608, 11659), 'numpy.sum', 'np.sum', (["self.data['dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(self.data['dpc_inf'][:, risk, :, :], axis=0)\n", (11614, 11659), True, 'import numpy as np\n'), ((11826, 11888), 'numpy.sum', 'np.sum', (["self.data['controlled_dpc_inf'][:, risk, :, :]"], {'axis': '(0)'}), "(self.data['controlled_dpc_inf'][:, risk, :, :], axis=0)\n", (11832, 11888), True, 'import numpy as np\n'), ((14154, 14177), 'simulation.Risk', 'simulation.Risk', (['risk_2'], {}), '(risk_2)\n', (14169, 14177), False, 'import simulation\n')] |
# Remove nans from textfile output of dmstack and only extract few columns
# Author: <NAME>
#
# Filtering:
# 1. flag calib_psfCandidate==False
# 2. column deblend_nChild==0
# 3. ellipticity e = sqrt(e1^2 + e2^2) < 1.5
# 4. choose only few columns given below
# 5. remove nans from all these columns
# 6. change delimiter to tab.
#
# columns:
# id (90)
# base_SdssCentroid_x, base_SdssCentroid_y (102, 103)
# base_SdssCentroid_xSigma, base_SdssCentroid_ySigma (104,105)
# ext_shapeHSM_HsmShapeRegauss_e1, ext_shapeHSM_HsmShapeRegauss_e2 (127, 128)
# base_SdssShape_flux (114)
#
# In total there are 8 columns
# id
# x1,x2 xerr1 xerr2
# e1 e2
# flux
#
import pandas as pd
import numpy as np
import sys
import glob
def remove_nans(ifile):
    """Filter a dmstack output CSV file and write a cleaned tab-delimited .txt.

    Filtering steps:
      1. keep only rows with calib_psfCandidate == False (not a star candidate)
      2. keep only rows with deblend_nChild == 0 (no child source after deblending)
      3. keep only rows with ellipticity e = sqrt(e1^2 + e2^2) < 1.5
      4. keep only the columns listed in ``cols_select`` below:
         id,
         base_SdssCentroid_x, base_SdssCentroid_y,
         base_SdssCentroid_xSigma, base_SdssCentroid_ySigma,
         ext_shapeHSM_HsmShapeRegauss_e1, ext_shapeHSM_HsmShapeRegauss_e2,
         base_SdssShape_flux
      5. drop all rows containing NaNs
      6. write the result tab-delimited with a commented header.

    Parameters
    ----------
    ifile : str
        Path to the input CSV file.  The output is written to the same
        path with the '.csv' suffix replaced by '.txt'.
    """
    df = pd.read_csv(ifile, sep=",", low_memory=False)
    # Coerce every column to numeric; anything unparsable becomes NaN and
    # is removed by the final dropna().
    for c in df.columns:
        df[c] = pd.to_numeric(df[c], errors='coerce')
    # filter the flag calib_psfCandidate==False: not a star candidate
    df = df.query('calib_psfCandidate == 0.0')
    # filter the column deblend_nChild==0: no child source after deblending
    df = df.query('deblend_nChild == 0.0')
    df = df.copy()
    # clean out unphysical results: e1^2 + e2^2 < 1.5^2
    df['e'] = (df['ext_shapeHSM_HsmShapeRegauss_e1'] ** 2 + df['ext_shapeHSM_HsmShapeRegauss_e2'] ** 2) ** 0.5
    df = df.query('e < 1.5')
    # take only required columns
    cols_select = ['id',
                   'base_SdssCentroid_x', 'base_SdssCentroid_y',
                   'base_SdssCentroid_xSigma', 'base_SdssCentroid_ySigma',
                   'ext_shapeHSM_HsmShapeRegauss_e1', 'ext_shapeHSM_HsmShapeRegauss_e2',
                   'base_SdssShape_flux']
    df = df[cols_select]
    # drop all nans
    df = df.dropna()
    # write txt file with commented header
    prefix = ' ' * 11
    header_line = prefix.join(cols_select)
    np.savetxt(ifile[0:-4] + '.txt', df.values, header=header_line, delimiter='\t')
if __name__ == '__main__':
    # Clean every CSV file in the current working directory.
    csv_files = glob.glob("*.csv")
    for csv_path in csv_files:
        print("Reading: ", csv_path)
        remove_nans(csv_path)
| [
"pandas.read_csv",
"numpy.savetxt",
"pandas.to_numeric",
"glob.glob"
] | [((1505, 1550), 'pandas.read_csv', 'pd.read_csv', (['ifile'], {'sep': '""","""', 'low_memory': '(False)'}), "(ifile, sep=',', low_memory=False)\n", (1516, 1550), True, 'import pandas as pd\n'), ((2581, 2660), 'numpy.savetxt', 'np.savetxt', (["(ifile[0:-4] + '.txt')", 'df.values'], {'header': 'header_line', 'delimiter': '"""\t"""'}), "(ifile[0:-4] + '.txt', df.values, header=header_line, delimiter='\\t')\n", (2591, 2660), True, 'import numpy as np\n'), ((2701, 2719), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (2710, 2719), False, 'import glob\n'), ((1592, 1629), 'pandas.to_numeric', 'pd.to_numeric', (['df[c]'], {'errors': '"""coerce"""'}), "(df[c], errors='coerce')\n", (1605, 1629), True, 'import pandas as pd\n')] |
import re
import random
import torch
from omegaconf import OmegaConf
import torchaudio
import struct
from io import BytesIO, FileIO
import wave
import numpy as np
from nltk import sent_tokenize
import nltk
# Ensure the NLTK "punkt" sentence-tokenizer data is installed; download it
# on first use if the lookup fails.
try:
    nltk.data.find('tokenizers/punkt')
except LookupError:
    nltk.download('punkt')
def _make_wav(data, rate):
""" Transform a numpy array to a PCM bytestring """
try:
data = np.array(data, dtype=float)
if len(data.shape) == 1:
nchan = 1
elif len(data.shape) == 2:
# In wave files,channels are interleaved. E.g.,
# "L1R1L2R2..." for stereo. See
# http://msdn.microsoft.com/en-us/library/windows/hardware/dn653308(v=vs.85).aspx
# for channel ordering
nchan = data.shape[0]
data = data.T.ravel()
else:
raise ValueError('Array audio input must be a 1D or 2D array')
scaled = np.int16(data/np.max(np.abs(data))*32767).tolist()
except ImportError:
# check that it is a "1D" list
idata = iter(data) # fails if not an iterable
try:
iter(idata.next())
raise TypeError('Only lists of mono audio are '
'supported if numpy is not installed')
except TypeError:
# this means it's not a nested list, which is what we want
pass
maxabsvalue = float(max([abs(x) for x in data]))
scaled = [int(x/maxabsvalue*32767) for x in data]
nchan = 1
fp = BytesIO()
waveobj = wave.open(fp,mode='wb')
waveobj.setnchannels(nchan)
waveobj.setframerate(rate)
waveobj.setsampwidth(2)
waveobj.setcomptype('NONE','NONE')
waveobj.writeframes(b''.join([struct.pack('<h',x) for x in scaled]))
val = fp.getvalue()
waveobj.close()
# fp.seek(0)
return val
# Load the catalogue of the latest available Silero models from the local
# YAML config; `models.tts_models` maps language code -> speaker configs.
models = OmegaConf.load('latest_silero_models.yml')
available_languages = list(models.tts_models.keys())
# print(f'Available languages {available_languages}')
# for lang in available_languages:
#     speakers = list(models.tts_models.get(lang).keys())
#     print(f'Available speakers for {lang}: {speakers}')
def voice_act(text, filename='output'):
    """Synthesise Russian speech for ``text`` using a random Silero 16 kHz voice.

    The text is split into sentences; each sentence is synthesised
    separately and saved as ``'<filename>-<i>.wav'`` (16 kHz, 16-bit).

    Parameters
    ----------
    text : str
        Text to synthesise (Russian).
    filename : str, optional
        Basename prefix for the output wav files (default 'output').

    Returns
    -------
    int
        Number of wav files written (one per sentence).
    """
    language = 'ru'
    # Pick a random speaker whose model runs at 16 kHz.
    speaker = random.choice([s for s in list(models.tts_models.get(language).keys()) if re.compile('^.*_16khz$').match(s)])
    device = torch.device('cpu')
    (model,
     symbols,
     sample_rate,
     example_text,
     apply_tts) = torch.hub.load(repo_or_dir='snakers4/silero-models',
                                   model='silero_tts',
                                   language=language,
                                   speaker=speaker)
    model = model.to(device)  # gpu or cpu
    texts = sent_tokenize(text)
    print(texts)
    audio = apply_tts(texts=texts,
                      model=model,
                      sample_rate=sample_rate,
                      symbols=symbols,
                      device=device)
    for i, _audio in enumerate(audio):
        # BUG FIX: the save path previously hard-coded the file prefix and
        # ignored the `filename` argument entirely.
        torchaudio.save(f'{filename}-{i}.wav',
                        _audio.unsqueeze(0),
                        sample_rate=16000,
                        bits_per_sample=16)
    # BUG FIX: `return i + 1` raised NameError when `audio` was empty.
    return len(audio)
if __name__=='__main__':
    # Ad-hoc manual smoke test: synthesise a short sequence of Russian
    # nonsense syllables.  (Longer alternative test inputs — a Russian
    # tongue-twister and a prose paragraph — were previously kept here as
    # commented-out calls.)
    voice_act('ек. ке. проп. попуп. пипипуп. орполуп. бибуп. скрипипуп. папуп. папипуп. бубибуп. бабап. пибапабуп. пибибибуп.')
    # voice_act('ек ке проп попуп пипипуп орполуп бибуп скрипипуп папуп папипуп бубибуп бабап пибапабуп пибибибуп')
| [
"wave.open",
"io.BytesIO",
"numpy.abs",
"nltk.sent_tokenize",
"omegaconf.OmegaConf.load",
"nltk.data.find",
"struct.pack",
"numpy.array",
"torch.device",
"nltk.download",
"torch.hub.load",
"re.compile"
] | [((1884, 1926), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['"""latest_silero_models.yml"""'], {}), "('latest_silero_models.yml')\n", (1898, 1926), False, 'from omegaconf import OmegaConf\n'), ((218, 252), 'nltk.data.find', 'nltk.data.find', (['"""tokenizers/punkt"""'], {}), "('tokenizers/punkt')\n", (232, 252), False, 'import nltk\n'), ((1518, 1527), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1525, 1527), False, 'from io import BytesIO, FileIO\n'), ((1542, 1566), 'wave.open', 'wave.open', (['fp'], {'mode': '"""wb"""'}), "(fp, mode='wb')\n", (1551, 1566), False, 'import wave\n'), ((2457, 2476), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2469, 2476), False, 'import torch\n'), ((2555, 2667), 'torch.hub.load', 'torch.hub.load', ([], {'repo_or_dir': '"""snakers4/silero-models"""', 'model': '"""silero_tts"""', 'language': 'language', 'speaker': 'speaker'}), "(repo_or_dir='snakers4/silero-models', model='silero_tts',\n language=language, speaker=speaker)\n", (2569, 2667), False, 'import torch\n'), ((2897, 2916), 'nltk.sent_tokenize', 'sent_tokenize', (['text'], {}), '(text)\n', (2910, 2916), False, 'from nltk import sent_tokenize\n'), ((277, 299), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (290, 299), False, 'import nltk\n'), ((408, 435), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (416, 435), True, 'import numpy as np\n'), ((1730, 1750), 'struct.pack', 'struct.pack', (['"""<h"""', 'x'], {}), "('<h', x)\n", (1741, 1750), False, 'import struct\n'), ((2408, 2432), 're.compile', 're.compile', (['"""^.*_16khz$"""'], {}), "('^.*_16khz$')\n", (2418, 2432), False, 'import re\n'), ((954, 966), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (960, 966), True, 'import numpy as np\n')] |
import os
import numpy as np
import pandas as pd
from contextlib import closing
from itertools import combinations
from itertools import product
import tensorflow as tf
import matplotlib
from matplotlib import gridspec
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
from interpret.glassbox.ebm.utils import EBMUtils
from interpret.utils import autogen_schema
from interpret.glassbox.ebm.internal import NativeEBM
from interpret.glassbox.ebm.ebm import EBMPreprocessor
def get_interaction_list(tr_x, val_x, tr_y, val_y, pred_tr, pred_val, feature_list, feature_type_list,
                 active_main_effect_index, user_feature_list, item_feature_list, task_type="Regression", interaction_restrict=None):
    """Rank candidate pairwise interactions with the EBM FAST screening.

    Builds a NativeEBM on the current residual scores and computes a fast
    interaction score for every candidate feature pair that involves at
    least one active main effect.

    Parameters
    ----------
    tr_x, val_x : ndarray
        Training / validation feature matrices.
    tr_y, val_y : ndarray
        Training / validation targets.
    pred_tr, pred_val : ndarray
        Current model predictions (probabilities for classification).
    feature_list, feature_type_list : list
        Feature names and types used to autogenerate the schema.
    active_main_effect_index : sequence of int
        Indices of main effects currently active; only pairs touching one
        of these are scored.
    user_feature_list, item_feature_list : sequence of int
        Feature index groups used when `interaction_restrict` is set.
    task_type : str
        "Regression" or "Classification".
    interaction_restrict : str or None
        'inter' -> pairs within each group; 'intra' -> user x item cross
        pairs; None -> all pairs.

    Returns
    -------
    list of tuple
        Candidate pairs sorted by decreasing interaction score.
    """
    if task_type == "Regression":
        num_classes_ = -1
        model_type = "regression"
    elif task_type == "Classification":
        num_classes_ = 2
        model_type = "classification"
        # Clip probabilities away from 0/1 before the logit transform.
        pred_tr = np.minimum(np.maximum(pred_tr, 0.0000001), 0.9999999)
        pred_val = np.minimum(np.maximum(pred_val, 0.0000001), 0.9999999)
        pred_tr = np.log(pred_tr / (1 - pred_tr))
        pred_val = np.log(pred_val / (1 - pred_val))

    # Bin train+validation together so both share one preprocessing schema.
    train_num = tr_x.shape[0]
    x = np.vstack([tr_x, val_x])
    schema_ = autogen_schema(pd.DataFrame(x), feature_names=feature_list, feature_types=feature_type_list)
    preprocessor_ = EBMPreprocessor(schema=schema_)
    preprocessor_.fit(x)
    xt = preprocessor_.transform(x)
    tr_x, val_x = xt[:train_num, :], xt[train_num:, :]

    attributes_ = EBMUtils.gen_attributes(preprocessor_.col_types_, preprocessor_.col_n_bins_)
    main_attr_sets = EBMUtils.gen_attribute_sets([[item] for item in range(len(attributes_))])

    with closing(
        NativeEBM(
            attributes_,
            main_attr_sets,
            tr_x,
            tr_y,
            val_x,
            val_y,
            num_inner_bags=0,
            num_classification_states=num_classes_,
            model_type=model_type,
            training_scores=pred_tr,
            validation_scores=pred_val,
        )
    ) as native_ebm:
        # BUG FIX: `interaction_scores = []` was duplicated here.
        interaction_scores = []
        if interaction_restrict == 'inter':
            interaction_indices = [item for item in combinations(user_feature_list, 2)] + [item for item in combinations(item_feature_list, 2)]
        elif interaction_restrict == 'intra':
            interaction_indices = [item for item in product(user_feature_list, item_feature_list)]
        else:
            interaction_indices = [item for item in combinations(range(len(preprocessor_.col_types_)), 2)]
        # Score only pairs that involve at least one active main effect.
        for pair in interaction_indices:
            if (pair[0] in active_main_effect_index) or (pair[1] in active_main_effect_index):
                score = native_ebm.fast_interaction_score(pair)
                interaction_scores.append((pair, score))
        ranked_scores = list(
            sorted(interaction_scores, key=lambda item: item[1], reverse=True)
        )
        interaction_list = [ranked_scores[i][0] for i in range(len(ranked_scores))]
    return interaction_list
def plot_regularization(data_dict_logs, log_scale=True, save_eps=False, save_png=False, folder="./results/", name="trajectory_plot"):
    """Plot validation loss vs. the number of selected components.

    The left panel shows the validation loss as main effects are added,
    the right panel as interactions are added.  The global minimum is
    marked with a star and the actually selected count with a circle.

    Parameters
    ----------
    data_dict_logs : dict
        Training log; keys used: "main_effect_val_loss",
        "interaction_val_loss", "active_main_effect_index",
        "active_interaction_index".
    log_scale : bool
        If True, draw the loss axes on a log scale.
    save_eps, save_png : bool
        Also save the figure in the given format under `folder`.
    folder : str
        Output directory (created if missing).
    name : str
        Basename of the saved figure.
    """
    main_loss = data_dict_logs["main_effect_val_loss"]
    inter_loss = data_dict_logs["interaction_val_loss"]
    active_main_effect_index = data_dict_logs["active_main_effect_index"]
    active_interaction_index = data_dict_logs["active_interaction_index"]

    fig = plt.figure(figsize=(14, 4))
    if len(main_loss) > 0:
        ax1 = plt.subplot(1, 2, 1)
        ax1.plot(np.arange(0, len(main_loss), 1), main_loss)
        # Star: global minimum; circle: the count actually selected.
        ax1.axvline(np.argmin(main_loss), linestyle="dotted", color="red")
        ax1.axvline(len(active_main_effect_index), linestyle="dotted", color="red")
        ax1.plot(np.argmin(main_loss), np.min(main_loss), "*", markersize=12, color="red")
        ax1.plot(len(active_main_effect_index), main_loss[len(active_main_effect_index)], "o", markersize=8, color="red")
        ax1.set_xlabel("Number of Main Effects", fontsize=12)
        ax1.set_xlim(-0.5, len(main_loss) - 0.5)
        ax1.xaxis.set_major_locator(MaxNLocator(integer=True))
        if log_scale:
            ax1.set_yscale("log")
            ax1.set_yticks((10 ** np.linspace(np.log10(np.nanmin(main_loss)), np.log10(np.nanmax(main_loss)), 5)).round(5))
            ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
            # BUG FIX: this panel plots validation loss but was labelled
            # "Training Loss"; label it consistently with the right panel.
            ax1.set_ylabel("Validation Loss (Log Scale)", fontsize=12)
        else:
            ax1.set_yticks((np.linspace(np.nanmin(main_loss), np.nanmax(main_loss), 5)).round(5))
            ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
            ax1.set_ylabel("Validation Loss", fontsize=12)
    if len(inter_loss) > 0:
        ax2 = plt.subplot(1, 2, 2)
        ax2.plot(np.arange(0, len(inter_loss), 1), inter_loss)
        ax2.axvline(np.argmin(inter_loss), linestyle="dotted", color="red")
        ax2.axvline(len(active_interaction_index), linestyle="dotted", color="red")
        ax2.plot(np.argmin(inter_loss), np.min(inter_loss), "*", markersize=12, color="red")
        ax2.plot(len(active_interaction_index), inter_loss[len(active_interaction_index)], "o", markersize=8, color="red")
        ax2.set_xlabel("Number of Interactions", fontsize=12)
        ax2.set_xlim(-0.5, len(inter_loss) - 0.5)
        ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
        if log_scale:
            ax2.set_yscale("log")
            ax2.set_yticks((10 ** np.linspace(np.log10(np.nanmin(inter_loss)), np.log10(np.nanmax(inter_loss)), 5)).round(5))
            ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
            ax2.set_ylabel("Validation Loss (Log Scale)", fontsize=12)
        else:
            ax2.set_yticks((np.linspace(np.nanmin(inter_loss), np.nanmax(inter_loss), 5)).round(5))
            ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
            ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
            ax2.set_ylabel("Validation Loss", fontsize=12)
    plt.show()

    save_path = folder + name
    if not os.path.exists(folder):
        os.makedirs(folder)
    if save_eps:
        fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
    if save_png:
        fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
def plot_trajectory(data_dict_logs, log_scale=True, save_eps=False, save_png=False, folder="./results/", name="trajectory_plot"):
    """Plot training (left) and validation (right) loss trajectories over
    the three training stages: main effects (red), manifest interactions +
    tuning (blue), and latent interactions (green).

    Parameters
    ----------
    data_dict_logs : dict
        Training log; keys used: "err_train_*" and "err_val_*" lists for
        the main-effect, interaction, tuning and mf stages.
    log_scale : bool
        If True, draw the loss axes on a log scale.
    save_eps, save_png : bool
        Also save the figure in the given format under `folder`.
    folder : str
        Output directory (created if missing).
    name : str
        Basename of the saved figure.
    """
    t1, t2, t3, t4 = [data_dict_logs["err_train_main_effect_training"],
                data_dict_logs["err_train_interaction_training"], data_dict_logs["err_train_tuning"], data_dict_logs["err_train_mf"]]
    v1, v2, v3, v4 = [data_dict_logs["err_val_main_effect_training"],
                data_dict_logs["err_val_interaction_training"], data_dict_logs["err_val_tuning"], data_dict_logs["err_val_mf"]]

    # Offsets for the (currently commented-out) annotation arrows below.
    offset1x = (0.25 - len(t1) / len(t1 + t2 + t3 + t4)) * 300
    offset2x = (0.45 - len(t1 + t2) / len(t1 + t2 + t3 + t4)) * 300
    offset3x = (0.65 - len(t1 + t2 + t3) / len(t1 + t2 + t3 + t4)) * 300
    fig = plt.figure(figsize=(16, 6))
    ax1 = plt.subplot(1, 2, 1)
    ax1.plot(np.arange(1, len(t1) + 1, 1), t1, color="r")
    ax1.plot(np.arange(len(t1) + 1, len(t1 + t2 + t3) + 1, 1), t2 + t3, color="b")
    ax1.plot(np.arange(len(t1 + t2 + t3), len(t1 + t2 + t3 + t4), 1), t4, color='g')
    if log_scale:
        ax1.set_yscale("log")
        ax1.set_yticks((10 ** np.linspace(np.log10(np.nanmin(t1 + t2 + t3 + t4)), np.log10(np.nanmax(t1 + t2 + t3 + t4)), 6)).round(5))
        ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
        ax1.set_xlabel("Number of Epochs", fontsize=12)
        ax1.set_ylabel("Training Loss (Log Scale)", fontsize=12)
        # hp* are the stage-boundary heights as fractions of the axis,
        # used only by the commented-out annotations below.
        hp1 = ((np.log10(t1[-1]) - np.log10(np.min(t1 + t2 + t3 + t4)))
               / (np.log10(np.max(t1 + t2 + t3 + t4)) - np.log10(np.min(t1 + t2 + t3 + t4))))
        hp2 = ((np.log10((t1 + t2)[-1]) - np.log10(np.min(t1 + t2 + t3 + t4)))
               / (np.log10(np.max(t1 + t2 + t3 + t4)) - np.log10(np.min(t1 + t2 + t3 + t4))))
        hp3 = ((np.log10((t1 + t2 + t3)[-1]) - np.log10(np.min(t1 + t2 + t3 + t4)))
               / (np.log10(np.max(t1 + t2 + t3 + t4)) - np.log10(np.min(t1 + t2 + t3 + t4))))
    else:
        # NOTE(review): ticks here use only t1 + t2 while the log branch
        # uses all four stages; kept as-is to preserve behavior.
        ax1.set_yticks((np.linspace(np.nanmin(t1 + t2), np.nanmax(t1 + t2), 6)).round(5))
        ax1.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax1.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
        ax1.set_xlabel("Number of Epochs", fontsize=12)
        ax1.set_ylabel("Training Loss", fontsize=12)
        hp1 = (t1[-1] - np.min(t1 + t2 + t3 + t4)) / (np.max(t1 + t2 + t3 + t4) - np.min(t1 + t2 + t3 + t4))
        hp2 = ((t1 + t2)[-1] - np.min(t1 + t2 + t3 + t4)) / (np.max(t1 + t2 + t3 + t4) - np.min(t1 + t2 + t3 + t4))
        hp3 = ((t1 + t2 + t3)[-1] - np.min(t1 + t2 + t3 + t4)) / (np.max(t1 + t2 + t3 + t4) - np.min(t1 + t2 + t3 + t4))

    offset1y = 65 if hp1 < 0.6 else -65
    offset2y = 65 if hp2 < 0.6 else -65
    offset3y = 65 if hp3 < 0.6 else -65
    """
    if len(t2) > 0:
        ax1.annotate("Add \n Manifest \n Interactions", ((len(t1) + 1), t1[-1]), xycoords="data",
                xytext=(offset1x, offset1y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                horizontalalignment="center", verticalalignment="top")
    if len(t3) > 0:
        ax1.annotate("Prune ", ((len(t1 + t2) + 1), (t1 + t2)[-1]), xycoords="data",
                 xytext=(offset2x, offset2y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                 horizontalalignment="center", verticalalignment="top")
    if len(t4) > 0:
        ax1.annotate("Add \n Latent \n Interactions", ((len(t1 + t2 + t3) + 1), (t1 + t2 + t3)[-1]), xycoords="data",
                 xytext=(offset3x, offset3y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                 horizontalalignment="center", verticalalignment="top")
    """
    ax1.legend(["Stage 1: Training Main Effects", "Stage 2: Training Manifest Interactions", "Stage 3: Training Latent Interactions"])

    ax2 = plt.subplot(1, 2, 2)
    ax2.plot(np.arange(1, len(v1) + 1, 1), v1, color="r")
    ax2.plot(np.arange(len(v1) + 1, len(v1 + v2 + v3) + 1, 1), v2 + v3, color="b")
    ax2.plot(np.arange(len(v1 + v2 + v3) + 1, len(v1 + v2 + v3 + v4) + 1, 1), v4, color="g")
    if log_scale:
        ax2.set_yscale("log")
        ax2.set_yticks((10 ** np.linspace(np.log10(np.nanmin(v1 + v2 + v3 + v4)), np.log10(np.nanmax(v1 + v2 + v3 + v4)), 6)).round(5))
        ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
        ax2.set_xlabel("Number of Epochs", fontsize=12)
        ax2.set_ylabel("Validation Loss (Log Scale)", fontsize=12)
        hp1 = ((np.log10(v1[-1]) - np.log10(np.min(v1 + v2 + v3 + v4)))
               / (np.log10(np.max(v1 + v2 + v3 + v4)) - np.log10(np.min(v1 + v2 + v3 + v4))))
        hp2 = ((np.log10((v1 + v2)[-1]) - np.log10(np.min(v1 + v2 + v3 + v4)))
               / (np.log10(np.max(v1 + v2 + v3 + v4)) - np.log10(np.min(v1 + v2 + v3 + v4))))
        hp3 = ((np.log10((v1 + v2 + v3)[-1]) - np.log10(np.min(v1 + v2 + v3 + v4)))
               / (np.log10(np.max(v1 + v2 + v3 + v4)) - np.log10(np.min(v1 + v2 + v3 + v4))))
    else:
        # BUG FIX: this branch referenced an undefined `v5` and raised
        # NameError whenever log_scale=False; only four stages exist.
        ax2.set_yticks((np.linspace(np.nanmin(v1 + v2 + v3 + v4), np.nanmax(v1 + v2 + v3 + v4), 6)).round(5))
        ax2.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
        ax2.get_yaxis().set_minor_formatter(matplotlib.ticker.NullFormatter())
        ax2.set_xlabel("Number of Epochs", fontsize=12)
        ax2.set_ylabel("Validation Loss", fontsize=12)
        hp1 = (v1[-1] - np.min(v1 + v2 + v3 + v4)) / (np.max(v1 + v2 + v3 + v4) - np.min(v1 + v2 + v3 + v4))
        hp2 = ((v1 + v2)[-1] - np.min(v1 + v2 + v3 + v4)) / (np.max(v1 + v2 + v3 + v4) - np.min(v1 + v2 + v3 + v4))
        hp3 = ((v1 + v2 + v3)[-1] - np.min(v1 + v2 + v3 + v4)) / (np.max(v1 + v2 + v3 + v4) - np.min(v1 + v2 + v3 + v4))

    offset1y = 65 if hp1 < 0.6 else -65
    offset2y = 65 if hp2 < 0.6 else -65
    offset3y = 65 if hp3 < 0.6 else -65
    """
    if len(v2) > 0:
        ax2.annotate("Add \n Manifest \n Interactions", ((len(v1) + 1), v1[-1]), xycoords="data",
                xytext=(offset1x, offset1y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                horizontalalignment="center", verticalalignment="top")
    if len(v3) > 0:
        ax2.annotate("Prune ", ((len(v1 + v2) + 1), (v1 + v2)[-1]), xycoords="data",
                 xytext=(offset2x, offset2y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                 horizontalalignment="center", verticalalignment="top")
    if len(v4) > 0:
        ax2.annotate("Add \n Latent \n Interactions", ((len(v1 + v2 + v3) + 1), (v1 + v2 + v3)[-1]), xycoords="data",
                 xytext=(offset3x, offset3y), textcoords="offset pixels", arrowprops=dict(facecolor="black", shrink=0.1), fontsize=10,
                 horizontalalignment="center", verticalalignment="top")
    """
    ax2.legend(["Stage 1: Training Main Effects", "Stage 2: Training Manifest Interactions", "Stage 3: Training Latent Interactions"])
    plt.show()

    save_path = folder + name
    if not os.path.exists(folder):
        os.makedirs(folder)
    if save_eps:
        fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
    if save_png:
        fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
def feature_importance_visualize(data_dict_global, folder="./results/", name="demo", save_png=False, save_eps=False):
    """Draw a horizontal bar chart of the positive feature importances in
    `data_dict_global` (sorted ascending, so the largest bar is on top),
    annotating each bar with its value, and optionally save the figure.

    Parameters
    ----------
    data_dict_global : dict
        Maps component name -> dict with at least an "importance" entry.
    folder : str
        Output directory (created if missing when saving).
    name : str
        Basename of the saved figure.
    save_png, save_eps : bool
        Save the figure in the given format(s).
    """
    # Collect only components with strictly positive importance.
    all_ir = []
    all_names = []
    for key, item in data_dict_global.items():
        if item["importance"] > 0:
            all_ir.append(item["importance"])
            all_names.append(key)

    max_ids = len(all_names)
    if max_ids > 0:
        fig = plt.figure(figsize=(7, 0.4 + 0.6 * max_ids))
        ax = plt.axes()
        # Bars and labels are both sorted by importance so they line up.
        rects = ax.barh(np.arange(len(all_ir)), [ir for ir,_ in sorted(zip(all_ir, all_names))])
        ax.set_yticks(np.arange(len(all_ir)))
        ax.set_yticklabels([name for _,name in sorted(zip(all_ir, all_names))])
        # Annotate each bar with its numeric importance.
        for rect in rects:
            _, height = rect.get_xy()
            ax.text(rect.get_x() + rect.get_width() + 0.005, height + 0.3,
                "%0.3f" % (rect.get_width()))
        plt.ylabel("Feature Name", fontsize=12)
        plt.xlim(0, np.max(all_ir) + 0.05)
        plt.ylim(-1, len(all_names))
        plt.title("Feature Importance")
        # NOTE(review): save_path (and fig) only exist when max_ids > 0;
        # the save branches below are guarded accordingly.
        save_path = folder + name
    if (max_ids > 0) & save_eps:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
    if (max_ids > 0) & save_png:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
def global_visualize_density(data_dict_global, main_effect_num=10**5, interaction_num=10**5, cols_per_row=4,
                    save_png=False, save_eps=False, folder="./results/", name="demo"):
    """Plot every active main effect and pairwise interaction in
    `data_dict_global` on one grid, each panel paired with the marginal
    density of its input(s), and optionally save the figure.

    Parameters
    ----------
    data_dict_global : dict
        Maps component name -> dict with "type" ("continuous",
        "categorical" or "pairwise"), "importance", shape/density data,
        and for pairwise entries the two feature names joined by " vs. ".
    main_effect_num, interaction_num : int
        Upper bounds on how many main effects / interactions to draw.
    cols_per_row : int
        Number of panels per row in the output grid.
    save_png, save_eps : bool
        Save the figure in the given format(s).
    folder : str
        Output directory (created if missing when saving).
    name : str
        Basename of the saved figure.
    """
    # Count main effects and collect importances (pairwise entries come
    # after all main effects in the dict order).
    maineffect_count = 0
    componment_scales = []
    for key, item in data_dict_global.items():
        componment_scales.append(item["importance"])
        if item["type"] != "pairwise":
            maineffect_count += 1

    # Keep only components with positive cumulative importance, ranked
    # from most to least important.
    componment_scales = np.array(componment_scales)
    sorted_index = np.argsort(componment_scales)
    active_index = sorted_index[componment_scales[sorted_index].cumsum()>0][::-1]
    active_univariate_index = active_index[active_index < maineffect_count][:main_effect_num]
    active_interaction_index = active_index[active_index >= maineffect_count][:interaction_num]
    max_ids = len(active_univariate_index) + len(active_interaction_index)

    idx = 0
    fig = plt.figure(figsize=(6 * cols_per_row, 4.6 * int(np.ceil(max_ids / cols_per_row))))
    #fig.suptitle('Main Effects & Manifest Interactions',fontsize=25)
    outer = gridspec.GridSpec(int(np.ceil(max_ids / cols_per_row)), cols_per_row, wspace=0.25, hspace=0.35)
    # --- Main-effect panels: shape function on top, input density below.
    for indice in active_univariate_index:
        feature_name = list(data_dict_global.keys())[indice]
        if data_dict_global[feature_name]["type"] == "continuous":
            inner = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[idx], wspace=0.1, hspace=0.1, height_ratios=[6, 1])
            ax1 = plt.Subplot(fig, inner[0])
            ax1.plot(data_dict_global[feature_name]["inputs"], data_dict_global[feature_name]["outputs"])
            ax1.set_xticklabels([])
            fig.add_subplot(ax1)
            ax2 = plt.Subplot(fig, inner[1])
            # Histogram bin centers = midpoints of consecutive bin edges.
            xint = ((np.array(data_dict_global[feature_name]["density"]["names"][1:])
                     + np.array(data_dict_global[feature_name]["density"]["names"][:-1])) / 2).reshape([-1, 1]).reshape([-1])
            ax2.bar(xint, data_dict_global[feature_name]["density"]["scores"], width=xint[1] - xint[0])
            ax1.get_shared_x_axes().join(ax1, ax2)
            ax2.set_yticklabels([])
            fig.add_subplot(ax2)
        elif data_dict_global[feature_name]["type"] == "categorical":
            inner = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=outer[idx],
                                    wspace=0.1, hspace=0.1, height_ratios=[6, 1])
            ax1 = plt.Subplot(fig, inner[0])
            ax1.bar(np.arange(len(data_dict_global[feature_name]["inputs"])),
                        data_dict_global[feature_name]["outputs"])
            ax1.set_xticklabels([])
            fig.add_subplot(ax1)
            ax2 = plt.Subplot(fig, inner[1])
            ax2.bar(np.arange(len(data_dict_global[feature_name]["density"]["names"])),
                        data_dict_global[feature_name]["density"]["scores"])
            ax1.get_shared_x_axes().join(ax1, ax2)
            ax2.set_xticks(data_dict_global[feature_name]["input_ticks"])
            ax2.set_xticklabels(data_dict_global[feature_name]["input_labels"])
            ax2.set_yticklabels([])
            fig.add_subplot(ax2)
        idx = idx + 1
        # Rotate tick labels when they get too long to fit horizontally.
        if len(str(ax2.get_xticks())) > 60:
            ax2.xaxis.set_tick_params(rotation=20)
        ax1.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=14)
    # --- Interaction panels: heatmap with marginal densities and colorbar.
    for indice in active_interaction_index:
        feature_name = list(data_dict_global.keys())[indice]
        feature_name1 = feature_name.split(" vs. ")[0]
        feature_name2 = feature_name.split(" vs. ")[1]
        axis_extent = data_dict_global[feature_name]["axis_extent"]
        inner = gridspec.GridSpecFromSubplotSpec(2, 4, subplot_spec=outer[idx],
                                wspace=0.1, hspace=0.1, height_ratios=[6, 1], width_ratios=[0.6, 3, 0.15, 0.2])
        ax_main = plt.Subplot(fig, inner[1])
        interact_plot = ax_main.imshow(data_dict_global[feature_name]["outputs"], interpolation="nearest",
                             aspect="auto", extent=axis_extent)
        ax_main.set_xticklabels([])
        ax_main.set_yticklabels([])
        ax_main.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=14)
        fig.add_subplot(ax_main)
        # Bottom strip: density of the first (x-axis) feature.
        ax_bottom = plt.Subplot(fig, inner[5])
        if data_dict_global[feature_name]["xtype"] == "categorical":
            xint = np.arange(len(data_dict_global[feature_name1]["density"]["names"]))
            ax_bottom.bar(xint, data_dict_global[feature_name1]["density"]["scores"])
            ax_bottom.set_xticks(data_dict_global[feature_name]["input1_ticks"])
            ax_bottom.set_xticklabels(data_dict_global[feature_name]["input1_labels"])
        else:
            xint = ((np.array(data_dict_global[feature_name1]["density"]["names"][1:])
                     + np.array(data_dict_global[feature_name1]["density"]["names"][:-1])) / 2).reshape([-1])
            ax_bottom.bar(xint, data_dict_global[feature_name1]["density"]["scores"], width=xint[1] - xint[0])
        ax_bottom.set_yticklabels([])
        ax_bottom.set_xlim([axis_extent[0], axis_extent[1]])
        ax_bottom.get_shared_x_axes().join(ax_bottom, ax_main)
        fig.add_subplot(ax_bottom)
        if len(str(ax_bottom.get_xticks())) > 60:
            ax_bottom.xaxis.set_tick_params(rotation=20)
        # Left strip: density of the second (y-axis) feature.
        ax_left = plt.Subplot(fig, inner[0])
        if data_dict_global[feature_name]["ytype"] == "categorical":
            xint = np.arange(len(data_dict_global[feature_name2]["density"]["names"]))
            ax_left.barh(xint, data_dict_global[feature_name2]["density"]["scores"])
            ax_left.set_yticks(data_dict_global[feature_name]["input2_ticks"])
            ax_left.set_yticklabels(data_dict_global[feature_name]["input2_labels"])
        else:
            xint = ((np.array(data_dict_global[feature_name2]["density"]["names"][1:])
                     + np.array(data_dict_global[feature_name2]["density"]["names"][:-1])) / 2).reshape([-1])
            ax_left.barh(xint, data_dict_global[feature_name2]["density"]["scores"], height=xint[1] - xint[0])
        ax_left.set_xticklabels([])
        ax_left.set_ylim([axis_extent[2], axis_extent[3]])
        ax_left.get_shared_y_axes().join(ax_left, ax_main)
        fig.add_subplot(ax_left)
        ax_colorbar = plt.Subplot(fig, inner[2])
        # Choose colorbar label precision from the response range.
        response_precision = max(int(- np.log10(np.max(data_dict_global[feature_name]["outputs"])
                               - np.min(data_dict_global[feature_name]["outputs"]))) + 2, 0)
        fig.colorbar(interact_plot, cax=ax_colorbar, orientation="vertical",
                     format="%0." + str(response_precision) + "f", use_gridspec=True)
        fig.add_subplot(ax_colorbar)
        idx = idx + 1

    save_path = folder + name
    if (max_ids > 0) & save_eps:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
    if (max_ids > 0) & save_png:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
def global_visualize_wo_density(data_dict_global, main_effect_num=10**5, interaction_num=10**5, cols_per_row=4,
                save_png=False, save_eps=False, folder="./results/", name="demo"):
    """Plot global explanations (main effects and pairwise interactions) on one grid figure,
    without the marginal density subplots.

    Parameters
    ----------
    data_dict_global : dict
        Mapping of feature name -> explanation record. Each record carries at least
        "importance" and "type" ("pairwise" for interactions); main effects carry
        "inputs"/"outputs" (and tick/label keys when categorical), interactions carry
        "outputs", "axis_extent", "xtype"/"ytype" and the corresponding tick/label keys.
    main_effect_num : int
        Maximum number of main effects to plot (default effectively unlimited).
    interaction_num : int
        Maximum number of pairwise interactions to plot.
    cols_per_row : int
        Number of subplot columns per row.
    save_png, save_eps : bool
        Whether to save the figure as PNG / EPS under `folder`.
    folder, name : str
        Output directory and file name stem for the saved figure.
    """
    # Count main effects and collect every component's importance score.
    maineffect_count = 0
    componment_scales = []
    for key, item in data_dict_global.items():
        componment_scales.append(item["importance"])
        if item["type"] != "pairwise":
            maineffect_count += 1
    componment_scales = np.array(componment_scales)
    sorted_index = np.argsort(componment_scales)
    # Keep only components up to the point where cumulative importance stays > 0,
    # then reverse so the most important come first.
    active_index = sorted_index[componment_scales[sorted_index].cumsum()>0][::-1]
    # dict insertion order puts all main effects before interactions, so index
    # position < maineffect_count identifies a main effect.
    active_univariate_index = active_index[active_index < maineffect_count][:main_effect_num]
    active_interaction_index = active_index[active_index >= maineffect_count][:interaction_num]
    max_ids = len(active_univariate_index) + len(active_interaction_index)
    idx = 0
    # One grid cell per plotted component; figure height grows with the row count.
    fig = plt.figure(figsize=(5.2 * cols_per_row, 4 * int(np.ceil(max_ids / cols_per_row))))
    outer = gridspec.GridSpec(int(np.ceil(max_ids/cols_per_row)), cols_per_row, wspace=0.25, hspace=0.35)
    # --- main effects: line plot for continuous features, bar chart for categorical ---
    for indice in active_univariate_index:
        feature_name = list(data_dict_global.keys())[indice]
        if data_dict_global[feature_name]["type"] == "continuous":
            ax1 = plt.Subplot(fig, outer[idx])
            ax1.plot(data_dict_global[feature_name]["inputs"], data_dict_global[feature_name]["outputs"])
            ax1.set_title(feature_name, fontsize=12)
            fig.add_subplot(ax1)
            # Rotate crowded x tick labels for readability.
            if len(str(ax1.get_xticks())) > 80:
                ax1.xaxis.set_tick_params(rotation=20)
        elif data_dict_global[feature_name]["type"] == "categorical":
            ax1 = plt.Subplot(fig, outer[idx])
            ax1.bar(np.arange(len(data_dict_global[feature_name]["inputs"])),
                        data_dict_global[feature_name]["outputs"])
            ax1.set_title(feature_name, fontsize=12)
            ax1.set_xticks(data_dict_global[feature_name]["input_ticks"])
            ax1.set_xticklabels(data_dict_global[feature_name]["input_labels"])
            fig.add_subplot(ax1)
        idx = idx + 1
        if len(str(ax1.get_xticks())) > 60:
            ax1.xaxis.set_tick_params(rotation=20)
        # Final title overrides the one set in the branches, appending the importance %.
        ax1.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)
    # --- pairwise interactions: heatmap with a colorbar ---
    for indice in active_interaction_index:
        feature_name = list(data_dict_global.keys())[indice]
        # Interaction names have the form "<feature1> vs. <feature2>".
        feature_name1 = feature_name.split(" vs. ")[0]
        feature_name2 = feature_name.split(" vs. ")[1]
        axis_extent = data_dict_global[feature_name]["axis_extent"]
        ax_main = plt.Subplot(fig, outer[idx])
        interact_plot = ax_main.imshow(data_dict_global[feature_name]["outputs"], interpolation="nearest",
                             aspect="auto", extent=axis_extent)
        if data_dict_global[feature_name]["xtype"] == "categorical":
            ax_main.set_xticks(data_dict_global[feature_name]["input1_ticks"])
            ax_main.set_xticklabels(data_dict_global[feature_name]["input1_labels"])
        if data_dict_global[feature_name]["ytype"] == "categorical":
            ax_main.set_yticks(data_dict_global[feature_name]["input2_ticks"])
            ax_main.set_yticklabels(data_dict_global[feature_name]["input2_labels"])
        # Choose colorbar label precision from the output value range:
        # the narrower the range, the more decimal digits are shown (at least 0).
        response_precision = max(int(- np.log10(np.max(data_dict_global[feature_name]["outputs"])
                                   - np.min(data_dict_global[feature_name]["outputs"]))) + 2, 0)
        fig.colorbar(interact_plot, ax=ax_main, orientation="vertical",
                     format="%0." + str(response_precision) + "f", use_gridspec=True)
        ax_main.set_title(feature_name + " (" + str(np.round(100 * data_dict_global[feature_name]["importance"], 1)) + "%)", fontsize=12)
        fig.add_subplot(ax_main)
        idx = idx + 1
        if len(str(ax_main.get_xticks())) > 60:
            ax_main.xaxis.set_tick_params(rotation=20)
    # Save only when something was plotted; create the output folder on demand.
    save_path = folder + name
    if (max_ids > 0) & save_eps:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.eps" % save_path, bbox_inches="tight", dpi=100)
    if (max_ids > 0) & save_png:
        if not os.path.exists(folder):
            os.makedirs(folder)
        fig.savefig("%s.png" % save_path, bbox_inches="tight", dpi=100)
def local_visualize(data_dict_local, folder="./results/", name="demo", save_png=False, save_eps=False, task_type='Regression'):
    """Plot a horizontal bar chart of per-effect contribution scores for one sample.

    Parameters
    ----------
    data_dict_local : dict
        Local explanation data. Expected keys: "active_indice", "effect_names",
        "scores", "predicted"; for classification additionally "initial_predict";
        optionally "actual" (the ground-truth value).
    folder, name : str
        NOTE(review): currently unused — the figure is always written as
        "local.eps" in the working directory; confirm whether they should
        be honored like in the global visualization functions.
    save_png : bool
        NOTE(review): currently unused; no PNG output is produced.
    save_eps : bool
        If True, save the figure to "local.eps".
    task_type : str
        'Regression' (default) or 'Classification'. Classification maps the
        raw score through a sigmoid before display.
    """
    # Axes geometry, as fractions of the figure area.
    left_x, left_y = 0.1, 0.1
    width, height = 1, 1.5
    scatter_area = [left_x, left_y, width, height]
    # Figure height scales with the number of active effects shown.
    plt.figure(figsize=(6, round((len(data_dict_local["active_indice"]) + 1) * 0.45)))
    area_scatter = plt.axes(scatter_area)
    # Final prediction = explicit prediction + intercept/implicit score.
    final_pre = data_dict_local["predicted"] + data_dict_local["scores"][0]
    if task_type == 'Classification':
        # Start from the raw logit and squash through a sigmoid to a probability.
        final_pre = data_dict_local["initial_predict"][0] + data_dict_local["scores"][0]
        final_pre = tf.sigmoid(final_pre).numpy()[0]
    if "actual" in data_dict_local.keys():
        area_scatter.set_title("Final Predicted: %0.4f | Actual: %0.4f" % (final_pre, data_dict_local["actual"]))
    else:
        # Fixed typo in the displayed title: "Fianl" -> "Final".
        area_scatter.set_title("Final Predicted: %0.4f" % (final_pre))
    # Reverse so the most important effect appears at the top of the chart.
    area_scatter.barh(data_dict_local["effect_names"][data_dict_local["active_indice"]][::-1].tolist(),
                      data_dict_local["scores"][data_dict_local["active_indice"]][::-1])
    area_scatter.set_yticks(np.arange(len(data_dict_local["active_indice"])))
    if save_eps:
        plt.savefig("local.eps", bbox_inches="tight", dpi=100)
| [
"matplotlib.pyplot.title",
"numpy.maximum",
"matplotlib.pyplot.axes",
"numpy.argmin",
"numpy.argsort",
"matplotlib.pyplot.figure",
"interpret.glassbox.ebm.ebm.EBMPreprocessor",
"numpy.round",
"matplotlib.ticker.ScalarFormatter",
"pandas.DataFrame",
"interpret.glassbox.ebm.internal.NativeEBM",
... | [((1239, 1263), 'numpy.vstack', 'np.vstack', (['[tr_x, val_x]'], {}), '([tr_x, val_x])\n', (1248, 1263), True, 'import numpy as np\n'), ((1391, 1422), 'interpret.glassbox.ebm.ebm.EBMPreprocessor', 'EBMPreprocessor', ([], {'schema': 'schema_'}), '(schema=schema_)\n', (1406, 1422), False, 'from interpret.glassbox.ebm.ebm import EBMPreprocessor\n'), ((1562, 1638), 'interpret.glassbox.ebm.utils.EBMUtils.gen_attributes', 'EBMUtils.gen_attributes', (['preprocessor_.col_types_', 'preprocessor_.col_n_bins_'], {}), '(preprocessor_.col_types_, preprocessor_.col_n_bins_)\n', (1585, 1638), False, 'from interpret.glassbox.ebm.utils import EBMUtils\n'), ((3559, 3586), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 4)'}), '(figsize=(14, 4))\n', (3569, 3586), True, 'from matplotlib import pyplot as plt\n'), ((6588, 6598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6596, 6598), True, 'from matplotlib import pyplot as plt\n'), ((7633, 7660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 6))\n', (7643, 7660), True, 'from matplotlib import pyplot as plt\n'), ((7671, 7691), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7682, 7691), True, 'from matplotlib import pyplot as plt\n'), ((10905, 10925), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (10916, 10925), True, 'from matplotlib import pyplot as plt\n'), ((14160, 14170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14168, 14170), True, 'from matplotlib import pyplot as plt\n'), ((16361, 16388), 'numpy.array', 'np.array', (['componment_scales'], {}), '(componment_scales)\n', (16369, 16388), True, 'import numpy as np\n'), ((16408, 16437), 'numpy.argsort', 'np.argsort', (['componment_scales'], {}), '(componment_scales)\n', (16418, 16437), True, 'import numpy as np\n'), ((23615, 23642), 'numpy.array', 'np.array', (['componment_scales'], {}), '(componment_scales)\n', 
(23623, 23642), True, 'import numpy as np\n'), ((23662, 23691), 'numpy.argsort', 'np.argsort', (['componment_scales'], {}), '(componment_scales)\n', (23672, 23691), True, 'import numpy as np\n'), ((28033, 28055), 'matplotlib.pyplot.axes', 'plt.axes', (['scatter_area'], {}), '(scatter_area)\n', (28041, 28055), True, 'from matplotlib import pyplot as plt\n'), ((1293, 1308), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (1305, 1308), True, 'import pandas as pd\n'), ((3628, 3648), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (3639, 3648), True, 'from matplotlib import pyplot as plt\n'), ((5120, 5140), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (5131, 5140), True, 'from matplotlib import pyplot as plt\n'), ((6641, 6663), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (6655, 6663), False, 'import os\n'), ((6673, 6692), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (6684, 6692), False, 'import os\n'), ((14217, 14239), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (14231, 14239), False, 'import os\n'), ((14249, 14268), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (14260, 14268), False, 'import os\n'), ((14840, 14884), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 0.4 + 0.6 * max_ids)'}), '(figsize=(7, 0.4 + 0.6 * max_ids))\n', (14850, 14884), True, 'from matplotlib import pyplot as plt\n'), ((14898, 14908), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14906, 14908), True, 'from matplotlib import pyplot as plt\n'), ((15330, 15369), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Feature Name"""'], {'fontsize': '(12)'}), "('Feature Name', fontsize=12)\n", (15340, 15369), True, 'from matplotlib import pyplot as plt\n'), ((15458, 15489), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (15467, 15489), True, 'from 
matplotlib import pyplot as plt\n'), ((19633, 19780), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(4)'], {'subplot_spec': 'outer[idx]', 'wspace': '(0.1)', 'hspace': '(0.1)', 'height_ratios': '[6, 1]', 'width_ratios': '[0.6, 3, 0.15, 0.2]'}), '(2, 4, subplot_spec=outer[idx], wspace=0.1,\n hspace=0.1, height_ratios=[6, 1], width_ratios=[0.6, 3, 0.15, 0.2])\n', (19665, 19780), False, 'from matplotlib import gridspec\n'), ((19827, 19853), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[1]'], {}), '(fig, inner[1])\n', (19838, 19853), True, 'from matplotlib import pyplot as plt\n'), ((20289, 20315), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[5]'], {}), '(fig, inner[5])\n', (20300, 20315), True, 'from matplotlib import pyplot as plt\n'), ((21369, 21395), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[0]'], {}), '(fig, inner[0])\n', (21380, 21395), True, 'from matplotlib import pyplot as plt\n'), ((22331, 22357), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[2]'], {}), '(fig, inner[2])\n', (22342, 22357), True, 'from matplotlib import pyplot as plt\n'), ((25836, 25864), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'outer[idx]'], {}), '(fig, outer[idx])\n', (25847, 25864), True, 'from matplotlib import pyplot as plt\n'), ((29803, 29857), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""local.eps"""'], {'bbox_inches': '"""tight"""', 'dpi': '(100)'}), "('local.eps', bbox_inches='tight', dpi=100)\n", (29814, 29857), True, 'from matplotlib import pyplot as plt\n'), ((1105, 1136), 'numpy.log', 'np.log', (['(pred_tr / (1 - pred_tr))'], {}), '(pred_tr / (1 - pred_tr))\n', (1111, 1136), True, 'import numpy as np\n'), ((1157, 1190), 'numpy.log', 'np.log', (['(pred_val / (1 - pred_val))'], {}), '(pred_val / (1 - pred_val))\n', (1163, 1190), True, 'import numpy as np\n'), ((1761, 1968), 'interpret.glassbox.ebm.internal.NativeEBM', 'NativeEBM', (['attributes_', 
'main_attr_sets', 'tr_x', 'tr_y', 'val_x', 'val_y'], {'num_inner_bags': '(0)', 'num_classification_states': 'num_classes_', 'model_type': 'model_type', 'training_scores': 'pred_tr', 'validation_scores': 'pred_val'}), '(attributes_, main_attr_sets, tr_x, tr_y, val_x, val_y,\n num_inner_bags=0, num_classification_states=num_classes_, model_type=\n model_type, training_scores=pred_tr, validation_scores=pred_val)\n', (1770, 1968), False, 'from interpret.glassbox.ebm.internal import NativeEBM\n'), ((3730, 3750), 'numpy.argmin', 'np.argmin', (['main_loss'], {}), '(main_loss)\n', (3739, 3750), True, 'import numpy as np\n'), ((3886, 3906), 'numpy.argmin', 'np.argmin', (['main_loss'], {}), '(main_loss)\n', (3895, 3906), True, 'import numpy as np\n'), ((3908, 3925), 'numpy.min', 'np.min', (['main_loss'], {}), '(main_loss)\n', (3914, 3925), True, 'import numpy as np\n'), ((4296, 4321), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (4307, 4321), False, 'from matplotlib.ticker import MaxNLocator\n'), ((5224, 5245), 'numpy.argmin', 'np.argmin', (['inter_loss'], {}), '(inter_loss)\n', (5233, 5245), True, 'import numpy as np\n'), ((5381, 5402), 'numpy.argmin', 'np.argmin', (['inter_loss'], {}), '(inter_loss)\n', (5390, 5402), True, 'import numpy as np\n'), ((5404, 5422), 'numpy.min', 'np.min', (['inter_loss'], {}), '(inter_loss)\n', (5410, 5422), True, 'import numpy as np\n'), ((5795, 5820), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (5806, 5820), False, 'from matplotlib.ticker import MaxNLocator\n'), ((8148, 8183), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (8181, 8183), False, 'import matplotlib\n'), ((8229, 8262), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (8260, 8262), False, 'import matplotlib\n'), ((9049, 9084), 'matplotlib.ticker.ScalarFormatter', 
'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (9082, 9084), False, 'import matplotlib\n'), ((9130, 9163), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (9161, 9163), False, 'import matplotlib\n'), ((11394, 11429), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (11427, 11429), False, 'import matplotlib\n'), ((11475, 11508), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (11506, 11508), False, 'import matplotlib\n'), ((12326, 12361), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (12359, 12361), False, 'import matplotlib\n'), ((12407, 12440), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (12438, 12440), False, 'import matplotlib\n'), ((16995, 17026), 'numpy.ceil', 'np.ceil', (['(max_ids / cols_per_row)'], {}), '(max_ids / cols_per_row)\n', (17002, 17026), True, 'import numpy as np\n'), ((17262, 17375), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'outer[idx]', 'wspace': '(0.1)', 'hspace': '(0.1)', 'height_ratios': '[6, 1]'}), '(2, 1, subplot_spec=outer[idx], wspace=0.1,\n hspace=0.1, height_ratios=[6, 1])\n', (17294, 17375), False, 'from matplotlib import gridspec\n'), ((17390, 17416), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[0]'], {}), '(fig, inner[0])\n', (17401, 17416), True, 'from matplotlib import pyplot as plt\n'), ((17612, 17638), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[1]'], {}), '(fig, inner[1])\n', (17623, 17638), True, 'from matplotlib import pyplot as plt\n'), ((22855, 22877), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (22869, 22877), False, 'import os\n'), ((22891, 22910), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (22902, 22910), False, 'import os\n'), ((23031, 23053), 
'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (23045, 23053), False, 'import os\n'), ((23067, 23086), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (23078, 23086), False, 'import os\n'), ((24179, 24210), 'numpy.ceil', 'np.ceil', (['(max_ids / cols_per_row)'], {}), '(max_ids / cols_per_row)\n', (24186, 24210), True, 'import numpy as np\n'), ((24442, 24470), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'outer[idx]'], {}), '(fig, outer[idx])\n', (24453, 24470), True, 'from matplotlib import pyplot as plt\n'), ((27256, 27278), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (27270, 27278), False, 'import os\n'), ((27292, 27311), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (27303, 27311), False, 'import os\n'), ((27432, 27454), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (27446, 27454), False, 'import os\n'), ((27468, 27487), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (27479, 27487), False, 'import os\n'), ((970, 996), 'numpy.maximum', 'np.maximum', (['pred_tr', '(1e-07)'], {}), '(pred_tr, 1e-07)\n', (980, 996), True, 'import numpy as np\n'), ((1043, 1070), 'numpy.maximum', 'np.maximum', (['pred_val', '(1e-07)'], {}), '(pred_val, 1e-07)\n', (1053, 1070), True, 'import numpy as np\n'), ((4551, 4586), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (4584, 4586), False, 'import matplotlib\n'), ((4636, 4669), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (4667, 4669), False, 'import matplotlib\n'), ((4900, 4935), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (4933, 4935), False, 'import matplotlib\n'), ((4985, 5018), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (5016, 5018), False, 'import matplotlib\n'), ((6052, 6087), 'matplotlib.ticker.ScalarFormatter', 
'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (6085, 6087), False, 'import matplotlib\n'), ((6137, 6170), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (6168, 6170), False, 'import matplotlib\n'), ((6405, 6440), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (6438, 6440), False, 'import matplotlib\n'), ((6490, 6523), 'matplotlib.ticker.NullFormatter', 'matplotlib.ticker.NullFormatter', ([], {}), '()\n', (6521, 6523), False, 'import matplotlib\n'), ((8401, 8417), 'numpy.log10', 'np.log10', (['t1[-1]'], {}), '(t1[-1])\n', (8409, 8417), True, 'import numpy as np\n'), ((8567, 8590), 'numpy.log10', 'np.log10', (['(t1 + t2)[-1]'], {}), '((t1 + t2)[-1])\n', (8575, 8590), True, 'import numpy as np\n'), ((8741, 8769), 'numpy.log10', 'np.log10', (['(t1 + t2 + t3)[-1]'], {}), '((t1 + t2 + t3)[-1])\n', (8749, 8769), True, 'import numpy as np\n'), ((9298, 9323), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9304, 9323), True, 'import numpy as np\n'), ((9329, 9354), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9335, 9354), True, 'import numpy as np\n'), ((9358, 9383), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9364, 9383), True, 'import numpy as np\n'), ((9416, 9441), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9422, 9441), True, 'import numpy as np\n'), ((9447, 9472), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9453, 9472), True, 'import numpy as np\n'), ((9476, 9501), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9482, 9501), True, 'import numpy as np\n'), ((9540, 9565), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9546, 9565), True, 'import numpy as np\n'), ((9571, 9596), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 
+ t4)\n', (9577, 9596), True, 'import numpy as np\n'), ((9600, 9625), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (9606, 9625), True, 'import numpy as np\n'), ((11649, 11665), 'numpy.log10', 'np.log10', (['v1[-1]'], {}), '(v1[-1])\n', (11657, 11665), True, 'import numpy as np\n'), ((11815, 11838), 'numpy.log10', 'np.log10', (['(v1 + v2)[-1]'], {}), '((v1 + v2)[-1])\n', (11823, 11838), True, 'import numpy as np\n'), ((11989, 12017), 'numpy.log10', 'np.log10', (['(v1 + v2 + v3)[-1]'], {}), '((v1 + v2 + v3)[-1])\n', (11997, 12017), True, 'import numpy as np\n'), ((12577, 12602), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12583, 12602), True, 'import numpy as np\n'), ((12607, 12632), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12613, 12632), True, 'import numpy as np\n'), ((12635, 12660), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12641, 12660), True, 'import numpy as np\n'), ((12693, 12718), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12699, 12718), True, 'import numpy as np\n'), ((12723, 12748), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12729, 12748), True, 'import numpy as np\n'), ((12751, 12776), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12757, 12776), True, 'import numpy as np\n'), ((12814, 12839), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12820, 12839), True, 'import numpy as np\n'), ((12844, 12869), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12850, 12869), True, 'import numpy as np\n'), ((12872, 12897), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12878, 12897), True, 'import numpy as np\n'), ((15390, 15404), 'numpy.max', 'np.max', (['all_ir'], {}), '(all_ir)\n', (15396, 15404), True, 'import numpy as 
np\n'), ((15581, 15603), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (15595, 15603), False, 'import os\n'), ((15621, 15640), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (15632, 15640), False, 'import os\n'), ((15773, 15795), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (15787, 15795), False, 'import os\n'), ((15813, 15832), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (15824, 15832), False, 'import os\n'), ((18176, 18289), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(2)', '(1)'], {'subplot_spec': 'outer[idx]', 'wspace': '(0.1)', 'hspace': '(0.1)', 'height_ratios': '[6, 1]'}), '(2, 1, subplot_spec=outer[idx], wspace=0.1,\n hspace=0.1, height_ratios=[6, 1])\n', (18208, 18289), False, 'from matplotlib import gridspec\n'), ((18344, 18370), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[0]'], {}), '(fig, inner[0])\n', (18355, 18370), True, 'from matplotlib import pyplot as plt\n'), ((18604, 18630), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'inner[1]'], {}), '(fig, inner[1])\n', (18615, 18630), True, 'from matplotlib import pyplot as plt\n'), ((24857, 24885), 'matplotlib.pyplot.Subplot', 'plt.Subplot', (['fig', 'outer[idx]'], {}), '(fig, outer[idx])\n', (24868, 24885), True, 'from matplotlib import pyplot as plt\n'), ((8429, 8454), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8435, 8454), True, 'import numpy as np\n'), ((8482, 8507), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8488, 8507), True, 'import numpy as np\n'), ((8521, 8546), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8527, 8546), True, 'import numpy as np\n'), ((8602, 8627), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8608, 8627), True, 'import numpy as np\n'), ((8656, 8681), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], 
{}), '(t1 + t2 + t3 + t4)\n', (8662, 8681), True, 'import numpy as np\n'), ((8695, 8720), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8701, 8720), True, 'import numpy as np\n'), ((8781, 8806), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8787, 8806), True, 'import numpy as np\n'), ((8835, 8860), 'numpy.max', 'np.max', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8841, 8860), True, 'import numpy as np\n'), ((8874, 8899), 'numpy.min', 'np.min', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8880, 8899), True, 'import numpy as np\n'), ((11677, 11702), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11683, 11702), True, 'import numpy as np\n'), ((11730, 11755), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11736, 11755), True, 'import numpy as np\n'), ((11769, 11794), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11775, 11794), True, 'import numpy as np\n'), ((11850, 11875), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11856, 11875), True, 'import numpy as np\n'), ((11904, 11929), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11910, 11929), True, 'import numpy as np\n'), ((11943, 11968), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11949, 11968), True, 'import numpy as np\n'), ((12029, 12054), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12035, 12054), True, 'import numpy as np\n'), ((12083, 12108), 'numpy.max', 'np.max', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12089, 12108), True, 'import numpy as np\n'), ((12122, 12147), 'numpy.min', 'np.min', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (12128, 12147), True, 'import numpy as np\n'), ((28348, 28369), 'tensorflow.sigmoid', 'tf.sigmoid', (['final_pre'], {}), 
'(final_pre)\n', (28358, 28369), True, 'import tensorflow as tf\n'), ((2283, 2317), 'itertools.combinations', 'combinations', (['user_feature_list', '(2)'], {}), '(user_feature_list, 2)\n', (2295, 2317), False, 'from itertools import combinations\n'), ((2338, 2372), 'itertools.combinations', 'combinations', (['item_feature_list', '(2)'], {}), '(item_feature_list, 2)\n', (2350, 2372), False, 'from itertools import combinations\n'), ((2482, 2527), 'itertools.product', 'product', (['user_feature_list', 'item_feature_list'], {}), '(user_feature_list, item_feature_list)\n', (2489, 2527), False, 'from itertools import product\n'), ((8951, 8969), 'numpy.nanmin', 'np.nanmin', (['(t1 + t2)'], {}), '(t1 + t2)\n', (8960, 8969), True, 'import numpy as np\n'), ((8971, 8989), 'numpy.nanmax', 'np.nanmax', (['(t1 + t2)'], {}), '(t1 + t2)\n', (8980, 8989), True, 'import numpy as np\n'), ((12198, 12231), 'numpy.nanmin', 'np.nanmin', (['(v1 + v2 + v3 + v4 + v5)'], {}), '(v1 + v2 + v3 + v4 + v5)\n', (12207, 12231), True, 'import numpy as np\n'), ((12233, 12266), 'numpy.nanmax', 'np.nanmax', (['(v1 + v2 + v3 + v4 + v5)'], {}), '(v1 + v2 + v3 + v4 + v5)\n', (12242, 12266), True, 'import numpy as np\n'), ((16856, 16887), 'numpy.ceil', 'np.ceil', (['(max_ids / cols_per_row)'], {}), '(max_ids / cols_per_row)\n', (16863, 16887), True, 'import numpy as np\n'), ((19245, 19308), 'numpy.round', 'np.round', (["(100 * data_dict_global[feature_name]['importance'])", '(1)'], {}), "(100 * data_dict_global[feature_name]['importance'], 1)\n", (19253, 19308), True, 'import numpy as np\n'), ((20149, 20212), 'numpy.round', 'np.round', (["(100 * data_dict_global[feature_name]['importance'])", '(1)'], {}), "(100 * data_dict_global[feature_name]['importance'], 1)\n", (20157, 20212), True, 'import numpy as np\n'), ((24110, 24141), 'numpy.ceil', 'np.ceil', (['(max_ids / cols_per_row)'], {}), '(max_ids / cols_per_row)\n', (24117, 24141), True, 'import numpy as np\n'), ((25446, 25509), 'numpy.round', 
'np.round', (["(100 * data_dict_global[feature_name]['importance'])", '(1)'], {}), "(100 * data_dict_global[feature_name]['importance'], 1)\n", (25454, 25509), True, 'import numpy as np\n'), ((26911, 26974), 'numpy.round', 'np.round', (["(100 * data_dict_global[feature_name]['importance'])", '(1)'], {}), "(100 * data_dict_global[feature_name]['importance'], 1)\n", (26919, 26974), True, 'import numpy as np\n'), ((4794, 4814), 'numpy.nanmin', 'np.nanmin', (['main_loss'], {}), '(main_loss)\n', (4803, 4814), True, 'import numpy as np\n'), ((4816, 4836), 'numpy.nanmax', 'np.nanmax', (['main_loss'], {}), '(main_loss)\n', (4825, 4836), True, 'import numpy as np\n'), ((6297, 6318), 'numpy.nanmin', 'np.nanmin', (['inter_loss'], {}), '(inter_loss)\n', (6306, 6318), True, 'import numpy as np\n'), ((6320, 6341), 'numpy.nanmax', 'np.nanmax', (['inter_loss'], {}), '(inter_loss)\n', (6329, 6341), True, 'import numpy as np\n'), ((20761, 20826), 'numpy.array', 'np.array', (["data_dict_global[feature_name1]['density']['names'][1:]"], {}), "(data_dict_global[feature_name1]['density']['names'][1:])\n", (20769, 20826), True, 'import numpy as np\n'), ((20848, 20914), 'numpy.array', 'np.array', (["data_dict_global[feature_name1]['density']['names'][:-1]"], {}), "(data_dict_global[feature_name1]['density']['names'][:-1])\n", (20856, 20914), True, 'import numpy as np\n'), ((21836, 21901), 'numpy.array', 'np.array', (["data_dict_global[feature_name2]['density']['names'][1:]"], {}), "(data_dict_global[feature_name2]['density']['names'][1:])\n", (21844, 21901), True, 'import numpy as np\n'), ((21923, 21989), 'numpy.array', 'np.array', (["data_dict_global[feature_name2]['density']['names'][:-1]"], {}), "(data_dict_global[feature_name2]['density']['names'][:-1])\n", (21931, 21989), True, 'import numpy as np\n'), ((8018, 8046), 'numpy.nanmin', 'np.nanmin', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8027, 8046), True, 'import numpy as np\n'), ((8059, 8087), 'numpy.nanmax', 
'np.nanmax', (['(t1 + t2 + t3 + t4)'], {}), '(t1 + t2 + t3 + t4)\n', (8068, 8087), True, 'import numpy as np\n'), ((11263, 11291), 'numpy.nanmin', 'np.nanmin', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11272, 11291), True, 'import numpy as np\n'), ((11304, 11332), 'numpy.nanmax', 'np.nanmax', (['(v1 + v2 + v3 + v4)'], {}), '(v1 + v2 + v3 + v4)\n', (11313, 11332), True, 'import numpy as np\n'), ((22406, 22455), 'numpy.max', 'np.max', (["data_dict_global[feature_name]['outputs']"], {}), "(data_dict_global[feature_name]['outputs'])\n", (22412, 22455), True, 'import numpy as np\n'), ((22494, 22543), 'numpy.min', 'np.min', (["data_dict_global[feature_name]['outputs']"], {}), "(data_dict_global[feature_name]['outputs'])\n", (22500, 22543), True, 'import numpy as np\n'), ((26552, 26601), 'numpy.max', 'np.max', (["data_dict_global[feature_name]['outputs']"], {}), "(data_dict_global[feature_name]['outputs'])\n", (26558, 26601), True, 'import numpy as np\n'), ((26640, 26689), 'numpy.min', 'np.min', (["data_dict_global[feature_name]['outputs']"], {}), "(data_dict_global[feature_name]['outputs'])\n", (26646, 26689), True, 'import numpy as np\n'), ((4434, 4454), 'numpy.nanmin', 'np.nanmin', (['main_loss'], {}), '(main_loss)\n', (4443, 4454), True, 'import numpy as np\n'), ((4466, 4486), 'numpy.nanmax', 'np.nanmax', (['main_loss'], {}), '(main_loss)\n', (4475, 4486), True, 'import numpy as np\n'), ((5933, 5954), 'numpy.nanmin', 'np.nanmin', (['inter_loss'], {}), '(inter_loss)\n', (5942, 5954), True, 'import numpy as np\n'), ((5966, 5987), 'numpy.nanmax', 'np.nanmax', (['inter_loss'], {}), '(inter_loss)\n', (5975, 5987), True, 'import numpy as np\n'), ((17661, 17725), 'numpy.array', 'np.array', (["data_dict_global[feature_name]['density']['names'][1:]"], {}), "(data_dict_global[feature_name]['density']['names'][1:])\n", (17669, 17725), True, 'import numpy as np\n'), ((17757, 17822), 'numpy.array', 'np.array', 
(["data_dict_global[feature_name]['density']['names'][:-1]"], {}), "(data_dict_global[feature_name]['density']['names'][:-1])\n", (17765, 17822), True, 'import numpy as np\n')] |
'''
Algorithm for testing trained model
'''
import numpy as np
import pandas as pd
from keras.layers import Input
from keras import backend as K
from keras.models import load_model, Model
from dataset import load_dataset
def test_model():
testData, testLabels = load_dataset()
# DEFINE MODEL PARAMETERS
model = load_model(MODEL_PATH)
print(model.summary())
predict = model.predict(testData)
threshold = 0.5
predict[predict > threshold] = 1
predict[predict <= threshold] = 0
y_test_non_category = [ np.argmax(t) for t in testLabels ]
y_predict_non_category = [ np.argmax(z) for z in predict ]
cm = ConfusionMatrix(actual_vector=y_test_non_category, predict_vector=y_predict_non_category)
cm.save_html("classification_report")
# print('\nConfusion Matrix: \n', cm, '\n')
print(os.system("scp classification_report.html alex@192.168.0.180:~/Documents/git/oil_class"))
# target_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
# cm = confusion_matrix(y_test_non_category, y_predict_non_category)
# print('\nConfusion Matrix: \n', cm)
cr = classification_report(y_test_non_category, y_predict_non_category)
print('\nClassification Report: \n', cr)
scores = model.evaluate(testData, testLabels)
print("\nAccuracy: %.2f%%" % (scores[1]*100))
if __name__ == "__main__":
test_model()
| [
"keras.models.load_model",
"dataset.load_dataset",
"numpy.argmax"
] | [((272, 286), 'dataset.load_dataset', 'load_dataset', ([], {}), '()\n', (284, 286), False, 'from dataset import load_dataset\n'), ((330, 352), 'keras.models.load_model', 'load_model', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (340, 352), False, 'from keras.models import load_model, Model\n'), ((543, 555), 'numpy.argmax', 'np.argmax', (['t'], {}), '(t)\n', (552, 555), True, 'import numpy as np\n'), ((609, 621), 'numpy.argmax', 'np.argmax', (['z'], {}), '(z)\n', (618, 621), True, 'import numpy as np\n')] |
# Stiffness matrix calculation.
import numpy as np
import scipy
import sys, os
sys.path.append(os.path.dirname(sys.path[0]))
from otherFunctions import numericalIntegration
from elementLibrary import shapeFunction,invShapeFunction
def triFE(coord,Dmat):
nInt=1
(pos,wei)=numericalIntegration.gaussTri(nInt)
Kloc=np.zeros((2*len(coord),2*len(coord)))
for i in range(nInt):
(Bmat,Jacobian)=shapeFunction.shapeTriFE(coord,pos[i,0],pos[i,1])[1:3]
Kloc+=0.5*np.matmul(Bmat.transpose(),np.matmul(Dmat,Bmat))*Jacobian*wei[i]
return Kloc
def triQuadFE(coord,Dmat):
nInt=3
(pos,wei)=numericalIntegration.gaussTri(nInt)
Kloc=np.zeros((2*len(coord),2*len(coord)))
for i in range(nInt):
(Bmat,Jacobian)=shapeFunction.shapeTriQuadFE(coord,pos[i,0],pos[i,1])[1:3]
Kloc+=0.5*np.matmul(Bmat.transpose(),np.matmul(Dmat,Bmat))*Jacobian*wei[i]
return Kloc
def quadFE(coord,Dmat):
nInt=2 # 2 quadrature points in each direction.
(pos,wei)=numericalIntegration.gaussQuad(nInt)
Kloc=np.zeros((2*len(coord),2*len(coord)))
for i in range(nInt):
for j in range(nInt):
_,Bmat,Jacobian=shapeFunction.shapeQuadFE(coord,pos[i],pos[j])
Kloc+=np.matmul(Bmat.transpose(),np.matmul(Dmat,Bmat))*Jacobian*wei[i]*wei[j]
return Kloc
def ICMFE(coord,Dmat): # For square elements
nInt=2 # 2 quadrature points in each direction.
(pos,wei)=numericalIntegration.gaussQuad(nInt)
Kloc=np.zeros((2*len(coord),2*len(coord)))
Hloc=np.zeros((4,4))
Eloc=np.zeros((2*len(coord),4))
for i in range(nInt):
for j in range(nInt):
Bmat,Gmat,Jacobian=shapeFunction.shapeICMFE(coord,pos[i],pos[j])
Kloc+=np.matmul(Bmat.transpose(),np.matmul(Dmat,Bmat))*Jacobian*wei[i]*wei[j]
Hloc+=np.matmul(Gmat.transpose(),np.matmul(Dmat,Gmat))*Jacobian*wei[i]*wei[j]
Eloc+=np.matmul(Bmat.transpose(),np.matmul(Dmat,Gmat))*Jacobian*wei[i]*wei[j]
Kloc-=Eloc@np.linalg.solve(Hloc,Eloc.transpose())
return Kloc,Hloc,Eloc
def quadQuadFE(coord,Dmat):
nInt=3 # 2 quadrature points in each direction.
(pos,wei)=numericalIntegration.gaussQuad(nInt)
Kloc=np.zeros((2*len(coord),2*len(coord)))
for i in range(nInt):
for j in range(nInt):
_,Bmat,Jacobian=shapeFunction.shapeQuadQuadFE(coord,pos[i],pos[j])
Kloc+=np.matmul(Bmat.transpose(),np.matmul(Dmat,Bmat))*Jacobian*wei[i]*wei[j]
return Kloc
def sparsifyElementMatrix(K,numbering1,numbering2=None):
"""Convert the element stiffness matrix into COO format for assembling."""
if numbering2 is None: numbering2=numbering1
IArray=[]
JArray=[]
VArray=[]
for i in range(2*len(numbering1)):
for j in range(2*len(numbering2)):
IArray.append(2*numbering1[i//2]+i%2)
JArray.append(2*numbering2[j//2]+j%2)
VArray.append(K[i,j])
return IArray,JArray,VArray
def lowerOrderAMORE(coordTri,Dmat,coord1,rho1,coord2=None,rho2=None):
if coord2 is None:
coord2=coord1
rho2=rho1
if coord1.shape[0]+coord2.shape[0]==6: nInt=3
elif coord1.shape[0]+coord2.shape[0]==7: nInt=4
else: nInt=6
pos,wei=numericalIntegration.gaussTri(nInt)
LCmat1=getLC(coordTri,rho1)
if rho2 is rho1: LCmat2=LCmat1
else: LCmat2=getLC(coordTri,rho2)
Kloc=np.zeros((2*coord1.shape[0],2*coord2.shape[0]))
for i in range(nInt):
Nmat,_,Jacobian=shapeFunction.shapeTriFE2(coordTri,pos[i,0],pos[i,1])
rho1Value=(Nmat@rho1)[0]
xy=(Nmat@coordTri).reshape(-1)
Bmat1=getStrainMatrix(LCmat1,coord1,xy,rho1Value)
if rho2 is rho1: Bmat2=Bmat1
else:
rho2Value=(Nmat@rho2)[0]
Bmat2=getStrainMatrix(LCmat2,coord2,xy,rho2Value)
Kloc+=0.5*np.matmul(Bmat1.transpose(),np.matmul(Dmat,Bmat2))*Jacobian*wei[i]
return Kloc
def quadraticAMORE(coordTri,Dmat,coord1,rho1,coord2=None,rho2=None):
"""For a straight-edge triangle. It can be curved only at the boundary."""
if coord2 is None:
coord2=coord1
rho2=rho1
if coord1.shape[0]+coord2.shape[0]==6: nInt=3 # 3+3
elif coord1.shape[0]+coord2.shape[0]==7: nInt=4 # 3+4
elif coord1.shape[0]+coord2.shape[0]==8: nInt=6 # 4+4
elif coord1.shape[0]+coord2.shape[0]==9: nInt=4 # 3+6
elif coord1.shape[0]+coord2.shape[0]==10: nInt=6 # 4+6
elif coord1.shape[0]+coord2.shape[0]==12: nInt=9 # 3+9
elif coord1.shape[0]+coord2.shape[0]==13: nInt=12 # 4+9
else: nInt=16 # 9+9
pos,wei=numericalIntegration.gaussTri(nInt)
Kloc=np.zeros((2*coord1.shape[0],2*coord2.shape[0]))
if len(coordTri)==3:
LCmat1=getLC(coordTri,rho1) # A constant matrix
if rho2 is rho1: LCmat2=LCmat1
else: LCmat2=getLC(coordTri,rho2)
for i in range(nInt):
Nmat,_,Jacobian=shapeFunction.shapeTriFE2(coordTri,pos[i,0],pos[i,1])
rho1Value=(Nmat@rho1)[0]
xy=(Nmat@coordTri).reshape(-1)
Bmat1=getStrainMatrix(LCmat1,coord1,xy,rho1Value)
if rho2 is rho1: Bmat2=Bmat1
else:
rho2Value=(Nmat@rho2)[0]
Bmat2=getStrainMatrix(LCmat2,coord2,xy,rho2Value)
Kloc+=0.5*np.matmul(Bmat1.transpose(),np.matmul(Dmat,Bmat2))*Jacobian*wei[i]
elif len(coordTri)==6:
for i in range(nInt):
LCmat1=getLC(coordTri,rho1,pos[i,0],pos[i,1]) # No longer constant
if rho2 is rho1: LCmat2=LCmat1
else: LCmat2=getLC(coordTri,rho2,pos[i,0],pos[i,1])
Nmat,_,Jacobian=shapeFunction.shapeTriQuadFE2(coordTri,pos[i,0],pos[i,1])
rho1Value=pos[i,0]*rho1[0]+pos[i,1]*rho1[1]+pos[i,2]*rho1[2]
xy=(Nmat@coordTri).reshape(-1)
Bmat1=getStrainMatrix(LCmat1,coord1,xy,rho1Value)
if rho2 is rho1: Bmat2=Bmat1
else:
rho2Value=pos[i,0]*rho2[0]+pos[i,1]*rho2[1]+pos[i,2]*rho2[2]
Bmat2=getStrainMatrix(LCmat2,coord2,xy,rho2Value)
Kloc+=0.5*np.matmul(Bmat1.transpose(),np.matmul(Dmat,Bmat2))*Jacobian*wei[i]
else: raise ValueError
return Kloc
def getStrainMatrix(LCmat,coord,xy,rho):
if coord.shape[0]==4:
isoCoord=invShapeFunction.invQuad(coord,xy)
Nmat,Bmat,_=shapeFunction.shapeQuadFE(coord,isoCoord[0],isoCoord[1])
elif coord.shape[0]==9:
isoCoord=invShapeFunction.invQuadQuad(coord,xy)
Nmat,Bmat,_=shapeFunction.shapeQuadQuadFE(coord,isoCoord[0],isoCoord[1])
elif coord.shape[0]==6:
isoCoord=invShapeFunction.invTriQuad(coord,xy)
Nmat,Bmat,_=shapeFunction.shapeTriQuadFE(coord,isoCoord[0],isoCoord[1])
elif coord.shape[0]==3:
isoCoord=invShapeFunction.invTri(coord,xy)
Nmat,Bmat,_=shapeFunction.shapeTriFE(coord,isoCoord[0],isoCoord[1])
else: raise ValueError
return (LCmat@Nmat) + (rho*Bmat)
def getLC(coordTri,rho,r=0.0,s=0.0):
if len(coordTri)==3:
dudx=shapeFunction.shapeTriFE2(coordTri,r,s)[1]
drhodx=dudx@rho
elif len(coordTri)==6:
dudx=shapeFunction.shapeTriQuadFE2(coordTri,r,s)[1]
rhoExtension=np.zeros(6)
rhoExtension[0:3]=rho
rhoExtension[3:6]=0.5*(rho+rho[[1,2,0]])
drhodx=dudx@rhoExtension
else: raise ValueError
LC=np.array([[drhodx[0],0.0],[0.0,drhodx[1]],[drhodx[1],drhodx[0]]])
return LC
def getCoordFromHalfEdge(edge):
"""Obtain the coordinates of a triangle in the half-edge form."""
startingEdge=edge
coord=[]
coord.append([edge.origin.x,edge.origin.y])
edge=edge.next
while edge is not startingEdge:
coord.append([edge.origin.x,edge.origin.y])
edge=edge.next
coord=np.array(coord,dtype='d')
return coord
if __name__=='__main__':
pass | [
"otherFunctions.numericalIntegration.gaussTri",
"elementLibrary.invShapeFunction.invTri",
"os.path.dirname",
"elementLibrary.shapeFunction.shapeTriFE2",
"numpy.zeros",
"elementLibrary.invShapeFunction.invQuadQuad",
"elementLibrary.shapeFunction.shapeTriFE",
"otherFunctions.numericalIntegration.gaussQu... | [((96, 124), 'os.path.dirname', 'os.path.dirname', (['sys.path[0]'], {}), '(sys.path[0])\n', (111, 124), False, 'import sys, os\n'), ((282, 317), 'otherFunctions.numericalIntegration.gaussTri', 'numericalIntegration.gaussTri', (['nInt'], {}), '(nInt)\n', (311, 317), False, 'from otherFunctions import numericalIntegration\n'), ((630, 665), 'otherFunctions.numericalIntegration.gaussTri', 'numericalIntegration.gaussTri', (['nInt'], {}), '(nInt)\n', (659, 665), False, 'from otherFunctions import numericalIntegration\n'), ((1020, 1056), 'otherFunctions.numericalIntegration.gaussQuad', 'numericalIntegration.gaussQuad', (['nInt'], {}), '(nInt)\n', (1050, 1056), False, 'from otherFunctions import numericalIntegration\n'), ((1461, 1497), 'otherFunctions.numericalIntegration.gaussQuad', 'numericalIntegration.gaussQuad', (['nInt'], {}), '(nInt)\n', (1491, 1497), False, 'from otherFunctions import numericalIntegration\n'), ((1555, 1571), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (1563, 1571), True, 'import numpy as np\n'), ((2189, 2225), 'otherFunctions.numericalIntegration.gaussQuad', 'numericalIntegration.gaussQuad', (['nInt'], {}), '(nInt)\n', (2219, 2225), False, 'from otherFunctions import numericalIntegration\n'), ((3273, 3308), 'otherFunctions.numericalIntegration.gaussTri', 'numericalIntegration.gaussTri', (['nInt'], {}), '(nInt)\n', (3302, 3308), False, 'from otherFunctions import numericalIntegration\n'), ((3425, 3477), 'numpy.zeros', 'np.zeros', (['(2 * coord1.shape[0], 2 * coord2.shape[0])'], {}), '((2 * coord1.shape[0], 2 * coord2.shape[0]))\n', (3433, 3477), True, 'import numpy as np\n'), ((4626, 4661), 'otherFunctions.numericalIntegration.gaussTri', 'numericalIntegration.gaussTri', (['nInt'], {}), '(nInt)\n', (4655, 4661), False, 'from otherFunctions import numericalIntegration\n'), ((4672, 4724), 'numpy.zeros', 'np.zeros', (['(2 * coord1.shape[0], 2 * coord2.shape[0])'], {}), '((2 * 
coord1.shape[0], 2 * coord2.shape[0]))\n', (4680, 4724), True, 'import numpy as np\n'), ((7427, 7497), 'numpy.array', 'np.array', (['[[drhodx[0], 0.0], [0.0, drhodx[1]], [drhodx[1], drhodx[0]]]'], {}), '([[drhodx[0], 0.0], [0.0, drhodx[1]], [drhodx[1], drhodx[0]]])\n', (7435, 7497), True, 'import numpy as np\n'), ((7846, 7872), 'numpy.array', 'np.array', (['coord'], {'dtype': '"""d"""'}), "(coord, dtype='d')\n", (7854, 7872), True, 'import numpy as np\n'), ((3524, 3581), 'elementLibrary.shapeFunction.shapeTriFE2', 'shapeFunction.shapeTriFE2', (['coordTri', 'pos[i, 0]', 'pos[i, 1]'], {}), '(coordTri, pos[i, 0], pos[i, 1])\n', (3549, 3581), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6347, 6382), 'elementLibrary.invShapeFunction.invQuad', 'invShapeFunction.invQuad', (['coord', 'xy'], {}), '(coord, xy)\n', (6371, 6382), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6402, 6460), 'elementLibrary.shapeFunction.shapeQuadFE', 'shapeFunction.shapeQuadFE', (['coord', 'isoCoord[0]', 'isoCoord[1]'], {}), '(coord, isoCoord[0], isoCoord[1])\n', (6427, 6460), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((417, 470), 'elementLibrary.shapeFunction.shapeTriFE', 'shapeFunction.shapeTriFE', (['coord', 'pos[i, 0]', 'pos[i, 1]'], {}), '(coord, pos[i, 0], pos[i, 1])\n', (441, 470), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((765, 822), 'elementLibrary.shapeFunction.shapeTriQuadFE', 'shapeFunction.shapeTriQuadFE', (['coord', 'pos[i, 0]', 'pos[i, 1]'], {}), '(coord, pos[i, 0], pos[i, 1])\n', (793, 822), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((1190, 1238), 'elementLibrary.shapeFunction.shapeQuadFE', 'shapeFunction.shapeQuadFE', (['coord', 'pos[i]', 'pos[j]'], {}), '(coord, pos[i], pos[j])\n', (1215, 1238), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((1695, 1742), 'elementLibrary.shapeFunction.shapeICMFE', 
'shapeFunction.shapeICMFE', (['coord', 'pos[i]', 'pos[j]'], {}), '(coord, pos[i], pos[j])\n', (1719, 1742), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((2359, 2411), 'elementLibrary.shapeFunction.shapeQuadQuadFE', 'shapeFunction.shapeQuadQuadFE', (['coord', 'pos[i]', 'pos[j]'], {}), '(coord, pos[i], pos[j])\n', (2388, 2411), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((4943, 5000), 'elementLibrary.shapeFunction.shapeTriFE2', 'shapeFunction.shapeTriFE2', (['coordTri', 'pos[i, 0]', 'pos[i, 1]'], {}), '(coordTri, pos[i, 0], pos[i, 1])\n', (4968, 5000), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6504, 6543), 'elementLibrary.invShapeFunction.invQuadQuad', 'invShapeFunction.invQuadQuad', (['coord', 'xy'], {}), '(coord, xy)\n', (6532, 6543), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6563, 6625), 'elementLibrary.shapeFunction.shapeQuadQuadFE', 'shapeFunction.shapeQuadQuadFE', (['coord', 'isoCoord[0]', 'isoCoord[1]'], {}), '(coord, isoCoord[0], isoCoord[1])\n', (6592, 6625), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((7093, 7134), 'elementLibrary.shapeFunction.shapeTriFE2', 'shapeFunction.shapeTriFE2', (['coordTri', 'r', 's'], {}), '(coordTri, r, s)\n', (7118, 7134), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((7268, 7279), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (7276, 7279), True, 'import numpy as np\n'), ((5681, 5742), 'elementLibrary.shapeFunction.shapeTriQuadFE2', 'shapeFunction.shapeTriQuadFE2', (['coordTri', 'pos[i, 0]', 'pos[i, 1]'], {}), '(coordTri, pos[i, 0], pos[i, 1])\n', (5710, 5742), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6669, 6707), 'elementLibrary.invShapeFunction.invTriQuad', 'invShapeFunction.invTriQuad', (['coord', 'xy'], {}), '(coord, xy)\n', (6696, 6707), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), 
((6727, 6788), 'elementLibrary.shapeFunction.shapeTriQuadFE', 'shapeFunction.shapeTriQuadFE', (['coord', 'isoCoord[0]', 'isoCoord[1]'], {}), '(coord, isoCoord[0], isoCoord[1])\n', (6755, 6788), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((7200, 7245), 'elementLibrary.shapeFunction.shapeTriQuadFE2', 'shapeFunction.shapeTriQuadFE2', (['coordTri', 'r', 's'], {}), '(coordTri, r, s)\n', (7229, 7245), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6838, 6872), 'elementLibrary.invShapeFunction.invTri', 'invShapeFunction.invTri', (['coord', 'xy'], {}), '(coord, xy)\n', (6861, 6872), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((6892, 6949), 'elementLibrary.shapeFunction.shapeTriFE', 'shapeFunction.shapeTriFE', (['coord', 'isoCoord[0]', 'isoCoord[1]'], {}), '(coord, isoCoord[0], isoCoord[1])\n', (6916, 6949), False, 'from elementLibrary import shapeFunction, invShapeFunction\n'), ((517, 538), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat'], {}), '(Dmat, Bmat)\n', (526, 538), True, 'import numpy as np\n'), ((869, 890), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat'], {}), '(Dmat, Bmat)\n', (878, 890), True, 'import numpy as np\n'), ((3907, 3929), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat2'], {}), '(Dmat, Bmat2)\n', (3916, 3929), True, 'import numpy as np\n'), ((1282, 1303), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat'], {}), '(Dmat, Bmat)\n', (1291, 1303), True, 'import numpy as np\n'), ((1786, 1807), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat'], {}), '(Dmat, Bmat)\n', (1795, 1807), True, 'import numpy as np\n'), ((1876, 1897), 'numpy.matmul', 'np.matmul', (['Dmat', 'Gmat'], {}), '(Dmat, Gmat)\n', (1885, 1897), True, 'import numpy as np\n'), ((1966, 1987), 'numpy.matmul', 'np.matmul', (['Dmat', 'Gmat'], {}), '(Dmat, Gmat)\n', (1975, 1987), True, 'import numpy as np\n'), ((2455, 2476), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat'], {}), '(Dmat, Bmat)\n', (2464, 2476), True, 'import numpy 
as np\n'), ((5366, 5388), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat2'], {}), '(Dmat, Bmat2)\n', (5375, 5388), True, 'import numpy as np\n'), ((6173, 6195), 'numpy.matmul', 'np.matmul', (['Dmat', 'Bmat2'], {}), '(Dmat, Bmat2)\n', (6182, 6195), True, 'import numpy as np\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bezier
import matplotlib.pyplot as plt
import numpy as np
import plot_utils
LABEL_SIZE = 14.0
FONT_SIZE = 20.0
def image1():
figure, (ax1, ax2) = plt.subplots(1, 2)
nodes1 = np.asfortranarray([[0.0, 3.0, 7.0], [5.0, 0.0, 8.0]])
triangle1 = bezier.Surface(nodes1, degree=1)
triangle1.plot(256, ax=ax1)
nodes2 = np.asfortranarray(
[[0.0, 1.0, 2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 1.0, 2.0, 2.0]]
)
triangle2 = bezier.Surface(nodes2, degree=2)
triangle2.plot(256, ax=ax2)
params = np.asfortranarray([[0.125, 0.125], [0.125, 0.75]])
points1 = triangle1.evaluate_cartesian_multi(params)
ax1.plot(points1[0, :], points1[1, :], marker="o", color="black")
points2 = triangle2.evaluate_cartesian_multi(params)
ax2.plot(points2[0, :], points2[1, :], marker="o", color="black")
for ax in (ax1, ax2):
ax.tick_params(labelsize=LABEL_SIZE, which="both")
ax.axis("equal")
ax1.set_title("Convex", fontsize=FONT_SIZE)
ax2.set_title("Not (Necessarily) Convex", fontsize=FONT_SIZE)
figure.set_size_inches(8.74, 4.8)
figure.subplots_adjust(
left=0.05, bottom=0.06, right=0.99, top=0.93, wspace=0.12, hspace=0.2
)
filename = "not_convex.pdf"
path = plot_utils.get_path("slides", filename)
figure.savefig(path)
print("Saved {}".format(filename))
plt.close(figure)
def image2():
figure, (ax1, ax2) = plt.subplots(1, 2)
nodes1a = np.asfortranarray([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
nodes2a = np.asfortranarray([[-0.125, 1.0, 0.125], [-0.0625, 0.5, 0.375]])
nodes1b = np.asfortranarray(
[[0.0, 0.375, 1.0, 0.25, 0.75, 0.5], [0.0, 0.375, 0.0, 0.5, 0.5, 1.0]]
)
nodes2b = np.asfortranarray(
[
[1.0, 0.625, 0.0, 0.75, 0.25, 0.5],
[0.375, -0.125, 0.375, -0.1875, -0.1875, -0.75],
]
)
info = ((nodes1a, nodes2a, ax1), (nodes1b, nodes2b, ax2))
for nodes1, nodes2, ax in info:
triangle1 = bezier.Surface.from_nodes(nodes1)
triangle2 = bezier.Surface.from_nodes(nodes2)
intersections = triangle1.intersect(triangle2)
triangle1.plot(256, ax=ax, color=plot_utils.BLUE)
triangle2.plot(256, ax=ax, color=plot_utils.GREEN)
for intersection in intersections:
intersection.plot(256, ax=ax, color=plot_utils.RED)
for ax in (ax1, ax2):
ax.tick_params(labelsize=LABEL_SIZE, which="both")
ax.axis("equal")
ax1.set_title("Convex Intersection", fontsize=FONT_SIZE)
ax2.set_title("Multiple Intersections", fontsize=FONT_SIZE)
figure.set_size_inches(8.74, 4.8)
figure.subplots_adjust(
left=0.06, bottom=0.06, right=0.99, top=0.93, wspace=0.18, hspace=0.2
)
filename = "split_intersection.pdf"
path = plot_utils.get_path("slides", filename)
figure.savefig(path)
print("Saved {}".format(filename))
plt.close(figure)
def main():
image1()
image2()
if __name__ == "__main__":
plot_utils.set_styles()
main()
| [
"plot_utils.set_styles",
"matplotlib.pyplot.close",
"numpy.asfortranarray",
"plot_utils.get_path",
"bezier.Surface",
"bezier.Surface.from_nodes",
"matplotlib.pyplot.subplots"
] | [((708, 726), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (720, 726), True, 'import matplotlib.pyplot as plt\n'), ((740, 793), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.0, 3.0, 7.0], [5.0, 0.0, 8.0]]'], {}), '([[0.0, 3.0, 7.0], [5.0, 0.0, 8.0]])\n', (757, 793), True, 'import numpy as np\n'), ((810, 842), 'bezier.Surface', 'bezier.Surface', (['nodes1'], {'degree': '(1)'}), '(nodes1, degree=1)\n', (824, 842), False, 'import bezier\n'), ((889, 976), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.0, 1.0, 2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 1.0, 2.0, 2.0]]'], {}), '([[0.0, 1.0, 2.0, 2.0, 2.0, 0.0], [0.0, 0.0, 0.0, 1.0, 2.0,\n 2.0]])\n', (906, 976), True, 'import numpy as np\n'), ((1003, 1035), 'bezier.Surface', 'bezier.Surface', (['nodes2'], {'degree': '(2)'}), '(nodes2, degree=2)\n', (1017, 1035), False, 'import bezier\n'), ((1082, 1132), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.125, 0.125], [0.125, 0.75]]'], {}), '([[0.125, 0.125], [0.125, 0.75]])\n', (1099, 1132), True, 'import numpy as np\n'), ((1807, 1846), 'plot_utils.get_path', 'plot_utils.get_path', (['"""slides"""', 'filename'], {}), "('slides', filename)\n", (1826, 1846), False, 'import plot_utils\n'), ((1915, 1932), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (1924, 1932), True, 'import matplotlib.pyplot as plt\n'), ((1974, 1992), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (1986, 1992), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2061), 'numpy.asfortranarray', 'np.asfortranarray', (['[[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (2025, 2061), True, 'import numpy as np\n'), ((2076, 2140), 'numpy.asfortranarray', 'np.asfortranarray', (['[[-0.125, 1.0, 0.125], [-0.0625, 0.5, 0.375]]'], {}), '([[-0.125, 1.0, 0.125], [-0.0625, 0.5, 0.375]])\n', (2093, 2140), True, 'import numpy as np\n'), ((2155, 2249), 'numpy.asfortranarray', 
'np.asfortranarray', (['[[0.0, 0.375, 1.0, 0.25, 0.75, 0.5], [0.0, 0.375, 0.0, 0.5, 0.5, 1.0]]'], {}), '([[0.0, 0.375, 1.0, 0.25, 0.75, 0.5], [0.0, 0.375, 0.0, \n 0.5, 0.5, 1.0]])\n', (2172, 2249), True, 'import numpy as np\n'), ((2273, 2382), 'numpy.asfortranarray', 'np.asfortranarray', (['[[1.0, 0.625, 0.0, 0.75, 0.25, 0.5], [0.375, -0.125, 0.375, -0.1875, -\n 0.1875, -0.75]]'], {}), '([[1.0, 0.625, 0.0, 0.75, 0.25, 0.5], [0.375, -0.125, \n 0.375, -0.1875, -0.1875, -0.75]])\n', (2290, 2382), True, 'import numpy as np\n'), ((3353, 3392), 'plot_utils.get_path', 'plot_utils.get_path', (['"""slides"""', 'filename'], {}), "('slides', filename)\n", (3372, 3392), False, 'import plot_utils\n'), ((3461, 3478), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (3470, 3478), True, 'import matplotlib.pyplot as plt\n'), ((3552, 3575), 'plot_utils.set_styles', 'plot_utils.set_styles', ([], {}), '()\n', (3573, 3575), False, 'import plot_utils\n'), ((2546, 2579), 'bezier.Surface.from_nodes', 'bezier.Surface.from_nodes', (['nodes1'], {}), '(nodes1)\n', (2571, 2579), False, 'import bezier\n'), ((2600, 2633), 'bezier.Surface.from_nodes', 'bezier.Surface.from_nodes', (['nodes2'], {}), '(nodes2)\n', (2625, 2633), False, 'import bezier\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 16:00:33 2018
@author: tcmorgan2
Reads in wind speed data from tab delimited text files.
The first portion of the file contains header information.
The second portion contains average and standard deviation in wind speed of some time interval.
Imported files are passed through a sqlite database for temporary storage and processing.
A netCDF of raw estimated values is saved into the rawWindData folder. This file includes any gaps in data collection.
"""
import pandas as pd
import re
import logging
import sqlite3 as lite
import numpy as np
import os
from netCDF4 import Dataset
from MiGRIDS.InputHandler.processInputDataFrame import processInputDataFrame
#String, String -> dataframe
def readWindData(inputDict):
'''imports all MET data files in a folder and converts parameters to a dataframe.
:param inputDict: [Dictionary] a dictionary containing file location, datetime and channel information
:return: [Dictionary],[pandas.DataFrame] a dictionary of files that were read and the resulting dataframe of values is returned
'''
#DATETIME = 'Date_&_Time_Stamp'
DATETIME = inputDict['dateColumnName']
def readAsHeader(file, header_dict, componentName):
'''extracts the header information from a MET file.
:param file [File] a MET file to be read.
:param header_dict [Dictionary] a dictionary of header information
:param componentName [String] the name of the channel of interest.
:return [Dictionary] of header information for the file.
'''
inline = file.readline().split('\t')
inline = [re.sub(r"\s+", '_', x.strip()) for x in inline] # strip whitespaces at ends and replaces spaces with underscores
#assumes 'Date & Time Stamp' is the first column name where the dataframe starts.
#we return the dictionary of header information
if inline[0] == DATETIME:
names = inline
return header_dict, names
else:
#assumes that header information for channels are prefixed with 'Channel ...'
if inline[0][0:3] == 'Cha':
#start a new component
componentName = 'CH' + inline[1].rstrip()
header_dict[componentName] = {}
if (componentName is not None) & (len(inline) > 1):
header_dict[componentName][inline[0].rstrip()] = inline[1].rstrip()
return readAsHeader(file, header_dict, componentName)
def readAsData(file, names):
'''reast the data portion of a MET file into a dataframe
:param file [File] the MET file to be read
:param names [ListOf String] the channels to be read.
:return [DataFrame] of values for specified channels with datetime index'''
rowList = []
for line in file:
#these are the data lines with column names
value_dict = {}
cols = line.split('\t')
for i in range(len(names)):
value_dict[names[i]] = cols[i]
rowList.append(value_dict)
filedf = pd.DataFrame(rowList)
return filedf
#if a new channel speficication is encountered within the input files it gets incremented with an appended number
#i.e. Channel 3 was windspeed in input file 1 but in input file 6 it becomes wind direction thus the channel name becomes CH3_1
def channelUp(channel, existing, increment = 1) :
newchannel = channel + '_' + str(increment)
if newchannel not in existing.keys():
return newchannel
else:
increment +=1
return channelUp(channel, existing, increment)
#checks it the channel input information just read in matches the information stored in the working header dictionary
def checkMatch(channel, h, combinedHeader):
for attribute in combinedHeader[channel].keys():
if combinedHeader[channel][attribute]!= h[channel][attribute]:
return False
return True
#adds a new channel to the combined header dictionary if it doesn't exist yet
def addChannel(channel, h, combinedHeader, oc):
combinedHeader[channel]={'Description':h[oc]['Description'],
'Height':h[oc]['Height'],
'Offset':h[oc]['Offset'],
'Scale_Factor':h[oc]['Scale_Factor'],
'Units':h[oc]['Units']}
return combinedHeader
# a data class for estimating and storing windspeed data collected at intervals
class windRecord():
def __init__(self, sigma=25, mu=250, minws = 0, maxws = 20, datetime = None):
self.sigma = sigma
self.mu = mu
self.distribution = None
self.minws = minws
self.maxws = maxws
self.datetime = datetime
def getDatetime(self):
return self.datetime
#finds the previous value based on timestamp
def getStart(self, duration, df):
#find the wind record immediately prior to current windrecord
previousrecordtime = self.getDatetime() - duration
sorteddf = df.sort_values('time')
myvalue = sorteddf['values'][sorteddf['time'] < previousrecordtime][-1:]
if len(myvalue) > 1:
myvalue = myvalue[0]
elif len(myvalue) == 0:
myvalue = None
return myvalue
#self, integer, numeric,string, integer
def getValues(self, elapsed_time, start,interval, tau = None):
mu = self.mu
sigma = self.sigma
timestep = pd.Timedelta(interval).seconds
#number of records to estimate
n = int(elapsed_time/timestep)
#tau scales the relationship between time and change in value of x
#larger values result in larger drift and diffusion
if tau is None:
tau = n
x = np.zeros(n)
#renormalized variables
sigma_bis = sigma * np.sqrt(2.0 /n)
sqrtdt = np.sqrt(timestep)
x[0] = start
#np.random is the random gaussian with mean 0
for i in range(n-1):
x[i+1] = x[i] + timestep*(-(x[i]-mu)/tau) + sigma_bis * sqrtdt * np.random.randn()
return x
def estimateDistribution(self, records,interval, start = None, tau = None):
if start is None:
start = self.minws
tau = records
x = self.getValues(records, start, interval, tau)
t = pd.date_range(self.datetime - pd.to_timedelta(pd.Timedelta(interval).seconds * records, unit='s'), periods=records,freq='s')
self.distribution = [x,t]
return
#a dictionary of files that are read
fileDict = {}
df = pd.DataFrame()
for root, dirs, files in os.walk(inputDict['fileLocation']):
for f in files:
with open(os.path.join(root, f), 'r',errors='ignore') as file:
#read the header information of each file
if (file.name)[-3:] == 'txt':
print(os.path.basename(file.name))
data = pd.DataFrame()
headerDict = {}
headerDict, names = readAsHeader(file, headerDict, None)
fileDict[file.name] = headerDict
for n in names:
if n not in df.columns:
df[n] = None
data[n] = None
#read the data from each file
fileData = readAsData(file, names)
df = pd.concat([df, fileData], axis=0, ignore_index=True)
if file.name in fileDict.keys():
fileDict[file.name]['rows'] = len(fileData)
df = df.set_index(pd.to_datetime(df[DATETIME]))
df = df.apply(pd.to_numeric, errors='ignore')
df = df.sort_index()
combinedHeader = {}
fileLog = fileDict
#check that there isn't mismatched header information
for f in fileLog.keys():
h = fileLog[f]
rows = 0
for channel in h.keys():
if channel == 'rows':
rows += h[channel]
else:
if channel in combinedHeader.keys():
#check that the values match
if not checkMatch(channel, h, combinedHeader):
addChannel(channelUp(channel, combinedHeader), h, combinedHeader, channel)
else:
#add the channel
addChannel(channel, h, combinedHeader, channel)
def createNetCDF(df, increment):
# create a netcdf file
dtype = 'float'
# column = df.columns.values[i]
ncName = os.path.join(inputDict['fileLocation'], (str(increment) + 'WS.nc'))
rootgrp = Dataset(ncName, 'w', format='NETCDF4') # create netCDF object
rootgrp.createDimension('time', None) # create dimension for all called time
# create the time variable
rootgrp.createVariable('time', dtype, 'time') # create a var using the varnames
rootgrp.variables['time'][:] = pd.to_timedelta(pd.Series(df.index)).values.dt.total_seconds().astype(int)
# create the value variable
rootgrp.createVariable('value', dtype, 'time') # create a var using the varnames
rootgrp.variables['value'][:] = np.array(winddf['values']) # fill with values
# assign attributes
rootgrp.variables['time'].units = 'seconds' # set unit attribute
rootgrp.variables['value'].units = 'm/s' # set unit attribute
rootgrp.variables['value'].Scale = 1 # set unit attribute
rootgrp.variables['value'].offset = 0 # set unit attribute
# close file
rootgrp.close()
#now we need to fill in the gaps between sampling points
#apply to every row, 10 minutes = 600 seconds
def fillWindRecords(df, channels):
    """For each wind channel in `channels`, expand the 10-minute summary rows
    of `df` into a dense synthetic series (601 samples at '1s' spacing per
    interval), buffering through a per-channel sqlite table before writing a
    netCDF file via createNetCDF().

    Returns the deduplicated, datetime-indexed dataframe of the LAST channel
    processed.
    """
    database = os.path.join(inputDict['fileLocation'], 'wind.db')
    connection = lite.connect(database)
    for k in channels:
        logging.info(k)
        newdf = df.copy()
        newdf = newdf.sort_index()
        # strip the channel name from the column labels (e.g. "WS1 Avg" -> "Avg")
        newColumns = [x.replace(k,'').rstrip() for x in newdf.columns]
        newdf.columns = newColumns
        # empty accumulator for the generated (time, value) samples
        valuesdf = pd.DataFrame()
        valuesdf['time'] = None
        valuesdf['values'] = None
        newdf['date'] = pd.to_datetime(newdf.index)
        #turn the df records into windrecords
        ListOfWindRecords = newdf.apply(lambda x: windRecord(x['SD'], x['Avg'], x['Min'], x['Max'], x['date']), 1)
        logging.info(len(ListOfWindRecords))
        #k is a list of values for each 10 minute interval
        recordCount = 0
        for r in ListOfWindRecords:
            #logging.info(recordCount)
            start = r.getStart(pd.Timedelta(minutes=10), valuesdf)
            recordCount +=1
            # 601 one-second samples cover a 10-minute interval inclusive
            r.estimateDistribution(601,'1s',start)
            valuesdf = pd.concat([valuesdf,pd.DataFrame({'time':r.distribution[1],'values':r.distribution[0]})])
            # NOTE(review): self-assignment below is a no-op — possibly a
            # leftover from a removed unit conversion; confirm intent.
            valuesdf['values'] = valuesdf['values']
            #every 5000 records flush the buffer to sqlite and reset it
            if recordCount%5000 == 0:
                valuesdf.to_sql('windrecord' + k, connection, if_exists='append')
                connection.commit()
                valuesdf = pd.DataFrame()
                valuesdf['time'] = None
                valuesdf['values'] = None
        # flush the remaining buffered records, then read the full series back
        valuesdf.to_sql('windrecord' + k, connection, if_exists='append')
        winddf = pd.read_sql_query("select * from windrecord" + k, connection)
        winddf = winddf.set_index(pd.to_datetime(winddf[DATETIME], unit='s'))
        winddf = winddf[~winddf.index.duplicated(keep='first')]
        try:
            # NOTE(review): closing the connection here breaks any subsequent
            # channel iteration — confirm `channels` is expected to have one
            # entry, or move the close/remove outside the loop.
            createNetCDF(winddf, k)
            connection.close()
            os.remove(database)
        except:
            # NOTE(review): bare except hides the real failure; consider
            # logging the caught exception before falling through.
            print ('An error occured. Current results are stored in %s' %database)
    return winddf
inputDict['df'] = df
# only choose the channels desired
winddf = processInputDataFrame(inputDict)
return fileDict, winddf
| [
"pandas.DataFrame",
"netCDF4.Dataset",
"os.remove",
"numpy.random.randn",
"os.path.basename",
"os.walk",
"numpy.zeros",
"logging.info",
"pandas.to_datetime",
"MiGRIDS.InputHandler.processInputDataFrame.processInputDataFrame",
"numpy.array",
"sqlite3.connect",
"pandas.read_sql_query",
"pand... | [((6882, 6896), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6894, 6896), True, 'import pandas as pd\n'), ((6931, 6965), 'os.walk', 'os.walk', (["inputDict['fileLocation']"], {}), "(inputDict['fileLocation'])\n", (6938, 6965), False, 'import os\n'), ((12320, 12352), 'MiGRIDS.InputHandler.processInputDataFrame.processInputDataFrame', 'processInputDataFrame', (['inputDict'], {}), '(inputDict)\n', (12341, 12352), False, 'from MiGRIDS.InputHandler.processInputDataFrame import processInputDataFrame\n'), ((3121, 3142), 'pandas.DataFrame', 'pd.DataFrame', (['rowList'], {}), '(rowList)\n', (3133, 3142), True, 'import pandas as pd\n'), ((7915, 7943), 'pandas.to_datetime', 'pd.to_datetime', (['df[DATETIME]'], {}), '(df[DATETIME])\n', (7929, 7943), True, 'import pandas as pd\n'), ((8953, 8991), 'netCDF4.Dataset', 'Dataset', (['ncName', '"""w"""'], {'format': '"""NETCDF4"""'}), "(ncName, 'w', format='NETCDF4')\n", (8960, 8991), False, 'from netCDF4 import Dataset\n'), ((9507, 9533), 'numpy.array', 'np.array', (["winddf['values']"], {}), "(winddf['values'])\n", (9515, 9533), True, 'import numpy as np\n'), ((10077, 10127), 'os.path.join', 'os.path.join', (["inputDict['fileLocation']", '"""wind.db"""'], {}), "(inputDict['fileLocation'], 'wind.db')\n", (10089, 10127), False, 'import os\n'), ((10149, 10171), 'sqlite3.connect', 'lite.connect', (['database'], {}), '(database)\n', (10161, 10171), True, 'import sqlite3 as lite\n'), ((6007, 6018), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (6015, 6018), True, 'import numpy as np\n'), ((6124, 6141), 'numpy.sqrt', 'np.sqrt', (['timestep'], {}), '(timestep)\n', (6131, 6141), True, 'import numpy as np\n'), ((10212, 10227), 'logging.info', 'logging.info', (['k'], {}), '(k)\n', (10224, 10227), False, 'import logging\n'), ((10440, 10454), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10452, 10454), True, 'import pandas as pd\n'), ((10558, 10585), 'pandas.to_datetime', 'pd.to_datetime', (['newdf.index'], {}), 
'(newdf.index)\n', (10572, 10585), True, 'import pandas as pd\n'), ((11771, 11832), 'pandas.read_sql_query', 'pd.read_sql_query', (["('select * from windrecord' + k)", 'connection'], {}), "('select * from windrecord' + k, connection)\n", (11788, 11832), True, 'import pandas as pd\n'), ((5676, 5698), 'pandas.Timedelta', 'pd.Timedelta', (['interval'], {}), '(interval)\n', (5688, 5698), True, 'import pandas as pd\n'), ((6087, 6103), 'numpy.sqrt', 'np.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (6094, 6103), True, 'import numpy as np\n'), ((11872, 11914), 'pandas.to_datetime', 'pd.to_datetime', (['winddf[DATETIME]'], {'unit': '"""s"""'}), "(winddf[DATETIME], unit='s')\n", (11886, 11914), True, 'import pandas as pd\n'), ((12092, 12111), 'os.remove', 'os.remove', (['database'], {}), '(database)\n', (12101, 12111), False, 'import os\n'), ((7013, 7034), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (7025, 7034), False, 'import os\n'), ((7252, 7266), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7264, 7266), True, 'import pandas as pd\n'), ((7732, 7784), 'pandas.concat', 'pd.concat', (['[df, fileData]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([df, fileData], axis=0, ignore_index=True)\n', (7741, 7784), True, 'import pandas as pd\n'), ((11013, 11037), 'pandas.Timedelta', 'pd.Timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (11025, 11037), True, 'import pandas as pd\n'), ((11566, 11580), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (11578, 11580), True, 'import pandas as pd\n'), ((6340, 6357), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (6355, 6357), True, 'import numpy as np\n'), ((7196, 7223), 'os.path.basename', 'os.path.basename', (['file.name'], {}), '(file.name)\n', (7212, 7223), False, 'import os\n'), ((11184, 11254), 'pandas.DataFrame', 'pd.DataFrame', (["{'time': r.distribution[1], 'values': r.distribution[0]}"], {}), "({'time': r.distribution[1], 'values': r.distribution[0]})\n", (11196, 11254), 
True, 'import pandas as pd\n'), ((6677, 6699), 'pandas.Timedelta', 'pd.Timedelta', (['interval'], {}), '(interval)\n', (6689, 6699), True, 'import pandas as pd\n'), ((9281, 9300), 'pandas.Series', 'pd.Series', (['df.index'], {}), '(df.index)\n', (9290, 9300), True, 'import pandas as pd\n')] |
import numpy as np
import scipy.optimize as opt
def OneStep(des,ndes,nfaces):
    """Perform one elementary Markov move: transfer a pip from one die to
    another.  If the move would leave a die below 1 or above `nfaces`,
    return an (unchanged) copy of the input configuration instead.
    """
    donor = np.random.randint(0, ndes)
    receiver = donor
    while receiver == donor:
        receiver = np.random.randint(0, ndes)
    candidate = np.copy(des)
    candidate[donor] -= 1
    candidate[receiver] += 1
    move_is_valid = (candidate[donor] > 0) and (candidate[receiver] < nfaces + 1)
    if move_is_valid:
        return candidate
    return np.copy(des)
def DicesToString(des):
    """Encode a dice configuration as a dash-separated string, e.g. "1-2-3"."""
    pieces = [str(value) + "-" for value in des[:-1]]
    pieces.append(str(des[-1]))
    return "".join(pieces)
def StringToDice(destexte):
    """Parse a dash-separated dice string (e.g. "1-2-3") into a numpy array.

    Inverse of DicesToString.  Uses the builtin int() instead of np.int,
    which was deprecated in NumPy 1.20 and removed in 1.24.
    """
    liste = destexte.split('-')
    des = np.zeros(len(liste))
    for i, s in enumerate(liste):
        des[i] = int(s)
    return des
def TiragesDes(ndes=3,somme=8,nfaces=6,ntirages=100,npas=10):
    """Draw `ntirages` random configurations of `ndes` dice summing to `somme`.

    Each draw advances a Markov chain (via OneStep) by `npas` elementary
    moves, starting from the most even split of `somme` over the dice.

    Returns
    -------
    ndarray of int, shape (ndes, ntirages) — one configuration per column.

    Raises
    ------
    ValueError
        If `somme` cannot be realised with `ndes` dice of `nfaces` faces.
    """
    # builtin int replaces np.int, which was removed from NumPy (1.24)
    tirages = np.zeros((ndes, ntirages), dtype=int)
    if ((somme < ndes) or (somme > nfaces * ndes)):
        raise ValueError("La valeur de l'argument somme n'est pas valide" )
    # most even split: somme//ndes pips everywhere, +1 on the first somme%ndes dice
    desinit = np.ones(ndes) * somme // ndes
    desinit[0:somme % ndes] = desinit[0:somme % ndes] + 1
    des = np.copy(desinit)
    for tirage in range(ntirages):
        for pas in range(npas):
            des = OneStep(des, ndes, nfaces)
        tirages[:, tirage] = np.copy(des)
    return tirages
def HistogrammeDes(tirages):
    """Count how often each dice configuration (column) appears in `tirages`.

    Returns a dict mapping the DicesToString() encoding to its frequency.
    """
    ndes, ntirages = tirages.shape
    counts = {}
    for column in range(ntirages):
        label = DicesToString(tirages[:, column])
        counts[label] = counts.get(label, 0) + 1
    return counts
def Proba(tirages,ides,nfaces=6):
    """Empirical probability of each face for die number `ides`.

    Returns an array of length `nfaces` whose i-th entry is the fraction of
    draws in which die `ides` shows face i+1.
    """
    ndes, ntirages = tirages.shape
    if ides > ndes:
        raise ValueError("Le numéro du dé demandé, {}, est plus grand que le nombre de dés utilisé pour générer tirages, {}".format(ides,ndes))
    observed = tirages[ides, :]
    frequencies = np.zeros(nfaces)
    for face in range(nfaces):
        frequencies[face] = np.sum(observed == face + 1) / ntirages
    return frequencies
def f(x,nfaces,target):
    """Analytic mean of the weighted die minus `target`.

    Used as the root function in BoltzmannParam: its zero in x gives the
    weight whose mean face value equals `target`.
    """
    numerator = nfaces * x ** (nfaces + 1) - (nfaces + 1) * x ** nfaces + 1
    denominator = x ** (nfaces + 1) - x ** nfaces - x + 1
    return numerator / denominator - target
def BoltzmannParam(mean,nfaces=6):
    """Solve for the Boltzmann parameters (n0, q) giving the requested mean.

    Finds the root x of f(x, nfaces, mean) with a secant start, then derives
    n0 = -1/log(x) and the partition-like factor q.
    """
    solution = opt.root_scalar(f, args=(nfaces, mean), x0=0.3, x1=0)
    root = solution.root
    n0 = -1 / np.log(root)
    q = root * (root ** nfaces - 1) / (root - 1)
    return n0, q
| [
"numpy.sum",
"numpy.log",
"numpy.copy",
"scipy.optimize.root_scalar",
"numpy.zeros",
"numpy.ones",
"numpy.random.randint",
"numpy.int"
] | [((87, 113), 'numpy.random.randint', 'np.random.randint', (['(0)', 'ndes'], {}), '(0, ndes)\n', (104, 113), True, 'import numpy as np\n'), ((189, 201), 'numpy.copy', 'np.copy', (['des'], {}), '(des)\n', (196, 201), True, 'import numpy as np\n'), ((745, 785), 'numpy.zeros', 'np.zeros', (['(ndes, ntirages)'], {'dtype': 'np.int'}), '((ndes, ntirages), dtype=np.int)\n', (753, 785), True, 'import numpy as np\n'), ((1005, 1021), 'numpy.copy', 'np.copy', (['desinit'], {}), '(desinit)\n', (1012, 1021), True, 'import numpy as np\n'), ((1652, 1668), 'numpy.zeros', 'np.zeros', (['nfaces'], {}), '(nfaces)\n', (1660, 1668), True, 'import numpy as np\n'), ((1947, 2000), 'scipy.optimize.root_scalar', 'opt.root_scalar', (['f'], {'args': '(nfaces, mean)', 'x0': '(0.3)', 'x1': '(0)'}), '(f, args=(nfaces, mean), x0=0.3, x1=0)\n', (1962, 2000), True, 'import scipy.optimize as opt\n'), ((149, 175), 'numpy.random.randint', 'np.random.randint', (['(0)', 'ndes'], {}), '(0, ndes)\n', (166, 175), True, 'import numpy as np\n'), ((341, 353), 'numpy.copy', 'np.copy', (['des'], {}), '(des)\n', (348, 353), True, 'import numpy as np\n'), ((645, 654), 'numpy.int', 'np.int', (['s'], {}), '(s)\n', (651, 654), True, 'import numpy as np\n'), ((1156, 1168), 'numpy.copy', 'np.copy', (['des'], {}), '(des)\n', (1163, 1168), True, 'import numpy as np\n'), ((2022, 2031), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (2028, 2031), True, 'import numpy as np\n'), ((920, 933), 'numpy.ones', 'np.ones', (['ndes'], {}), '(ndes)\n', (927, 933), True, 'import numpy as np\n'), ((1714, 1747), 'numpy.sum', 'np.sum', (['(tirages[ides, :] == i + 1)'], {}), '(tirages[ides, :] == i + 1)\n', (1720, 1747), True, 'import numpy as np\n')] |
import numpy as np
import os
import re
from sys import argv, exit
import cv2
from tqdm import tqdm
def natural_sort(l):
    """Sort strings so embedded numbers compare numerically ("a2" < "a10")."""
    def tokenize(text):
        # digit runs become ints, everything else compares case-insensitively
        return int(text) if text.isdigit() else text.lower()

    def natural_key(key):
        return [tokenize(chunk) for chunk in re.split('([0-9]+)', key)]

    return sorted(l, key=natural_key)
def getWarp(imgFile, H):
    """Load `imgFile`, convert BGR->RGB and warp it with homography `H`.

    Returns the warped image as an 800x800 array.
    """
    bgr = cv2.imread(imgFile)
    rgb = np.array(cv2.cvtColor(np.array(bgr), cv2.COLOR_BGR2RGB))
    warped = cv2.warpPerspective(rgb, H, (800, 800))
    return warped
if __name__ == '__main__':
    # usage: python <script> <directory of .png frames> <homography .npy file>
    rgbDir = argv[1]
    HFile = argv[2]
    # collect the .png frames in natural (numeric) filename order
    rgbImgs = natural_sort(os.listdir(rgbDir))
    rgbImgs = [os.path.join(rgbDir, img) for img in rgbImgs if ".png" in img]
    H = np.load(HFile)  # 3x3 homography matrix saved by numpy
    for imgFile in tqdm(rgbImgs, total=len(rgbImgs)):
        warpImg = getWarp(imgFile, H)
        # NOTE: overwrites the source image in place
        cv2.imwrite(imgFile, warpImg)
"cv2.warpPerspective",
"numpy.load",
"re.split",
"cv2.imwrite",
"cv2.imread",
"numpy.array",
"os.path.join",
"os.listdir"
] | [((345, 364), 'cv2.imread', 'cv2.imread', (['imgFile'], {}), '(imgFile)\n', (355, 364), False, 'import cv2\n'), ((437, 476), 'cv2.warpPerspective', 'cv2.warpPerspective', (['im1', 'H', '(800, 800)'], {}), '(im1, H, (800, 800))\n', (456, 476), False, 'import cv2\n'), ((730, 744), 'numpy.load', 'np.load', (['HFile'], {}), '(HFile)\n', (737, 744), True, 'import numpy as np\n'), ((630, 648), 'os.listdir', 'os.listdir', (['rgbDir'], {}), '(rgbDir)\n', (640, 648), False, 'import os\n'), ((662, 687), 'os.path.join', 'os.path.join', (['rgbDir', 'img'], {}), '(rgbDir, img)\n', (674, 687), False, 'import os\n'), ((831, 860), 'cv2.imwrite', 'cv2.imwrite', (['imgFile', 'warpImg'], {}), '(imgFile, warpImg)\n', (842, 860), False, 'import cv2\n'), ((394, 407), 'numpy.array', 'np.array', (['im1'], {}), '(im1)\n', (402, 407), True, 'import numpy as np\n'), ((244, 269), 're.split', 're.split', (['"""([0-9]+)"""', 'key'], {}), "('([0-9]+)', key)\n", (252, 269), False, 'import re\n')] |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2013, 2014 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import numpy
from microanalyst.model import welladdr
from microanalyst.model.commons import flatten, slice_or_index
class Gene(str):
    """A gene name bound to the (microplate, well) position it occupies."""

    def __new__(cls, model, name, well, microplate):
        # str is immutable, so the textual value must be fixed in __new__
        return str.__new__(cls, name)

    def __init__(self, model, name, well, microplate):
        super(Gene, self).__init__()
        self.model = model
        self.name = name
        self.well_name = well
        self.microplate_name = microplate

    def __call__(self):
        """Return the (microplate, well) address of this gene."""
        return (self.microplate_name, self.well_name)

    def values(self, iteration=None, spreadsheet=None):
        """Return the data samples recorded for this gene's well.

        >>> gene.values()
        [[ 0.7385      0.66869998  0.66420001]
         [ 0.74629998  0.70660001  0.63870001]
         [ 0.71689999  0.78380001  0.72259998]]
        """
        return self.model.values(
            iteration=iteration,
            spreadsheet=spreadsheet,
            microplate=self.microplate_name,
            well=self.well_name)
class Genes(object):
    """Helper class for handling genes' names.

    Builds a 2-D matrix (microplate x well) of Gene instances from the
    model's JSON ``genes`` section and offers name- and address-based
    lookups.  Duplicate gene names are reported on stderr at construction.
    """

    def __init__(self, model, microplate_names):
        # NOTE(review): when the 'genes' key is absent only genes_matrix is
        # initialized; get_by_name() would raise AttributeError in that case.
        if not u'genes' in model.json_data:
            self.genes_matrix = None
        else:
            genes_json = model.json_data[u'genes']
            self.microplate_names = microplate_names
            self.microplate_names_with_genes = sorted(set(genes_json))
            self.indexof = {
                x: i for i, x in enumerate(self.microplate_names_with_genes)
            }
            self.genes_matrix = self._process(model, genes_json)
            # map lower-cased gene name -> list of Gene instances
            self.genes = {}
            for gene in self.genes_matrix.ravel():
                if gene:
                    gene_name = gene.name.lower()
                    if gene_name in self.genes:
                        self.genes[gene_name].append(gene)
                    else:
                        self.genes[gene_name] = [gene]
            self._check_duplicates()

    def get_by_name(self, name):
        """Get gene by its name (case insensitive)."""
        name = name.lower()
        if name in self.genes:
            return self.genes[name][0]
        else:
            return None

    def get(self, well, microplate):
        """Return a sorted list of genes' names."""
        if self.genes_matrix is None:
            return []
        if microplate is None:
            x = slice_or_index(None)
        else:
            if microplate in self.microplate_names_with_genes:
                x = slice_or_index(self.indexof[microplate])
            else:
                if microplate in self.microplate_names:
                    return []
                else:
                    raise KeyError('Unknown microplate "%s"' % microplate)
        y = slice_or_index(welladdr.indexof(well))
        if not (well is None or microplate is None):
            # single (microplate, well) cell: return its genes as-is
            return flatten([self.genes_matrix[x, y]])
        else:
            # a whole row/column/matrix: deduplicate and sort
            return sorted(set(flatten(self.genes_matrix[x, y])))

    def get_used(self):
        """Return a sorted list of genes' names used in the experiment."""
        if self.genes_matrix is None:
            return []
        genes = []
        for name in self.microplate_names:
            genes.extend(self.get(microplate=name, well=None))
        return sorted(set(genes))

    def _process(self, model, genes_json):
        """Return a 2d array (microplate x well) of gene names."""
        microplates = []
        for microplate in self.microplate_names_with_genes:
            wells = []
            for well in welladdr.names():
                if well in genes_json[microplate]:
                    name = genes_json[microplate][well]
                    wells.append(Gene(model, name, well, microplate))
                else:
                    wells.append(None)
            microplates.append(wells)
        return numpy.array(microplates)

    def _check_duplicates(self):
        """Warn about duplicate instances of genes on microplates."""
        duplicates = set()
        for name in self.genes:
            if len(self.genes[name]) > 1:
                duplicates.add(name)
        for name in sorted(duplicates):
            original_name = self.genes[name][0].name
            instances = ['("%s"/%s)' % (
                x.microplate_name, x.well_name) for x in self.genes[name]
            ]
            message = 'Warning: duplicate gene "%s" at %s'
            # BUGFIX: the original used the Python-2-only statement
            # `print >> sys.stderr, ...`, which is broken on Python 3;
            # writing to the stream directly works on both.
            sys.stderr.write(
                message % (original_name, ', '.join(instances)) + '\n')
| [
"microanalyst.model.welladdr.names",
"microanalyst.model.commons.slice_or_index",
"microanalyst.model.welladdr.indexof",
"microanalyst.model.commons.flatten",
"numpy.array"
] | [((5143, 5167), 'numpy.array', 'numpy.array', (['microplates'], {}), '(microplates)\n', (5154, 5167), False, 'import numpy\n'), ((3668, 3688), 'microanalyst.model.commons.slice_or_index', 'slice_or_index', (['None'], {}), '(None)\n', (3682, 3688), False, 'from microanalyst.model.commons import flatten, slice_or_index\n'), ((4056, 4078), 'microanalyst.model.welladdr.indexof', 'welladdr.indexof', (['well'], {}), '(well)\n', (4072, 4078), False, 'from microanalyst.model import welladdr\n'), ((4153, 4187), 'microanalyst.model.commons.flatten', 'flatten', (['[self.genes_matrix[x, y]]'], {}), '([self.genes_matrix[x, y]])\n', (4160, 4187), False, 'from microanalyst.model.commons import flatten, slice_or_index\n'), ((4833, 4849), 'microanalyst.model.welladdr.names', 'welladdr.names', ([], {}), '()\n', (4847, 4849), False, 'from microanalyst.model import welladdr\n'), ((3786, 3826), 'microanalyst.model.commons.slice_or_index', 'slice_or_index', (['self.indexof[microplate]'], {}), '(self.indexof[microplate])\n', (3800, 3826), False, 'from microanalyst.model.commons import flatten, slice_or_index\n'), ((4232, 4264), 'microanalyst.model.commons.flatten', 'flatten', (['self.genes_matrix[x, y]'], {}), '(self.genes_matrix[x, y])\n', (4239, 4264), False, 'from microanalyst.model.commons import flatten, slice_or_index\n')] |
import itertools
from pathlib import Path
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
)
import bootstraphistogram
import matplotlib.pyplot as plt
import numpy as np
from bootstraphistogram.bootstraphistogram import BootstrapHistogram
from pandas.core.frame import DataFrame
from tabulate import tabulate
from toolz.itertoolz import groupby
from watchopticalanalysis.algorithm import Algorithm
from watchopticalanalysis.category import Category
from watchopticalanalysis.internal.selectiondefs import SelectionDefs
from watchopticalanalysis.internal.variable import (
AnalysisVariableDefs,
BonsaiVariableDefs,
)
from watchopticalmc import AnalysisEventTuple
from watchopticalutils.collectionutils import summap
from watchopticalutils.histoutils.categorybootstraphistogram import (
CategoryBootstrapHistogram,
)
from watchopticalutils.histoutils.selection import Selection
_BONSAIVARIABLES = [
BonsaiVariableDefs.innerPE_over_mcenergy,
BonsaiVariableDefs.deltar,
]
_ANALYSISVARIABLES = [
AnalysisVariableDefs.totalcharge,
AnalysisVariableDefs.totalcharge_over_mcenergy,
]
class _Key(NamedTuple):
    """Identifies one histogram: variable name, selection name, sub-event."""

    variable: str
    selection: str
    # sub-event index; None, 0 or 1 in practice (presumably None means
    # "all sub-events" — confirm against the histogram builders)
    subevent: Optional[int]
class Resolution(Algorithm["Resolution.Result", None]):
    """Plot resolution."""

    def __init__(self, output: Path) -> None:
        # directory under which tables and plots are written
        self._output = output
        super().__init__()

    class Result:
        """Mergeable container of per-key resolution histograms."""

        def __init__(self, hist: Dict[_Key, CategoryBootstrapHistogram]) -> None:
            self.hist = hist

        def __add__(self, rhs: "Resolution.Result") -> "Resolution.Result":
            # element-wise sum of the two histogram dictionaries
            return Resolution.Result(summap((self.hist, rhs.hist)))

    def key(self) -> Optional[str]:
        return "Resolution"

    def apply(self, data: AnalysisEventTuple) -> "Resolution.Result":
        return self.Result(_make_resolution(data))

    def finish(self, result: "Resolution.Result") -> None:
        # write the tables and plots once all partial results are merged
        _dumpresolutiontables(result, dest=self._output)
        _summaryplot(result, dest=self._output)
        return
def _make_resolution(
    tree: AnalysisEventTuple,
) -> Dict[_Key, CategoryBootstrapHistogram]:
    """Build every resolution histogram (bonsai + analysis) for one event tuple."""
    collected: Dict[_Key, CategoryBootstrapHistogram] = {}
    _make_bonsai_hists(tree, collected)
    _make_analysisvar_hists(tree, collected)
    return collected
def _make_bonsai_hists(
    tree: AnalysisEventTuple, hist: Dict[_Key, CategoryBootstrapHistogram]
):
    """Fill `hist` in place with one bonsai-variable histogram per
    (selection, variable, subevent) combination.
    """
    for (selection, variable, subevent) in itertools.product(
        SelectionDefs, _BONSAIVARIABLES, (None, 0, 1)
    ):
        key = _Key(variable.name, selection.name, subevent)
        # NOTE(review): `subevent` goes into the key but is NOT forwarded to
        # _makebonsaibootstraphistogram (which defaults to subevent=0), so all
        # three keys appear to receive identical histograms — confirm intent.
        hist[key] = _makebonsaibootstraphistogram(
            tree, variable.value.binning, variable.value, selection=selection.value
        )
    return
def _make_analysisvar_hists(
    tree: AnalysisEventTuple, hist: Dict[_Key, CategoryBootstrapHistogram]
):
    """Fill `hist` in place with one analysis-variable histogram per
    (selection, variable, subevent) combination.

    Returns `hist` (unlike _make_bonsai_hists, which returns None).
    """
    for (selection, variable, subevent) in itertools.product(
        SelectionDefs, _ANALYSISVARIABLES, (None, 0, 1)
    ):
        key = _Key(variable.name, selection.name, subevent)
        # NOTE(review): `subevent` is used in the key but not passed to the
        # histogram builder — same concern as in _make_bonsai_hists.
        hist[key] = _makeanalysisvarbootstraphistogram(
            tree, variable.value.binning, variable.value, selection=selection.value
        )
    return hist
def _makebonsaibootstraphistogram(
    tree: AnalysisEventTuple,
    binning: bootstraphistogram.axis.Axis,
    x: Callable[[DataFrame], Any],
    w: Optional[Callable[[DataFrame], Any]] = None,
    selection: Selection = SelectionDefs.nominal.value,
    subevent: int = 0,
) -> CategoryBootstrapHistogram:
    """Histogram x(rows) of the selected bonsai frame, weighted by w(rows).

    Rows are filtered by `selection`, then reduced to the `subevent`-th
    entry of each MC event (groupby("mcid").nth).  The histogram is tagged
    with the event category derived from `tree`.
    """
    histo = CategoryBootstrapHistogram(binning)
    category = Category.fromAnalysisEventTuple(tree)
    data = selection(tree.bonsai).groupby("mcid").nth(subevent)
    xv = np.asarray(x(data))
    # weights are optional; None means unweighted fill
    wv = None if not w else np.asarray(w(data))
    histo.fill(category, xv, weight=wv)
    return histo
def _makeanalysisvarbootstraphistogram(
    tree: AnalysisEventTuple,
    binning: bootstraphistogram.axis.Axis,
    x: Callable[[AnalysisEventTuple], Any],
    w: Optional[Callable[[AnalysisEventTuple], Any]] = None,
    selection: Selection = SelectionDefs.nominal.value,
    subevent: int = 0,
) -> CategoryBootstrapHistogram:
    """Histogram x(tree) (whole-event analysis variable), weighted by w(tree).

    NOTE(review): unlike the bonsai variant, `selection` and `subevent` are
    accepted but never used in this body — confirm whether they should be
    applied or removed.
    """
    histo = CategoryBootstrapHistogram(binning)
    category = Category.fromAnalysisEventTuple(tree)
    xv = np.asarray(x(tree))
    # weights are optional; None means unweighted fill
    wv = None if not w else np.asarray(w(tree))
    histo.fill(category, xv, weight=wv)
    return histo
def _dumpresolutiontables(result: Resolution.Result, dest: Path) -> None:
    """Write one set of resolution tables per histogram under dest/resolution."""
    outdir = dest / "resolution"
    for key, histogram in result.hist.items():
        _make_resolution_table(key, histogram, outdir)
    return
def _make_resolution_table(key: _Key, hist: CategoryBootstrapHistogram, dest: Path):
    """Write the resolution table for `key` in several text formats.

    Output path: dest/<tablefmt>/<key parts joined by '_'>.<ext>
    """
    table = _make_resolution_table_str(hist)
    # BUGFIX: the first entry used ".md" which produced filenames like
    # "name..md" once "." was prepended below; extensions are now stored
    # without the dot and joined uniformly.
    for (tablefmt, ext) in [
        ("simple", "md"),
        ("csv", "csv"),
        ("html", "html"),
        ("plain", "txt"),
    ]:
        path = dest / tablefmt / ("_".join(map(str, key)) + "." + ext)
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w") as f:
            f.write(tabulate(table, tablefmt=tablefmt))
def _make_resolution_table_str(hist: CategoryBootstrapHistogram) -> List[List[Any]]:
    """Assemble the header row plus one data row per histogram category."""
    header = [
        "Event type",
        "Attenuation",
        "Scattering",
        "mean",
        # BUGFIX: header typo "mear err." corrected
        "mean err.",
        "std. dev.",
        "std. dev. err.",
    ]
    return [header] + _make_resolution_table_rows(hist)
def _make_resolution_table_rows(hist: CategoryBootstrapHistogram) -> List[List[Any]]:
    """One table row per (category, histogram) item, in sorted category order."""
    pairs = ((item.category, item.histogram) for item in sorted(hist))
    return [_calcrow(category, histogram) for category, histogram in pairs]
def _calcrow(category: Category, histogram: BootstrapHistogram) -> List[Any]:
    """Build one table row: the category fields followed by mean/sigma values."""
    mu, mu_err = _calcmu(histogram)
    sig, sig_err = _calcsigma(histogram)
    return list(category) + [mu, mu_err, sig, sig_err]
def _calcmu(histogram: BootstrapHistogram) -> Tuple[float, float]:
    """Return the (weighted mean, bootstrap error) of a histogram.

    The mean is the bin-center average weighted by bin contents; the error
    is the spread of that mean across the bootstrap samples.
    """

    def _binned_mean(h):
        try:
            return np.average(h.axes[0].centers, weights=h.view())
        except ZeroDivisionError:
            # empty histogram: no weight to average over.
            # np.nan replaces np.NaN, an alias removed in NumPy 2.0.
            return np.nan

    mu = _binned_mean(histogram.nominal)
    err = np.std(
        [
            _binned_mean(histogram.samples[:, sample])
            for sample in range(histogram.numsamples)
        ]
    )
    return (mu, err)
def _calcsigma(histogram: BootstrapHistogram) -> Tuple[float, float]:
    """Return the (weighted std. dev., bootstrap error) of a histogram.

    The standard deviation is computed from the bin centers weighted by bin
    contents; the error is its spread across the bootstrap samples.
    """

    def _binned_std(h):
        try:
            values = h.axes[0].centers
            weights = h.view()
            mu = np.average(values, weights=weights)
            var = np.average((values - mu) ** 2, weights=weights)
            return np.sqrt(var)
        except ZeroDivisionError:
            # empty histogram: no weight to average over.
            # np.nan replaces np.NaN, an alias removed in NumPy 2.0.
            return np.nan

    std = _binned_std(histogram.nominal)
    err = np.std(
        [
            _binned_std(histogram.samples[:, sample])
            for sample in range(histogram.numsamples)
        ]
    )
    return (std, err)
def _summaryplot(result: Resolution.Result, dest: Path) -> None:
    """Produce summary plots for every sub-event-0 histogram."""
    outdir = dest / "resolution"
    for key, histogram in result.hist.items():
        # `None == 0` is False, so this matches exactly subevent == 0
        if key.subevent is not None and key.subevent == 0:
            _make_resolution_plot(key, histogram, outdir)
    return
def _make_resolution_plot(key: _Key, hist: CategoryBootstrapHistogram, dest: Path):
    """Emit combined and per-group resolution summary plots for one histogram.

    Runs once per (x-axis attribute, grouping attribute) pairing.
    """
    attribute_pairings = (
        ("attenuation", "scattering"),
        ("scattering", "attenuation"),
    )
    for xattr, groupbyattr in attribute_pairings:
        groups = list(_iter_subplots(hist, xattr=xattr, groupbyattr=groupbyattr))
        assert len(groups) > 0
        # one "all" plot overlaying every group, plus one plot per group
        combos = [("all", "all", groups)]
        for g in groups:
            combos.append(("single", f"{g.groupname}_{g.groupvalue}", [g]))
        for prefix, label, plotdata in combos:
            filename = f"{key.variable}_{key.selection}_resolution_by_{xattr}_{label}"
            _make_resolution_summary_plot(
                plotdata, dest / "summary" / prefix / filename
            )
    return
class _SubplotData(NamedTuple):
    """Points for one curve of a summary plot (one group of histograms)."""

    # attribute used for grouping (e.g. "scattering") and its value
    groupname: str
    groupvalue: float
    # attribute plotted on the x axis
    xvarname: str
    x: np.ndarray
    mean: np.ndarray
    meanerr: np.ndarray
    sigma: np.ndarray
    sigmaerr: np.ndarray
def _iter_subplots(
    hist: CategoryBootstrapHistogram,
    xattr: str = "attenuation",
    groupbyattr: str = "scattering",
) -> Generator[_SubplotData, None, None]:
    """Yield one _SubplotData per value of `groupbyattr`, IBD signal only."""
    signal = (item for item in hist if "ibd" in item.category.eventtype.lower())
    grouped = groupby(lambda item: getattr(item.category, groupbyattr), signal)
    for groupvalue, members in grouped.items():
        xs = np.array([getattr(m.category, xattr) for m in members])
        means, meanerrs = (
            np.array(v) for v in zip(*[_calcmu(m.histogram) for m in members])
        )
        sigmas, sigmaerrs = (
            np.array(v) for v in zip(*[_calcsigma(m.histogram) for m in members])
        )
        yield _SubplotData(
            groupbyattr, groupvalue, xattr, xs, means, meanerrs, sigmas, sigmaerrs
        )
def _make_resolution_summary_plot(
    data: Iterable[_SubplotData], dest: Path, ext: str = ".svg"
):
    """Draw mean and std-dev curves vs. the x attribute, one curve per group.

    Writes two figures: <dest>_mean<ext> and <dest>_stddev<ext>.
    """
    subplots = list(data)
    assert len(subplots) > 0
    figure_specs = [
        (dest.with_name(dest.name + "_mean" + ext), "mean", "meanerr", r"$\mu$"),
        (dest.with_name(dest.name + "_stddev" + ext), "sigma", "sigmaerr", r"$\sigma$"),
    ]
    for fname, value_attr, err_attr, ylabel in figure_specs:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        for sp in subplots:
            ax.errorbar(
                sp.x,
                getattr(sp, value_attr),
                yerr=getattr(sp, err_attr),
                label=f"{sp.groupname}={sp.groupvalue}",
                marker="o",
                capsize=10.0,
                alpha=0.9,
            )
            # last group's x-variable name wins, matching the original code
            ax.set_xlabel(sp.xvarname)
        ax.set_ylabel(ylabel)
        ax.legend(fontsize="small")
        fname.parent.mkdir(exist_ok=True, parents=True)
        fig.tight_layout()
        fig.savefig(fname)
        plt.close(fig)
| [
"watchopticalanalysis.category.Category.fromAnalysisEventTuple",
"numpy.average",
"matplotlib.pyplot.close",
"watchopticalutils.histoutils.categorybootstraphistogram.CategoryBootstrapHistogram",
"watchopticalutils.collectionutils.summap",
"matplotlib.pyplot.figure",
"numpy.array",
"tabulate.tabulate",... | [((2504, 2568), 'itertools.product', 'itertools.product', (['SelectionDefs', '_BONSAIVARIABLES', '(None, 0, 1)'], {}), '(SelectionDefs, _BONSAIVARIABLES, (None, 0, 1))\n', (2521, 2568), False, 'import itertools\n'), ((2952, 3018), 'itertools.product', 'itertools.product', (['SelectionDefs', '_ANALYSISVARIABLES', '(None, 0, 1)'], {}), '(SelectionDefs, _ANALYSISVARIABLES, (None, 0, 1))\n', (2969, 3018), False, 'import itertools\n'), ((3581, 3616), 'watchopticalutils.histoutils.categorybootstraphistogram.CategoryBootstrapHistogram', 'CategoryBootstrapHistogram', (['binning'], {}), '(binning)\n', (3607, 3616), False, 'from watchopticalutils.histoutils.categorybootstraphistogram import CategoryBootstrapHistogram\n'), ((3632, 3669), 'watchopticalanalysis.category.Category.fromAnalysisEventTuple', 'Category.fromAnalysisEventTuple', (['tree'], {}), '(tree)\n', (3663, 3669), False, 'from watchopticalanalysis.category import Category\n'), ((4212, 4247), 'watchopticalutils.histoutils.categorybootstraphistogram.CategoryBootstrapHistogram', 'CategoryBootstrapHistogram', (['binning'], {}), '(binning)\n', (4238, 4247), False, 'from watchopticalutils.histoutils.categorybootstraphistogram import CategoryBootstrapHistogram\n'), ((4263, 4300), 'watchopticalanalysis.category.Category.fromAnalysisEventTuple', 'Category.fromAnalysisEventTuple', (['tree'], {}), '(tree)\n', (4294, 4300), False, 'from watchopticalanalysis.category import Category\n'), ((9421, 9433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9431, 9433), True, 'import matplotlib.pyplot as plt\n'), ((10102, 10116), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (10111, 10116), True, 'import matplotlib.pyplot as plt\n'), ((6510, 6545), 'numpy.average', 'np.average', (['values'], {'weights': 'weights'}), '(values, weights=weights)\n', (6520, 6545), True, 'import numpy as np\n'), ((6564, 6611), 'numpy.average', 'np.average', (['((values - mu) ** 2)'], {'weights': 
'weights'}), '((values - mu) ** 2, weights=weights)\n', (6574, 6611), True, 'import numpy as np\n'), ((6631, 6643), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (6638, 6643), True, 'import numpy as np\n'), ((8827, 8839), 'numpy.array', 'np.array', (['it'], {}), '(it)\n', (8835, 8839), True, 'import numpy as np\n'), ((1715, 1744), 'watchopticalutils.collectionutils.summap', 'summap', (['(self.hist, rhs.hist)'], {}), '((self.hist, rhs.hist))\n', (1721, 1744), False, 'from watchopticalutils.collectionutils import summap\n'), ((5085, 5119), 'tabulate.tabulate', 'tabulate', (['table'], {'tablefmt': 'tablefmt'}), '(table, tablefmt=tablefmt)\n', (5093, 5119), False, 'from tabulate import tabulate\n')] |
import os
from pathlib import Path
import astropy
import numpy as np
from astropy import constants
from astropy import units as u
from astropy.io import ascii
from astropy.time import Time
from .constants import *
# When True, per-point RV errors are replaced by a small placeholder in
# get_arrays() so the jitter term can be inferred by the model instead.
jitter = True # Do this to infer w/ jitter
# get the root datadir from environment variables
p = Path(os.getenv("TWA_DATA_ROOT"))
# sub-directories holding the close-binary, wide-orbit and disk datasets
closedir = p / "close"
widedir = p / "wide"
diskdir = p / "disk"
def get_arrays(asciiTable, errDict=None, jitter=False):
    """
    Reformat ascii tables into pure numpy arrays of the right dimension.

    Returns a two-element list [(date, rv, err), ...] for stars "Aa" and "Ab".
    """
    results = []
    for star in ["Aa", "Ab"]:
        column = asciiTable["RV_" + star]
        # masked columns carry validity flags; plain columns keep every row
        if type(column) is astropy.table.column.MaskedColumn:
            keep = ~column.mask
        else:
            keep = np.ones(len(column), dtype="bool")
        rv = np.ascontiguousarray(column[keep])
        date = np.ascontiguousarray(asciiTable["HJD"][keep]) + 2400000 - jd0
        if errDict is not None:
            err = np.ones(len(date), dtype=np.float64) * errDict[star]
        else:
            err = np.ascontiguousarray(asciiTable["sigma_" + star][keep])
        if jitter:
            # small placeholder error [km/s]; the jitter term is inferred later
            err = np.ones(len(date), dtype=np.float64) * 0.1
        assert len(date) == len(rv), "date - rv length mismatch"
        assert len(date) == len(err), "date - err length mismatch"
        results.append((date, rv, err))
    return results
# load all of the RV data
data_cfa = ascii.read(closedir / "cfa.dat")
# cfa errors are provided in table
cfa1, cfa2 = get_arrays(data_cfa, jitter=jitter)
data_keck = ascii.read(closedir / "keck.dat", format="tab", fill_values=[("X", 0)])
err_keck = {"Aa": 0.63, "Ab": 0.85, "B": 0.59} # km/s
keck1, keck2 = get_arrays(data_keck, err_keck, jitter=jitter)
data_feros = ascii.read(closedir / "feros.dat")
err_feros = {"Aa": 2.61, "Ab": 3.59, "B": 2.60} # km/s
feros1, feros2 = get_arrays(data_feros, err_feros, jitter=jitter)
data_dupont = ascii.read(closedir / "dupont.dat", fill_values=[("X", 0)])
err_dupont = {"Aa": 1.46, "Ab": 2.34, "B": 3.95} # km/s
dupont1, dupont2 = get_arrays(data_dupont, err_dupont, jitter=jitter)
rv_data = [data_cfa, data_keck, data_feros, data_dupont]
# specifically load the B velocities (fixed 0.2 km/s error per point)
mask = ~data_keck["RV_B"].mask
keck3 = (
    np.ascontiguousarray(data_keck["HJD"][mask]) + 2400000 - jd0,
    np.ascontiguousarray(data_keck["RV_B"][mask]),
    0.2 * np.ones(np.sum(mask), dtype=np.float64),
)
# load the Anthonioz astrometric data
# keep in mind that the primary and secondary stars *could* be switched
# separation is in milliarcseconds
int_data = ascii.read(closedir / "int_data.dat")
astro_jd = int_data["epoch"][0] - jd0
rho_data = int_data["sep"][0] * 1e-3 # arcsec
rho_err = int_data["sep_err"][0] * 1e-3 # arcsec
theta_data = int_data["pa"][0] * deg # radians
theta_err = int_data["pa_err"][0] * deg # radians
anthonioz = (astro_jd, rho_data, rho_err, theta_data, theta_err)
# load the wide orbit astrometric dataset
data = ascii.read(
    widedir / "visual_data_besselian.csv", format="csv", fill_values=[("X", "0")]
)
# convert years (Besselian epochs) to JD offsets from jd0
jds = Time(np.ascontiguousarray(data["epoch"]), format="byear").jd - jd0
# fill masked (missing) measurement errors with conservative defaults
data["rho_err"][data["rho_err"].mask == True] = 0.05
data["PA_err"][data["PA_err"].mask == True] = 5.0
# convert all masked frames to be raw np arrays, since theano has issues with astropy masked columns
rho_data = np.ascontiguousarray(data["rho"], dtype=float) # arcsec
rho_err = np.ascontiguousarray(data["rho_err"], dtype=float)
# the position angle measurements come in degrees in the range [0, 360].
# we need to convert this to radians in the range [-pi, pi]
theta_data = np.ascontiguousarray(data["PA"] * deg, dtype=float)
theta_data[theta_data > np.pi] -= 2 * np.pi
theta_err = np.ascontiguousarray(data["PA_err"] * deg) # radians
wds = (jds, rho_data, rho_err, theta_data, theta_err)
# load the disk constraints (MCMC flat chain of disk-fit parameters)
flatchain = np.load(diskdir / "flatchain.npy")
disk_samples = flatchain[:, [0, 9, 10]] # M_A, i_disk, PA_disk (DJ convention)
disk_samples[:, 2] -= 90.0 # convert conventions
disk_samples[:, [1, 2]] *= deg # convert *to* radians
mass_samples, incl_samples, Omega_samples = disk_samples.T
# per-parameter (mean, std) summaries of the disk posterior
disk_properties = {
    "MA": (np.mean(mass_samples), np.std(mass_samples)),
    "incl": (np.mean(incl_samples), np.std(incl_samples)),
    "Omega": (np.mean(Omega_samples), np.std(Omega_samples)),
}
# can we evaluate the multivariate normal approximation to these correlations?
disk_mu = np.mean(disk_samples, axis=0)
disk_cov = np.cov(disk_samples, rowvar=False)
| [
"numpy.load",
"astropy.io.ascii.read",
"numpy.sum",
"numpy.std",
"numpy.mean",
"numpy.cov",
"numpy.ascontiguousarray",
"os.getenv"
] | [((1585, 1617), 'astropy.io.ascii.read', 'ascii.read', (["(closedir / 'cfa.dat')"], {}), "(closedir / 'cfa.dat')\n", (1595, 1617), False, 'from astropy.io import ascii\n'), ((1715, 1786), 'astropy.io.ascii.read', 'ascii.read', (["(closedir / 'keck.dat')"], {'format': '"""tab"""', 'fill_values': "[('X', 0)]"}), "(closedir / 'keck.dat', format='tab', fill_values=[('X', 0)])\n", (1725, 1786), False, 'from astropy.io import ascii\n'), ((1918, 1952), 'astropy.io.ascii.read', 'ascii.read', (["(closedir / 'feros.dat')"], {}), "(closedir / 'feros.dat')\n", (1928, 1952), False, 'from astropy.io import ascii\n'), ((2090, 2149), 'astropy.io.ascii.read', 'ascii.read', (["(closedir / 'dupont.dat')"], {'fill_values': "[('X', 0)]"}), "(closedir / 'dupont.dat', fill_values=[('X', 0)])\n", (2100, 2149), False, 'from astropy.io import ascii\n'), ((2743, 2780), 'astropy.io.ascii.read', 'ascii.read', (["(closedir / 'int_data.dat')"], {}), "(closedir / 'int_data.dat')\n", (2753, 2780), False, 'from astropy.io import ascii\n'), ((3133, 3227), 'astropy.io.ascii.read', 'ascii.read', (["(widedir / 'visual_data_besselian.csv')"], {'format': '"""csv"""', 'fill_values': "[('X', '0')]"}), "(widedir / 'visual_data_besselian.csv', format='csv', fill_values\n =[('X', '0')])\n", (3143, 3227), False, 'from astropy.io import ascii\n'), ((3536, 3582), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['rho']"], {'dtype': 'float'}), "(data['rho'], dtype=float)\n", (3556, 3582), True, 'import numpy as np\n'), ((3603, 3653), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['rho_err']"], {'dtype': 'float'}), "(data['rho_err'], dtype=float)\n", (3623, 3653), True, 'import numpy as np\n'), ((3801, 3852), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["(data['PA'] * deg)"], {'dtype': 'float'}), "(data['PA'] * deg, dtype=float)\n", (3821, 3852), True, 'import numpy as np\n'), ((3910, 3952), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["(data['PA_err'] * deg)"], {}), 
"(data['PA_err'] * deg)\n", (3930, 3952), True, 'import numpy as np\n'), ((4060, 4094), 'numpy.load', 'np.load', (["(diskdir / 'flatchain.npy')"], {}), "(diskdir / 'flatchain.npy')\n", (4067, 4094), True, 'import numpy as np\n'), ((4630, 4659), 'numpy.mean', 'np.mean', (['disk_samples'], {'axis': '(0)'}), '(disk_samples, axis=0)\n', (4637, 4659), True, 'import numpy as np\n'), ((4671, 4705), 'numpy.cov', 'np.cov', (['disk_samples'], {'rowvar': '(False)'}), '(disk_samples, rowvar=False)\n', (4677, 4705), True, 'import numpy as np\n'), ((321, 347), 'os.getenv', 'os.getenv', (['"""TWA_DATA_ROOT"""'], {}), "('TWA_DATA_ROOT')\n", (330, 347), False, 'import os\n'), ((2485, 2530), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data_keck['RV_B'][mask]"], {}), "(data_keck['RV_B'][mask])\n", (2505, 2530), True, 'import numpy as np\n'), ((875, 905), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['rv[mask]'], {}), '(rv[mask])\n', (895, 905), True, 'import numpy as np\n'), ((4371, 4392), 'numpy.mean', 'np.mean', (['mass_samples'], {}), '(mass_samples)\n', (4378, 4392), True, 'import numpy as np\n'), ((4394, 4414), 'numpy.std', 'np.std', (['mass_samples'], {}), '(mass_samples)\n', (4400, 4414), True, 'import numpy as np\n'), ((4430, 4451), 'numpy.mean', 'np.mean', (['incl_samples'], {}), '(incl_samples)\n', (4437, 4451), True, 'import numpy as np\n'), ((4453, 4473), 'numpy.std', 'np.std', (['incl_samples'], {}), '(incl_samples)\n', (4459, 4473), True, 'import numpy as np\n'), ((4490, 4512), 'numpy.mean', 'np.mean', (['Omega_samples'], {}), '(Omega_samples)\n', (4497, 4512), True, 'import numpy as np\n'), ((4514, 4535), 'numpy.std', 'np.std', (['Omega_samples'], {}), '(Omega_samples)\n', (4520, 4535), True, 'import numpy as np\n'), ((1030, 1085), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["asciiTable['sigma_' + star][mask]"], {}), "(asciiTable['sigma_' + star][mask])\n", (1050, 1085), True, 'import numpy as np\n'), ((2419, 2463), 'numpy.ascontiguousarray', 
'np.ascontiguousarray', (["data_keck['HJD'][mask]"], {}), "(data_keck['HJD'][mask])\n", (2439, 2463), True, 'import numpy as np\n'), ((2550, 2562), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (2556, 2562), True, 'import numpy as np\n'), ((3257, 3292), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["data['epoch']"], {}), "(data['epoch'])\n", (3277, 3292), True, 'import numpy as np\n'), ((921, 966), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (["asciiTable['HJD'][mask]"], {}), "(asciiTable['HJD'][mask])\n", (941, 966), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
# import tensorflow as tf
sys.path.append('./fastfiz/fastfiz/')
# sys.path.append('./fastfiz/')
from fastfiz import fz as f
# from fastfiz import rules as r
game_type = 1 #'GT_EIGHTBALL'
"""
Parse the Gamestate from string to ball positions.
GameState format:
GameType, TurnType, timeleft, timeleft_opp, curplayerstarted, numballs
foreach ball : radius, state, type, Point.x, Point.y
1 1 (unknown)
"""
def parse_gamestate(s_gamestate):
s_splt = s_gamestate.split(' ')
s_splt = s_splt[:-3]
s_splt = [float(x) for x in s_splt]
gameType = s_splt[0]
turnType = s_splt[1]
print("Turn type : " + str(turnType))
timeLeft = s_splt[2]
timeLeft_opp = s_splt[3]
curPlayer_started = s_splt[4]
num_balls = s_splt[5]
balls_arr = [s_splt[6+i*5:11+i*5] for i in range((len(s_splt) - 5) // 5)]
return balls_arr
def gen_shot_params(pseed):
return
if __name__=="__main__":
## Define shot params
## Simulate a shot
## Start a new frame
#Gamestate.rack()
gamestate = f.GameState.RackedState(1)
string_gamestate = gamestate.toString()
ball_arr = parse_gamestate(string_gamestate)
# print(ball_arr)
shot = f.ShotParams(0., 0., 5., 270., 2.)
Gshot = f.GameShot()
Gshot.ball = f.Ball.ONE
Gshot.params = shot
Gshot.pocket = 1
Gshot.cue_x = 0.5
Gshot.cue_y = 1.7
gamestate.executeShot(Gshot)
string_gamestate_after = gamestate.toString()
ball_arr_after = parse_gamestate(string_gamestate_after)
print(np.array(ball_arr_after))
| [
"sys.path.append",
"fastfiz.fz.GameShot",
"numpy.array",
"fastfiz.fz.ShotParams",
"fastfiz.fz.GameState.RackedState"
] | [((67, 104), 'sys.path.append', 'sys.path.append', (['"""./fastfiz/fastfiz/"""'], {}), "('./fastfiz/fastfiz/')\n", (82, 104), False, 'import sys\n'), ((1023, 1049), 'fastfiz.fz.GameState.RackedState', 'f.GameState.RackedState', (['(1)'], {}), '(1)\n', (1046, 1049), True, 'from fastfiz import fz as f\n'), ((1164, 1203), 'fastfiz.fz.ShotParams', 'f.ShotParams', (['(0.0)', '(0.0)', '(5.0)', '(270.0)', '(2.0)'], {}), '(0.0, 0.0, 5.0, 270.0, 2.0)\n', (1176, 1203), True, 'from fastfiz import fz as f\n'), ((1208, 1220), 'fastfiz.fz.GameShot', 'f.GameShot', ([], {}), '()\n', (1218, 1220), True, 'from fastfiz import fz as f\n'), ((1467, 1491), 'numpy.array', 'np.array', (['ball_arr_after'], {}), '(ball_arr_after)\n', (1475, 1491), True, 'import numpy as np\n')] |
try:
import cv2
import numpy as np
except ImportError as e:
from pip._internal import main as install
packages = ["numpy", "opencv-python"]
for package in packages:
install(["install", package])
finally:
pass
from stackimages import stackImages
def horizontalStack(images):
horizontalImages = np.hstack(images)
cv2.imshow("Horizontal Images", horizontalImages)
cv2.waitKey(0)
return
def verticalStack(images):
verticalImages = np.vstack(images)
cv2.imshow("Vertical Images", verticalImages)
cv2.waitKey(0)
return
images = np.array([cv2.imread("images/person_4.jpg")
,cv2.imread("images/person_2.jpg"),
])
verticalStack(images)
horizontalStack(images)
img = cv2.imread("images/person_1.jpg")
imgGray = cv2.imread("images/person_1.jpg", 0)
imageStack = stackImages(0.5, ([img, img, img], [img, imgGray, img]))
cv2.imshow("Stuck Images", imageStack)
cv2.waitKey(0) | [
"stackimages.stackImages",
"cv2.waitKey",
"numpy.hstack",
"cv2.imread",
"pip._internal.main",
"cv2.imshow",
"numpy.vstack"
] | [((755, 788), 'cv2.imread', 'cv2.imread', (['"""images/person_1.jpg"""'], {}), "('images/person_1.jpg')\n", (765, 788), False, 'import cv2\n'), ((799, 835), 'cv2.imread', 'cv2.imread', (['"""images/person_1.jpg"""', '(0)'], {}), "('images/person_1.jpg', 0)\n", (809, 835), False, 'import cv2\n'), ((849, 905), 'stackimages.stackImages', 'stackImages', (['(0.5)', '([img, img, img], [img, imgGray, img])'], {}), '(0.5, ([img, img, img], [img, imgGray, img]))\n', (860, 905), False, 'from stackimages import stackImages\n'), ((906, 944), 'cv2.imshow', 'cv2.imshow', (['"""Stuck Images"""', 'imageStack'], {}), "('Stuck Images', imageStack)\n", (916, 944), False, 'import cv2\n'), ((945, 959), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (956, 959), False, 'import cv2\n'), ((329, 346), 'numpy.hstack', 'np.hstack', (['images'], {}), '(images)\n', (338, 346), True, 'import numpy as np\n'), ((351, 400), 'cv2.imshow', 'cv2.imshow', (['"""Horizontal Images"""', 'horizontalImages'], {}), "('Horizontal Images', horizontalImages)\n", (361, 400), False, 'import cv2\n'), ((405, 419), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (416, 419), False, 'import cv2\n'), ((480, 497), 'numpy.vstack', 'np.vstack', (['images'], {}), '(images)\n', (489, 497), True, 'import numpy as np\n'), ((502, 547), 'cv2.imshow', 'cv2.imshow', (['"""Vertical Images"""', 'verticalImages'], {}), "('Vertical Images', verticalImages)\n", (512, 547), False, 'import cv2\n'), ((552, 566), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (563, 566), False, 'import cv2\n'), ((597, 630), 'cv2.imread', 'cv2.imread', (['"""images/person_4.jpg"""'], {}), "('images/person_4.jpg')\n", (607, 630), False, 'import cv2\n'), ((648, 681), 'cv2.imread', 'cv2.imread', (['"""images/person_2.jpg"""'], {}), "('images/person_2.jpg')\n", (658, 681), False, 'import cv2\n'), ((193, 222), 'pip._internal.main', 'install', (["['install', package]"], {}), "(['install', package])\n", (200, 222), True, 'from 
pip._internal import main as install\n')] |
#!/usr/bin/env python
import rospy
import cv2
import duckietown_msgs.msg
#from virtual_mirror_nbuckman import util
from std_msgs.msg import String,UInt8
import numpy as np
from sensor_msgs.msg import CompressedImage,Image
from cv_bridge import CvBridge, CvBridgeError
# Initialize the node with rospy
class ImageAverageNode(object):
def __init__(self):
self.node_name = "Image Average"
self.sub_image = rospy.Subscriber("/ernie/camera_node/image/compressed", CompressedImage,self.avgImage)
self.sub_avgimage = rospy.Subscriber("~average_image/compressed",CompressedImage)
self.pub_avgimage = rospy.Publisher("~average_image/compressed", CompressedImage, queue_size=1)
self.sub_time = rospy.Subscriber("~avg_time", UInt8)
self.pub_time = rospy.Publisher("~avg_time", UInt8,queue_size=1)
self.bridge = CvBridge()
def avgImage(self,msg):
np_array = np.fromstring(msg.data, np.uint8)
image_np = cv2.imdecode(np_array, cv2.CV_LOAD_IMAGE_COLOR)
np_array_avg = np.fromstring(self.suv_abgimage.data, np.uint8)
image_np_avg = cv2.imdecode(np_array_avg, cv2.CV_LOAD_IMAGE_COLOR)
#Get number images before, publish increment
new_time = self.sub_time+1
self.pub_time(new_time)
alpha_new = 1/new_time
alpha_old = 1-alpha_new
avg_data = cv2.addWeighted(image_np,alpha_new,image_np_avg,alpha_old,0)
avg_im = CompressedImage()
avg_im.data = np.array(cv2.imencode('.jpg', avg_data)[1]).tostring()
#flip_im.data = flip_arr.tostring()
avg_im.header.stamp = msg.header.stamp
self.pub_avgimage.publish(avg_im)
def flipImage(self,msg):
# Decode from compressed image
# with OpenCV
cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
#image_cv = cv2.imdecode(np.fromstring(msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
hei_original = image_cv.shape[0]
wid_original = image_cv.shape[1]
#reverseimage = image_cv[:,:,-1]
reverseimg=cv2.flip(cv_image,1)
image_msg_out = self.bridge.cv2_to_imgmsg(reverseimage, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image.publish(image_msg_out)
#self.pub_image.publish(flippedMsg)
#image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
#flippedImage = image_cv
if __name__ == '__main__':
rospy.init_node('image_average_node')
virtual_mirror_node = ImageAverageNode()
rospy.spin() | [
"cv_bridge.CvBridge",
"rospy.Subscriber",
"cv2.imdecode",
"rospy.Publisher",
"cv2.addWeighted",
"cv2.flip",
"rospy.init_node",
"sensor_msgs.msg.CompressedImage",
"cv2.imencode",
"rospy.spin",
"numpy.fromstring"
] | [((2286, 2323), 'rospy.init_node', 'rospy.init_node', (['"""image_average_node"""'], {}), "('image_average_node')\n", (2301, 2323), False, 'import rospy\n'), ((2367, 2379), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2377, 2379), False, 'import rospy\n'), ((411, 502), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/ernie/camera_node/image/compressed"""', 'CompressedImage', 'self.avgImage'], {}), "('/ernie/camera_node/image/compressed', CompressedImage,\n self.avgImage)\n", (427, 502), False, 'import rospy\n'), ((520, 582), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~average_image/compressed"""', 'CompressedImage'], {}), "('~average_image/compressed', CompressedImage)\n", (536, 582), False, 'import rospy\n'), ((604, 679), 'rospy.Publisher', 'rospy.Publisher', (['"""~average_image/compressed"""', 'CompressedImage'], {'queue_size': '(1)'}), "('~average_image/compressed', CompressedImage, queue_size=1)\n", (619, 679), False, 'import rospy\n'), ((698, 734), 'rospy.Subscriber', 'rospy.Subscriber', (['"""~avg_time"""', 'UInt8'], {}), "('~avg_time', UInt8)\n", (714, 734), False, 'import rospy\n'), ((753, 802), 'rospy.Publisher', 'rospy.Publisher', (['"""~avg_time"""', 'UInt8'], {'queue_size': '(1)'}), "('~avg_time', UInt8, queue_size=1)\n", (768, 802), False, 'import rospy\n'), ((818, 828), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (826, 828), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((869, 902), 'numpy.fromstring', 'np.fromstring', (['msg.data', 'np.uint8'], {}), '(msg.data, np.uint8)\n', (882, 902), True, 'import numpy as np\n'), ((916, 963), 'cv2.imdecode', 'cv2.imdecode', (['np_array', 'cv2.CV_LOAD_IMAGE_COLOR'], {}), '(np_array, cv2.CV_LOAD_IMAGE_COLOR)\n', (928, 963), False, 'import cv2\n'), ((984, 1031), 'numpy.fromstring', 'np.fromstring', (['self.suv_abgimage.data', 'np.uint8'], {}), '(self.suv_abgimage.data, np.uint8)\n', (997, 1031), True, 'import numpy as np\n'), ((1051, 1102), 'cv2.imdecode', 'cv2.imdecode', 
(['np_array_avg', 'cv2.CV_LOAD_IMAGE_COLOR'], {}), '(np_array_avg, cv2.CV_LOAD_IMAGE_COLOR)\n', (1063, 1102), False, 'import cv2\n'), ((1273, 1337), 'cv2.addWeighted', 'cv2.addWeighted', (['image_np', 'alpha_new', 'image_np_avg', 'alpha_old', '(0)'], {}), '(image_np, alpha_new, image_np_avg, alpha_old, 0)\n', (1288, 1337), False, 'import cv2\n'), ((1345, 1362), 'sensor_msgs.msg.CompressedImage', 'CompressedImage', ([], {}), '()\n', (1360, 1362), False, 'from sensor_msgs.msg import CompressedImage, Image\n'), ((1914, 1935), 'cv2.flip', 'cv2.flip', (['cv_image', '(1)'], {}), '(cv_image, 1)\n', (1922, 1935), False, 'import cv2\n'), ((1388, 1418), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'avg_data'], {}), "('.jpg', avg_data)\n", (1400, 1418), False, 'import cv2\n')] |
"""Tests for optimization module."""
import numpy as np
from hypothesis import given
from hypothesis.extra.numpy import arrays
from hypothesis.strategies import floats
from hypothesis.strategies import integers
from temfpy.optimization import ackley
from temfpy.optimization import carlberg
from temfpy.optimization import rastrigin
from temfpy.optimization import rosenbrock
def get_strategies(name):
if name == "ackley":
valid_floats = floats(-10000, 10000, allow_nan=False, allow_infinity=False)
x_strategy = arrays(np.float, shape=integers(1, 10), elements=valid_floats)
strategy = (x_strategy, valid_floats, valid_floats, valid_floats)
elif name == "carlberg":
valid_floats_x = floats(-10000, 10000, allow_nan=False, allow_infinity=False)
valid_floats_a = floats(-100, 100, allow_nan=False, allow_infinity=False)
valid_floats_b = floats(0, 10, allow_nan=False, allow_infinity=False)
dim = np.random.randint(1, 10)
x_strategy = arrays(np.float, shape=dim, elements=valid_floats_x)
a_strategy = arrays(np.float, shape=dim, elements=valid_floats_a)
strategy = (x_strategy, a_strategy, valid_floats_b)
elif name == "rastrigin":
valid_floats = floats(-10000, 10000, allow_nan=False, allow_infinity=False)
x_strategy = arrays(np.float, shape=integers(1, 10), elements=valid_floats)
strategy = (x_strategy, valid_floats)
elif name == "rosenbrock":
valid_floats = floats(-10000, 10000, allow_nan=False, allow_infinity=False)
x_strategy = arrays(np.float, shape=integers(2, 10), elements=valid_floats)
strategy = x_strategy
else:
raise NotImplementedError
return strategy
@given(*get_strategies("ackley"))
def test_ackley(x, a, b, c):
ackley(x, a, b, c)
@given(*get_strategies("carlberg"))
def test_carlberg(x, a, b):
carlberg(x, a, b)
@given(*get_strategies("rastrigin"))
def test_rastrigin(x, a):
rastrigin(x, a)
@given(get_strategies("rosenbrock"))
def test_rosenbrock(x):
rosenbrock(x)
| [
"temfpy.optimization.ackley",
"hypothesis.extra.numpy.arrays",
"temfpy.optimization.rosenbrock",
"temfpy.optimization.carlberg",
"numpy.random.randint",
"temfpy.optimization.rastrigin",
"hypothesis.strategies.integers",
"hypothesis.strategies.floats"
] | [((1843, 1861), 'temfpy.optimization.ackley', 'ackley', (['x', 'a', 'b', 'c'], {}), '(x, a, b, c)\n', (1849, 1861), False, 'from temfpy.optimization import ackley\n'), ((1937, 1954), 'temfpy.optimization.carlberg', 'carlberg', (['x', 'a', 'b'], {}), '(x, a, b)\n', (1945, 1954), False, 'from temfpy.optimization import carlberg\n'), ((2029, 2044), 'temfpy.optimization.rastrigin', 'rastrigin', (['x', 'a'], {}), '(x, a)\n', (2038, 2044), False, 'from temfpy.optimization import rastrigin\n'), ((2117, 2130), 'temfpy.optimization.rosenbrock', 'rosenbrock', (['x'], {}), '(x)\n', (2127, 2130), False, 'from temfpy.optimization import rosenbrock\n'), ((468, 528), 'hypothesis.strategies.floats', 'floats', (['(-10000)', '(10000)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(-10000, 10000, allow_nan=False, allow_infinity=False)\n', (474, 528), False, 'from hypothesis.strategies import floats\n'), ((745, 805), 'hypothesis.strategies.floats', 'floats', (['(-10000)', '(10000)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(-10000, 10000, allow_nan=False, allow_infinity=False)\n', (751, 805), False, 'from hypothesis.strategies import floats\n'), ((832, 888), 'hypothesis.strategies.floats', 'floats', (['(-100)', '(100)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(-100, 100, allow_nan=False, allow_infinity=False)\n', (838, 888), False, 'from hypothesis.strategies import floats\n'), ((915, 967), 'hypothesis.strategies.floats', 'floats', (['(0)', '(10)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(0, 10, allow_nan=False, allow_infinity=False)\n', (921, 967), False, 'from hypothesis.strategies import floats\n'), ((983, 1007), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1000, 1007), True, 'import numpy as np\n'), ((1030, 1082), 'hypothesis.extra.numpy.arrays', 'arrays', (['np.float'], {'shape': 'dim', 'elements': 'valid_floats_x'}), '(np.float, shape=dim, elements=valid_floats_x)\n', 
(1036, 1082), False, 'from hypothesis.extra.numpy import arrays\n'), ((1105, 1157), 'hypothesis.extra.numpy.arrays', 'arrays', (['np.float'], {'shape': 'dim', 'elements': 'valid_floats_a'}), '(np.float, shape=dim, elements=valid_floats_a)\n', (1111, 1157), False, 'from hypothesis.extra.numpy import arrays\n'), ((574, 589), 'hypothesis.strategies.integers', 'integers', (['(1)', '(10)'], {}), '(1, 10)\n', (582, 589), False, 'from hypothesis.strategies import integers\n'), ((1274, 1334), 'hypothesis.strategies.floats', 'floats', (['(-10000)', '(10000)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(-10000, 10000, allow_nan=False, allow_infinity=False)\n', (1280, 1334), False, 'from hypothesis.strategies import floats\n'), ((1523, 1583), 'hypothesis.strategies.floats', 'floats', (['(-10000)', '(10000)'], {'allow_nan': '(False)', 'allow_infinity': '(False)'}), '(-10000, 10000, allow_nan=False, allow_infinity=False)\n', (1529, 1583), False, 'from hypothesis.strategies import floats\n'), ((1380, 1395), 'hypothesis.strategies.integers', 'integers', (['(1)', '(10)'], {}), '(1, 10)\n', (1388, 1395), False, 'from hypothesis.strategies import integers\n'), ((1629, 1644), 'hypothesis.strategies.integers', 'integers', (['(2)', '(10)'], {}), '(2, 10)\n', (1637, 1644), False, 'from hypothesis.strategies import integers\n')] |
"""
Copyright (c) 2020 <NAME>
Centrum Wiskunde & Informatica, Amsterdam, the Netherlands.
Code is available via LINKXXXX.
Reference paper: XXXXX
"""
import numpy as np
import astra
import odl
import os
from skimage.transform import resize
import tifffile
import sys, getopt
# create parallel beam geometry in ODL
# number of projection angles
NUM_ANGLES = 50
# shape of ground truth images
RECO_IM_SHAPE = (972,972)
# shape of resized ground truth images
IM_SHAPE = (1000,1000)
# backend used for projection
IMPL='astra_cuda'
# definition of projection and reconstruction spaces in ODL
MIN_PT = [-1.0, -1.0]
MAX_PT = [1.0, 1.0]
reco_space = odl.uniform_discr(min_pt=MIN_PT, max_pt=MAX_PT, shape=RECO_IM_SHAPE, dtype=np.float32)
space = odl.uniform_discr(min_pt=MIN_PT, max_pt=MAX_PT, shape=IM_SHAPE,
dtype=np.float64)
# definition of projection geometries
reco_geometry = odl.tomo.parallel_beam_geometry(reco_space, num_angles=NUM_ANGLES)
geometry = odl.tomo.parallel_beam_geometry(space, num_angles=NUM_ANGLES, det_shape=reco_geometry.detector.shape)
# definition of ray transform
ray_trafo = odl.tomo.RayTransform(space, geometry, impl=IMPL)
def forward_projection(file):
""" Generates parallel beam projection data from ground truth.
Parameters
----------
file : str
Ground truth filename.
Returns
-------
data : numpy array
Noiseless parallel beam projection data.
data_noisy : numpy array
Noisy parallel beam projection data
"""
# read tif file
image_org = tifffile.imread(file)
# resize image
image = resize(image_org, IM_SHAPE, order=1)
# forward project
data = ray_trafo(image)
# add 5% gaussian noise
data_noisy = data + odl.phantom.white_noise(ray_trafo.range, seed=None) * np.mean(data) * 0.05
return data.asarray(), data_noisy.asarray()
if __name__ == "__main__":
opts, args = getopt.getopt(sys.argv[1:],"s:d:n:")
for opt, arg in opts:
if opt == "-s":
source_dir = arg
elif opt == "-d":
save_dir_data = arg
elif opt == "-n":
save_dir_data_noisy = arg
os.makedirs(save_dir_data, exist_ok=True)
os.makedirs(save_dir_data_noisy, exist_ok=True)
files = [f for f in os.listdir(source_dir)]
for f in files:
d, d_noisy = forward_projection(source_dir+'/'+f)
tifffile.imsave(save_dir_data+'/data_'+f, d.astype(np.float32))
tifffile.imsave(save_dir_data_noisy+'/data_noisy_'+f, d_noisy.astype(np.float32))
print('Saving proj of '+str(f)) | [
"odl.tomo.RayTransform",
"getopt.getopt",
"os.makedirs",
"odl.uniform_discr",
"odl.phantom.white_noise",
"odl.tomo.parallel_beam_geometry",
"skimage.transform.resize",
"numpy.mean",
"tifffile.imread",
"os.listdir"
] | [((648, 739), 'odl.uniform_discr', 'odl.uniform_discr', ([], {'min_pt': 'MIN_PT', 'max_pt': 'MAX_PT', 'shape': 'RECO_IM_SHAPE', 'dtype': 'np.float32'}), '(min_pt=MIN_PT, max_pt=MAX_PT, shape=RECO_IM_SHAPE, dtype=\n np.float32)\n', (665, 739), False, 'import odl\n'), ((743, 829), 'odl.uniform_discr', 'odl.uniform_discr', ([], {'min_pt': 'MIN_PT', 'max_pt': 'MAX_PT', 'shape': 'IM_SHAPE', 'dtype': 'np.float64'}), '(min_pt=MIN_PT, max_pt=MAX_PT, shape=IM_SHAPE, dtype=np.\n float64)\n', (760, 829), False, 'import odl\n'), ((906, 972), 'odl.tomo.parallel_beam_geometry', 'odl.tomo.parallel_beam_geometry', (['reco_space'], {'num_angles': 'NUM_ANGLES'}), '(reco_space, num_angles=NUM_ANGLES)\n', (937, 972), False, 'import odl\n'), ((984, 1090), 'odl.tomo.parallel_beam_geometry', 'odl.tomo.parallel_beam_geometry', (['space'], {'num_angles': 'NUM_ANGLES', 'det_shape': 'reco_geometry.detector.shape'}), '(space, num_angles=NUM_ANGLES, det_shape=\n reco_geometry.detector.shape)\n', (1015, 1090), False, 'import odl\n'), ((1129, 1178), 'odl.tomo.RayTransform', 'odl.tomo.RayTransform', (['space', 'geometry'], {'impl': 'IMPL'}), '(space, geometry, impl=IMPL)\n', (1150, 1178), False, 'import odl\n'), ((1604, 1625), 'tifffile.imread', 'tifffile.imread', (['file'], {}), '(file)\n', (1619, 1625), False, 'import tifffile\n'), ((1662, 1698), 'skimage.transform.resize', 'resize', (['image_org', 'IM_SHAPE'], {'order': '(1)'}), '(image_org, IM_SHAPE, order=1)\n', (1668, 1698), False, 'from skimage.transform import resize\n'), ((1989, 2026), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""s:d:n:"""'], {}), "(sys.argv[1:], 's:d:n:')\n", (2002, 2026), False, 'import sys, getopt\n'), ((2236, 2277), 'os.makedirs', 'os.makedirs', (['save_dir_data'], {'exist_ok': '(True)'}), '(save_dir_data, exist_ok=True)\n', (2247, 2277), False, 'import os\n'), ((2282, 2329), 'os.makedirs', 'os.makedirs', (['save_dir_data_noisy'], {'exist_ok': '(True)'}), '(save_dir_data_noisy, exist_ok=True)\n', 
(2293, 2329), False, 'import os\n'), ((2364, 2386), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (2374, 2386), False, 'import os\n'), ((1811, 1862), 'odl.phantom.white_noise', 'odl.phantom.white_noise', (['ray_trafo.range'], {'seed': 'None'}), '(ray_trafo.range, seed=None)\n', (1834, 1862), False, 'import odl\n'), ((1865, 1878), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (1872, 1878), True, 'import numpy as np\n')] |
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_prediction(net, data):
output = net.fw_over_time(data)
_, am = torch.max(output, 1) # argmax over output units
return am
def compute_classification_accuracy(net, x_data, y_data, max_time, nb_steps):
""" Computes classification accuracy on supplied data in batches. """
accs = []
for x_local, y_local in sparse_data_generator_from_hdf5_spikes(
x_data, y_data, net.batch_size, nb_steps, net.nb_inputs, max_time, shuffle=False):
output, _ = net(x_local.to_dense())
m, _ = torch.max(output, 1) # max over time
_, am = torch.max(m, 1) # argmax over output units
tmp = np.mean((y_local == am).detach().cpu().numpy()) # compare to labels
accs.append(tmp)
return np.mean(accs)
def sparse_data_generator_from_hdf5_spikes(
X, y, batch_size, nb_steps, nb_units, max_time, shuffle=True):
""" This generator takes a spike dataset and generates spiking network input as sparse tensors.
Args:
X: The data ( sample x event x 2 ) the last dim holds (time,neuron) tuples
y: The labels
"""
labels_ = np.array(y, dtype=int)
number_of_batches = len(labels_)//batch_size
sample_index = np.arange(len(labels_))
# compute discrete firing times
firing_times = X['times']
units_fired = X['units']
time_bins = np.linspace(0, max_time, num=nb_steps)
if shuffle:
np.random.shuffle(sample_index)
counter = 0
while counter < number_of_batches:
batch_index = sample_index[batch_size*counter:batch_size*(counter+1)]
coo = [[] for i in range(3)]
for bc, idx in enumerate(batch_index):
times = np.digitize(firing_times[idx], time_bins)
units = units_fired[idx]
batch = [bc for _ in range(len(times))]
coo[0].extend(batch)
coo[1].extend(times)
coo[2].extend(units)
i = torch.LongTensor(coo).to(device)
v = torch.FloatTensor(np.ones(len(coo[0]))).to(device)
X_batch = torch.sparse.FloatTensor(
i, v, torch.Size([batch_size, nb_steps, nb_units])).to(device)
y_batch = torch.tensor(labels_[batch_index], device=device)
yield X_batch.to(device=device), y_batch.to(device=device)
counter += 1
def plot_voltage_traces(mem, spk=None, dim=(3, 5), spike_height=5):
gs = GridSpec(*dim)
if spk is not None:
dat = 1.0*mem
dat[spk > 0.0] = spike_height
dat = dat.detach().cpu().numpy()
else:
dat = mem.detach().cpu().numpy()
for i in range(np.prod(dim)):
if i == 0:
a0 = ax = plt.subplot(gs[i])
else:
ax = plt.subplot(gs[i], sharey=a0)
ax.plot(dat[i])
ax.axis("off")
| [
"matplotlib.pyplot.subplot",
"torch.tensor",
"torch.LongTensor",
"numpy.prod",
"numpy.mean",
"numpy.array",
"torch.max",
"numpy.linspace",
"torch.cuda.is_available",
"torch.Size",
"matplotlib.gridspec.GridSpec",
"numpy.digitize",
"numpy.random.shuffle"
] | [((257, 277), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (266, 277), False, 'import torch\n'), ((935, 948), 'numpy.mean', 'np.mean', (['accs'], {}), '(accs)\n', (942, 948), True, 'import numpy as np\n'), ((1305, 1327), 'numpy.array', 'np.array', (['y'], {'dtype': 'int'}), '(y, dtype=int)\n', (1313, 1327), True, 'import numpy as np\n'), ((1533, 1571), 'numpy.linspace', 'np.linspace', (['(0)', 'max_time'], {'num': 'nb_steps'}), '(0, max_time, num=nb_steps)\n', (1544, 1571), True, 'import numpy as np\n'), ((2565, 2579), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['*dim'], {}), '(*dim)\n', (2573, 2579), False, 'from matplotlib.gridspec import GridSpec\n'), ((138, 163), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (161, 163), False, 'import torch\n'), ((714, 734), 'torch.max', 'torch.max', (['output', '(1)'], {}), '(output, 1)\n', (723, 734), False, 'import torch\n'), ((768, 783), 'torch.max', 'torch.max', (['m', '(1)'], {}), '(m, 1)\n', (777, 783), False, 'import torch\n'), ((1597, 1628), 'numpy.random.shuffle', 'np.random.shuffle', (['sample_index'], {}), '(sample_index)\n', (1614, 1628), True, 'import numpy as np\n'), ((2346, 2395), 'torch.tensor', 'torch.tensor', (['labels_[batch_index]'], {'device': 'device'}), '(labels_[batch_index], device=device)\n', (2358, 2395), False, 'import torch\n'), ((2775, 2787), 'numpy.prod', 'np.prod', (['dim'], {}), '(dim)\n', (2782, 2787), True, 'import numpy as np\n'), ((1868, 1909), 'numpy.digitize', 'np.digitize', (['firing_times[idx]', 'time_bins'], {}), '(firing_times[idx], time_bins)\n', (1879, 1909), True, 'import numpy as np\n'), ((2831, 2849), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {}), '(gs[i])\n', (2842, 2849), True, 'import matplotlib.pyplot as plt\n'), ((2881, 2910), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[i]'], {'sharey': 'a0'}), '(gs[i], sharey=a0)\n', (2892, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2133), 
'torch.LongTensor', 'torch.LongTensor', (['coo'], {}), '(coo)\n', (2128, 2133), False, 'import torch\n'), ((2271, 2315), 'torch.Size', 'torch.Size', (['[batch_size, nb_steps, nb_units]'], {}), '([batch_size, nb_steps, nb_units])\n', (2281, 2315), False, 'import torch\n')] |
import numpy as np
from .qac import Register, block_copy
# takes a qac with no inputs, returns the output state vector
def get_qac_state(qac):
    """Simulate a qac circuit with no scope inputs/outputs; return its state.

    Returns (state, registers, discards) where:
      state     -- np.array of complex amplitudes, one entry per branch
      registers -- {reg_trace: np.array of int values, one per branch}
      discards  -- list of np.array's for registers that were discarded
                   (kept so get_kraus can tell branches apart)
    """
    assert len(qac.scope_inputs) == 0
    assert len(qac.scope_outputs) == 0
    # purification simulation: state is stored as state vector
    # state = np.array of complex amplitudes for every nonzero entry
    # registers = {reg.id: np.array} where each np.array is the list of values
    # discards = list of np.array's corresponding to registers that have been discarded
    # possibilities may contain duplicates, so need to prune once in a while
    # cache performance relies on the idea that not too many registers are affected by a single instr
    # looping through all possibilities of a single register is fast,
    # but looping through all possibilities of several registers is slow, since their arrays are far apart
    state = np.array([1]).astype(complex)
    registers = {}
    discards = []
    for reg in qac.unnamed_inputs:
        registers[reg.trace()] = np.array([0]).astype(int)
    def simulate_instruction(instr, state, registers, discards):
        """Apply one instruction; return the updated (state, registers, discards)."""
        # for debugging:
        # s = None
        # if "reg" in instr: s = str(instr["reg"].trace())
        # print(instr["kind"], s, registers.keys())
        print(instr["kind"])  # NOTE(review): debug trace left enabled on purpose?
        # {"kind":"qac_declare", "reg":<register>, "dim":<int>}
        if instr["kind"] == "qac_declare":
            tr = instr["reg"].trace()
            assert tr not in registers
            registers[tr] = np.zeros(len(state)).astype(int)
            return state, registers, discards
        # {"kind":"qac_discard", "reg":<register>}
        if instr["kind"] == "qac_discard":
            tr = instr["reg"].trace()
            arr = registers[tr]
            del registers[tr]
            # don't have to make garbage if all the values are the same
            # if not np.allclose(arr, arr[0]): discards.append(arr)
            # ^ actually, this doesn't work, because get_kraus will be feeding
            # this subroutine multiple circuits and comparing the results
            # leaving the garbage around is key to interpreting the results correctly
            discards.append(arr)
            return state, registers, discards
        # {"kind":"qac_maxmixed", "reg":<register>, "dim":<int>}
        if instr["kind"] == "qac_maxmixed":
            # make a bell state and discard half of it
            # the reason we do this this way is because we'd only like to
            # implement branching behavior once, namely in qac_unitary.
            dim = instr["reg"].dim
            tmp = Register(dim)
            omega = np.exp(2j*np.pi/dim)
            mat = []
            for i in range(dim):
                row = []
                for j in range(dim):
                    row.append(omega**(i*j) / np.sqrt(dim))
                mat.append(row)
            dummy_instrs = [
                {"kind":"qac_declare", "reg":instr["reg"], "dim":dim},
                {"kind":"qac_declare", "reg":tmp, "dim":dim},
                {"kind":"qac_unitary", "reg":instr["reg"], "mat":mat},
                {"kind":"qac_increment", "reg":tmp, "expn":{"kind": "register_expn", "register":instr["reg"]}},
                {"kind":"qac_discard", "reg":tmp},
            ]
            for dummy_instr in dummy_instrs:
                state, registers, discards = simulate_instruction(dummy_instr, state, registers, discards)
            return state, registers, discards
        # {"kind":"qac_zero", "reg":<register>}
        if instr["kind"] == "qac_zero":
            # postselect on the register being 0: keep only those branches
            arr = registers[instr["reg"].trace()]
            new_dim = 0
            for i in range(len(arr)):
                if arr[i] == 0: new_dim += 1
            new_state = np.zeros(new_dim).astype(complex)
            idx = 0
            for i in range(len(arr)):
                if arr[i] == 0:
                    new_state[idx] = state[i]
                    idx += 1
            new_registers = {}
            for reg in registers.keys():
                if reg == instr["reg"].trace(): continue
                new_arr = np.zeros(new_dim).astype(int)
                idx = 0
                for i in range(len(arr)):
                    if arr[i] == 0:
                        new_arr[idx] = registers[reg][i]
                        idx += 1
                new_registers[reg] = new_arr
            new_discards = []
            for old_arr in discards:
                new_arr = np.zeros(new_dim).astype(int)
                idx = 0
                for i in range(len(arr)):
                    if arr[i] == 0:
                        new_arr[idx] = old_arr[i]
                        idx += 1
                new_discards.append(new_arr)
            return new_state, new_registers, new_discards
        # {"kind":"qac_increment", "reg":<register>, "expn":<expn>}
        if instr["kind"] == "qac_increment":
            def eval_expn(idx, expn, registers):
                """Evaluate an expression tree for branch idx; returns a complex."""
                # {"kind": "register_expn", "register":<reg> }
                if expn["kind"] == "register_expn":
                    return complex(registers[expn["register"].trace()][idx])
                # {"kind": "value_expn", "value":5j}
                if expn["kind"] == "value_expn":
                    return complex(expn["value"])
                # {"kind": "sum_expn", "terms":[<linexp>] }
                if expn["kind"] == "sum_expn":
                    return sum([eval_expn(idx,sub_expn,registers) for sub_expn in expn["terms"]])
                # {"kind": "negate_expn", "expn":<linexp> }
                if expn["kind"] == "negate_expn":
                    return -eval_expn(idx,expn["expn"],registers)
                # {"kind": "adjoint_expn", "expn":<linexp> }
                if expn["kind"] == "adjoint_expn":
                    return eval_expn(idx,expn["expn"],registers).conj()
                # {"kind": "product_expn", "terms":[<linexp>] }
                if expn["kind"] == "product_expn":
                    return np.prod([eval_expn(idx,sub_expn,registers) for sub_expn in expn["terms"]])
                # {"kind": "division_expn", "dividend":<linexp>, "divisor":5j }
                if expn["kind"] == "division_expn":
                    return eval_expn(idx,expn["dividend"],registers) / expn["divisor"]
                # {"kind": "modulo_expn", "dividend":<linexp>, "divisor":5 }
                if expn["kind"] == "modulo_expn":
                    out = eval_expn(idx,expn["dividend"],registers)
                    assert isinstance(expn["divisor"],int)
                    assert expn["divisor"] > 0
                    # see comment in runtime.py
                    while out.real < 0: out += expn["divisor"]
                    while out.real >= expn["divisor"]: out -= expn["divisor"]
                    return out
                # {"kind": "boolean_expn", "terms":[<linexp>, <string>, <linexp>, <string>, ...] }
                # <string> is one of ==, !=, >, <, >=, <=
                if expn["kind"] == "boolean_expn":
                    terms = []
                    for i in range(len(expn["terms"])):
                        if i % 2 == 1:
                            assert expn["terms"][i] in ["==", "!=", "<", ">", ">=", "<="]
                            terms.append(expn["terms"][i])
                        else:
                            terms.append(eval_expn(idx,expn["terms"][i],registers))
                    # NOTE(review): ordering comparisons (<, >, >=, <=) on complex
                    # values raise TypeError in Python 3; presumably only ==/!= are
                    # exercised in practice -- confirm against callers.
                    for i in range(len(expn["terms"])):
                        if i % 2 == 0: continue
                        if terms[i] == "==": value = (terms[i-1] == terms[i+1])
                        elif terms[i] == "!=": value = (terms[i-1] != terms[i+1])
                        elif terms[i] == "<": value = (terms[i-1] < terms[i+1])
                        elif terms[i] == ">": value = (terms[i-1] > terms[i+1])
                        elif terms[i] == ">=": value = (terms[i-1] >= terms[i+1])
                        else:
                            assert terms[i] == "<="
                            # fixed: compare the evaluated terms directly, like every
                            # other operator above (the old code indexed ["value"]
                            # into complex numbers, which cannot work)
                            value = (terms[i-1] <= terms[i+1])
                        if not value:
                            return complex(0)
                    return complex(1)
                assert False # unreachable
            arr = registers[instr["reg"].trace()]
            dim = instr["reg"].dim
            for i in range(len(state)):
                val = eval_expn(i, instr["expn"], registers)
                val = int(val.real)
                arr[i] = (arr[i] + val) % dim
            return state, registers, discards
        # {"kind":"qac_unitary", "reg":<register>, "mat":<matrix>}
        if instr["kind"] == "qac_unitary":
            # branch every existing possibility into dim children weighted by mat
            arr = registers[instr["reg"].trace()]
            dim = instr["reg"].dim
            mat = instr["mat"]
            new_dim = len(state)*dim
            new_state = np.zeros(new_dim).astype(complex)
            idx = 0
            for i in range(len(arr)):
                val = arr[i]
                for j in range(dim):
                    new_state[idx] = state[i] * mat[j][val]
                    idx += 1
            new_registers = {}
            for reg in registers.keys():
                new_arr = np.zeros(new_dim).astype(int)
                idx = 0
                if reg == instr["reg"].trace():
                    for i in range(len(arr)):
                        for j in range(dim):
                            new_arr[idx] = j
                            idx += 1
                else:
                    for i in range(len(arr)):
                        for j in range(dim):
                            new_arr[idx] = registers[reg][i]
                            idx += 1
                new_registers[reg] = new_arr
            new_discards = []
            for old_arr in discards:
                new_arr = np.zeros(new_dim).astype(int)
                idx = 0
                for i in range(len(arr)):
                    for j in range(dim):
                        new_arr[idx] = old_arr[i]
                        idx += 1
                new_discards.append(new_arr)
            return new_state, new_registers, new_discards
        # {"kind":"qac_phase", "value":<complexnr>}
        if instr["kind"] == "qac_phase":
            for i in range(len(state)):
                state[i] *= complex(instr["value"])
            return state, registers, discards
        # {"kind":"qac_swap", "reg1":<register>, "reg2": <register>}
        if instr["kind"] == "qac_swap":
            tr1 = instr["reg1"].trace()
            tr2 = instr["reg2"].trace()
            registers[tr1], registers[tr2] = registers[tr2], registers[tr1]
            return state, registers, discards
        # {"kind":"qac_rename", "source":<register>, "target": <register>}
        if instr["kind"] == "qac_rename":
            src = instr["source"].trace()
            trg = instr["target"].trace()
            registers[trg] = registers[src]
            del registers[src]
            return state, registers, discards
        # {"kind":"qac_if", "cond":<register>, "instructions":[<instrs>] }
        if instr["kind"] == "qac_if":
            # split branches on the condition register, simulate the body only
            # on the "good" (nonzero) branches, then stitch both halves back
            cond = instr["cond"].trace()
            arr = registers[cond]
            bad_dim = 0 # number of branches where we don't perform the instructions
            for i in range(len(arr)):
                if arr[i] == 0: bad_dim += 1
            bad_state = np.zeros(bad_dim).astype(complex)
            good_state = np.zeros(len(arr) - bad_dim).astype(complex)
            bad_idx, good_idx = 0,0
            for i in range(len(arr)):
                if arr[i] == 0:
                    bad_state[bad_idx] = state[i]
                    bad_idx += 1
                else:
                    good_state[good_idx] = state[i]
                    good_idx += 1
            bad_registers = {}
            good_registers = {}
            for reg in registers.keys():
                bad_arr = np.zeros(bad_dim).astype(int)
                good_arr = np.zeros(len(arr) - bad_dim).astype(int)
                bad_idx, good_idx = 0,0
                for i in range(len(arr)):
                    if arr[i] == 0:
                        bad_arr[bad_idx] = registers[reg][i]
                        bad_idx += 1
                    else:
                        good_arr[good_idx] = registers[reg][i]
                        good_idx += 1
                bad_registers[reg] = bad_arr
                good_registers[reg] = good_arr
            bad_discards = []
            good_discards = []
            for old_arr in discards:
                bad_arr = np.zeros(bad_dim).astype(int)
                good_arr = np.zeros(len(arr) - bad_dim).astype(int)
                bad_idx, good_idx = 0,0
                for i in range(len(arr)):
                    if arr[i] == 0:
                        bad_arr[bad_idx] = old_arr[i]
                        bad_idx += 1
                    else:
                        good_arr[good_idx] = old_arr[i]
                        good_idx += 1
                bad_discards.append(bad_arr)
                good_discards.append(good_arr)
            for i,sub_instr in enumerate(instr["instructions"]):
                good_state, good_registers, good_discards = simulate_instruction(sub_instr, good_state, good_registers, good_discards)
                if len(good_state) < 100: continue
                if sub_instr["kind"] not in ["qac_unitary", "qac_if"]: continue
                if i == len(instr["instructions"])-1: continue
                # NOTE(review): this compares an instr dict against strings, so it is
                # always False; likely meant instr["instructions"][i-1]["kind"].
                # Left as-is: it only affects how often pruning runs.
                if instr["instructions"][i-1] in ["qac_unitary", "qac_if"]: continue
                good_state, good_registers, good_discards = prune(good_state, good_registers, good_discards)
            new_dim = len(good_state) + len(bad_state)
            new_state = np.zeros(new_dim).astype(complex)
            idx = 0
            for i in range(len(bad_state)):
                new_state[idx] = bad_state[i]
                idx += 1
            for i in range(len(good_state)):
                new_state[idx] = good_state[i]
                idx += 1
            for reg in good_registers.keys(): assert reg in bad_registers
            for reg in bad_registers.keys(): assert reg in good_registers
            new_registers = {}
            for reg in registers.keys():
                new_arr = np.zeros(new_dim).astype(int)
                idx = 0
                for i in range(len(bad_state)):
                    new_arr[idx] = bad_registers[reg][i]
                    idx += 1
                for i in range(len(good_state)):
                    new_arr[idx] = good_registers[reg][i]
                    idx += 1
                new_registers[reg] = new_arr
            # the body may have discarded extra registers on the good half only
            assert len(good_discards) >= len(bad_discards)
            new_discards = []
            for i in range(len(good_discards)):
                new_arr = np.zeros(new_dim).astype(int)
                if i < len(bad_discards):
                    idx = 0
                    for j in range(len(bad_state)):
                        new_arr[idx] = bad_discards[i][j]
                        idx += 1
                    for j in range(len(good_state)):
                        new_arr[idx] = good_discards[i][j]
                        idx += 1
                else:
                    # discard only exists on the good half; bad half reads as 0
                    idx = len(bad_state)
                    for j in range(len(good_state)):
                        new_arr[idx] = good_discards[i][j]
                        idx += 1
                new_discards.append(new_arr)
            return new_state, new_registers, new_discards
        assert False # unreachable
    def prune(state, registers, discards):
        """Merge duplicate branches (same register+discard values) and drop
        zero-amplitude ones."""
        value_dict = {}
        for i in range(len(state)):
            if np.allclose(state[i], 0): continue
            key = []
            for reg in registers.keys():
                key.append(registers[reg][i])
            for disc in discards:
                key.append(disc[i])
            key = tuple(key)
            if key not in value_dict: value_dict[key] = 0j
            value_dict[key] += state[i]
            if np.allclose(value_dict[key],0):
                del value_dict[key]
        new_dim = len(value_dict)
        new_state = np.zeros(new_dim).astype(complex)
        new_registers = {}
        new_discards = []
        for reg in registers.keys():
            new_registers[reg] = np.zeros(new_dim).astype(int)
        for i in range(len(discards)):
            new_discards.append(np.zeros(new_dim).astype(int))
        for i,key in enumerate(value_dict.keys()):
            new_state[i] = value_dict[key]
            idx = 0
            for reg in registers.keys():
                new_registers[reg][i] = key[idx]
                idx += 1
            for j in range(len(discards)):
                new_discards[j][i] = key[idx]
                idx += 1
        # Can't do this, because Kraus operator calculation needs these
        # to_remove = []
        # for disc in new_discards:
        #     if np.allclose(disc,disc[0]): to_remove.append(disc)
        # for disc in to_remove:
        #     new_discards.remove(disc)
        return new_state, new_registers, new_discards
    for i,instr in enumerate(qac.instrs):
        state, registers, discards = simulate_instruction(instr, state, registers, discards)
        if len(state) < 100: continue
        if instr["kind"] not in ["qac_unitary", "qac_if"]: continue
        if i == len(qac.instrs)-1: continue
        # NOTE(review): dict-vs-string comparison, always False (same issue as in
        # qac_if above); likely meant qac.instrs[i-1]["kind"]. Pruning-frequency only.
        if qac.instrs[i-1] in ["qac_unitary", "qac_if"]: continue
        state, registers, discards = prune(state, registers, discards)
    state, registers, discards = prune(state, registers, discards)
    return state, registers, discards
def sample_state(state, registers, discards):
    """Draw one measurement sample from a simulated state.

    Returns a tuple with one value per register (in dict order), or None when
    the total probability mass is below 1 -- the missing mass corresponds to
    branches removed by postselection.
    """
    # state was pruned as a final step, so no duplicate branches exist.
    probabilities = np.zeros(len(state))
    for i in range(len(state)):
        probabilities[i] = np.abs(state[i])**2
    total = sum(probabilities)
    if total > 1:
        # a tiny float overshoot is fine; anything larger means the
        # distribution is over-normalized.
        assert np.allclose(total, 1)
    if np.random.random() > total: return None
    # normalize exactly once (the old code divided by total twice when total > 1)
    idx = np.random.choice(len(state), p=probabilities/total)
    out = []
    for reg in registers.keys():
        out.append(registers[reg][idx])
    return tuple(out)
def get_kraus(orig_qac):
    """Extract the Kraus operators of the channel realized by *orig_qac*.

    Works on a copy, so the argument is left untouched.  The circuit may have
    at most one unnamed input and at most one unnamed output; every distinct
    tuple of discarded-register values labels one Kraus operator.  Returns a
    list of (out_dim x in_dim) complex matrices, each scaled by ``qac.scale``.
    """
    qac = block_copy(orig_qac)
    assert not qac.scope_inputs
    assert not qac.scope_outputs
    assert len(qac.unnamed_inputs) in [0, 1]
    assert len(qac.unnamed_outputs) in [0, 1]
    have_output = len(qac.unnamed_outputs) == 1
    if have_output:
        out_reg = qac.unnamed_outputs[0]
        out_dim = out_reg.dim
    else:
        out_dim = 1
    # one matrix per distinct tuple of discard values
    kraus = {}
    def scatter(column, in_dim):
        # simulate the (now input-free) circuit and write its amplitudes into
        # the given column of the matrix selected by each branch's discard key
        state, registers, discards = get_qac_state(qac)
        for j in range(len(state)):
            key = tuple(disc[j] for disc in discards)
            if key not in kraus:
                kraus[key] = np.zeros((out_dim, in_dim)).astype(complex)
            row = registers[out_reg.trace()][j] if have_output else 0
            kraus[key][row, column] = state[j]
        return len(discards)
    if not qac.unnamed_inputs:
        scatter(0, 1)
    else:
        in_reg = qac.unnamed_inputs[0]
        in_dim = in_reg.dim
        # replace the input with a declare + increment whose value we sweep,
        # running the circuit once per basis state of the input register
        qac.unnamed_inputs = []
        qac.instrs = [
            {"kind": "qac_declare", "reg": in_reg, "dim": in_dim},
            {"kind": "qac_increment", "reg": in_reg,
             "expn": {"kind": "value_expn", "value": complex(0)}},
        ] + qac.instrs
        num_discards = None
        for i in range(in_dim):
            qac.instrs[1]["expn"]["value"] = complex(i)
            count = scatter(i, in_dim)
            # every run must discard the same number of registers
            if num_discards is None: num_discards = count
            assert num_discards == count
    return [qac.scale * mat for mat in kraus.values()]
| [
"numpy.abs",
"numpy.allclose",
"numpy.zeros",
"numpy.random.random",
"numpy.array",
"numpy.exp",
"numpy.sqrt"
] | [((18040, 18061), 'numpy.allclose', 'np.allclose', (['total', '(1)'], {}), '(total, 1)\n', (18051, 18061), True, 'import numpy as np\n'), ((18202, 18220), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (18218, 18220), True, 'import numpy as np\n'), ((894, 907), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (902, 907), True, 'import numpy as np\n'), ((2654, 2680), 'numpy.exp', 'np.exp', (['(2.0j * np.pi / dim)'], {}), '(2.0j * np.pi / dim)\n', (2660, 2680), True, 'import numpy as np\n'), ((15765, 15789), 'numpy.allclose', 'np.allclose', (['state[i]', '(0)'], {}), '(state[i], 0)\n', (15776, 15789), True, 'import numpy as np\n'), ((16123, 16154), 'numpy.allclose', 'np.allclose', (['value_dict[key]', '(0)'], {}), '(value_dict[key], 0)\n', (16134, 16154), True, 'import numpy as np\n'), ((17959, 17975), 'numpy.abs', 'np.abs', (['state[i]'], {}), '(state[i])\n', (17965, 17975), True, 'import numpy as np\n'), ((1030, 1043), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (1038, 1043), True, 'import numpy as np\n'), ((16246, 16263), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (16254, 16263), True, 'import numpy as np\n'), ((3787, 3804), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (3795, 3804), True, 'import numpy as np\n'), ((8906, 8923), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (8914, 8923), True, 'import numpy as np\n'), ((11455, 11472), 'numpy.zeros', 'np.zeros', (['bad_dim'], {}), '(bad_dim)\n', (11463, 11472), True, 'import numpy as np\n'), ((13838, 13855), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (13846, 13855), True, 'import numpy as np\n'), ((16403, 16420), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (16411, 16420), True, 'import numpy as np\n'), ((4142, 4159), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (4150, 4159), True, 'import numpy as np\n'), ((4503, 4520), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (4511, 
4520), True, 'import numpy as np\n'), ((9252, 9269), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (9260, 9269), True, 'import numpy as np\n'), ((9877, 9894), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (9885, 9894), True, 'import numpy as np\n'), ((11987, 12004), 'numpy.zeros', 'np.zeros', (['bad_dim'], {}), '(bad_dim)\n', (11995, 12004), True, 'import numpy as np\n'), ((12645, 12662), 'numpy.zeros', 'np.zeros', (['bad_dim'], {}), '(bad_dim)\n', (12653, 12662), True, 'import numpy as np\n'), ((14372, 14389), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (14380, 14389), True, 'import numpy as np\n'), ((14906, 14923), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (14914, 14923), True, 'import numpy as np\n'), ((16504, 16521), 'numpy.zeros', 'np.zeros', (['new_dim'], {}), '(new_dim)\n', (16512, 16521), True, 'import numpy as np\n'), ((19143, 19165), 'numpy.zeros', 'np.zeros', (['(out_dim, 1)'], {}), '((out_dim, 1))\n', (19151, 19165), True, 'import numpy as np\n'), ((2838, 2850), 'numpy.sqrt', 'np.sqrt', (['dim'], {}), '(dim)\n', (2845, 2850), True, 'import numpy as np\n'), ((20229, 20256), 'numpy.zeros', 'np.zeros', (['(out_dim, in_dim)'], {}), '((out_dim, in_dim))\n', (20237, 20256), True, 'import numpy as np\n')] |
import unittest
import numpy.testing as np_test
from scripts.algorithms.holtwinters_predictor import HoltWintersPredictor
class HoltWintersTests(unittest.TestCase):
    """Unit tests for HoltWintersPredictor's count forecasting."""

    def _check_flat_forecast(self, series, periods, level):
        # helper: the predictor should forecast *level* for every period
        predictor = HoltWintersPredictor(series, periods)
        np_test.assert_almost_equal(predictor.predict_counts(),
                                    [level] * periods, decimal=4)

    def test_negatives_in_sequence(self):
        self._check_flat_forecast([1, 1, -1, 1, 1], 3, 0.8)

    def test_zeros_in_sequence(self):
        self._check_flat_forecast([1, 1, 0, 1, 1], 3, 0.8)

    def test_static_sequence(self):
        self._check_flat_forecast([1.0, 1.0, 1.0, 1.0, 1.0], 3, 1)
| [
"numpy.testing.assert_almost_equal",
"scripts.algorithms.holtwinters_predictor.HoltWintersPredictor"
] | [((358, 414), 'scripts.algorithms.holtwinters_predictor.HoltWintersPredictor', 'HoltWintersPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (378, 414), False, 'from scripts.algorithms.holtwinters_predictor import HoltWintersPredictor\n'), ((473, 551), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(4)'}), '(actual_prediction, expected_prediction, decimal=4)\n', (500, 551), True, 'import numpy.testing as np_test\n'), ((736, 792), 'scripts.algorithms.holtwinters_predictor.HoltWintersPredictor', 'HoltWintersPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (756, 792), False, 'from scripts.algorithms.holtwinters_predictor import HoltWintersPredictor\n'), ((851, 929), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(4)'}), '(actual_prediction, expected_prediction, decimal=4)\n', (878, 929), True, 'import numpy.testing as np_test\n'), ((1120, 1176), 'scripts.algorithms.holtwinters_predictor.HoltWintersPredictor', 'HoltWintersPredictor', (['time_series', 'num_predicted_periods'], {}), '(time_series, num_predicted_periods)\n', (1140, 1176), False, 'from scripts.algorithms.holtwinters_predictor import HoltWintersPredictor\n'), ((1235, 1313), 'numpy.testing.assert_almost_equal', 'np_test.assert_almost_equal', (['actual_prediction', 'expected_prediction'], {'decimal': '(4)'}), '(actual_prediction, expected_prediction, decimal=4)\n', (1262, 1313), True, 'import numpy.testing as np_test\n')] |
from keras.models import Sequential # basic class for specifying and training a neural network
from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input
from keras.utils import np_utils # utilities for one-hot encoding of ground truth values
from keras.callbacks import TensorBoard, EarlyStopping
from keras.regularizers import l2 # L2-regularisation
import time
import numpy as np
import gzip
import pickle
def load_data(dataset):
    """Load a (X_train, y_train, X_test, y_test) tuple from a gzipped pickle.

    Parameters
    ----------
    dataset : str
        Path to a ``.pkl.gz`` file containing the four arrays.

    Returns
    -------
    tuple
        (X_train, y_train, X_test, y_test) exactly as stored in the file.
    """
    with gzip.open(dataset, 'rb') as f:
        try:
            # Python-3 unpickling of a Python-2 pickle needs latin1
            X_train, y_train, X_test, y_test = pickle.load(f, encoding='latin1')
        except TypeError:
            # Python 2's pickle.load has no ``encoding`` keyword; fall back.
            # (Narrowed from a bare ``except:`` which also swallowed real errors.)
            X_train, y_train, X_test, y_test = pickle.load(f)
    return X_train, y_train, X_test, y_test
def shuffle_in_unison(a, b):
    """Shuffle two arrays in-place with the same permutation.

    The RNG state is captured first and restored between the two shuffles,
    so a[i] and b[i] remain paired afterwards.
    """
    checkpoint = np.random.get_state()
    for arr in (a, b):
        np.random.set_state(checkpoint)
        np.random.shuffle(arr)
# ---- hyperparameters ----
timestr = time.strftime("%Y%m%d_%H%M")  # timestamp used to name the saved model files
batch_size = 32 # in each iteration, we consider 32 training examples at once
num_epochs = 50 # we iterate at most 50 times over the entire training set
kernel_size = 3 # we will use 3x3 kernels throughout
pool_size = 2 # we will use 2x2 pooling throughout
conv_depth_1 = 32 # we will initially have 32 kernels per conv. layer...
conv_depth_2 = 64 # ...switching to 64 after the first pooling layer
drop_prob_1 = 0.25 # dropout after the second pooling layer
drop_prob_2 = 0.5 # dropout in the FC layer with probability 0.5
hidden_size = 512 # the FC layer will have 512 neurons
l2_lambda = 0.0001 # L2-regularisation strength applied to every layer
# ---- data loading and preprocessing ----
X_train, y_train, X_test, y_test = load_data('./data/eyes_dataset.pkl.gz')
num_train, height, width, depth = X_train.shape # number of training examples and image geometry
num_test = X_test.shape[0] # number of test examples
num_classes = np.unique(y_train).shape[0] # number of distinct labels in the training set
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= np.max(X_train) # Normalise data to [0, 1] range
X_test /= np.max(X_test) # Normalise data to [0, 1] range
Y_train = np_utils.to_categorical(y_train, num_classes) # One-hot encode the labels
Y_test = np_utils.to_categorical(y_test, num_classes) # One-hot encode the labels
# shuffle features and labels with the same permutation so pairs stay aligned
shuffle_in_unison(X_train, Y_train)
shuffle_in_unison(X_test, Y_test)
# disabled visual inspection snippet (kept as a no-op string literal)
"""import cv2
for index in range(0, X_train.shape[0]):
    cv2.imshow("image",X_train[index])
    print(Y_train[index])
    cv2.waitKey(0)"""
# ---- model definition: 3 conv blocks, then a fully-connected classifier ----
model = Sequential()
model.add(Convolution2D(conv_depth_1, (kernel_size, kernel_size), padding='same', input_shape=(height, width, depth),
                        kernel_initializer='he_uniform', kernel_regularizer=l2(l2_lambda)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(conv_depth_1, (kernel_size, kernel_size), kernel_initializer='he_uniform',
                        kernel_regularizer=l2(l2_lambda)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Dropout(drop_prob_1))
model.add(Convolution2D(conv_depth_2, (kernel_size, kernel_size), kernel_initializer='he_uniform',
                        kernel_regularizer=l2(l2_lambda)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Flatten())
model.add(Dense(hidden_size, kernel_initializer='he_uniform', kernel_regularizer=l2(l2_lambda)))
model.add(Activation('relu'))
model.add(Dropout(drop_prob_2))
model.add(Dense(num_classes, kernel_initializer='glorot_uniform',
                kernel_regularizer=l2(l2_lambda)))
model.add(Activation('softmax'))
# TensorBoard logging of metrics/graph/weights every 25 epochs
tbCallback = TensorBoard(log_dir='./Graph', histogram_freq=25, write_graph=True, write_images=True)
model.compile(loss='categorical_crossentropy', # using the cross-entropy loss function
              optimizer='sgd', # using plain SGD (not Adam) as the optimiser
              metrics=['accuracy']) # reporting the accuracy
model.fit(X_train, Y_train, # Train the model using the training set...
          batch_size=batch_size, epochs=num_epochs,
          verbose=1, validation_split=0.1, callbacks=[tbCallback, EarlyStopping(monitor='val_loss',
                                                        patience=5)]) # ...holding out 10% of the data for validation; stop early if val_loss stalls for 5 epochs
score = model.evaluate(X_test, Y_test, verbose=1) # Evaluate the trained model on the test set!
print('Test score is: ', score[0])
print('Accuracy is: ', score[1])
# serialize the architecture to JSON, timestamped so runs don't overwrite each other
model_json = model.to_json()
with open("./models/model_" + timestr + ".json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("./models/model_" + timestr + ".h5")
print("Saved model to disk")
| [
"keras.regularizers.l2",
"gzip.open",
"keras.layers.Activation",
"numpy.random.get_state",
"keras.layers.Dropout",
"numpy.unique",
"keras.layers.Flatten",
"time.strftime",
"numpy.random.set_state",
"numpy.max",
"keras.callbacks.TensorBoard",
"keras.utils.np_utils.to_categorical",
"pickle.loa... | [((923, 951), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H%M"""'], {}), "('%Y%m%d_%H%M')\n", (936, 951), False, 'import time\n'), ((1910, 1925), 'numpy.max', 'np.max', (['X_train'], {}), '(X_train)\n', (1916, 1925), True, 'import numpy as np\n'), ((1970, 1984), 'numpy.max', 'np.max', (['X_test'], {}), '(X_test)\n', (1976, 1984), True, 'import numpy as np\n'), ((2030, 2075), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2053, 2075), False, 'from keras.utils import np_utils\n'), ((2114, 2158), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2137, 2158), False, 'from keras.utils import np_utils\n'), ((2411, 2423), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2421, 2423), False, 'from keras.models import Sequential\n'), ((3590, 3680), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""./Graph"""', 'histogram_freq': '(25)', 'write_graph': '(True)', 'write_images': '(True)'}), "(log_dir='./Graph', histogram_freq=25, write_graph=True,\n write_images=True)\n", (3601, 3680), False, 'from keras.callbacks import TensorBoard, EarlyStopping\n'), ((804, 825), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (823, 825), True, 'import numpy as np\n'), ((830, 850), 'numpy.random.shuffle', 'np.random.shuffle', (['a'], {}), '(a)\n', (847, 850), True, 'import numpy as np\n'), ((855, 885), 'numpy.random.set_state', 'np.random.set_state', (['rng_state'], {}), '(rng_state)\n', (874, 885), True, 'import numpy as np\n'), ((890, 910), 'numpy.random.shuffle', 'np.random.shuffle', (['b'], {}), '(b)\n', (907, 910), True, 'import numpy as np\n'), ((2644, 2662), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2654, 2662), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, 
BatchNormalization, Input\n'), ((2674, 2720), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), '(pool_size=(pool_size, pool_size))\n', (2686, 2720), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((2890, 2908), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (2900, 2908), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((2920, 2966), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), '(pool_size=(pool_size, pool_size))\n', (2932, 2966), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((2978, 2998), 'keras.layers.Dropout', 'Dropout', (['drop_prob_1'], {}), '(drop_prob_1)\n', (2985, 2998), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3168, 3186), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3178, 3186), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3198, 3244), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), '(pool_size=(pool_size, pool_size))\n', (3210, 3244), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3256, 3265), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3263, 3265), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3374, 3392), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3384, 3392), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, 
Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3404, 3424), 'keras.layers.Dropout', 'Dropout', (['drop_prob_2'], {}), '(drop_prob_2)\n', (3411, 3424), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((3553, 3574), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3563, 3574), False, 'from keras.layers import Convolution2D, MaxPooling2D, Dense, Dropout, Flatten, Activation, BatchNormalization, Input\n'), ((506, 530), 'gzip.open', 'gzip.open', (['dataset', '"""rb"""'], {}), "(dataset, 'rb')\n", (515, 530), False, 'import gzip\n'), ((1770, 1788), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (1779, 1788), True, 'import numpy as np\n'), ((597, 630), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (608, 630), False, 'import pickle\n'), ((2618, 2631), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (2620, 2631), False, 'from keras.regularizers import l2\n'), ((2864, 2877), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (2866, 2877), False, 'from keras.regularizers import l2\n'), ((3142, 3155), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (3144, 3155), False, 'from keras.regularizers import l2\n'), ((3348, 3361), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (3350, 3361), False, 'from keras.regularizers import l2\n'), ((3527, 3540), 'keras.regularizers.l2', 'l2', (['l2_lambda'], {}), '(l2_lambda)\n', (3529, 3540), False, 'from keras.regularizers import l2\n'), ((4079, 4124), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(5)'}), "(monitor='val_loss', patience=5)\n", (4092, 4124), False, 'from keras.callbacks import TensorBoard, EarlyStopping\n'), ((694, 708), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (705, 708), False, 'import 
pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
BSD 3-Clause License
Copyright (c) 2020 Okinawa Institute of Science and Technology (OIST).
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Author: <NAME> <<EMAIL>>
Publication:
"Towards hybrid primary intersubjectivity: a neural robotics
library for human science"
<NAME>, <NAME>, <NAME>
Okinawa Institute of Science and Technology Graduate University (OIST)
Cognitive Neurorobotics Research Unit (CNRU)
1919-1, Tancha, Onna, Kunigami District, Okinawa 904-0495, Japan
"""
import sys
import ctypes
from collections import deque
import time
from NRL import NRL
import numpy as np
"""
Demonstration on how to train a model
"""
class DemonstrateTraining(object):
    """Run the background-training demonstration on an NRL handle."""

    def __init__(self, nrl):
        """Announce start/end on stdout and train via ``nrl.t_background()``.

        Args:
            nrl: NRL wrapper object exposing the training entry point.
        """
        print("training demonstration begin")
        nrl.t_background()
        print("training demonstration end")
"""
Demonstration on the computation of on-line post-diction
"""
class DemonstratePostdiction(object):
    """Demonstration of on-line post-diction with a previously trained model.

    Runs a dummy closed loop: each step the network generates a target
    posture, a zeroed "robot" posture is pushed into a sliding window, and
    once the window is full the network post-dicts (error-regresses) over
    the whole window.
    """

    def __init__(self, nrl):
        """Load the model behind ``nrl`` and run the post-diction loop.

        Args:
            nrl: NRL wrapper exposing the C library entry points
                (load, getNDof, e_enable, e_generate, e_postdict, ...).
        """
        print("simulation demonstration begin")
        nrl.load();
        nDof = nrl.getNDof();
        # nDof <= 0 means no model is available (e.g. wrong path / untrained).
        if nDof > 0:
            winSize = 15;                          # sliding-window length (time steps)
            winBufferSize = winSize * nDof;        # flattened window size
            winBuffdata = deque(maxlen=winSize)    # circular buffer of past postures
            primId = 0;                            # primitive id to enable
            # The e_w parameters set below assume the network has two layers
            # as in the original distribution of the sources;
            # in case more layers are set by changing the properties.d file,
            # the same dimension for e_w must be considered.
            e_w = [0.025,0.025];
            expTimeSteps = 15;
            postdiction_epochs = 15;
            # alpha/beta1/beta2: error-regression optimizer hyper-parameters
            # (presumably Adam-style -- confirm against NRL documentation).
            alpha = 0.1;
            beta1 = 0.9;
            beta2 = 0.999;
            storeStates = False;
            storeER = False;
            showERLog = False;
            # Enable error regression for the selected primitive; scalar
            # floats cross the boundary to the C library as ctypes values.
            nrl.e_enable(primId,\
                         winSize,
                         (ctypes.c_float * len(e_w))(*e_w),
                         expTimeSteps,
                         postdiction_epochs,
                         (ctypes.c_float)(alpha),
                         (ctypes.c_float)(beta1),
                         (ctypes.c_float)(beta2),
                         storeStates,
                         storeER)
            # ctype input/output buffers shared with NRL
            tgt_pos_buffer = np.zeros((nDof,), dtype=float)
            dataOut = (ctypes.c_float * nDof)(*tgt_pos_buffer)
            elbo_buffer = np.zeros((3,), dtype=float);
            elboOut = (ctypes.c_float * 3)(*elbo_buffer)
            stateBufferSize = nrl.getStateBufferSize()
            m_state = np.zeros((stateBufferSize,), dtype=float);
            m_stateOut = (ctypes.c_float * stateBufferSize)(*m_state)
            t = 0
            endExperiment = False
            while not endExperiment:
                # get time in ms (for per-iteration timing printed below)
                mst1 = int(round(time.time() * 1000))
                # < --- Here you should read the robot's joint state.
                # Since this is a dummy example, the current posture
                # 'cur_pos' is set to zero.
                cur_pos = np.zeros((nDof,), dtype=float)
                # The target posture is generated by the RNN.
                nrl.e_generate(dataOut)
                tgt_pos = np.frombuffer(dataOut, np.float32)
                # < --- Here you should call asynchronously the robot driver
                # and send it tgt_pos to move the robot.
                # Store the current posture in the sliding window.
                winBuffdata.append(cur_pos)
                if len(winBuffdata) == winSize:
                    # Window full: post-dict over the flattened window.
                    t += 1
                    posWinBufferArray = np.hstack(winBuffdata)
                    nrl.e_postdict((ctypes.c_float * winBufferSize)(*posWinBufferArray), elboOut, showERLog);
                    # Optional: information on free energy minimization
                    # can be obtained and analyzed on-line.
                    opt_elbo = np.frombuffer(elboOut, np.float32).tolist()
                    # Optional: the latent state of the network can be
                    # analyzed on-line or stored for future analysis.
                    nrl.e_getState(m_stateOut)
                    st_data = np.frombuffer(m_stateOut, np.float32)
                # get time in ms
                mst2 = int(round(time.time() * 1000))
                detla_t = mst2 - mst1
                if t > 0:
                    print("Step: {} in {} ms".format(t, detla_t ))
                # For real applications you should set the program to sleep
                # according to the desired loop period.
                period = 0.0 # loop period in ms
                sleepTime = period - detla_t
                if sleepTime > 0:
                    time.sleep(sleepTime/1000)
                if t == expTimeSteps:
                    endExperiment = True
        else:
            print("Hint: the model path may be incorrect, or perhaps it requires to be trained !")
        print("simulation demonstration end")
# ---------------------------------------------------------------------------
# Stand-alone entry point: print usage, create the NRL wrapper, then run the
# demonstrations selected on the command line.
# ---------------------------------------------------------------------------
print("-----------------------------")
print("Neural Robotics Library (NRL)")
print("-----------------------------")
print("This is a demonstration program for stand alone application ")
print("**** Instructions **** ")
print("To run: NRL_SA [PATH] [train|sim]")
print("Arguments")
print("PATH: Full path to the property file distributed in 'src/standalone/data/config/properties.d'")
print("train: trains a model for a generic 16 degrees of freedom robot with dumb data")
print("       the parameters can be selected by editing the file 'properties.d'")
print("sim: simulates on-line interaction with the robot during 50 time steps")
print("     the loop time in milliseconds is shown in the standard output")
print("******************** ")
nrl = NRL()
# Argument parsing: argv[1] must be the properties path; any later argument
# selects a demonstration mode.  Iteration starts at index 1 because argv[0]
# is the program name -- the original loop started at 0, so every run printed
# a spurious "Please indicate a valid argument" message for argv[0].
path = ''
argv = sys.argv
argc = len(argv)
if argc > 1:
    for i in range(1, argc):
        if i == 1:
            path = argv[i]
            print("Loading the properties file from: [{}]".format(path))
            nrl.newModel(path.encode('ascii'))
            continue
        arg_s = argv[i].lower()
        if arg_s == "train":
            DemonstrateTraining(nrl)
        elif arg_s == "sim":
            DemonstratePostdiction(nrl)
        else:
            print("Please indicate a valid argument [train,sim] !")
print("Program end")
| [
"numpy.frombuffer",
"numpy.zeros",
"numpy.hstack",
"time.sleep",
"time.time",
"ctypes.c_float",
"NRL.NRL",
"collections.deque"
] | [((7784, 7789), 'NRL.NRL', 'NRL', ([], {}), '()\n', (7787, 7789), False, 'from NRL import NRL\n'), ((2762, 2783), 'collections.deque', 'deque', ([], {'maxlen': 'winSize'}), '(maxlen=winSize)\n', (2767, 2783), False, 'from collections import deque\n'), ((3889, 3919), 'numpy.zeros', 'np.zeros', (['(nDof,)'], {'dtype': 'float'}), '((nDof,), dtype=float)\n', (3897, 3919), True, 'import numpy as np\n'), ((4073, 4100), 'numpy.zeros', 'np.zeros', (['(3,)'], {'dtype': 'float'}), '((3,), dtype=float)\n', (4081, 4100), True, 'import numpy as np\n'), ((4257, 4298), 'numpy.zeros', 'np.zeros', (['(stateBufferSize,)'], {'dtype': 'float'}), '((stateBufferSize,), dtype=float)\n', (4265, 4298), True, 'import numpy as np\n'), ((3612, 3633), 'ctypes.c_float', 'ctypes.c_float', (['alpha'], {}), '(alpha)\n', (3626, 3633), False, 'import ctypes\n'), ((3662, 3683), 'ctypes.c_float', 'ctypes.c_float', (['beta1'], {}), '(beta1)\n', (3676, 3683), False, 'import ctypes\n'), ((3712, 3733), 'ctypes.c_float', 'ctypes.c_float', (['beta2'], {}), '(beta2)\n', (3726, 3733), False, 'import ctypes\n'), ((4808, 4838), 'numpy.zeros', 'np.zeros', (['(nDof,)'], {'dtype': 'float'}), '((nDof,), dtype=float)\n', (4816, 4838), True, 'import numpy as np\n'), ((4988, 5022), 'numpy.frombuffer', 'np.frombuffer', (['dataOut', 'np.float32'], {}), '(dataOut, np.float32)\n', (5001, 5022), True, 'import numpy as np\n'), ((5443, 5465), 'numpy.hstack', 'np.hstack', (['winBuffdata'], {}), '(winBuffdata)\n', (5452, 5465), True, 'import numpy as np\n'), ((6097, 6134), 'numpy.frombuffer', 'np.frombuffer', (['m_stateOut', 'np.float32'], {}), '(m_stateOut, np.float32)\n', (6110, 6134), True, 'import numpy as np\n'), ((6711, 6739), 'time.sleep', 'time.sleep', (['(sleepTime / 1000)'], {}), '(sleepTime / 1000)\n', (6721, 6739), False, 'import time\n'), ((4577, 4588), 'time.time', 'time.time', ([], {}), '()\n', (4586, 4588), False, 'import time\n'), ((5760, 5794), 'numpy.frombuffer', 'np.frombuffer', (['elboOut', 
'np.float32'], {}), '(elboOut, np.float32)\n', (5773, 5794), True, 'import numpy as np\n'), ((6240, 6251), 'time.time', 'time.time', ([], {}), '()\n', (6249, 6251), False, 'import time\n')] |
import numpy as np
import os
import sys
import plaidml2
import plaidml2.edsl as edsl
import plaidml2.exec as pld_exec
import plaidml2.op as op
import unittest
import numpy.testing as npt
def matmul_2_2(A, B):
    """Matrix-matrix product C[i, k] = sum_j A[i, j] * B[j, k] via an EDSL contraction."""
    rows, inner, cols = edsl.TensorDims(3)
    r, m, c = edsl.TensorIndexes(3)
    A.bind_dims(rows, inner)
    B.bind_dims(inner, cols)
    result = edsl.TensorOutput(rows, cols)
    result[r, c] += A[r, m] * B[m, c]
    return result
def matmul_2_1(A, b):
    """Matrix-vector product C[i] = sum_j A[i, j] * b[j] via an EDSL contraction."""
    rows, cols = edsl.TensorDims(2)
    r, c = edsl.TensorIndexes(2)
    A.bind_dims(rows, cols)
    b.bind_dims(cols)
    result = edsl.TensorOutput(rows)
    result[r] += A[r, c] * b[c]
    return result
def dist(a, b):
    """Pairwise difference table C[i, j] = a[i] - b[j], built as a[i] + (-b)[j]."""
    len_a, len_b = edsl.TensorDims(2)
    ia, ib = edsl.TensorIndexes(2)
    a.bind_dims(len_a)
    minus_b = -b
    minus_b.bind_dims(len_b)
    result = edsl.TensorOutput(len_a, len_b)
    result[ia, ib] = a[ia] + minus_b[ib]
    return result
def get_jacobian(Is, I_dat, O, wrt):
    """Compile and run a program evaluating the Jacobian d(O)/d(wrt).

    Args:
        Is: list of EDSL input tensors.
        I_dat: matching list of numpy arrays fed into those tensors.
        O: the EDSL output tensor being differentiated.
        wrt: the input tensor to differentiate with respect to.

    Returns:
        The Jacobian values as a numpy ndarray.
    """
    deriv = edsl.jacobian(O, [wrt])[0]
    prog = edsl.Program('program', [O, deriv])
    binder = pld_exec.Binder(prog)
    exe = binder.compile()
    for idx in range(len(Is)):
        binder.input(Is[idx]).copy_from_ndarray(I_dat[idx])
    exe.run()
    return binder.output(deriv).as_ndarray()
class GradTest(unittest.TestCase):
    """Checks EDSL autodiff Jacobians against hand-computed numpy results."""

    def test_ident(self):
        """d(x)/d(x) of a length-3 vector is the 3x3 identity."""
        np_x = np.array([1, 2, 3])
        dtype = plaidml2.DType.FLOAT32
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        test_result = get_jacobian([x], [np_x], x, x)
        true_result = np.eye(3)
        npt.assert_allclose(test_result, true_result)
    def test_square(self):
        """d(x^2)/d(x) is diagonal with entries 2*x."""
        np_x = np.array([1, 2, 3])
        dtype = plaidml2.DType.FLOAT32
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        y = op.square(x)
        test_result = get_jacobian([x], [np_x], y, x)
        true_result = np.array([[2, 0, 0], [0, 4, 0], [0, 0, 6]])
        npt.assert_allclose(test_result, true_result)
    def test_assign(self):
        """Jacobian of (x[i] - b[j])^2 w.r.t. x; dist uses a[i] + (-b)[j]."""
        np_x = np.array([1, 2, 3])
        np_b = np.array([1, 1, 1])
        dtype = plaidml2.DType.FLOAT32
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        b = edsl.Tensor(edsl.LogicalShape(dtype, np_b.shape))
        y = op.square(dist(x, b))
        test_result = get_jacobian([x, b], [np_x, np_b], y, x)
        # Expected d(y[i, j])/d(x[k]) = 2 * (x[i] - b[j]) when k == i, else 0.
        true_result = np.zeros((3, 3, 3))
        true_result[0, :, 0] = 0
        true_result[1, :, 1] = 2
        true_result[2, :, 2] = 4
        npt.assert_allclose(test_result, true_result)
    def test_matmul_2_1(self):
        """d(A @ x)/d(x) equals A itself."""
        np_A = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
        np_x = np.array([1., 2., 3.])
        dtype = plaidml2.DType.FLOAT32
        A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape))
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        y = matmul_2_1(A, x)
        test_result = get_jacobian([A, x], [np_A, np_x], y, x)
        true_result = np_A
        npt.assert_allclose(test_result, true_result)
    def test_matmul_2_2(self):
        """d(A @ X)/d(X) for 2x2 matrices; 4-D Jacobian laid out (i, k, j, l)."""
        np_A = np.array([[1., 2.], [3., 4.]])
        np_x = np.array([[5., 6.], [7., 8.]])
        dtype = plaidml2.DType.FLOAT32
        A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape))
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        y = matmul_2_2(A, x)
        test_result = get_jacobian([A, x], [np_A, np_x], y, x)
        true_result = np.array([[[[1, 0], [2, 0]], [[0, 1], [0, 2]]],
                                [[[3, 0], [4, 0]], [[0, 3], [0, 4]]]])
        npt.assert_allclose(test_result, true_result)
    def test_chain(self):
        """Chained ops: d(A @ dist(x, x))/d(x) against hand-computed slices."""
        np_x = np.array([1., 2., 3.])
        np_A = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
        dtype = plaidml2.DType.FLOAT32
        A = edsl.Tensor(edsl.LogicalShape(dtype, np_A.shape))
        x = edsl.Tensor(edsl.LogicalShape(dtype, np_x.shape))
        y = matmul_2_2(A, dist(x, x))
        J_test = get_jacobian([A, x], [np_A, np_x], y, x)
        J_true = np.zeros((3, 3, 3))
        J_true[:, :, 0] = [[-5, 1, 1], [-11, 4, 4], [-17, 7, 7]]
        J_true[:, :, 1] = [[2, -4, 2], [5, -10, 5], [8, -16, 8]]
        J_true[:, :, 2] = [[3, 3, -3], [6, 6, -9], [9, 9, -15]]
        npt.assert_allclose(J_true, J_test)
# Allow running this test module directly: `python <this_file>`.
if __name__ == '__main__':
    unittest.main()
| [
"plaidml2.edsl.TensorDims",
"unittest.main",
"plaidml2.edsl.LogicalShape",
"plaidml2.edsl.TensorIndexes",
"plaidml2.exec.Binder",
"numpy.testing.assert_allclose",
"numpy.zeros",
"plaidml2.edsl.jacobian",
"numpy.array",
"plaidml2.op.square",
"numpy.eye",
"plaidml2.edsl.Program",
"plaidml2.eds... | [((227, 245), 'plaidml2.edsl.TensorDims', 'edsl.TensorDims', (['(3)'], {}), '(3)\n', (242, 245), True, 'import plaidml2.edsl as edsl\n'), ((260, 281), 'plaidml2.edsl.TensorIndexes', 'edsl.TensorIndexes', (['(3)'], {}), '(3)\n', (278, 281), True, 'import plaidml2.edsl as edsl\n'), ((334, 357), 'plaidml2.edsl.TensorOutput', 'edsl.TensorOutput', (['I', 'K'], {}), '(I, K)\n', (351, 357), True, 'import plaidml2.edsl as edsl\n'), ((441, 459), 'plaidml2.edsl.TensorDims', 'edsl.TensorDims', (['(2)'], {}), '(2)\n', (456, 459), True, 'import plaidml2.edsl as edsl\n'), ((471, 492), 'plaidml2.edsl.TensorIndexes', 'edsl.TensorIndexes', (['(2)'], {}), '(2)\n', (489, 492), True, 'import plaidml2.edsl as edsl\n'), ((542, 562), 'plaidml2.edsl.TensorOutput', 'edsl.TensorOutput', (['I'], {}), '(I)\n', (559, 562), True, 'import plaidml2.edsl as edsl\n'), ((634, 652), 'plaidml2.edsl.TensorDims', 'edsl.TensorDims', (['(2)'], {}), '(2)\n', (649, 652), True, 'import plaidml2.edsl as edsl\n'), ((664, 685), 'plaidml2.edsl.TensorIndexes', 'edsl.TensorIndexes', (['(2)'], {}), '(2)\n', (682, 685), True, 'import plaidml2.edsl as edsl\n'), ((747, 770), 'plaidml2.edsl.TensorOutput', 'edsl.TensorOutput', (['I', 'J'], {}), '(I, J)\n', (764, 770), True, 'import plaidml2.edsl as edsl\n'), ((903, 935), 'plaidml2.edsl.Program', 'edsl.Program', (['"""program"""', '[O, dy]'], {}), "('program', [O, dy])\n", (915, 935), True, 'import plaidml2.edsl as edsl\n'), ((949, 973), 'plaidml2.exec.Binder', 'pld_exec.Binder', (['program'], {}), '(program)\n', (964, 973), True, 'import plaidml2.exec as pld_exec\n'), ((4178, 4193), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4191, 4193), False, 'import unittest\n'), ((862, 885), 'plaidml2.edsl.jacobian', 'edsl.jacobian', (['O', '[wrt]'], {}), '(O, [wrt])\n', (875, 885), True, 'import plaidml2.edsl as edsl\n'), ((1235, 1254), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1243, 1254), True, 'import numpy as np\n'), ((1434, 
1443), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1440, 1443), True, 'import numpy as np\n'), ((1453, 1498), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['test_result', 'true_result'], {}), '(test_result, true_result)\n', (1472, 1498), True, 'import numpy.testing as npt\n'), ((1542, 1561), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1550, 1561), True, 'import numpy as np\n'), ((1676, 1688), 'plaidml2.op.square', 'op.square', (['x'], {}), '(x)\n', (1685, 1688), True, 'import plaidml2.op as op\n'), ((1766, 1809), 'numpy.array', 'np.array', (['[[2, 0, 0], [0, 4, 0], [0, 0, 6]]'], {}), '([[2, 0, 0], [0, 4, 0], [0, 0, 6]])\n', (1774, 1809), True, 'import numpy as np\n'), ((1819, 1864), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['test_result', 'true_result'], {}), '(test_result, true_result)\n', (1838, 1864), True, 'import numpy.testing as npt\n'), ((1908, 1927), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1916, 1927), True, 'import numpy as np\n'), ((1943, 1962), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (1951, 1962), True, 'import numpy as np\n'), ((2247, 2266), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (2255, 2266), True, 'import numpy as np\n'), ((2375, 2420), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['test_result', 'true_result'], {}), '(test_result, true_result)\n', (2394, 2420), True, 'import numpy.testing as npt\n'), ((2468, 2529), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n', (2476, 2529), True, 'import numpy as np\n'), ((2536, 2561), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (2544, 2561), True, 'import numpy as np\n'), ((2852, 2897), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['test_result', 'true_result'], {}), '(test_result, true_result)\n', (2871, 2897), True, 'import 
numpy.testing as npt\n'), ((2945, 2979), 'numpy.array', 'np.array', (['[[1.0, 2.0], [3.0, 4.0]]'], {}), '([[1.0, 2.0], [3.0, 4.0]])\n', (2953, 2979), True, 'import numpy as np\n'), ((2991, 3025), 'numpy.array', 'np.array', (['[[5.0, 6.0], [7.0, 8.0]]'], {}), '([[5.0, 6.0], [7.0, 8.0]])\n', (2999, 3025), True, 'import numpy as np\n'), ((3301, 3391), 'numpy.array', 'np.array', (['[[[[1, 0], [2, 0]], [[0, 1], [0, 2]]], [[[3, 0], [4, 0]], [[0, 3], [0, 4]]]]'], {}), '([[[[1, 0], [2, 0]], [[0, 1], [0, 2]]], [[[3, 0], [4, 0]], [[0, 3],\n [0, 4]]]])\n', (3309, 3391), True, 'import numpy as np\n'), ((3429, 3474), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['test_result', 'true_result'], {}), '(test_result, true_result)\n', (3448, 3474), True, 'import numpy.testing as npt\n'), ((3517, 3542), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (3525, 3542), True, 'import numpy as np\n'), ((3555, 3616), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]'], {}), '([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])\n', (3563, 3616), True, 'import numpy as np\n'), ((3886, 3905), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (3894, 3905), True, 'import numpy as np\n'), ((4109, 4144), 'numpy.testing.assert_allclose', 'npt.assert_allclose', (['J_true', 'J_test'], {}), '(J_true, J_test)\n', (4128, 4144), True, 'import numpy.testing as npt\n'), ((1319, 1355), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (1336, 1355), True, 'import plaidml2.edsl as edsl\n'), ((1626, 1662), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (1643, 1662), True, 'import plaidml2.edsl as edsl\n'), ((2027, 2063), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (2044, 2063), True, 'import plaidml2.edsl as edsl\n'), ((2089, 2125), 
'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_b.shape'], {}), '(dtype, np_b.shape)\n', (2106, 2125), True, 'import plaidml2.edsl as edsl\n'), ((2623, 2659), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_A.shape'], {}), '(dtype, np_A.shape)\n', (2640, 2659), True, 'import plaidml2.edsl as edsl\n'), ((2685, 2721), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (2702, 2721), True, 'import plaidml2.edsl as edsl\n'), ((3086, 3122), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_A.shape'], {}), '(dtype, np_A.shape)\n', (3103, 3122), True, 'import plaidml2.edsl as edsl\n'), ((3148, 3184), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (3165, 3184), True, 'import plaidml2.edsl as edsl\n'), ((3672, 3708), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_A.shape'], {}), '(dtype, np_A.shape)\n', (3689, 3708), True, 'import plaidml2.edsl as edsl\n'), ((3734, 3770), 'plaidml2.edsl.LogicalShape', 'edsl.LogicalShape', (['dtype', 'np_x.shape'], {}), '(dtype, np_x.shape)\n', (3751, 3770), True, 'import plaidml2.edsl as edsl\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.fluid.framework import _test_eager_guard
import numpy as np
import scipy
import scipy.sparse as sp
import unittest
import os
import re
import math
# Seed numpy's global RNG so the random sparse test inputs are reproducible.
np.random.seed(2022)
class TestCsrSoftmax(unittest.TestCase):
    """Check paddle's sparse CSR softmax forward and backward against numpy."""

    def test_softmax(self):
        """Forward softmax over CSR values and its gradient match a numpy reference."""
        with _test_eager_guard():
            # Random 1x5 matrix with roughly half the entries zeroed out.
            mask = np.random.rand(1, 5) < 0.5
            np_x = np.random.rand(1, 5) * mask
            np_csr = sp.csr_matrix(np_x)
            row_number = np_csr.shape[0]
            # Reference forward pass: numerically stable softmax applied
            # independently to the stored (non-zero) entries of each row.
            np_out = np.array([])
            for i in range(row_number):
                start = np_csr.indptr[i]
                end = np_csr.indptr[i + 1]
                if start == end:
                    continue
                x = np_csr.data[start:end]
                x_max = np.max(x, keepdims=True)
                x_exp = np.exp(x - x_max)
                x_exp_sum = np.sum(x_exp, keepdims=True)
                np_out = np.concatenate([np_out, x_exp / x_exp_sum])
            csr = paddle.to_tensor(np_x, stop_gradient=False).to_sparse_csr()
            m = paddle.incubate.sparse.nn.Softmax()
            out = m(csr)
            self.assertTrue(np.allclose(out.crows().numpy(), np_csr.indptr))
            self.assertTrue(np.allclose(out.cols().numpy(), np_csr.indices))
            self.assertTrue(np.allclose(out.values().numpy(), np_out))
            # Reference backward pass: dx = (dout - sum(dout * out)) * out,
            # using the input values themselves as dout.
            out.backward(csr.detach())
            for i in range(row_number):
                start = np_csr.indptr[i]
                end = np_csr.indptr[i + 1]
                if start == end:
                    continue
                out = np_out[start:end]
                dout = np_csr.data[start:end]
                # Renamed from `sum` to avoid shadowing the builtin.
                dout_dot_out = np.sum(dout * out, keepdims=True)
                dx = (dout - dout_dot_out) * out
                self.assertTrue(np.allclose(csr.grad.crows().numpy(),
                                            np_csr.indptr))
                self.assertTrue(np.allclose(csr.grad.cols().numpy(),
                                            np_csr.indices))
                self.assertTrue(np.allclose(csr.grad.values().numpy(), dx))
# Allow running this test module directly: `python <this_file>`.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"numpy.random.seed",
"numpy.sum",
"paddle.incubate.sparse.nn.Softmax",
"numpy.max",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.exp",
"numpy.random.rand",
"paddle.fluid.framework._test_eager_guard",
"paddle.to_tensor",
"numpy.concatenate"
] | [((786, 806), 'numpy.random.seed', 'np.random.seed', (['(2022)'], {}), '(2022)\n', (800, 806), True, 'import numpy as np\n'), ((2771, 2786), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2784, 2786), False, 'import unittest\n'), ((892, 911), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (909, 911), False, 'from paddle.fluid.framework import _test_eager_guard\n'), ((1027, 1046), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['np_x'], {}), '(np_x)\n', (1040, 1046), True, 'import scipy.sparse as sp\n'), ((1110, 1122), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1118, 1122), True, 'import numpy as np\n'), ((1664, 1699), 'paddle.incubate.sparse.nn.Softmax', 'paddle.incubate.sparse.nn.Softmax', ([], {}), '()\n', (1697, 1699), False, 'import paddle\n'), ((932, 952), 'numpy.random.rand', 'np.random.rand', (['(1)', '(5)'], {}), '(1, 5)\n', (946, 952), True, 'import numpy as np\n'), ((978, 998), 'numpy.random.rand', 'np.random.rand', (['(1)', '(5)'], {}), '(1, 5)\n', (992, 998), True, 'import numpy as np\n'), ((1376, 1400), 'numpy.max', 'np.max', (['x'], {'keepdims': '(True)'}), '(x, keepdims=True)\n', (1382, 1400), True, 'import numpy as np\n'), ((1425, 1442), 'numpy.exp', 'np.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (1431, 1442), True, 'import numpy as np\n'), ((1471, 1499), 'numpy.sum', 'np.sum', (['x_exp'], {'keepdims': '(True)'}), '(x_exp, keepdims=True)\n', (1477, 1499), True, 'import numpy as np\n'), ((1525, 1568), 'numpy.concatenate', 'np.concatenate', (['[np_out, x_exp / x_exp_sum]'], {}), '([np_out, x_exp / x_exp_sum])\n', (1539, 1568), True, 'import numpy as np\n'), ((2347, 2380), 'numpy.sum', 'np.sum', (['(dout * out)'], {'keepdims': '(True)'}), '(dout * out, keepdims=True)\n', (2353, 2380), True, 'import numpy as np\n'), ((1588, 1631), 'paddle.to_tensor', 'paddle.to_tensor', (['np_x'], {'stop_gradient': '(False)'}), '(np_x, stop_gradient=False)\n', (1604, 1631), False, 'import paddle\n')] |
"""Bot class."""
import action
import config_stat
import debug
from drifter_db import DataManager
import numpy as np
class Bot(object):
    """A Twitter bot that performs one randomly chosen action per call."""

    def __init__(self, username, bot_id=None):
        """Create the bot and persist its background profile.

        Args:
            username: handle the bot operates as.
            bot_id: optional existing bot id to resume; when omitted the
                data manager presumably creates a new record -- confirm in
                drifter_db.
        """
        self.username = username
        self.db_manager = DataManager(username, bot_id=bot_id)
        self.db_manager.SaveBackground()

    def action(self):
        """Sample one action by the configured probabilities and execute it.

        Returns:
            Tuple (selected_action, source, result): the chosen action name,
            the item it operated on, and the action's outcome.

        Raises:
            ValueError: if config_stat.prob_event contains an action name
                with no corresponding implementation.
        """
        keys = list(config_stat.prob_event)
        prob_lst = [config_stat.prob_event[k] for k in keys]
        selected_action = np.random.choice(keys, 1, p=prob_lst)[0]
        # Lazily-constructed dispatch table: only the selected action object
        # is instantiated, matching the original if/elif chain.
        factories = {
            'like': lambda: action.Like(self.username),
            'retweet': lambda: action.Retweet(self.username),
            'follow': lambda: action.Follow(self.username),
            'unfollow': lambda: action.Unfollow(self.username,
                                                 config_stat.unfollow_method),
            'replymention': lambda: action.ReplyMention(self.username),
            'tweet': lambda: action.PostTweet(self.username, True),
        }
        try:
            current_action = factories[selected_action]()
        except KeyError:
            # The original chain left `current_action` unbound here, raising
            # an obscure NameError; fail with an explicit message instead.
            raise ValueError(
                'Unsupported action in config_stat.prob_event: '
                '{}'.format(selected_action))
        result = current_action.act()
        source = current_action.select_source
        return selected_action, source, result
| [
"action.Retweet",
"action.Like",
"drifter_db.DataManager",
"action.Unfollow",
"action.PostTweet",
"numpy.random.choice",
"action.ReplyMention",
"action.Follow"
] | [((234, 270), 'drifter_db.DataManager', 'DataManager', (['username'], {'bot_id': 'bot_id'}), '(username, bot_id=bot_id)\n', (245, 270), False, 'from drifter_db import DataManager\n'), ((449, 486), 'numpy.random.choice', 'np.random.choice', (['keys', '(1)'], {'p': 'prob_lst'}), '(keys, 1, p=prob_lst)\n', (465, 486), True, 'import numpy as np\n'), ((566, 592), 'action.Like', 'action.Like', (['self.username'], {}), '(self.username)\n', (577, 592), False, 'import action\n'), ((677, 706), 'action.Retweet', 'action.Retweet', (['self.username'], {}), '(self.username)\n', (691, 706), False, 'import action\n'), ((789, 817), 'action.Follow', 'action.Follow', (['self.username'], {}), '(self.username)\n', (802, 817), False, 'import action\n'), ((881, 940), 'action.Unfollow', 'action.Unfollow', (['self.username', 'config_stat.unfollow_method'], {}), '(self.username, config_stat.unfollow_method)\n', (896, 940), False, 'import action\n'), ((1008, 1042), 'action.ReplyMention', 'action.ReplyMention', (['self.username'], {}), '(self.username)\n', (1027, 1042), False, 'import action\n'), ((1103, 1140), 'action.PostTweet', 'action.PostTweet', (['self.username', '(True)'], {}), '(self.username, True)\n', (1119, 1140), False, 'import action\n')] |
import numpy as np
import pandas as pd
import sklearn as scikit
import tensorflow as tf
from preprocessing import Preprocessing
from evaluation import EvaluationClient
from sklearn.model_selection import train_test_split
# #####################################################################################################################
# Implementation of Pre-Processing
# #####################################################################################################################
class MyPreprocess(Preprocessing):
    """Turns raw MNIST CSV rows into model-ready image/label arrays."""

    def prepare(self, data):
        """Split rows into normalized image tensors and one-hot labels.

        Args:
            data: 2-D array whose first column is the label and whose
                remaining columns are pixel values (presumably 784 values in
                [0, 255] -- confirm against the data source).

        Returns:
            Tuple (x, y): images reshaped to (-1, 28, 28, 1) and scaled to
            [0, 1], and one-hot encoded labels.
        """
        images = data[:, 1:].reshape((-1, 28, 28, 1)) / 255.
        labels = tf.keras.utils.to_categorical(np.abs(data[:, 0]))
        return images, labels
if __name__ == "__main__":
    MODEL_NAME = "MNIST_Synthetic"
    # MNIST images are 28x28 grayscale (single channel).
    input_shape = (28, 28, 1)
    print (f"""Using Tensorflow version {tf.__version__}""")
    print ("*" * 80)
    print ("""---- THIS IS THE EVALUATION OF THE MODEL TRAINED DIRECTLY WITH REAL DATA""")
    print("*" * 80)
    # ################################################################################
    # LOADING REAL DATA
    # NOTE(review): path is relative to the working directory; the CSV is
    # presumably label-first, then pixel columns -- confirm against the dataset.
    mnist_data = pd.read_csv("../../data/source/mnist.csv")
    print(f"""MNIST DS shape:{mnist_data.shape}""")
    # Only the first 7000 rows are used, split 80/20 into train/test.
    train, test = train_test_split(mnist_data.values[:7000], train_size=0.8)
    # ################################################################################
    # Preprocessing: reshape/normalize images and one-hot encode labels.
    pre_proc = MyPreprocess()
    x_train, y_train = pre_proc.prepare(train)
    x_test, y_test = pre_proc.prepare(test)
    print(f"""TRAIN Preprocessed data: x:{x_train.shape}, y:{y_train.shape}""")
    print(f"""TEST Preprocessed data: x:{x_test.shape}, y:{y_test.shape}""")
    # ################################################################################
    # DEFINING THE MODEL AND TRAINING
    # Small VGG-style CNN: two conv blocks followed by a dense classifier head.
    model = tf.keras.models.Sequential(name=MODEL_NAME)
    model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))
    model.add(tf.keras.layers.MaxPooling2D((2, 2)))
    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2)))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(100, activation='relu', kernel_initializer='he_uniform'))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))
    model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9),
                  loss=tf.keras.losses.CategoricalCrossentropy() ,
                  metrics=['accuracy'])
    model.summary()
    # ################################################################################
    # Training
    model.fit(x_train, y_train, batch_size=32, epochs=10)
    # ################################################################################
    # Local Evaluation on the held-out split.
    print()
    print(f"={'Evaluating using Real data':^78}=")
    print(model.evaluate(x_test, y_test))
| [
"tensorflow.keras.utils.to_categorical",
"numpy.abs",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.losses.CategoricalCross... | [((1147, 1189), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/source/mnist.csv"""'], {}), "('../../data/source/mnist.csv')\n", (1158, 1189), True, 'import pandas as pd\n'), ((1261, 1319), 'sklearn.model_selection.train_test_split', 'train_test_split', (['mnist_data.values[:7000]'], {'train_size': '(0.8)'}), '(mnist_data.values[:7000], train_size=0.8)\n', (1277, 1319), False, 'from sklearn.model_selection import train_test_split\n'), ((2023, 2066), 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {'name': 'MODEL_NAME'}), '(name=MODEL_NAME)\n', (2049, 2066), True, 'import tensorflow as tf\n'), ((645, 663), 'numpy.abs', 'np.abs', (['data[:, 0]'], {}), '(data[:, 0])\n', (651, 663), True, 'import numpy as np\n'), ((676, 708), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y'], {}), '(y)\n', (705, 708), True, 'import tensorflow as tf\n'), ((2081, 2197), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""', 'input_shape': '(28, 28, 1)'}), "(32, (3, 3), activation='relu', kernel_initializer=\n 'he_uniform', input_shape=(28, 28, 1))\n", (2103, 2197), True, 'import tensorflow as tf\n'), ((2208, 2244), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2236, 2244), True, 'import tensorflow as tf\n'), ((2260, 2351), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(64, (3, 3), activation='relu', kernel_initializer=\n 'he_uniform')\n", (2282, 2351), True, 'import tensorflow as tf\n'), ((2362, 2453), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(64, (3, 3), activation='relu', kernel_initializer=\n 
'he_uniform')\n", (2384, 2453), True, 'import tensorflow as tf\n'), ((2464, 2500), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2492, 2500), True, 'import tensorflow as tf\n'), ((2516, 2541), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (2539, 2541), True, 'import tensorflow as tf\n'), ((2557, 2635), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_uniform"""'}), "(100, activation='relu', kernel_initializer='he_uniform')\n", (2578, 2635), True, 'import tensorflow as tf\n'), ((2651, 2698), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (2672, 2698), True, 'import tensorflow as tf\n'), ((2729, 2786), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)', 'momentum': '(0.9)'}), '(learning_rate=0.01, momentum=0.9)\n', (2752, 2786), True, 'import tensorflow as tf\n'), ((2811, 2852), 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), '()\n', (2850, 2852), True, 'import tensorflow as tf\n')] |
"""Evaluate Baseline.
Baseline results are saved in the ```./baseline``` folder.
Examples
--------
python baseline.py --problem=conv_train --optimizer=adam
Arguments
---------
--vgpu : int >= 1
(debug) Number of virtual GPUs to create for testing. If 1, no virtual GPUs
are created, and a mirrored strategy is created with all physical GPUs.
--cpu : bool
Whether to run on CPU instead of GPU.
--gpus : int[]
Comma separated list of GPU indices to use on a multi-gpu system.
--keras : bool
Whether to use keras versions of each optimizer or manually coded version.
--problem : str
Training problem to use.
--optimizer : str
Name of optimizer to use.
--repeat : int
Number of times to run evaluation.
"""
import os
import sys
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import l2o
from config import ArgParser, get_eval_problem
from gpu_setup import create_distribute
# ---------------------------------------------------------------------------
# Command-line parsing (flags are popped one by one; leftovers are an error).
# ---------------------------------------------------------------------------
args = ArgParser(sys.argv[1:])
# (debug) number of virtual GPUs; 1 means "use the physical GPUs as-is".
vgpus = args.pop_get("--vgpu", default=1, dtype=int)
cpu = args.pop_get("--cpu", default=False, dtype=bool)
gpus = args.pop_get("--gpus", default=None)
use_keras = args.pop_get("--keras", default=True, dtype=bool)
distribute = create_distribute(vgpus=vgpus, do_cpu=cpu, gpus=gpus)
problems = args.pop_get("--problem", "conv_train")
target = args.pop_get("--optimizer", "adam")
# Optimizer name -> keras-style serialization dict.  The "*_deep" variants use
# smaller learning rates (presumably tuned for deeper problems — see configs).
target_cfg = {
    "adam": {
        "class_name": "Adam",
        "config": {"learning_rate": 0.005, "beta_1": 0.9, "beta_2": 0.999}
    },
    "rmsprop": {
        "class_name": "RMSProp",
        "config": {"learning_rate": 0.005, "rho": 0.9}
    },
    "sgd": {
        "class_name": "SGD",
        "config": {"learning_rate": 0.2}
    },
    "momentum": {
        "class_name": "SGD",
        "config": {"learning_rate": 0.5, "momentum": 0.9}
    },
    "momentum_custom": {
        "class_name": "Momentum",
        "config": {"learning_rate": 0.5, "beta_1": 0.9}
    },
    "addsign": {
        "class_name": "AddSign",
        "config": {"learning_rate": 0.1, "beta_1": 0.9, "beta_2": 0.999}
    },
    "powersign": {
        "class_name": "PowerSign",
        "config": {"learning_rate": 0.1, "beta_1": 0.9, "beta_2": 0.999}
    },
    "adam_deep": {
        "class_name": "Adam",
        "config": {"learning_rate": 0.001, "beta_1": 0.9, "beta_2": 0.999}
    },
    "rmsprop_deep": {
        "class_name": "RMSProp",
        "config": {"learning_rate": 0.0005, "rho": 0.9}
    },
    "sgd_deep": {
        "class_name": "SGD",
        "config": {"learning_rate": 0.2}
    },
    "momentum_deep": {
        "class_name": "Momentum",
        "config": {"learning_rate": 0.2, "beta_1": 0.9}
    },
    "addsign_deep": {
        "class_name": "AddSign",
        "config": {"learning_rate": 0.05, "beta_1": 0.9, "beta_2": 0.999}
    },
    "powersign_deep": {
        "class_name": "PowerSign",
        "config": {"learning_rate": 0.05, "beta_1": 0.9, "beta_2": 0.999}
    },
}[target]
repeat = args.pop_get("--repeat", default=10, dtype=int)
problems = problems.split(",")
args.assert_empty()  # fail fast on unrecognized flags
# ---------------------------------------------------------------------------
# Evaluation loop: train each problem `repeat` times and save stacked metrics.
# ---------------------------------------------------------------------------
for problem in problems:
    kwargs = get_eval_problem(problem)
    # Problems that specify a step count are evaluated as plain functions;
    # everything else is evaluated as a trainable model.
    if "steps" in kwargs:
        evaluator = l2o.evaluate.evaluate_function
    else:
        evaluator = l2o.evaluate.evaluate_model
    with distribute.scope():
        results = []
        for i in range(repeat):
            print("Evaluation Training {}/{}".format(i + 1, repeat))
            if use_keras:
                # Built-in keras optimizer, deserialized from the config dict.
                opt = tf.keras.optimizers.get(target_cfg)
            else:
                # Manually-coded optimizer policy from this project.
                pol = l2o.deserialize.policy(target_cfg)
                opt = pol.architecture(pol)
            results.append(evaluator(opt, **kwargs))
    # Stack each metric across the repeats: {metric: array with leading repeat axis}.
    results = {k: np.stack([d[k] for d in results]) for k in results[0]}
    os.makedirs(os.path.join("baseline", target), exist_ok=True)
    np.savez(os.path.join("baseline", target, problem), **results)
| [
"numpy.stack",
"tensorflow.keras.optimizers.get",
"gpu_setup.create_distribute",
"l2o.deserialize.policy",
"config.get_eval_problem",
"os.path.join",
"config.ArgParser"
] | [((952, 975), 'config.ArgParser', 'ArgParser', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (961, 975), False, 'from config import ArgParser, get_eval_problem\n'), ((1203, 1256), 'gpu_setup.create_distribute', 'create_distribute', ([], {'vgpus': 'vgpus', 'do_cpu': 'cpu', 'gpus': 'gpus'}), '(vgpus=vgpus, do_cpu=cpu, gpus=gpus)\n', (1220, 1256), False, 'from gpu_setup import create_distribute\n'), ((3096, 3121), 'config.get_eval_problem', 'get_eval_problem', (['problem'], {}), '(problem)\n', (3112, 3121), False, 'from config import ArgParser, get_eval_problem\n'), ((3688, 3721), 'numpy.stack', 'np.stack', (['[d[k] for d in results]'], {}), '([d[k] for d in results])\n', (3696, 3721), True, 'import numpy as np\n'), ((3764, 3796), 'os.path.join', 'os.path.join', (['"""baseline"""', 'target'], {}), "('baseline', target)\n", (3776, 3796), False, 'import os\n'), ((3830, 3871), 'os.path.join', 'os.path.join', (['"""baseline"""', 'target', 'problem'], {}), "('baseline', target, problem)\n", (3842, 3871), False, 'import os\n'), ((3458, 3493), 'tensorflow.keras.optimizers.get', 'tf.keras.optimizers.get', (['target_cfg'], {}), '(target_cfg)\n', (3481, 3493), True, 'import tensorflow as tf\n'), ((3534, 3568), 'l2o.deserialize.policy', 'l2o.deserialize.policy', (['target_cfg'], {}), '(target_cfg)\n', (3556, 3568), False, 'import l2o\n')] |
import itertools
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
# --- Matplotlib configuration ------------------------------------------------
mpl.rcParams['pdf.fonttype'] = 42  # embed TrueType fonts so PDFs stay editable
plt.style.use('fivethirtyeight')
mpl.rcParams['savefig.transparent'] = True
mpl.rcParams['savefig.pad_inches'] = 0.1

# --- Run configuration -------------------------------------------------------
config = "2_Seq_c"    # name of the training run whose history is plotted
true_epoch_len = 1    # recorded iterations per pass over the data
smoothing_param = 30  # rolling-mean window for the smoothed overlay  # math.floor(221 / 2)

# history.npy stores a pickled dict of metric lists.  allow_pickle=True is
# required since numpy 1.16.3, where it became False by default for security;
# without it np.load raises "Object arrays cannot be loaded".
history = np.load("../training_results/" + config + "/history.npy",
                  allow_pickle=True).item()
loss_reg = history['loss']
loss_unreg = history['unreg_loss_loss']
acc_2 = history['matches_accuracy_metric_2']
acc_5 = history['matches_accuracy_metric_5']
acc_10 = history['matches_accuracy_metric_10']
epe = history['matches_end_point_error_metric']

# Padded global axis bounds, kept for manual ylim tweaking in the plot code
# below (the ylim calls are currently commented out there).
max_loss = max(max(loss_reg), max(loss_unreg)) + 0.1
min_loss = min(min(loss_reg), min(loss_unreg)) - 0.1
max_acc = max(max(acc_2), max(acc_5), max(acc_10)) + 0.004
min_acc = min(min(acc_2), min(acc_5), min(acc_10)) - 0.1
def plot_accuracy(data, smoothing):
    """Plot a per-iteration metric curve with a red rolling-mean overlay.

    The curve is split into segments of ``true_epoch_len`` iterations (one
    segment per pass over the data, via the module-level ``true_epoch_len``)
    and each segment gets its own rolling mean, so smoothing never bridges a
    pass boundary.

    Parameters
    ----------
    data : sequence of float
        One metric value per recorded iteration.
    smoothing : int
        Window size (in iterations) for the rolling mean.
    """
    epochs = len(data)
    # White spines blend into the 'fivethirtyeight' style background.
    ax = plt.gca()
    for side in ('bottom', 'top', 'right', 'left'):
        ax.spines[side].set_color('white')
    num_passings = math.floor(epochs / true_epoch_len)
    # One x-tick every 10 passes over the data.
    plt.xticks([i * true_epoch_len for i in range(0, num_passings + 1, 10)])
    # Raw curve first, then the smoothed overlay per segment.
    plt.plot(range(1, epochs + 1), data, linewidth=3)
    for i in range(num_passings):
        segment = data[true_epoch_len * i:true_epoch_len * (i + 1)]
        smoothed = pd.Series(segment).rolling(window=smoothing).mean().values
        plt.plot(range(true_epoch_len * i, true_epoch_len * (i + 1)), smoothed, color="red")
# Render every tracked metric with the same plot/label/save/clear sequence.
# Each entry: (metric series, y-axis label, output file name).
_PLOTS = [
    (loss_reg, "loss (regularized)", "regularized_loss.pdf"),
    (loss_unreg, "loss", "unregularized_loss.pdf"),
    (acc_2, "accuracy@2", "train_acc_2.pdf"),
    (acc_5, "accuracy@5", "train_acc_5.pdf"),
    (acc_10, "accuracy@10", "train_acc_10.pdf"),
    (epe, "end-point-error", "train_epe.pdf"),
]
for _series, _ylabel, _fname in _PLOTS:
    plot_accuracy(_series, smoothing_param)
    plt.ylabel(_ylabel)
    plt.xlabel("epochs")
    plt.savefig("../training_results/" + config + "/" + _fname,
                dpi=300, format='pdf', bbox_inches='tight')
    plt.clf()

# Run statistics from the original training log (kept for reference):
# Epoch 2942/2942
# All sequences processed. Skipped windows: 5784
# Total windows: 88060
# --- Runtime without imports: 8439.563656330109 seconds ---
# Total windows: 352240
# --- Runtime without imports: 34036.670357227325 seconds ---
"numpy.load",
"matplotlib.pyplot.clf",
"math.floor",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.xticks",
"pandas.Series",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((161, 193), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (174, 193), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2437), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss (regularized)"""'], {}), "('loss (regularized)')\n", (2415, 2437), True, 'import matplotlib.pyplot as plt\n'), ((2438, 2458), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2448, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2532, 2651), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/regularized_loss.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/regularized_loss.pdf', dpi=\n 300, format='pdf', bbox_inches='tight')\n", (2543, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2647, 2656), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2654, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2738, 2746), True, 'import matplotlib.pyplot as plt\n'), ((2747, 2767), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2757, 2767), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2959), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/unregularized_loss.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/unregularized_loss.pdf',\n dpi=300, format='pdf', bbox_inches='tight')\n", (2850, 2959), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2965), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2963, 2965), True, 'import matplotlib.pyplot as plt\n'), ((3024, 3048), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy@2"""'], {}), "('accuracy@2')\n", (3034, 3048), True, 'import matplotlib.pyplot as plt\n'), ((3049, 3069), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3059, 3069), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3295), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/train_acc_2.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/train_acc_2.pdf', dpi=300,\n format='pdf', bbox_inches='tight')\n", (3193, 3295), True, 'import matplotlib.pyplot as plt\n'), ((3292, 3301), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3299, 3301), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3384), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy@5"""'], {}), "('accuracy@5')\n", (3370, 3384), True, 'import matplotlib.pyplot as plt\n'), ((3385, 3405), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3395, 3405), True, 'import matplotlib.pyplot as plt\n'), ((3510, 3623), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/train_acc_5.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/train_acc_5.pdf', dpi=300,\n format='pdf', bbox_inches='tight')\n", (3521, 3623), True, 'import matplotlib.pyplot as plt\n'), ((3620, 3629), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3627, 3629), True, 'import matplotlib.pyplot as plt\n'), ((3691, 3716), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy@10"""'], {}), "('accuracy@10')\n", (3701, 3716), True, 'import matplotlib.pyplot as plt\n'), ((3717, 3737), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (3727, 3737), True, 'import matplotlib.pyplot as plt\n'), ((3850, 3964), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/train_acc_10.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/train_acc_10.pdf', 
dpi=300,\n format='pdf', bbox_inches='tight')\n", (3861, 3964), True, 'import matplotlib.pyplot as plt\n'), ((3961, 3970), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3968, 3970), True, 'import matplotlib.pyplot as plt\n'), ((4032, 4061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""end-point-error"""'], {}), "('end-point-error')\n", (4042, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4062, 4082), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (4072, 4082), True, 'import matplotlib.pyplot as plt\n'), ((4102, 4213), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('../training_results/' + config + '/train_epe.pdf')"], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('../training_results/' + config + '/train_epe.pdf', dpi=300,\n format='pdf', bbox_inches='tight')\n", (4113, 4213), True, 'import matplotlib.pyplot as plt\n'), ((4210, 4219), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4217, 4219), True, 'import matplotlib.pyplot as plt\n'), ((1662, 1671), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1669, 1671), True, 'import matplotlib.pyplot as plt\n'), ((1859, 1894), 'math.floor', 'math.floor', (['(epochs / true_epoch_len)'], {}), '(epochs / true_epoch_len)\n', (1869, 1894), False, 'import math\n'), ((1970, 1987), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (1980, 1987), True, 'import matplotlib.pyplot as plt\n'), ((552, 609), 'numpy.load', 'np.load', (["('../training_results/' + config + '/history.npy')"], {}), "('../training_results/' + config + '/history.npy')\n", (559, 609), True, 'import numpy as np\n'), ((2098, 2158), 'pandas.Series', 'pd.Series', (['data[true_epoch_len * i:true_epoch_len * (i + 1)]'], {}), '(data[true_epoch_len * i:true_epoch_len * (i + 1)])\n', (2107, 2158), True, 'import pandas as pd\n')] |
import os
import time
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from data import set_up_data
from dciknn_cuda.dciknn_cuda import DCI
from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, \
save_latents
from utils import get_cpu_stats_over_ranks
def training_step(H, data_input, target, vae, ema_vae, optimizer, iterate):
    """Run one ELBO optimization step on `vae`.

    Backpropagates the ELBO, clips gradients to ``H.grad_clip``, and applies
    the optimizer (plus an EMA weight update) only when no rank saw a NaN in
    rate/distortion and the gradient norm is below ``H.skip_threshold``
    (-1 disables the threshold).  Returns the rank-reduced stats dict,
    augmented with ``skipped_updates``, ``iter_time`` and ``grad_norm``.
    """
    start = time.time()
    vae.zero_grad()
    stats = vae.forward(data_input, target)
    stats['elbo'].backward()
    grad_norm = torch.nn.utils.clip_grad_norm_(vae.parameters(), H.grad_clip).item()
    # Record NaN indicators as 0/1 flags so they survive the cross-rank reduce.
    distortion_nan_count = torch.isnan(stats['distortion']).sum()
    rate_nan_count = torch.isnan(stats['rate']).sum()
    stats.update(dict(rate_nans=0 if rate_nan_count == 0 else 1,
                      distortion_nans=0 if distortion_nan_count == 0 else 1))
    stats = get_cpu_stats_over_ranks(stats)
    skipped_updates = 1
    # Only update when every rank is NaN-free and the grad norm is acceptable.
    grad_ok = H.skip_threshold == -1 or grad_norm < H.skip_threshold
    if stats['distortion_nans'] == 0 and stats['rate_nans'] == 0 and grad_ok:
        optimizer.step()
        skipped_updates = 0
        update_ema(vae, ema_vae, H.ema_rate)
    stats.update(skipped_updates=skipped_updates,
                 iter_time=time.time() - start,
                 grad_norm=grad_norm)
    return stats
def training_step_imle(H, data_input, latents_eps, eps, u, vae, ema_vae, optimizer, loss_fn):
    """
    This method performs a training step for a batch of data.

    This doesn't use elbo to back propagate - instead expects a proper mapping
    between the data_inputs and their respective nearest neighbors among some
    randomly generated samples; it uses this correspondence to pull the nearest
    neighbors closer to their associated data input, which is specifically the
    IMLE part. See http://www.sfu.ca/~keli/projects/imle/ for more details
    about how IMLE works.

    Parameters: ``latents_eps`` is a list of per-decoder-block latent arrays
    (numpy, converted to CUDA tensors here); ``eps`` / ``u`` are passed through
    to the forward pass as ``log_eps`` / ``log_u`` (the callers in this file
    pass None for both).  Returns the rank-reduced stats dict, augmented with
    ``loss``, ``loss_nans``, ``skipped_updates``, ``iter_time``, ``grad_norm``.
    """
    t0 = time.time()
    vae.zero_grad()
    # Move the selected nearest-neighbor latents onto the GPU.
    latents_eps = [torch.from_numpy(z).cuda() for z in latents_eps]
    # eps = torch.from_numpy(eps).cuda()
    # u = torch.from_numpy(u).cuda()
    stats = vae.forward(data_input.shape[0], data_input, eps=latents_eps, log_eps=eps, log_u=u)
    # 'res' is the reconstruction; pop it so it is not reduced across ranks.
    res = stats['res']
    stats.pop('res', None)
    # Reconstruction loss, normalized by 2 * batch size.
    loss = loss_fn(res, data_input.float().cuda()) / float(2 * data_input.size(0))
    loss.backward()
    grad_norm = torch.nn.utils.clip_grad_norm_(vae.parameters(), H.grad_clip).item()
    # Record a 0/1 NaN flag so it survives the cross-rank reduce.
    loss_nan = torch.isnan(loss).sum()
    stats.update(dict(loss=loss))
    stats.update(dict(loss_nans=0 if loss_nan == 0 else 1))
    stats = get_cpu_stats_over_ranks(stats)
    skipped_updates = 1
    # only update if no rank has a nan and if the grad norm is below a specific threshold
    if stats['loss_nans'] == 0 and (H.skip_threshold == -1 or grad_norm < H.skip_threshold):
        optimizer.step()
        skipped_updates = 0
        update_ema(vae, ema_vae, H.ema_rate)
    t1 = time.time()
    stats.update(skipped_updates=skipped_updates, iter_time=t1 - t0, grad_norm=grad_norm)
    return stats
def eval_step(data_input, target, ema_vae):
    """Forward one batch through the EMA model without gradients.

    Returns the batch statistics reduced across all ranks.
    """
    with torch.no_grad():
        batch_stats = ema_vae.forward(data_input, target)
    return get_cpu_stats_over_ranks(batch_stats)
def get_sample_for_visualization(data, preprocess_fn, num, dataset):
    """Fetch the first batch of `num` examples for visualization.

    Parameters
    ----------
    data : torch Dataset
        Source dataset; only its first batch is consumed.
    preprocess_fn : callable
        Maps a raw batch to a tuple whose first element is the network input.
    num : int
        Number of examples to take.
    dataset : str
        Dataset name; 'ffhq_1024' images are converted from float CHW to
        uint8 HWC for display, everything else is returned as-is.

    Returns
    -------
    (orig_image, preprocessed) : the display-ready originals and the
    preprocessed network input.
    """
    # next(iter(...)) fetches exactly one batch.  The previous `for ...: break`
    # idiom left `x` undefined (NameError) when the dataset was empty; now an
    # empty dataset raises StopIteration at the point of failure instead.
    x = next(iter(DataLoader(data, batch_size=num)))
    if dataset == 'ffhq_1024':
        # ffhq_1024 batches are float CHW; scale to [0, 255] uint8 HWC.
        orig_image = (x[0] * 255.0).to(torch.uint8).permute(0, 2, 3, 1)
    else:
        orig_image = x[0]
    preprocessed = preprocess_fn(x)[0]
    return orig_image, preprocessed
def train_loop_imle(H, data_train, data_valid, preprocess_fn, vae, ema_vae, logprint):
    """IMLE training loop.

    Every ``H.imle_staleness`` epochs, draws a database of unconditional
    samples from the EMA model, finds (via DCI) the nearest sample to each
    training example, and records the latents that produced it.  Training then
    pulls each nearest sample toward its example with an MSE loss
    (see training_step_imle).  Latents are cached to / restored from
    ``H.restore_latent_path`` so a restarted run can skip regeneration.
    """
    optimizer, scheduler, cur_eval_loss, iterate, starting_epoch = load_opt(H, vae, logprint)
    loss_fn = torch.nn.MSELoss(reduction='sum').cuda()
    # train_sampler = DistributedSampler(data_train, num_replicas=H.mpi_size, rank=H.rank)
    viz_batch_original, viz_batch_processed = get_sample_for_visualization(data_valid, preprocess_fn,
                                                                          H.num_images_visualize, H.dataset)
    # Extra logging/snapshot points early in training: iteration 1 and powers of two.
    early_evals = set([1] + [2 ** exp for exp in range(3, 14)])
    stats = []
    iters_since_starting = 0
    H.ema_rate = torch.as_tensor(H.ema_rate)
    # Per-training-example state: distance to the current nearest sample, and
    # the per-decoder-block latents that generated that sample.
    selected_dists = None
    selected_latent_rnds = None
    # selected_logistic_eps_rnds = None
    # selected_logistic_u_rnds = None
    for epoch in range(starting_epoch, H.num_epochs):
        # for now, we will draw random samples in each epoch and find nearest neighbor of each of test data.
        # it should be made more efficient for sure.
        # train_sampler.set_epoch(epoch)
        if epoch == starting_epoch or epoch % H.imle_staleness == 0:
            last_generated_epoch = epoch - (epoch % H.imle_staleness)
            # NOTE(review): the format string has 3 placeholders but 4 args;
            # the trailing `starting_epoch` is silently ignored by str.format.
            cur_z_data_file = '{}/latent/{}-{}.npy'.format(H.restore_latent_path, last_generated_epoch, 0, starting_epoch)
            print(cur_z_data_file, os.path.isfile(cur_z_data_file), os.path.exists(cur_z_data_file))
            if os.path.isfile(cur_z_data_file):
                # Cached latents exist for this epoch: load one array per decoder block.
                print('Loading latents from disk.')
                selected_latent_rnds = []
                for ind in range(len(vae.module.decoder.dec_blocks)):
                    cur_z_data_file = '{}/latent/{}-{}.npy'.format(H.restore_latent_path, last_generated_epoch, ind)
                    selected_latent_rnds.append(np.load(cur_z_data_file))
                    print('loaded {}'.format(ind))
                print("Loaded database of selected z's; skipping sample generation.")
            else:
                t0 = time.time()
                print('Calculating new samples and NN')
                num_samples = len(data_train) * H.sample_size_factor
                sample_db_size = H.sample_db_size
                # Free the previous epoch's state before the large re-allocation.
                if selected_dists is not None:
                    del selected_dists
                if selected_latent_rnds is not None:
                    for z in selected_latent_rnds:
                        del z
                selected_dists = np.tile(np.inf, (len(data_train)))
                selected_latent_rnds = [np.empty((len(data_train), bl.zdim, bl.base, bl.base), dtype=np.float32) for bl in
                                        vae.module.decoder.dec_blocks]
                print("Memory allocated!")
                # selected_logistic_eps_rnds = np.empty(
                #     (len(data_train), H.image_size, H.image_size, H.num_mixtures), dtype=np.float32)
                # selected_logistic_u_rnds = np.empty((len(data_train), H.image_size, H.image_size, 3),
                #                                     dtype=np.float32)
                # Generate the sample database in chunks of sample_db_size so only
                # one chunk of samples is ever resident at a time.
                for i in range(num_samples // sample_db_size):
                    print("Doing a batch of new samples...")
                    samples = np.empty((sample_db_size, H.image_size, H.image_size, 3))
                    latent_rnds = [np.empty((sample_db_size, bl.zdim, bl.base, bl.base), dtype=np.float32) for bl in
                                   vae.module.decoder.dec_blocks]
                    # logistic_eps_rnds = np.empty(
                    #     (sample_db_size, H.image_size, H.image_size, H.num_mixtures), dtype=np.float32)
                    # logistic_u_rnds = np.empty((sample_db_size, H.image_size, H.image_size, 3),
                    #                            dtype=np.float32)
                    for j in range(sample_db_size // H.n_batch):
                        # TODO set appropriate t
                        cur, sts = ema_vae.forward_uncond_imle(H.n_batch)
                        batch_slice = slice(j * H.n_batch, (j + 1) * H.n_batch)
                        samples[batch_slice] = cur
                        # logistic_eps_rnds[batch_slice] = logistic_eps.cpu().numpy()
                        # logistic_u_rnds[batch_slice] = logistic_u.cpu().numpy()
                        # Keep the per-block latents that generated each sample.
                        for ind, l in enumerate(sts):
                            latent_rnds[ind][batch_slice] = l['eps'].cpu().numpy()
                    # Flatten samples to vectors and (re)build the DCI index on them.
                    samples_reshape = torch.from_numpy(np.array(np.reshape(samples,
                                                                       (samples.shape[0], -1)))).cuda().float()
                    if not vae.module.dci_db:
                        vae.module.dci_db = DCI(samples_reshape.shape[1], num_comp_indices=H.num_comp_indices,
                                                num_simp_indices=H.num_simp_indices)
                    vae.module.dci_db.reset()
                    vae.module.dci_db.add(samples_reshape)
                    # For every training example, query its nearest sample in this
                    # chunk and keep it only if it beats the best distance so far.
                    for ind, x in enumerate(DataLoader(data_train, batch_size=H.n_batch_NN, pin_memory=True)):
                        x = x[0]
                        cur_batch_data_flat = x.reshape(x.shape[0], -1).float().cuda()
                        nearest_indices, nearest_dists = vae.module.dci_db.query(cur_batch_data_flat, num_neighbours=1)
                        nearest_indices = np.array(nearest_indices.cpu().int())[:, 0]
                        nearest_dists = np.array(nearest_dists.cpu())[:, 0]
                        batch_slice = slice(ind * H.n_batch_NN, ind * H.n_batch_NN + x.size()[0])
                        to_update = np.nonzero(nearest_dists < selected_dists[batch_slice])[0]
                        selected_dists[ind * H.n_batch_NN + to_update] = nearest_dists[to_update]
                        for k in range(len(selected_latent_rnds)):
                            selected_latent_rnds[k][ind * H.n_batch_NN + to_update] =\
                                latent_rnds[k][nearest_indices[to_update]]
                        del cur_batch_data_flat
                        # selected_logistic_eps_rnds[ind * H.n_batch_NN + to_update] = logistic_eps_rnds[
                        #     nearest_indices[to_update]]
                        # selected_logistic_u_rnds[ind * H.n_batch_NN + to_update] = logistic_u_rnds[nearest_indices[to_update]]
                        print("NN calculated for this batch {}".format(i))
                    del samples
                print("Samples and NN are calculated, time: {}, dists ok: {}".format(time.time() - t0,
                                                                                    np.isinf(selected_dists).any()))
                save_latents(H, epoch, selected_latent_rnds)
        # Optimize: pull each example's selected nearest-sample latents toward it.
        for ind, x in enumerate(DataLoader(data_train, batch_size=H.n_batch, pin_memory=True, drop_last=True)):
            _, target = preprocess_fn(x)
            batch_slice = slice(ind * H.n_batch, ind * H.n_batch + x[0].size()[0])
            latents = [z[batch_slice] for z in selected_latent_rnds]
            stat = training_step_imle(H, x[0], latents, None,
                                      None, vae, ema_vae, optimizer, loss_fn)
            stats.append(stat)
            scheduler.step()
            if iterate % H.iters_per_print == 0 or iters_since_starting in early_evals:
                logprint(model=H.desc, type='train_loss', lr=scheduler.get_last_lr()[0], epoch=epoch, step=iterate,
                         **accumulate_stats(stats, H.iters_per_print))
            if iterate % H.iters_per_images == 0 or (
                    iters_since_starting in early_evals and H.dataset != 'ffhq_1024') and H.rank == 0:
                generate_images(H, viz_batch_processed.shape, ema_vae,
                                f'{H.save_dir}/samples-{iterate}.png', logprint)
            iterate += 1
            iters_since_starting += 1
            if iterate % H.iters_per_save == 0 and H.rank == 0:
                logprint(model=H.desc, type='train_loss', epoch=epoch, step=iterate,
                         **accumulate_stats(stats, H.iters_per_print))
                fp = os.path.join(H.save_dir, 'latest')
                logprint(f'Saving model@ {iterate} to {fp}')
                save_model(fp, vae, ema_vae, optimizer, H)
            if iterate % H.iters_per_ckpt == 0 and H.rank == 0:
                save_model(os.path.join(H.save_dir, f'iter-{iterate}'), vae, ema_vae, optimizer, H)
def train_loop(H, data_train, data_valid, preprocess_fn, vae, ema_vae, logprint):
    """Standard (ELBO-based) distributed training loop.

    Iterates the training set with a DistributedSampler, runs training_step
    per batch, and periodically logs stats, writes sample images, saves the
    latest/checkpoint models, and evaluates on the validation split.
    """
    optimizer, scheduler, cur_eval_loss, iterate, starting_epoch = load_opt(H, vae, logprint)
    train_sampler = DistributedSampler(data_train, num_replicas=H.mpi_size, rank=H.rank)
    viz_batch_original, viz_batch_processed = get_sample_for_visualization(data_valid, preprocess_fn,
                                                                          H.num_images_visualize, H.dataset)
    # Extra logging/snapshot points early in training: iteration 1 and powers of two.
    early_evals = set([1] + [2 ** exp for exp in range(3, 14)])
    stats = []
    iters_since_starting = 0
    H.ema_rate = torch.as_tensor(H.ema_rate).cuda()
    for epoch in range(starting_epoch, H.num_epochs):
        # Reshuffle the distributed shards each epoch.
        train_sampler.set_epoch(epoch)
        for x in DataLoader(data_train, batch_size=H.n_batch, drop_last=True, pin_memory=True, sampler=train_sampler):
            data_input, target = preprocess_fn(x)
            training_stats = training_step(H, data_input, target, vae, ema_vae, optimizer, iterate)
            stats.append(training_stats)
            scheduler.step()
            if iterate % H.iters_per_print == 0 or iters_since_starting in early_evals:
                logprint(model=H.desc, type='train_loss', lr=scheduler.get_last_lr()[0], epoch=epoch, step=iterate,
                         **accumulate_stats(stats, H.iters_per_print))
            if iterate % H.iters_per_images == 0 or (
                    iters_since_starting in early_evals and H.dataset != 'ffhq_1024') and H.rank == 0:
                write_images(H, ema_vae, viz_batch_original, viz_batch_processed, f'{H.save_dir}/samples-{iterate}.png',
                             logprint)
            iterate += 1
            iters_since_starting += 1
            if iterate % H.iters_per_save == 0 and H.rank == 0:
                # Only persist 'latest' when the most recent ELBO is finite.
                if np.isfinite(stats[-1]['elbo']):
                    logprint(model=H.desc, type='train_loss', epoch=epoch, step=iterate,
                             **accumulate_stats(stats, H.iters_per_print))
                    fp = os.path.join(H.save_dir, 'latest')
                    logprint(f'Saving model@ {iterate} to {fp}')
                    save_model(fp, vae, ema_vae, optimizer, H)
            if iterate % H.iters_per_ckpt == 0 and H.rank == 0:
                save_model(os.path.join(H.save_dir, f'iter-{iterate}'), vae, ema_vae, optimizer, H)
        if epoch % H.epochs_per_eval == 0:
            valid_stats = evaluate(H, ema_vae, data_valid, preprocess_fn)
            logprint(model=H.desc, type='eval_loss', epoch=epoch, step=iterate, **valid_stats)
def evaluate(H, ema_vae, data_valid, preprocess_fn):
    """Evaluate the EMA model on the validation split.

    Returns a dict with the batch count, the mean ELBO over finite batches
    (``filtered_elbo``), and the plain mean of every per-batch statistic.
    """
    sampler = DistributedSampler(data_valid, num_replicas=H.mpi_size, rank=H.rank)
    loader = DataLoader(data_valid, batch_size=H.n_batch, drop_last=True, pin_memory=True, sampler=sampler)
    batch_stats = []
    for batch in loader:
        data_input, target = preprocess_fn(batch)
        batch_stats.append(eval_step(data_input, target, ema_vae))
    elbos = [s['elbo'] for s in batch_stats]
    # Average the ELBO only over batches where it came out finite.
    finite_elbos = np.array(elbos)[np.isfinite(elbos)]
    per_key_means = {k: np.mean([s[k] for s in batch_stats]) for k in batch_stats[-1]}
    return dict(n_batches=len(elbos), filtered_elbo=np.mean(finite_elbos), **per_key_means)
def write_images(H, ema_vae, viz_batch_original, viz_batch_processed, fname, logprint):
    """Write a visualization grid to `fname`.

    Rows: the original images, then reconstructions with an increasing number
    of fixed posterior latents, then unconditional samples at several
    temperatures.  Columns: the batch.

    NOTE(review): the original signature declared an extra, unused ``vae``
    parameter that the only call site in this file (train_loop, 6 positional
    args) does not supply — calling it raised TypeError.  The parameter has
    been removed so the signature matches the call.
    """
    zs = [s['z'].cuda() for s in ema_vae.forward_get_latents(viz_batch_processed)]
    batches = [viz_batch_original.numpy()]
    mb = viz_batch_processed.shape[0]
    # Evenly spaced latent-depth cut points, excluding the two endpoints.
    lv_points = np.floor(np.linspace(0, 1, H.num_variables_visualize + 2) * len(zs)).astype(int)[1:-1]
    for i in lv_points:
        batches.append(ema_vae.forward_samples_set_latents(mb, zs[:i], t=0.1))
    for t in [1.0, 0.9, 0.8, 0.7][:H.num_temperatures_visualize]:
        batches.append(ema_vae.forward_uncond_samples(mb, t=t))
    n_rows = len(batches)
    # (rows*mb, H, W, 3) -> one tiled image of shape (rows*H, mb*W, 3).
    im = np.concatenate(batches, axis=0).reshape((n_rows, mb, *viz_batch_processed.shape[1:])).transpose(
        [0, 2, 1, 3, 4]).reshape([n_rows * viz_batch_processed.shape[1], mb * viz_batch_processed.shape[2], 3])
    logprint(f'printing samples to {fname}')
    imageio.imwrite(fname, im)
def generate_images(H, shape, ema_vae, fname, logprint):
    """Write a grid of IMLE samples to `fname`.

    Rows: an unconditional sample batch, then sample batches with an
    increasing number of fixed latents, then unconditional batches at several
    temperatures.  `shape` is (batch, H, W, C); columns are the batch.
    """
    mb = shape[0]
    batches = []
    cur, stats = ema_vae.forward_uncond_imle(mb, get_latents=True)
    batches.append(cur)
    zs = [s['z'].cuda() for s in stats]
    # Evenly spaced latent-depth cut points, excluding the two endpoints.
    lv_points = np.floor(np.linspace(0, 1, H.num_variables_visualize + 2) * len(zs)).astype(int)[1:-1]
    for i in lv_points:
        batches.append(ema_vae.forward_samples_set_latents(mb, zs[:i], t=0.1))
    for t in [1.0, 0.9, 0.8, 0.7][:H.num_temperatures_visualize]:
        cur, _ = ema_vae.forward_uncond_imle(mb, t=t)
        batches.append(cur)
    n_rows = len(batches)
    # (rows*mb, H, W, 3) -> one tiled uint8 image of shape (rows*H, mb*W, 3).
    im = np.concatenate(batches, axis=0).reshape((n_rows, mb, *shape[1:])).transpose([0, 2, 1, 3, 4]).reshape(
        [n_rows * shape[1], mb * shape[2], 3]).astype(np.uint8)
    logprint(f'printing samples to {fname}')
    imageio.imwrite(fname, im)
def run_test_eval(H, ema_vae, data_test, preprocess_fn, logprint):
    """Evaluate the EMA model on the test split and print/log every statistic."""
    print('evaluating')
    test_stats = evaluate(H, ema_vae, data_test, preprocess_fn)
    print('test results')
    for key, value in test_stats.items():
        print(key, value)
    logprint(type='test_loss', **test_stats)
def main():
    """Entry point: set up hyperparams/data/models, then either draw sample
    images (``--test_eval``) or run IMLE training."""
    # torch.autograd.set_detect_anomaly(True)
    H, logprint = set_up_hyperparams()
    H, data_train, data_valid_or_test, preprocess_fn = set_up_data(H)
    vae, ema_vae = load_vaes(H, logprint)
    if not H.test_eval:
        # train_loop(H, data_train, data_valid_or_test, preprocess_fn, vae, ema_vae, logprint)
        train_loop_imle(H, data_train, data_valid_or_test, preprocess_fn, vae, ema_vae, logprint)
        return
    # Test mode: draw unconditional samples for visual inspection.
    # run_test_eval(H, ema_vae, data_valid_or_test, preprocess_fn, logprint)
    _, viz_batch_processed = get_sample_for_visualization(
        data_valid_or_test, preprocess_fn, H.num_images_visualize, H.dataset)
    generate_images(H, viz_batch_processed.shape, ema_vae,
                    f'{H.save_dir}/samples-mine.png', logprint)
# Script entry point.
if __name__ == "__main__":
    main()
| [
"numpy.load",
"train_helpers.set_up_hyperparams",
"train_helpers.load_vaes",
"numpy.empty",
"train_helpers.update_ema",
"os.path.isfile",
"numpy.mean",
"torch.no_grad",
"os.path.join",
"torch.isnan",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"os.path.exists",
"numpy.isfinite",
"... | [((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((921, 952), 'utils.get_cpu_stats_over_ranks', 'get_cpu_stats_over_ranks', (['stats'], {}), '(stats)\n', (945, 952), False, 'from utils import get_cpu_stats_over_ranks\n'), ((1303, 1314), 'time.time', 'time.time', ([], {}), '()\n', (1312, 1314), False, 'import time\n'), ((2008, 2019), 'time.time', 'time.time', ([], {}), '()\n', (2017, 2019), False, 'import time\n'), ((2666, 2697), 'utils.get_cpu_stats_over_ranks', 'get_cpu_stats_over_ranks', (['stats'], {}), '(stats)\n', (2690, 2697), False, 'from utils import get_cpu_stats_over_ranks\n'), ((3014, 3025), 'time.time', 'time.time', ([], {}), '()\n', (3023, 3025), False, 'import time\n'), ((3269, 3300), 'utils.get_cpu_stats_over_ranks', 'get_cpu_stats_over_ranks', (['stats'], {}), '(stats)\n', (3293, 3300), False, 'from utils import get_cpu_stats_over_ranks\n'), ((3402, 3434), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'num'}), '(data, batch_size=num)\n', (3412, 3434), False, 'from torch.utils.data import DataLoader\n'), ((3785, 3811), 'train_helpers.load_opt', 'load_opt', (['H', 'vae', 'logprint'], {}), '(H, vae, logprint)\n', (3793, 3811), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((4296, 4323), 'torch.as_tensor', 'torch.as_tensor', (['H.ema_rate'], {}), '(H.ema_rate)\n', (4311, 4323), False, 'import torch\n'), ((12233, 12259), 'train_helpers.load_opt', 'load_opt', (['H', 'vae', 'logprint'], {}), '(H, vae, logprint)\n', (12241, 12259), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((12280, 12348), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['data_train'], {'num_replicas': 'H.mpi_size', 'rank': 'H.rank'}), '(data_train, num_replicas=H.mpi_size, rank=H.rank)\n', (12298, 12348), 
False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((14751, 14819), 'torch.utils.data.distributed.DistributedSampler', 'DistributedSampler', (['data_valid'], {'num_replicas': 'H.mpi_size', 'rank': 'H.rank'}), '(data_valid, num_replicas=H.mpi_size, rank=H.rank)\n', (14769, 14819), False, 'from torch.utils.data.distributed import DistributedSampler\n'), ((14833, 14938), 'torch.utils.data.DataLoader', 'DataLoader', (['data_valid'], {'batch_size': 'H.n_batch', 'drop_last': '(True)', 'pin_memory': '(True)', 'sampler': 'valid_sampler'}), '(data_valid, batch_size=H.n_batch, drop_last=True, pin_memory=\n True, sampler=valid_sampler)\n', (14843, 14938), False, 'from torch.utils.data import DataLoader\n'), ((16202, 16228), 'imageio.imwrite', 'imageio.imwrite', (['fname', 'im'], {}), '(fname, im)\n', (16217, 16228), False, 'import imageio\n'), ((17060, 17086), 'imageio.imwrite', 'imageio.imwrite', (['fname', 'im'], {}), '(fname, im)\n', (17075, 17086), False, 'import imageio\n'), ((17430, 17450), 'train_helpers.set_up_hyperparams', 'set_up_hyperparams', ([], {}), '()\n', (17448, 17450), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((17506, 17520), 'data.set_up_data', 'set_up_data', (['H'], {}), '(H)\n', (17517, 17520), False, 'from data import set_up_data\n'), ((17540, 17562), 'train_helpers.load_vaes', 'load_vaes', (['H', 'logprint'], {}), '(H, logprint)\n', (17549, 17562), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((1256, 1292), 'train_helpers.update_ema', 'update_ema', (['vae', 'ema_vae', 'H.ema_rate'], {}), '(vae, ema_vae, H.ema_rate)\n', (1266, 1292), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((2967, 3003), 'train_helpers.update_ema', 'update_ema', (['vae', 'ema_vae', 
'H.ema_rate'], {}), '(vae, ema_vae, H.ema_rate)\n', (2977, 3003), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((3188, 3203), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3201, 3203), False, 'import torch\n'), ((12831, 12936), 'torch.utils.data.DataLoader', 'DataLoader', (['data_train'], {'batch_size': 'H.n_batch', 'drop_last': '(True)', 'pin_memory': '(True)', 'sampler': 'train_sampler'}), '(data_train, batch_size=H.n_batch, drop_last=True, pin_memory=\n True, sampler=train_sampler)\n', (12841, 12936), False, 'from torch.utils.data import DataLoader\n'), ((15106, 15120), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (15114, 15120), True, 'import numpy as np\n'), ((15121, 15138), 'numpy.isfinite', 'np.isfinite', (['vals'], {}), '(vals)\n', (15132, 15138), True, 'import numpy as np\n'), ((710, 742), 'torch.isnan', 'torch.isnan', (["stats['distortion']"], {}), "(stats['distortion'])\n", (721, 742), False, 'import torch\n'), ((765, 791), 'torch.isnan', 'torch.isnan', (["stats['rate']"], {}), "(stats['rate'])\n", (776, 791), False, 'import torch\n'), ((2536, 2553), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (2547, 2553), False, 'import torch\n'), ((3826, 3859), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (3842, 3859), False, 'import torch\n'), ((5096, 5127), 'os.path.isfile', 'os.path.isfile', (['cur_z_data_file'], {}), '(cur_z_data_file)\n', (5110, 5127), False, 'import os\n'), ((10398, 10475), 'torch.utils.data.DataLoader', 'DataLoader', (['data_train'], {'batch_size': 'H.n_batch', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(data_train, batch_size=H.n_batch, pin_memory=True, drop_last=True)\n', (10408, 10475), False, 'from torch.utils.data import DataLoader\n'), ((12686, 12713), 'torch.as_tensor', 'torch.as_tensor', (['H.ema_rate'], {}), '(H.ema_rate)\n', (12701, 12713), False, 'import 
torch\n'), ((15192, 15208), 'numpy.mean', 'np.mean', (['finites'], {}), '(finites)\n', (15199, 15208), True, 'import numpy as np\n'), ((2059, 2078), 'torch.from_numpy', 'torch.from_numpy', (['z'], {}), '(z)\n', (2075, 2078), False, 'import torch\n'), ((5015, 5046), 'os.path.isfile', 'os.path.isfile', (['cur_z_data_file'], {}), '(cur_z_data_file)\n', (5029, 5046), False, 'import os\n'), ((5048, 5079), 'os.path.exists', 'os.path.exists', (['cur_z_data_file'], {}), '(cur_z_data_file)\n', (5062, 5079), False, 'import os\n'), ((5661, 5672), 'time.time', 'time.time', ([], {}), '()\n', (5670, 5672), False, 'import time\n'), ((10320, 10364), 'train_helpers.save_latents', 'save_latents', (['H', 'epoch', 'selected_latent_rnds'], {}), '(H, epoch, selected_latent_rnds)\n', (10332, 10364), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((11762, 11796), 'os.path.join', 'os.path.join', (['H.save_dir', '"""latest"""'], {}), "(H.save_dir, 'latest')\n", (11774, 11796), False, 'import os\n'), ((11874, 11916), 'train_helpers.save_model', 'save_model', (['fp', 'vae', 'ema_vae', 'optimizer', 'H'], {}), '(fp, vae, ema_vae, optimizer, H)\n', (11884, 11916), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((13893, 13923), 'numpy.isfinite', 'np.isfinite', (["stats[-1]['elbo']"], {}), "(stats[-1]['elbo'])\n", (13904, 13923), True, 'import numpy as np\n'), ((15233, 15269), 'numpy.mean', 'np.mean', (['[a[k] for a in stats_valid]'], {}), '([a[k] for a in stats_valid])\n', (15240, 15269), True, 'import numpy as np\n'), ((6866, 6923), 'numpy.empty', 'np.empty', (['(sample_db_size, H.image_size, H.image_size, 3)'], {}), '((sample_db_size, H.image_size, H.image_size, 3))\n', (6874, 6923), True, 'import numpy as np\n'), ((12009, 12052), 'os.path.join', 'os.path.join', (['H.save_dir', 'f"""iter-{iterate}"""'], {}), "(H.save_dir, 
f'iter-{iterate}')\n", (12021, 12052), False, 'import os\n'), ((14114, 14148), 'os.path.join', 'os.path.join', (['H.save_dir', '"""latest"""'], {}), "(H.save_dir, 'latest')\n", (14126, 14148), False, 'import os\n'), ((14234, 14276), 'train_helpers.save_model', 'save_model', (['fp', 'vae', 'ema_vae', 'optimizer', 'H'], {}), '(fp, vae, ema_vae, optimizer, H)\n', (14244, 14276), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((14369, 14412), 'os.path.join', 'os.path.join', (['H.save_dir', 'f"""iter-{iterate}"""'], {}), "(H.save_dir, f'iter-{iterate}')\n", (14381, 14412), False, 'import os\n'), ((5458, 5482), 'numpy.load', 'np.load', (['cur_z_data_file'], {}), '(cur_z_data_file)\n', (5465, 5482), True, 'import numpy as np\n'), ((6959, 7030), 'numpy.empty', 'np.empty', (['(sample_db_size, bl.zdim, bl.base, bl.base)'], {'dtype': 'np.float32'}), '((sample_db_size, bl.zdim, bl.base, bl.base), dtype=np.float32)\n', (6967, 7030), True, 'import numpy as np\n'), ((8346, 8453), 'dciknn_cuda.dciknn_cuda.DCI', 'DCI', (['samples_reshape.shape[1]'], {'num_comp_indices': 'H.num_comp_indices', 'num_simp_indices': 'H.num_simp_indices'}), '(samples_reshape.shape[1], num_comp_indices=H.num_comp_indices,\n num_simp_indices=H.num_simp_indices)\n', (8349, 8453), False, 'from dciknn_cuda.dciknn_cuda import DCI\n'), ((8648, 8712), 'torch.utils.data.DataLoader', 'DataLoader', (['data_train'], {'batch_size': 'H.n_batch_NN', 'pin_memory': '(True)'}), '(data_train, batch_size=H.n_batch_NN, pin_memory=True)\n', (8658, 8712), False, 'from torch.utils.data import DataLoader\n'), ((11103, 11145), 'train_helpers.accumulate_stats', 'accumulate_stats', (['stats', 'H.iters_per_print'], {}), '(stats, H.iters_per_print)\n', (11119, 11145), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((11697, 11739), 
'train_helpers.accumulate_stats', 'accumulate_stats', (['stats', 'H.iters_per_print'], {}), '(stats, H.iters_per_print)\n', (11713, 11739), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((13384, 13426), 'train_helpers.accumulate_stats', 'accumulate_stats', (['stats', 'H.iters_per_print'], {}), '(stats, H.iters_per_print)\n', (13400, 13426), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((15598, 15646), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(H.num_variables_visualize + 2)'], {}), '(0, 1, H.num_variables_visualize + 2)\n', (15609, 15646), True, 'import numpy as np\n'), ((16480, 16528), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(H.num_variables_visualize + 2)'], {}), '(0, 1, H.num_variables_visualize + 2)\n', (16491, 16528), True, 'import numpy as np\n'), ((9252, 9307), 'numpy.nonzero', 'np.nonzero', (['(nearest_dists < selected_dists[batch_slice])'], {}), '(nearest_dists < selected_dists[batch_slice])\n', (9262, 9307), True, 'import numpy as np\n'), ((10168, 10179), 'time.time', 'time.time', ([], {}), '()\n', (10177, 10179), False, 'import time\n'), ((14045, 14087), 'train_helpers.accumulate_stats', 'accumulate_stats', (['stats', 'H.iters_per_print'], {}), '(stats, H.iters_per_print)\n', (14061, 14087), False, 'from train_helpers import set_up_hyperparams, load_vaes, load_opt, accumulate_stats, save_model, update_ema, save_latents\n'), ((15944, 15975), 'numpy.concatenate', 'np.concatenate', (['batches'], {'axis': '(0)'}), '(batches, axis=0)\n', (15958, 15975), True, 'import numpy as np\n'), ((10271, 10295), 'numpy.isinf', 'np.isinf', (['selected_dists'], {}), '(selected_dists)\n', (10279, 10295), True, 'import numpy as np\n'), ((16845, 16876), 'numpy.concatenate', 'np.concatenate', (['batches'], {'axis': '(0)'}), '(batches, axis=0)\n', (16859, 16876), True, 'import numpy 
as np\n'), ((8120, 8163), 'numpy.reshape', 'np.reshape', (['samples', '(samples.shape[0], -1)'], {}), '(samples, (samples.shape[0], -1))\n', (8130, 8163), True, 'import numpy as np\n')] |
import numpy as np
import collections
import copy
class VectorVar:
    """Affine view of a named vector variable, representing cf * (W @ var + b).

    Attributes:
        name: identifier of the underlying variable.
        dim:  dimensionality of the vector.
        W:    weight matrix applied to the variable (identity by default).
        b:    bias vector added after the matrix product (zeros by default).
        cf:   scalar coefficient of the whole expression.
    """

    # W is weights matrix, b is bias vector, dim is dimensions of vector
    def __init__(self, name, dim, W=None, b=None, cf=1):
        self.name = name
        self.dim = dim
        # Fall back to the identity transform / zero offset when not supplied.
        self.W = np.identity(dim) if W is None else W
        self.b = np.zeros(dim) if b is None else b
        self.cf = cf
class Term:
    """One additive term of a condition: cf * (sum of affine vector vars), optionally |...|.

    Attributes:
        varArray:  the original list of VectorVar-like objects.
        vars:      mapping from variable name to variable (names must be unique).
        cf:        scalar coefficient of the whole term.
        isAbs:     whether the term is wrapped in absolute value bars.
        caseConds: list of case-condition triples [W, var_name, b], each meaning
                   W @ var + b >= 0.
        name:      identifier of the term.
    """

    # vars is a list of VectorVars, cfs is the list of corresponding coefficients, and isAbs is whether term is in absolute value
    # caseConds is set of case conditions, which are triples, first element is W, second element is var, third element is b
    def __init__(self, name, vars, isAbs=False, cf=1, caseConds=None):
        self.varArray = vars
        self.vars = {var.name: var for var in vars}
        self.cf = cf
        self.isAbs = isAbs
        # BUGFIX: the original default was the mutable `caseConds=[]`, which is
        # shared by every Term constructed without the argument.  relu() and
        # maxpoolHelper() append to this list in place, so case conditions
        # leaked between unrelated Term instances.  A fresh list per instance
        # restores the intended semantics; passing caseConds explicitly is
        # unchanged.
        self.caseConds = [] if caseConds is None else caseConds
        self.name = name

    def copy(self):
        """Return a copy with cloned weight/bias arrays and deep-copied case conditions."""
        vars = [VectorVar(var.name, var.dim, np.copy(var.W), np.copy(var.b), var.cf) for var in self.varArray]
        return Term(self.name, vars, self.isAbs, self.cf, copy.deepcopy(self.caseConds))
class Condition:
    """A conjunction of Terms, indexed by term name for O(1) lookup."""

    def __init__(self, terms):
        # Build the name -> term index once, up front.
        self.termNameMap = dict((term.name, term) for term in terms)
def condsToString(conds):
    """Pretty-print a disjunction of Conditions.

    Each condition is rendered as a sum of (optionally absolute-valued)
    affine terms followed by '>= 0' and its terms' case conditions; the
    disjuncts are separated by '\\nv ' (logical or) and printed once at the end.
    """
    # BUGFIX: the accumulator must be initialized once, before the disjunct
    # loop.  The original reset `s = ''` inside the loop, so the `'\nv '`
    # separator logic never fired usefully and only the *last* condition's
    # text survived to the final print.
    s = ''
    for disj, cond in enumerate(conds):
        if disj != 0:
            s += '\nv '
        termConds = ''
        for i, term in enumerate(cond.termNameMap.values()):
            # A zero coefficient truncates the remainder of this condition.
            if term.cf == 0:
                break
            if i != 0:
                s += ' + '
            if term.cf != 1:
                s += str(term.cf)
            # Absolute-value terms are delimited by '|...|', others by '(...)'.
            s += '|' if term.isAbs else '('
            for j, var in enumerate(term.vars):
                if j != 0:
                    s += ' + '
                if term.vars[var].cf != 1:
                    s += str(term.vars[var].cf)
                s += '('
                s += str(term.vars[var].W) + term.vars[var].name + ' + ' + str(term.vars[var].b)
                s += ')'
            s += '|' if term.isAbs else ')'
            # Render the case conditions; entries come in triples, so a '+'
            # separator is inserted except before each triple's first element.
            for varCond in term.caseConds:
                termConds += ' ^ ('
                for l, c in enumerate(varCond):
                    if l != 0 and l % 3 != 1:
                        termConds += ' + '
                    termConds += str(c)
                termConds += ' >= 0)'
        s += ' >= 0 '
        s += termConds
    print(s)
# def matmul(conds, termName, var, W):
# for cond in conds:
# cond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(W)
# cond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]
#for each of these transformations, make sure var dim changes (not completed)
def matmulTerm(conds, termName, W):
    """Right-multiply matrix W into every variable of term `termName`, in every condition.

    Models pushing the term backwards through a linear layer: each affine view
    W_v @ x + b_v becomes (W_v @ W) @ x + b_v.  The W part of every case
    condition attached to the same variable is transformed identically.

    NOTE(review): `dim` is reset to the row count of the product, which a
    right-multiplication cannot change — presumably the column (input)
    dimension was intended; confirm against callers.
    """
    for cond in conds:
        for var in cond.termNameMap[termName].vars:
            cond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(W)
            cond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]
            # Apply the same transform to case conditions that reference this variable.
            for i in range(len(cond.termNameMap[termName].caseConds)):
                if cond.termNameMap[termName].caseConds[i][1] == var:
                    cond.termNameMap[termName].caseConds[i][0] = cond.termNameMap[termName].caseConds[i][0].dot(W)
                    # this would be a place where turning caseConds into a map with var as key would be more efficient
# def biasAdd(conds, termName, var, b):
# for cond in conds:
# cond.termNameMap[termName].vars[var].b = cond.termNameMap[termName].vars[var].b + cond.termNameMap[termName].vars[var].W.dot(b)
def biasAddTerm(conds, termName, b):
    """Fold a bias vector b into every variable of term `termName`, in every condition.

    Models pushing the term backwards through a bias addition: each affine view
    W_v @ x + b_v becomes W_v @ x + (b_v + W_v @ b).  The b part of every case
    condition attached to the same variable is updated the same way, using that
    case condition's own W matrix.
    """
    for cond in conds:
        for var in cond.termNameMap[termName].vars:
            cond.termNameMap[termName].vars[var].b = cond.termNameMap[termName].vars[var].b + \
                                                     cond.termNameMap[termName].vars[var].W.dot(b)
            # Apply the same bias fold to case conditions that reference this variable.
            for i in range(len(cond.termNameMap[termName].caseConds)):
                if cond.termNameMap[termName].caseConds[i][1] == var:
                    cond.termNameMap[termName].caseConds[i][2] = cond.termNameMap[termName].caseConds[i][2] + \
                                                                 cond.termNameMap[termName].caseConds[i][0].dot(b)
                    # this would be a place where turning caseConds into a map with var as key would be more efficient
#all of the relu methods are not optimized to work together, loops through conds many more times than needed
# comp is component of vectorVar to apply relu to
def relu(conds, termName, var, comp):
    """Case-split every condition on the sign of component `comp` of `var`.

    Each condition in the deque is popped from the right and replaced (on the
    left) by two refined copies:
      * one assuming component `comp` is >= 0, where ReLU acts as identity;
      * one assuming -comp >= 0 (i.e. comp <= 0), where ReLU outputs 0, so the
        corresponding column of the variable's weight matrix is zeroed.
    The assumption is recorded as a case-condition triple [row-selector, var, 0].
    Doubles the number of conditions in `conds` per call.

    NOTE(review): cond1 and cond2 share the non-`termName` Term objects with
    the original condition — only `termName` is copied; confirm that shared
    mutation of the other terms is intended.
    """
    for i in range(len(conds)):
        cond = conds.pop()
        # I think this can be made more efficient by just reusing the condition instead of making copy
        cond1 = Condition(cond.termNameMap.values())
        term1 = cond1.termNameMap[termName].copy()
        cond1.termNameMap[termName] = term1
        cond2 = Condition(cond.termNameMap.values())
        term2 = cond2.termNameMap[termName].copy()
        cond2.termNameMap[termName] = term2
        dim = term1.vars[var].dim
        # np.identity(dim)[comp] selects the comp-th unit row vector.
        term1.caseConds.append([np.identity(dim)[comp], var, 0])
        term2.caseConds.append([-1*np.identity(dim)[comp], var, 0])
        # Negative branch: ReLU zeroes the component, so its column contributes nothing.
        term2.vars[var].W[:,comp] = 0
        conds.appendleft(cond2)
        conds.appendleft(cond1)
def reluLayer(conds, termName, var):
    """Apply the ReLU case split to every component of `var` in term `termName`."""
    # The component count is fixed before the splits start mutating `conds`.
    for comp in range(conds[-1].termNameMap[termName].vars[var].dim):
        relu(conds, termName, var, comp)
def reluLayerTerm(conds, termName):
    """Apply the ReLU case split to every component of every variable in term `termName`."""
    for var in conds[-1].termNameMap[termName].vars:
        # The component count for this variable is fixed before its splits run.
        for comp in range(conds[-1].termNameMap[termName].vars[var].dim):
            relu(conds, termName, var, comp)
def conv2DLayerTerm(stride, W, xdim=None, ydim=None, termName=None, conds=None):
    """Push term `termName` backwards through a 2-D convolution, for every condition.

    The convolution with kernel W and the given stride is flattened into a
    single matrix `reshapedW` (one row per output entry over the flattened
    input), which is then right-multiplied into each variable's weight matrix,
    and into the W part of matching case conditions.

    NOTE(review): from the index arithmetic, `xdim` looks like the input shape
    (rows, cols, channels) and `ydim` like (?, out_rows, out_cols, filters),
    with W indexed as [kernel_row][kernel_col][in_channel][filter] — confirm
    against callers before relying on this.
    """
    for index in range(len(conds)):
        cond = conds[index]
        for var in cond.termNameMap[termName].vars:
            # One row of reshapedW per (output position, filter) pair.
            reshapedW = np.zeros((ydim[1]*ydim[2]*ydim[3],xdim[0]*xdim[1]*xdim[2]))
            for filter in range(ydim[3]):
                # Flatten this filter's kernel into per-channel row segments
                # laid out over the flattened input row span it touches.
                linearConvs = np.zeros((xdim[2],((xdim[1] * (W.shape[0] - 1)) + W.shape[1])))
                for i in range(W.shape[0]):
                    for j in range(W.shape[1]):
                        for k in range(W.shape[2]):
                            linearConvs[k][(i * xdim[1]) + j] += W[i][j][k][filter]
                # Slide the flattened kernel over the input: `offset` walks
                # columns by stride[1]; when the window would overrun a row,
                # `resets` advances by stride[0] rows and the column restarts.
                offset = 0
                resets = 0
                for i in range(ydim[1] * ydim[2]):
                    if offset + W.shape[1] > xdim[1]:
                        resets += stride[0]
                        offset = 0
                    for j in range(W.shape[2]):
                        reshapedW[filter + (i * ydim[3])][(resets * xdim[1]) + offset + (j * xdim[1]): (resets * xdim[1]) + offset + len(linearConvs[j]) + (j * xdim[1])] = linearConvs[j]
                    offset += stride[1]
            cond.termNameMap[termName].vars[var].W = cond.termNameMap[termName].vars[var].W.dot(reshapedW)
            # cond.termNameMap[termName].vars[var].dim = cond.termNameMap[termName].vars[var].W.shape[0]
            for i in range(len(cond.termNameMap[termName].caseConds)):
                #this can be made more efficient with restructuring of caseConds
                if cond.termNameMap[termName].caseConds[i][1] == var:
                    cond.termNameMap[termName].caseConds[i][0] = cond.termNameMap[termName].caseConds[i][0].dot(reshapedW)
#inspired by ELINA's maxpool_approx
def maxpoolLayerTerm(poolDim, inputDim, termName, conds):
    """Push term `termName` backwards through a max-pool layer.

    Parameters:
        poolDim:  (pool_rows, pool_cols) pooling window size.
        inputDim: (rows, cols, channels) shape of the layer input.
        termName: name of the term to transform.
        conds:    deque of Conditions; each is popped and replaced by the
                  case splits produced by maxpoolHelper.

    Builds W_mp, a 0/1 permutation-style matrix over the flattened input that
    groups the entries of each pooling window contiguously, then delegates the
    per-window max case split to maxpoolHelper.

    BUGFIX: the original passed an undefined name `maxpoolConds` to
    maxpoolHelper, which raised NameError on every call.  The freshly split
    conditions are now appended back onto `conds` itself, mirroring how relu()
    handles its case split.
    """
    outputDim = [inputDim[0]//poolDim[0], inputDim[1]//poolDim[1], inputDim[2]]
    o12 = outputDim[1] * outputDim[2]
    i12 = inputDim[1] * inputDim[2]
    numOut = outputDim[0] * outputDim[1] * outputDim[2]
    W_mp = np.zeros((inputDim[0] * i12, inputDim[0] * i12))
    counter = 0
    for outPos in range(numOut):
        # Recover the (x, y, z) output coordinate from the flat index ...
        outX = outPos // o12
        outY = (outPos-outX*o12) // outputDim[2]
        outZ = outPos - outX * o12 - outY * outputDim[2]
        # ... and the top-left corner of the matching input window.
        inpX = outX * poolDim[0]
        inpY = outY * poolDim[1]
        inpZ = outZ
        inpPos = inpX*i12 + inpY*inputDim[2] + inpZ
        # One row of W_mp per input entry, ordered window by window.
        for xShift in range(poolDim[0]):
            for yShift in range(poolDim[1]):
                poolCurrDim = inpPos + xShift*i12 + yShift*inputDim[2]
                W_mp[counter][poolCurrDim] = 1
                counter += 1
    for i in range(len(conds)):
        cond = conds.pop()
        for var in cond.termNameMap[termName].vars:
            maxpoolHelper(conds, np.array([]), W_mp, poolDim, 0, np.identity(inputDim[0] * i12), cond, termName, var)
def maxpoolHelper(maxpoolConds, caseConds, W_mp, poolDim, depth, maxMatrix, cond, termName, var):
    """Recursively enumerate which entry of each pooling window is the maximum.

    At each `depth` (one pooling window), branch over every candidate position
    in the window, recording comparison case conditions and deleting the
    non-selected entries from `maxMatrix`.  At the base case (all windows
    decided), compose the selection with W_mp, attach the accumulated case
    conditions, fold the resulting matrix into the variable's weights, and
    appendleft the new Condition onto `maxpoolConds`.

    NOTE(review): the hard-coded np.array([1,1,1,1]) / np.array([0,1,2,3])
    assume a pooling window of exactly 4 entries (2x2) — confirm.
    NOTE(review): np.delete without an axis argument flattens `maxMatrix` to
    1-D, which looks inconsistent with the later maxMatrix.dot(W_mp) — verify
    this is intended.
    """
    if depth == len(W_mp) // (poolDim[0] * poolDim[1]):
        W = maxMatrix.dot(W_mp)
        newCond = Condition(cond.termNameMap.values())
        newTerm = newCond.termNameMap[termName].copy()
        newCond.termNameMap[termName] = newTerm
        #I have to fix the dimension changes in each of the backwards transformations
        # dim = term1.vars[var].dim
        for i in range(len(caseConds)):
            for j in range(1,len(caseConds[0])):
                newTerm.caseConds.append([np.identity(len(W[0]))[caseConds[i][0]], var, 0, np.identity(len(W[0]))[caseConds[i][j]], var, 0]) #find a better way to get component of identity matrix, same with relu transformation
        newTerm.vars[var].W = newTerm.vars[var].W.dot(W)
        maxpoolConds.appendleft(newCond)
        return
    for i in range(poolDim[0]*poolDim[1]):
        # Flat indices of this depth's pooling window; the chosen max index is
        # moved to the front, the remaining ones follow.
        pool = np.array([1,1,1,1]) * depth * poolDim[0] * poolDim[1] + np.array([0,1,2,3])
        newConds = np.append(pool[i], np.delete(pool, i))
        if len(caseConds) == 0:
            maxpoolHelper(maxpoolConds, np.vstack((newConds,)), W_mp, poolDim, depth + 1, np.delete(maxMatrix, newConds[1:]), cond, termName, var)
        else:
            maxpoolHelper(maxpoolConds, np.vstack((caseConds, newConds)), W_mp, poolDim, depth + 1, np.delete(maxMatrix, newConds[1:]), cond, termName, var)
| [
"copy.deepcopy",
"numpy.copy",
"numpy.zeros",
"numpy.identity",
"numpy.array",
"numpy.delete",
"numpy.vstack"
] | [((8256, 8304), 'numpy.zeros', 'np.zeros', (['(inputDim[0] * i12, inputDim[0] * i12)'], {}), '((inputDim[0] * i12, inputDim[0] * i12))\n', (8264, 8304), True, 'import numpy as np\n'), ((290, 306), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (301, 306), True, 'import numpy as np\n'), ((387, 400), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (395, 400), True, 'import numpy as np\n'), ((1177, 1206), 'copy.deepcopy', 'copy.deepcopy', (['self.caseConds'], {}), '(self.caseConds)\n', (1190, 1206), False, 'import copy\n'), ((6443, 6511), 'numpy.zeros', 'np.zeros', (['(ydim[1] * ydim[2] * ydim[3], xdim[0] * xdim[1] * xdim[2])'], {}), '((ydim[1] * ydim[2] * ydim[3], xdim[0] * xdim[1] * xdim[2]))\n', (6451, 6511), True, 'import numpy as np\n'), ((10210, 10232), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (10218, 10232), True, 'import numpy as np\n'), ((10268, 10286), 'numpy.delete', 'np.delete', (['pool', 'i'], {}), '(pool, i)\n', (10277, 10286), True, 'import numpy as np\n'), ((1053, 1067), 'numpy.copy', 'np.copy', (['var.W'], {}), '(var.W)\n', (1060, 1067), True, 'import numpy as np\n'), ((1069, 1083), 'numpy.copy', 'np.copy', (['var.b'], {}), '(var.b)\n', (1076, 1083), True, 'import numpy as np\n'), ((6576, 6636), 'numpy.zeros', 'np.zeros', (['(xdim[2], xdim[1] * (W.shape[0] - 1) + W.shape[1])'], {}), '((xdim[2], xdim[1] * (W.shape[0] - 1) + W.shape[1]))\n', (6584, 6636), True, 'import numpy as np\n'), ((9109, 9121), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9117, 9121), True, 'import numpy as np\n'), ((9141, 9171), 'numpy.identity', 'np.identity', (['(inputDim[0] * i12)'], {}), '(inputDim[0] * i12)\n', (9152, 9171), True, 'import numpy as np\n'), ((10360, 10382), 'numpy.vstack', 'np.vstack', (['(newConds,)'], {}), '((newConds,))\n', (10369, 10382), True, 'import numpy as np\n'), ((10410, 10444), 'numpy.delete', 'np.delete', (['maxMatrix', 'newConds[1:]'], {}), '(maxMatrix, newConds[1:])\n', (10419, 10444), 
True, 'import numpy as np\n'), ((10521, 10553), 'numpy.vstack', 'np.vstack', (['(caseConds, newConds)'], {}), '((caseConds, newConds))\n', (10530, 10553), True, 'import numpy as np\n'), ((10581, 10615), 'numpy.delete', 'np.delete', (['maxMatrix', 'newConds[1:]'], {}), '(maxMatrix, newConds[1:])\n', (10590, 10615), True, 'import numpy as np\n'), ((5610, 5626), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (5621, 5626), True, 'import numpy as np\n'), ((5678, 5694), 'numpy.identity', 'np.identity', (['dim'], {}), '(dim)\n', (5689, 5694), True, 'import numpy as np\n'), ((10154, 10176), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (10162, 10176), True, 'import numpy as np\n')] |
import cv2
from functools import lru_cache
import numpy as np
from typing import List
from urllib.request import urlopen
from app.cv.openpose import get_openpose_engine
from app.schemas.pose import Keypoint, Person
from app.cv.engine import Engine
# Colour constants in OpenCV's BGR channel order.
BLACK = (0, 0, 0)
GREY = (127, 127, 127)
# NOTE(review): (255, 200, 200) is a pale blue in BGR, not pure white — confirm intended.
WHITE = (255, 200, 200)
BLUE = (255, 0, 0)
GREEN = (0, 180, 0)
RED = (0, 0, 200)
# Default text/line parameters for cv2 drawing calls.
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
default_colour = (0, 0, 0)
thickness = 3
colours = [WHITE, RED, GREEN]
# Per-pose drawing styles used by draw_persons: thick red body skeleton,
# thin light-coloured hands and face.
BODY_DRAW_STYLE = {
    'color': RED,
    'thickness': 3,
}
HAND_DRAW_STYLE = {
    'color': WHITE,
    'thickness': 1,
}
FACE_DRAW_STYLE = {
    'color': WHITE,
    'thickness': 1,
}
def pixel(keypoint: Keypoint, width: int, height: int):
    """Map a normalized keypoint to integer pixel coordinates.

    Missing data — a None keypoint, or a coordinate that is None/NaN —
    maps to 0, so callers can treat (0, 0) as "not detected".
    """
    if keypoint is None:
        return 0, 0

    def scale(coord, extent):
        # A missing or NaN coordinate collapses to pixel 0.
        if coord is None or np.isnan(coord):
            return 0
        return int(coord * extent)

    return scale(keypoint.x, width), scale(keypoint.y, height)
def url_to_image(url):
    """Download the image at `url` and decode it into an OpenCV BGR array.

    Returns None if OpenCV cannot decode the payload (cv2.imdecode contract).
    """
    # download the image, convert it to a NumPy array, and then read
    # it into OpenCV format.
    # BUGFIX: close the HTTP response deterministically — the original left it
    # open (resource leak); urlopen() responses support the context manager
    # protocol.
    with urlopen(url) as resp:
        payload = resp.read()
    image = np.asarray(bytearray(payload), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image
def file_to_image(file):
    """Decode an uploaded file object (with a `.file` stream) into an OpenCV BGR array.

    Returns None if OpenCV cannot decode the payload (cv2.imdecode contract).
    """
    contents = file.file.read()
    print(f">> Contents: [{contents[1:5]}...] of length {len(contents)}")
    # BUGFIX: np.fromstring is deprecated for binary input (and removed in
    # NumPy 2.0); np.frombuffer is the supported, zero-copy replacement with
    # identical semantics here.
    image = np.frombuffer(contents, np.uint8)
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)
    return image
def draw_persons(image, persons: List[Person]):
    """Draw each person's detected skeletons and bounding box onto `image` in place.

    Keypoint coordinates are normalized; pixel() scales them to the image size.
    Skeleton segments with a missing endpoint (mapped to pixel (0, 0)) are
    skipped.  Returns the annotated image.
    """
    height, width, _ = np.shape(image)
    for person in persons:
        # Draw body, face and both hands, each with its own colour/thickness style.
        for pose, draw_style in zip(
                [person.pose, person.face_pose, person.left_hand_pose, person.right_hand_pose],
                [BODY_DRAW_STYLE, FACE_DRAW_STYLE, HAND_DRAW_STYLE, HAND_DRAW_STYLE]):
            if pose is None:
                continue
            for line in pose._skeleton:
                # Each skeleton line is a polyline of keypoint ids; draw consecutive pairs.
                for kpid1, kpid2 in zip(line[:-1], line[1:]):
                    kp1: Keypoint = pose.get_keypoint(kpid1)
                    kp2: Keypoint = pose.get_keypoint(kpid2)
                    pixel1 = pixel(kp1, width=width, height=height)
                    pixel2 = pixel(kp2, width=width, height=height)
                    # (0, 0) is pixel()'s "not detected" sentinel — skip the segment.
                    if pixel1 == (0, 0) or pixel2 == (0, 0):
                        continue
                    image = cv2.line(
                        image,
                        pixel1,
                        pixel2,
                        draw_style['color'],
                        draw_style['thickness'],
                    )
        # Assumes person.bounding_box is normalized [x1, y1, x2, y2] — TODO confirm.
        # Reshape to two (x, y) points, scale to pixels, round to ints.
        bbox_pixels = tuple(np.multiply(np.array(person.bounding_box).reshape((2, 2)), np.array([width, height])).
                            round().astype(int).flatten())
        cv2.rectangle(image,
                      bbox_pixels[0:2],
                      bbox_pixels[2:4],
                      BLACK)  # ,
        # thickness=1)
    return image
def infer_url(engine: Engine, url):
    """Download the image at `url` and run pose inference on it."""
    print(f"inferring from url:{url}")
    return engine.infer_image(url_to_image(url))
def draw_url(engine: Engine, url):
    """Download an image, run pose inference, draw the results, and return JPEG bytes."""
    image = url_to_image(url)
    persons = engine.infer_image(image)
    draw_persons(image, persons)
    res, out_image = cv2.imencode(".jpg", image)
    # BUGFIX: ndarray.tostring() is deprecated (removed in NumPy 2.0);
    # tobytes() is the identical, supported replacement.
    return out_image.tobytes()
def infer_file(engine: Engine, file):
    """Run pose inference on an uploaded image file."""
    print("inferring from file")
    return engine.infer_image(file_to_image(file))
def draw_file(engine: Engine, file):
    """Decode an uploaded image, run pose inference, draw the results, and return JPEG bytes."""
    image = file_to_image(file)
    persons = engine.infer_image(image)
    draw_persons(image, persons)
    res, out_image = cv2.imencode(".jpg", image)
    # BUGFIX: ndarray.tostring() is deprecated (removed in NumPy 2.0);
    # tobytes() is the identical, supported replacement.
    return out_image.tobytes()
@lru_cache()
def get_engine() -> Engine:
    """Return the pose-estimation engine, constructed once and cached for the process."""
    return get_openpose_engine()
| [
"cv2.line",
"app.cv.openpose.get_openpose_engine",
"cv2.imdecode",
"urllib.request.urlopen",
"numpy.isnan",
"numpy.shape",
"cv2.rectangle",
"numpy.array",
"cv2.imencode",
"functools.lru_cache",
"numpy.fromstring"
] | [((3708, 3719), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (3717, 3719), False, 'from functools import lru_cache\n'), ((1117, 1129), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1124, 1129), False, 'from urllib.request import urlopen\n'), ((1204, 1241), 'cv2.imdecode', 'cv2.imdecode', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (1216, 1241), False, 'import cv2\n'), ((1404, 1437), 'numpy.fromstring', 'np.fromstring', (['contents', 'np.uint8'], {}), '(contents, np.uint8)\n', (1417, 1437), True, 'import numpy as np\n'), ((1450, 1487), 'cv2.imdecode', 'cv2.imdecode', (['image', 'cv2.IMREAD_COLOR'], {}), '(image, cv2.IMREAD_COLOR)\n', (1462, 1487), False, 'import cv2\n'), ((1579, 1594), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (1587, 1594), True, 'import numpy as np\n'), ((3278, 3305), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (3290, 3305), False, 'import cv2\n'), ((3645, 3672), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (3657, 3672), False, 'import cv2\n'), ((3759, 3780), 'app.cv.openpose.get_openpose_engine', 'get_openpose_engine', ([], {}), '()\n', (3778, 3780), False, 'from app.cv.openpose import get_openpose_engine\n'), ((2782, 2845), 'cv2.rectangle', 'cv2.rectangle', (['image', 'bbox_pixels[0:2]', 'bbox_pixels[2:4]', 'BLACK'], {}), '(image, bbox_pixels[0:2], bbox_pixels[2:4], BLACK)\n', (2795, 2845), False, 'import cv2\n'), ((832, 852), 'numpy.isnan', 'np.isnan', (['keypoint.x'], {}), '(keypoint.x)\n', (840, 852), True, 'import numpy as np\n'), ((917, 937), 'numpy.isnan', 'np.isnan', (['keypoint.y'], {}), '(keypoint.y)\n', (925, 937), True, 'import numpy as np\n'), ((2379, 2456), 'cv2.line', 'cv2.line', (['image', 'pixel1', 'pixel2', "draw_style['color']", "draw_style['thickness']"], {}), "(image, pixel1, pixel2, draw_style['color'], draw_style['thickness'])\n", (2387, 2456), False, 'import cv2\n'), ((2687, 
2712), 'numpy.array', 'np.array', (['[width, height]'], {}), '([width, height])\n', (2695, 2712), True, 'import numpy as np\n'), ((2640, 2669), 'numpy.array', 'np.array', (['person.bounding_box'], {}), '(person.bounding_box)\n', (2648, 2669), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('_mpl-gallery-nogrid')

# Sample f(x, y) = (1 - x/2 + x^5 + y^3) * exp(-x^2 - y^2) on a 256x256 grid.
axis = np.linspace(-3, 3, 256)
X, Y = np.meshgrid(axis, axis)
Z = (1 - X / 2 + X ** 5 + Y ** 3) * np.exp(-X ** 2 - Y ** 2)

# Seven evenly spaced contour levels spanning Z's full range.
levels = np.linspace(Z.min(), Z.max(), 7)

# Draw the filled-contour plot.
fig, ax = plt.subplots()
ax.contourf(X, Y, Z, levels=levels)
plt.show()
"matplotlib.pyplot.show",
"matplotlib.pyplot.style.use",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.subplots"
] | [((52, 88), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""_mpl-gallery-nogrid"""'], {}), "('_mpl-gallery-nogrid')\n", (65, 88), True, 'import matplotlib.pyplot as plt\n'), ((282, 296), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (294, 296), True, 'import matplotlib.pyplot as plt\n'), ((335, 345), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (343, 345), True, 'import matplotlib.pyplot as plt\n'), ((121, 144), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(256)'], {}), '(-3, 3, 256)\n', (132, 144), True, 'import numpy as np\n'), ((146, 169), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(256)'], {}), '(-3, 3, 256)\n', (157, 169), True, 'import numpy as np\n'), ((201, 225), 'numpy.exp', 'np.exp', (['(-X ** 2 - Y ** 2)'], {}), '(-X ** 2 - Y ** 2)\n', (207, 225), True, 'import numpy as np\n')] |
import asyncio
import concurrent.futures
import datetime
import json
import os
from collections import Counter
from typing import Optional
import discord
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from discord.ext import commands
from discord_slash import cog_ext, SlashContext, ComponentContext
from discord_slash.utils import manage_components
from discord_slash.utils.manage_commands import create_option, create_choice
from pyvis.network import Network
from ElevatorBot.database.database import (
getForges,
getLastActivity,
getDestinyDefinition,
getWeaponInfo,
getPgcrActivity,
getTopWeapons,
getActivityHistory,
getPgcrActivitiesUsersStats,
getClearCount,
get_d2_steam_player_info,
getTimePlayed,
)
from ElevatorBot.backendNetworking.authfunctions import getSpiderMaterials
from ElevatorBot.backendNetworking.dataLoading import (
searchForItem,
getClanMembers,
translateWeaponSlot,
)
from ElevatorBot.backendNetworking.dataTransformation import getSeasonalChallengeInfo
from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer
from ElevatorBot.backendNetworking.formating import embed_message
from ElevatorBot.backendNetworking.miscFunctions import (
get_emoji,
write_line,
has_elevated_permissions,
check_if_mutually_exclusive,
convert_expansion_or_season_dates,
)
from ElevatorBot.backendNetworking.persistentMessages import (
get_persistent_message_or_channel,
make_persistent_message,
delete_persistent_message,
)
from ElevatorBot.backendNetworking.slashCommandFunctions import (
get_user_obj,
get_user_obj_admin,
verify_time_input,
)
from ElevatorBot.backendNetworking.tournament import startTournamentEvents
from ElevatorBot.networking.network import get_json_from_url
from ElevatorBot.static.config import CLANID
from ElevatorBot.static.dict import (
raidHashes,
gmHashes,
expansion_dates,
season_dates,
zeroHashes,
herzeroHashes,
whisperHashes,
herwhisperHashes,
presageHashes,
presageMasterHashes,
prophHashes,
pitHashes,
throneHashes,
harbHashes,
requirementHashes,
)
from ElevatorBot.static.globals import (
titan_emoji_id,
hunter_emoji_id,
warlock_emoji_id,
light_level_icon_emoji_id,
tournament,
enter_emoji_id,
)
from ElevatorBot.static.slashCommandOptions import (
choices_mode,
options_stat,
options_user,
)
class DestinyCommands(commands.Cog):
    def __init__(self, client):
        """Set up the cog: store the bot client and precompute date/emoji lookup tables."""
        self.client = client
        # Emoji ids used to render each Destiny class in embeds.
        self.classes = {
            "Warlock": warlock_emoji_id,
            "Hunter": hunter_emoji_id,
            "Titan": titan_emoji_id,
        }
        # Expansion and season launch dates merged and sorted chronologically;
        # each entry is ["YYYY-MM-DD", name] (used by _time to bucket playtime).
        self.season_and_expansion_dates = sorted(expansion_dates + season_dates, key=lambda x: x[0])
        # Additional in-game release/event markers.
        self.other_dates = [
            ["2019-10-04", "GoS"],
            ["2019-10-29", "PoH"],
            ["2020-01-14", "Corridors of Time"],
            ["2020-06-06", "Almighty Live Event"],
            ["2020-08-11", "Solstice of Heroes"],
            ["2020-11-21", "DSC"],
            ["2021-04-21", "Guardian Games"],
        ]
        # Second set of event markers, kept separate from other_dates —
        # presumably rendered at a different level; confirm against the consumer.
        self.other_dates_lower = [
            ["2020-02-04", "Empyrean Foundation"],
            ["2020-04-21", "Guardian Games"],
            ["2020-07-07", "Moments of Triumph"],
            ["2021-05-22", "VoG"],
        ]
# @cog_ext.cog_slash(
# name="solos",
# description="Shows you an overview of your Destiny 2 solo activity completions",
# options=[options_user()],
# )
# async def _solos(self, ctx: SlashContext, **kwargs):
# user = await get_user_obj(ctx, kwargs)
# destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
# if not destiny_player:
# return
#
# await ctx.defer()
#
# interesting_solos = {
# "Shattered Throne": throneHashes,
# "Pit of Heresy": pitHashes,
# "Prophecy": prophHashes,
# "Harbinger": harbHashes,
# "Presage": presageHashes,
# "Master Presage": presageMasterHashes,
# "The Whisper": whisperHashes + herwhisperHashes,
# "Zero Hour": zeroHashes + herzeroHashes,
# "Grandmaster Nightfalls": gmHashes,
# }
#
# # get the return text in a gather
# interesting_solos_texts = await asyncio.gather(
# *[
# self.get_formatted_solos_data(
# destiny_player=destiny_player,
# solo_activity_ids=solo_activity_ids,
# )
# for solo_activity_ids in interesting_solos.values()
# ]
# )
#
# # start building the return embed
# embed = embed_message(f"{user.display_name}'s Destiny Solos")
#
# # add the fields
# for solo_name, solo_activity_ids, solo_text in zip(
# interesting_solos.keys(),
# interesting_solos.values(),
# interesting_solos_texts,
# ):
# embed.add_field(
# name=solo_name,
# value=solo_text,
# inline=True,
# )
#
# await ctx.send(embed=embed)
# @staticmethod
# async def get_formatted_solos_data(destiny_player: DestinyPlayer, solo_activity_ids: list[int]) -> str:
# """returns the formatted string to be used in self.solos()"""
#
# results = await destiny_player.get_lowman_count(solo_activity_ids)
#
# return (
# f"Solo Completions: **{results[0]}**\nSolo Flawless Count: **{results[1]}**\nFastest Solo: **{results[2]}**"
# )
    @cog_ext.cog_slash(
        name="time",
        description="Shows you your Destiny 2 playtime split up by season",
        options=[
            create_option(
                name="class",
                description="Default: 'Everything' - Which class you want to limit your playtime to",
                option_type=3,
                required=False,
                choices=[
                    create_choice(name="Everything", value="Everything"),
                    create_choice(name="Warlock", value="Warlock"),
                    create_choice(name="Hunter", value="Hunter"),
                    create_choice(name="Titan", value="Titan"),
                ],
            ),
            options_user(),
        ],
    )
    async def _time(self, ctx: SlashContext, **kwargs):
        """/time - show a user's playtime (Total / PvE / PvP), overall and broken down per season."""
        user = await get_user_obj(ctx, kwargs)
        destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
        if not destiny_player:
            return
        await ctx.defer()
        # init the db request function with all the args.
        # character_class stays None for "Everything", so the query is unfiltered
        args = {
            "destinyID": destiny_player.destiny_id,
            "character_class": kwargs["class"] if ("class" in kwargs and kwargs["class"] != "Everything") else None,
        }
        # prepare embed for later use (header shows the all-time totals)
        embed = embed_message(
            f"""{user.display_name} D2 Time Played {"- " + args["character_class"] if args["character_class"] else ""}""",
            f"**Total:** {str(datetime.timedelta(seconds=await getTimePlayed(**args)))} \n**PvE:** {str(datetime.timedelta(seconds=await getTimePlayed(**args, mode=7)))} \n**PvP:** {str(datetime.timedelta(seconds=await getTimePlayed(**args, mode=5)))}",
        )
        # init the dict where the results get saved: {season_name: {"Total": s, "PvE": s, "PvP": s}}
        results = {}
        # loop through the seasons (entries look like [date_string, season_name])
        for season in self.season_and_expansion_dates:
            season_date = datetime.datetime.strptime(season[0], "%Y-%m-%d")
            season_name = season[1]
            results[season_name] = {
                "Total": 0,
                "PvE": 0,
                "PvP": 0,
            }
            # get the next seasons start time as the cutoff or now if its the current season
            try:
                next_season_date = self.season_and_expansion_dates[(self.season_and_expansion_dates.index(season) + 1)][
                    0
                ]
                next_season_date = datetime.datetime.strptime(next_season_date, "%Y-%m-%d")
            except IndexError:
                # last list entry -> currently running season, cut off at "now"
                next_season_date = datetime.datetime.now()
            args.update(
                {
                    "start_time": season_date,
                    "end_time": next_season_date,
                }
            )
            # loop through the modes (None = everything, 7 = PvE, 5 = PvP)
            for mode_name, mode in {"Total": None, "PvE": 7, "PvP": 5}.items():
                args.update({"mode": mode})
                # actually get time played now, using the definied args
                time_played = await getTimePlayed(**args)
                results[season_name].update({mode_name: time_played})
        # loop through the results and add embed fields
        for season_name, season_values in results.items():
            # only append season info if they actually played that season
            if season_values["Total"] == 0:
                continue
            text = []
            for activity, value in season_values.items():
                text.append(f"**{activity}**: {str(datetime.timedelta(seconds=value))}")
            embed.add_field(name=season_name, value="\n".join(text), inline=True)
        await ctx.send(embed=embed)
    @cog_ext.cog_slash(
        name="poptimeline",
        description="Shows the Destiny 2 steam population timeline",
    )
    async def _poptimeline(self, ctx: SlashContext):
        """/poptimeline - render the recorded Steam player counts as a png and post it."""
        # reading data from the DB
        data = await get_d2_steam_player_info()
        # Create figure and plot space
        # NOTE(review): `fig` is never closed (no plt.close) - repeated invocations
        # accumulate matplotlib figures; confirm whether that matters for this bot
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.yaxis.grid(True)
        # filling plot
        ax.plot(data["dateobj"], data["numberofplayers"], "darkred", zorder=2)
        # Set title and labels for axes
        ax.set_xlabel("Date", fontsize=20, fontweight="bold")
        ax.set_ylabel("Players", fontsize=20, fontweight="bold")
        # season / expansion launches: dark green vertical lines,
        # labels placed at 102% of the player-count value range
        for dates in self.season_and_expansion_dates[7:]:
            date = datetime.datetime.strptime(dates[0], "%Y-%m-%d")
            ax.axvline(date, color="darkgreen", zorder=1)
            ax.text(
                date + datetime.timedelta(days=2),
                (max(data["numberofplayers"]) - min(data["numberofplayers"])) * 1.02 + min(data["numberofplayers"]),
                dates[1],
                color="darkgreen",
                fontweight="bold",
                bbox=dict(facecolor="white", edgecolor="darkgreen", pad=4, zorder=3),
            )
        # other noteworthy events: aquamarine lines, labels at 95% of the range
        for dates in self.other_dates:
            date = datetime.datetime.strptime(dates[0], "%Y-%m-%d")
            ax.axvline(date, color="mediumaquamarine", zorder=1)
            ax.text(
                date + datetime.timedelta(days=2),
                (max(data["numberofplayers"]) - min(data["numberofplayers"])) * 0.95 + min(data["numberofplayers"]),
                dates[1],
                color="mediumaquamarine",
                bbox=dict(
                    facecolor="white",
                    edgecolor="mediumaquamarine",
                    boxstyle="round",
                    zorder=3,
                ),
            )
        # a second tier of event labels, drawn lower (90%) to avoid overlap
        for dates in self.other_dates_lower:
            date = datetime.datetime.strptime(dates[0], "%Y-%m-%d")
            ax.axvline(date, color="mediumaquamarine", zorder=1)
            ax.text(
                date + datetime.timedelta(days=2),
                (max(data["numberofplayers"]) - min(data["numberofplayers"])) * 0.90 + min(data["numberofplayers"]),
                dates[1],
                color="mediumaquamarine",
                bbox=dict(
                    facecolor="white",
                    edgecolor="mediumaquamarine",
                    boxstyle="round",
                    zorder=3,
                ),
            )
        # saving file
        title = "d2population.png"
        plt.savefig(title, bbox_inches="tight")
        # sending them the file
        embed = embed_message("Destiny 2 - Steam Player Count")
        image = discord.File(title)
        embed.set_image(url=f"attachment://{title}")
        await ctx.send(file=image, embed=embed)
        # delete the temp file (after a short grace period so discord can fetch it)
        await asyncio.sleep(10)
        os.remove(title)
    @cog_ext.cog_slash(
        name="last",
        description="Stats for the last activity you played",
        options=[
            create_option(
                name="activity",
                description="The type of the activity",
                option_type=3,
                required=True,
                choices=choices_mode,
            ),
            options_user(),
        ],
    )
    async def _last(self, ctx: SlashContext, **kwargs):
        """/last - show a per-player stat breakdown of the user's most recent activity of the given mode."""
        user = await get_user_obj(ctx, kwargs)
        destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
        if not destiny_player:
            return
        # might take a sec
        await ctx.defer()
        # refresh the activity DB first, then get data for the mode specified
        # ("0" means no mode filter -> most recent activity of any kind)
        await destiny_player.update_activity_db()
        data = await getLastActivity(
            destiny_player.destiny_id,
            mode=int(kwargs["activity"]) if "activity" in kwargs and kwargs["activity"] != "0" else None,
        )
        if not data:
            await ctx.send(
                embed=embed_message(
                    "Error",
                    "Couldn't find any data for that mode. If you think this is an error DM me",
                )
            )
            return
        # make data pretty and send msg (index 2 of the definition tuple is the display name)
        activity_name = (await getDestinyDefinition("DestinyActivityDefinition", data["directorActivityHash"]))[2]
        embed = embed_message(
            f"{user.display_name}'s Last Activity",
            f"**{activity_name}{(' - ' + str(data['score']) + ' Points') if data['score'] > 0 else ''} - {str(datetime.timedelta(seconds=data['activityDurationSeconds']))}**",
            f"Date: {data['period'].strftime('%d/%m/%Y, %H:%M')} - InstanceID: {data['instanceID']}",
        )
        # one field per participant: kills / deaths / assists, K/D (DNF flag), time played
        for player in data["entries"]:
            player_data = [
                f"K: **{player['opponentsDefeated']}**, D: **{player['deaths']}**, A: **{player['assists']}**",
                f"K/D: **{round((player['opponentsDefeated'] / player['deaths']) if player['deaths'] > 0 else player['opponentsDefeated'], 2)}** {'(DNF)' if not player['completed'] else ''}",
                str(datetime.timedelta(seconds=player["timePlayedSeconds"])),
            ]
            # sometimes people dont have a class for some reason. Skipping that
            if player["characterClass"] == "":
                continue
            embed.add_field(
                name=f"{await get_emoji(self.client, self.classes[player['characterClass']])} {(await destiny_player.get_destiny_name_and_last_played())[0]} {await get_emoji(self.client, light_level_icon_emoji_id)} {player['lightLevel']}",
                value="\n".join(player_data),
                inline=True,
            )
        await ctx.send(embed=embed)
    @cog_ext.cog_slash(
        name="challenges",
        description="Shows you the seasonal challenges and your completion status",
        options=[options_user()],
    )
    async def _challenges(self, ctx: SlashContext, **kwargs):
        """/challenges - interactive, week-by-week view of the user's seasonal-challenge progress."""
        await ctx.defer()
        user = await get_user_obj(ctx, kwargs)
        destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
        if not destiny_player:
            return
        # get seasonal challenge info; default the view to the first week
        seasonal_challenges = await getSeasonalChallengeInfo()
        start = list(seasonal_challenges)[0]
        # get player triumphs (challenge completion is stored as triumphs)
        user_triumphs = await destiny_player.get_triumphs()
        # build a dropdown with one entry per week so the user can switch views
        components = [
            manage_components.create_actionrow(
                manage_components.create_select(
                    options=[
                        manage_components.create_select_option(
                            emoji="📅",
                            label=week,
                            value=week,
                        )
                        for week in seasonal_challenges
                    ],
                    placeholder="Select the week you want to see",
                    min_values=1,
                    max_values=1,
                )
            ),
        ]
        # send data and wait for new user input (recursion lives in the helper)
        await self._send_challenge_info(
            ctx.author,
            user,
            start,
            seasonal_challenges,
            user_triumphs,
            components,
            ctx=ctx,
        )
    async def _send_challenge_info(
        self,
        author: discord.Member,
        user: discord.Member,
        week: str,
        seasonal_challenges: dict,
        user_triumphs: dict,
        select_components: list,
        ctx: SlashContext = None,
        select_ctx: ComponentContext = None,
        message: discord.Message = None,
    ) -> None:
        """
        Send / update the challenge embed for `week`, then wait up to 60s for a new
        dropdown selection and recurse with the chosen week.

        On the first call `ctx` is set and a new message is sent; on recursive calls
        `select_ctx` / `message` are set and the existing message is edited in place.
        Recursion ends when the 60s timeout fires (dropdown is then removed).
        """
        # this is a recursive commmand.
        # make data pretty
        embed = await self._get_challenge_info(user, week, seasonal_challenges, user_triumphs)
        # send message
        if not select_ctx:
            message = await ctx.send(embed=embed, components=select_components)
        else:
            await select_ctx.edit_origin(embed=embed)
        # wait 60s for selection
        # NOTE(review): `check` is defined but never passed to wait_for_component below,
        # so selections from any user are currently accepted - confirm if intended
        def check(select_ctx: ComponentContext):
            return select_ctx.author == author
        try:
            select_ctx: ComponentContext = await manage_components.wait_for_component(
                select_ctx.bot if select_ctx else ctx.bot,
                components=select_components,
                timeout=60,
            )
        except asyncio.TimeoutError:
            # nobody picked anything in time - strip the dropdown and stop recursing
            await message.edit(components=None)
            return
        else:
            new_week = select_ctx.selected_options[0]
            # recursively call this function
            await self._send_challenge_info(
                author,
                user,
                new_week,
                seasonal_challenges,
                user_triumphs,
                select_components,
                select_ctx=select_ctx,
                message=message,
            )
@staticmethod
async def _get_challenge_info(
user: discord.Member, week: str, seasonal_challenges: dict, user_triumphs: dict
) -> discord.Embed:
"""Returns an embed for the specified week"""
embed = embed_message(f"{user.display_name}'s Seasonal Challenges - {week}")
# add the triumphs and what the user has done
for triumph in seasonal_challenges[week]:
user_triumph = user_triumphs[str(triumph["referenceID"])]
# calculate completion rate
rate = []
for objective in user_triumph["objectives"]:
rate.append(objective["progress"] / objective["completionValue"] if not objective["complete"] else 1)
rate = sum(rate) / len(rate)
# make emoji art for completion rate
bar_length = 10
bar_text = ""
for i in range(bar_length):
if round(rate, 1) <= 1 / bar_length * i:
bar_text += "░"
else:
bar_text += "▓"
# add field to embed
embed.add_field(
name=f"""{triumph["name"]} | {bar_text} {int(rate * 100)}%""",
value=triumph["description"],
inline=False,
)
return embed
# todo not really needed
    @cog_ext.cog_slash(
        name="spoder",
        description="The better /spider command to show Spiders current inventory",
        options=[options_user()],
    )
    async def _spoder(self, ctx: SlashContext, **kwargs):
        """/spoder - show Spider's current material inventory (admin-gated when targeting other users)."""
        await ctx.defer()
        # admin variant of the lookup: returns None when the caller may not target that user
        user = await get_user_obj_admin(ctx, kwargs)
        if not user:
            return
        destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
        if not destiny_player:
            return
        # the vendor request only needs some character, so just take the first one
        anyCharID = list(await destiny_player.get_character_info())[0]
        # get and send spider inv - the helper returns either an embed, a text result, or an error
        materialtext = await getSpiderMaterials(destiny_player.discord_id, destiny_player.destiny_id, anyCharID)
        if "embed" in materialtext:
            await ctx.send(embed=materialtext["embed"])
        elif materialtext["result"]:
            await ctx.send(materialtext["result"])
        else:
            await ctx.send(materialtext["error"])
# @cog_ext.cog_slash(
# name="destiny",
# description="Gives you various destiny stats",
# options=[options_user()],
# )
# async def _destiny(self, ctx: SlashContext, **kwargs):
# await ctx.defer()
#
# # get basic user data
# user = await get_user_obj(ctx, kwargs)
# destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
# if not destiny_player:
# return
#
# heatmap_url = f"https://chrisfried.github.io/secret-scrublandeux/guardian/{destiny_player.system}/{destiny_player.destiny_id}"
#
# # get character infos
# characters = await destiny_player.get_character_info()
#
# character_playtime = {} # in seconds
# for characterID in characters:
# character_playtime[characterID] = await destiny_player.get_stat_value(
# "secondsPlayed", character_id=characterID
# )
#
# embed = embed_message(
# f"{user.display_name}'s Destiny Stats",
# f"**Total Playtime:** {str(datetime.timedelta(seconds=sum(character_playtime.values())))} \n[Click to see your heatmap]({heatmap_url})",
# "For info on achievable discord roles, type !roles",
# )
#
# """ char info field """
# embed.add_field(name="", value=f"__**Characters:**__", inline=False)
# for characterID in characters:
# text = f"""Playtime: {str(datetime.timedelta(seconds=character_playtime[characterID]))} \n\nPower: {await destiny_player.get_stat_value("highestLightLevel", character_id=characterID):,} \nActivities: {await destiny_player.get_stat_value("activitiesCleared", character_id=characterID):,} \nKills: {await destiny_player.get_stat_value("kills", character_id=characterID):,} \nDeaths: {await destiny_player.get_stat_value("deaths", character_id=characterID):,} \nEfficiency: {round(await destiny_player.get_stat_value("efficiency", character_id=characterID), 2)}"""
# embed.add_field(
# name=f"""{characters[characterID]["class"]} ({characters[characterID]["race"]} / {characters[characterID]["gender"]})""",
# value=text,
# inline=True,
# )
#
# """ triumph info field """
# embed.add_field(name="", value=f"__**Triumphs:**__", inline=False)
#
# # get triumph data
# triumphs = await destiny_player.get_triumphs()
# embed.add_field(
# name="Lifetime Triumph Score",
# value=f"""{triumphs["profileRecords"]["data"]["lifetimeScore"]:,}""",
# inline=True,
# )
# embed.add_field(
# name="Active Triumph Score",
# value=f"""{triumphs["profileRecords"]["data"]["activeScore"]:,}""",
# inline=True,
# )
# embed.add_field(
# name="Legacy Triumph Score",
# value=f"""{triumphs["profileRecords"]["data"]["legacyScore"]:,}""",
# inline=True,
# )
#
# # get triumph completion rate
# triumphs_data = triumphs["profileRecords"]["data"]["records"]
# triumphs_completed = 0
# triumphs_no_data = 0
# for triumph in triumphs_data.values():
# status = True
# if "objectives" in triumph:
# for part in triumph["objectives"]:
# status &= part["complete"]
# elif "intervalObjectives" in triumph:
# for part in triumph["intervalObjectives"]:
# status &= part["complete"]
# else:
# triumphs_no_data += 1
# continue
# if status:
# triumphs_completed += 1
# embed.add_field(
# name="Triumphs",
# value=f"{triumphs_completed} / {len(triumphs_data) - triumphs_no_data}",
# inline=True,
# )
#
# # get seal completion rate
# total_seals, completed_seals = await destiny_player.get_player_seals()
# embed.add_field(
# name="Seals",
# value=f"{len(completed_seals)} / {len(total_seals)}",
# inline=True,
# )
#
# await ctx.send(embed=embed)
@cog_ext.cog_subcommand(
base="stat",
base_description="Shows you various Destiny 2 stats",
name="everything",
description="Displays information for all activities",
options=[options_stat, options_user()],
)
async def _stat_everything(self, ctx: SlashContext, **kwargs):
# get basic user data
user = await get_user_obj(ctx, kwargs)
destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
if not destiny_player:
return
# might take a sec
await ctx.defer()
# get stat
stat = await destiny_player.get_stat_value(kwargs["name"])
await ctx.send(
embed=embed_message(
f"{user.display_name}'s Stat Info",
f"Your `{kwargs['name']}` stat is currently at **{stat:,}**",
)
)
@cog_ext.cog_subcommand(
base="stat",
base_description="Shows you various Destiny 2 stats",
name="pve",
description="Displays information for all PvE activities",
options=[options_stat, options_user()],
)
async def _stat_pve(self, ctx: SlashContext, **kwargs):
# get basic user data
user = await get_user_obj(ctx, kwargs)
destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
if not destiny_player:
return
# might take a sec
await ctx.defer()
# get stat
stat = await destiny_player.get_stat_value(kwargs["name"], stat_category="allPvE")
await ctx.send(
embed=embed_message(
f"{user.display_name}'s PvE Stat Info",
f"Your `{kwargs['name']}` stat is currently at **{stat:,}**",
)
)
@cog_ext.cog_subcommand(
base="stat",
base_description="Shows you various Destiny 2 stats",
name="pvp",
description="Displays information for all PvP activities",
options=[options_stat, options_user()],
)
async def _stat_pvp(self, ctx: SlashContext, **kwargs):
# get basic user data
user = await get_user_obj(ctx, kwargs)
destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
if not destiny_player:
return
# might take a sec
await ctx.defer()
# get stat
stat = await destiny_player.get_stat_value(kwargs["name"], stat_category="allPvP")
await ctx.send(
embed=embed_message(
f"{user.display_name}'s PvP Stat Info",
f"Your `{kwargs['name']}` stat is currently at **{stat:,}**",
)
)
class ClanActivitiesCommands(commands.Cog):
    """Commands that visualise which clan members play together, as an interactive pyvis network graph."""
    def __init__(self, client):
        self.client = client
    @cog_ext.cog_slash(
        name="clanactivity",
        description="Shows information about who from the clan plays with whom (Default: in the last 7 days)",
        options=[
            create_option(
                name="mode",
                description="You can restrict the game mode",
                option_type=3,
                required=False,
                choices=choices_mode,
            ),
            create_option(
                name="starttime",
                description="Format: 'DD/MM/YY' - You can restrict the start (lower cutoff). Note: Can break for long timespan",
                option_type=3,
                required=False,
            ),
            create_option(
                name="endtime",
                description="Format: 'DD/MM/YY' - You can restrict the end (higher cutoff)",
                option_type=3,
                required=False,
            ),
            options_user(flavor_text="The name of the user you want to highlight"),
        ],
    )
    async def _clanactivity(self, ctx: SlashContext, **kwargs):
        """/clanactivity - build a who-plays-with-whom network graph for the clan and send it as an html file."""
        # edge_list = [person, size, size_desc, display_names, colors]
        self.edge_list = []
        self.ignore = []
        user = await get_user_obj(ctx, kwargs)
        destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
        if not destiny_player:
            return
        # get params (start defaults to 7 days ago, end to now)
        mode = int(kwargs["mode"]) if "mode" in kwargs else None
        start_time = (
            await verify_time_input(ctx, kwargs["starttime"])
            if "starttime" in kwargs
            else datetime.datetime.now() - datetime.timedelta(days=7)
        )
        if not start_time:
            return
        end_time = await verify_time_input(ctx, kwargs["endtime"]) if "endtime" in kwargs else datetime.datetime.now()
        if not end_time:
            return
        # this might take a sec
        await ctx.defer()
        # get clanmembers and collect per-member activity/friend data concurrently
        self.clan_members = await getClanMembers(self.client)
        self.activities_from_user_who_got_looked_at = {}
        self.friends = {}
        result = await asyncio.gather(
            *[
                self._handle_members(destinyID, mode, start_time, end_time, user.display_name)
                for destinyID in self.clan_members
            ]
        )
        for res in result:
            if res is not None:
                destinyID = res[0]
                self.activities_from_user_who_got_looked_at[destinyID] = res[1]
                self.friends[destinyID] = res[2]
        data_temp = []
        for destinyID in self.friends:
            for friend in self.friends[destinyID]:
                # data = [destinyID1, destinyID2, number of activities together]
                # only the last 9 digits are used as node ids to keep them small
                data_temp.append(
                    [
                        int(str(destinyID)[-9:]),
                        int(str(friend)[-9:]),
                        self.friends[destinyID][friend],
                    ]
                )
        data = np.array(data_temp)
        del data_temp
        # getting the display names, colors for users in discord, size of blob
        await asyncio.gather(
            *[
                self._prep_data(
                    await DestinyPlayer.from_destiny_id(destinyID),
                    destiny_player.destiny_id,
                )
                for destinyID in self.clan_members
            ]
        )
        # building the network graph
        net = Network()
        # adding nodes
        # edge_list = [person, size, size_desc, display_names, colors]
        for edge_data in self.edge_list:
            net.add_node(
                int(str(edge_data[0])[-9:]),
                value=edge_data[1],
                title=edge_data[2],
                label=edge_data[3],
                color=edge_data[4],
            )
        # adding edges with data = [user1, user2, number of activities together]
        with concurrent.futures.ThreadPoolExecutor(os.cpu_count() * 5) as pool:
            futurelist = [pool.submit(self._add_edge, net, edge) for edge in data]
            for _ in concurrent.futures.as_completed(futurelist):
                pass
        # physics settings for the force-directed layout
        net.barnes_hut(
            gravity=-200000,
            central_gravity=0.3,
            spring_length=200,
            spring_strength=0.005,
            damping=0.09,
            overlap=0,
        )
        net.show_buttons(filter_=["physics"])
        # saving the file
        title = user.display_name + ".html"
        net.save_graph(title)
        # letting user know it's done
        await ctx.send(
            embed=embed_message(
                f"{user.display_name}'s Friends",
                f"Click the download button below and open the file with your browser to view your Network",
                f"The file may load for a while, that's normal.",
            )
        )
        # sending them the file
        await ctx.channel.send(file=discord.File(title))
        # delete the temp file again
        os.remove(title)
    async def _handle_members(self, destinyID, mode, start_time, end_time, name):
        """Return [destinyID, activity count, {friend_destinyID: shared activity count}] for one clan member."""
        # getting the activities for the
        result = await self._return_activities(destinyID, mode, start_time, end_time)
        activities_from_user_who_got_looked_at = len(result[1])
        # getting the friends from his activities
        # NOTE(review): `result` is rebound inside the loop below; this works because the
        # iterator over result[1] is created before the first rebind, but it is fragile
        destinyIDs_friends = []
        for ID in result[1]:
            result = await self._return_friends(destinyID, ID)
            destinyIDs_friends.extend(result)
        friends = dict(Counter(destinyIDs_friends))
        return [destinyID, activities_from_user_who_got_looked_at, friends]
    async def _return_activities(self, destinyID, mode, start_time, end_time):
        """Return [destinyID, set of activity instanceIDs] for the member within the time window."""
        destinyID = int(destinyID)
        # get all activities
        activities = await getActivityHistory(destinyID, mode=mode, start_time=start_time, end_time=end_time)
        list_of_activities = []
        for instanceID in activities:
            list_of_activities.append(instanceID)
        return [destinyID, set(list_of_activities)]
    async def _return_friends(self, destinyID, instanceID):
        """Return the clan members (excluding destinyID itself) that took part in the given activity instance."""
        # list in which the connections are saved
        friends = []
        # get instance id info
        data = await getPgcrActivitiesUsersStats(instanceID)
        for player in data:
            friendID = player[1]
            # only look at clan members
            if friendID in self.clan_members:
                # doesn't make sense to add yourself
                if friendID != destinyID:
                    friends.append(friendID)
        # sort and count friends
        return friends
    async def _prep_data(self, destiny_player: DestinyPlayer, orginal_user_destiny_id):
        """Append [destiny_id, node size, size description, display name, color] for one member to self.edge_list."""
        display_name, _ = await destiny_player.get_destiny_name_and_last_played()
        size = self.activities_from_user_who_got_looked_at[destiny_player.destiny_id] * 50
        size_desc = str(self.activities_from_user_who_got_looked_at[destiny_player.destiny_id]) + " Activities"
        # the requesting user gets highlighted in red, everyone else is blue
        colors = "#850404" if orginal_user_destiny_id == destiny_player.destiny_id else "#006aff"
        # edge_list = [person, size, size_desc, display_names, colors]
        self.edge_list.append([destiny_player.destiny_id, size, size_desc, display_name, colors])
    def _add_edge(self, network, edge):
        """Add one edge = [user1, user2, shared activity count] to the pyvis network (best effort)."""
        src = int(edge[0])
        dst = int(edge[1])
        value = int(edge[2])
        # add the edge
        try:
            network.add_edge(dst, src, value=value, title=value, physics=True)
        except Exception:
            # NOTE(review): failures are deliberately swallowed (best effort), only logged to stdout
            print("error adding node")
class MysticCommands(commands.Cog):
    """Commands managing Mystic's carry list, persisted in database/mysticlist.json."""

    def __init__(self, client):
        self.client = client

    def names(self, userdict):
        """Return the display names of the listed users, one per line.

        Users the bot can no longer resolve are shown as "InvalidUser".
        """
        return "\n".join(
            map(
                lambda p: c.name if (c := self.client.get_user(p["id"])) else "InvalidUser",
                userdict,
            )
        )

    # todo not needed
    @cog_ext.cog_subcommand(
        base="mystic",
        base_description="Everything concerning Mystic's abandoned carry list. Tbf he said he tried ¯\_(ツ)_/¯",
        name="list",
        description="Displays the current list",
    )
    async def _list(self, ctx: SlashContext):
        """/mystic list - show everyone currently on the list."""
        with open("database/mysticlist.json", "r+") as mlist:
            players = json.load(mlist)
        embed = embed_message("Mystic List", f"The following users are currently in the list:")
        embed.add_field(name="Users", value=self.names(players), inline=True)
        await ctx.send(embed=embed)

    # todo not needed
    @cog_ext.cog_subcommand(
        base="mystic",
        base_description="Everything concerning Mystic's abandoned carry list. Tbf he said he tried ¯\_(ツ)_/¯",
        name="add",
        description="Add a user to the list",
        options=[options_user(flavor_text="Requires elevated permissions")],
    )
    async def _add(self, ctx: SlashContext, **kwargs):
        """/mystic add - append a user to the list (admins and Mystic himself only)."""
        # allow mystic himself
        user = await get_user_obj_admin(ctx, kwargs, allowed_users=[211838266834550785])
        if not user:
            return
        with open("database/mysticlist.json", "r") as mlist:
            players = json.load(mlist)
        # add new player
        players.append({"name": user.display_name, "id": user.id})
        # keep an audit trail of who changed the list
        with open("commands/mysticlog.log", "a") as mlog:
            mlog.write(f"\n{ctx.author.name} added {user.name}")
        with open("database/mysticlist.json", "w") as mlist:
            json.dump(players, mlist)
        embed = embed_message("Mystic List", f"Added {user.name} to the mystic list, it now has:")
        embed.add_field(name="Users", value=self.names(players), inline=True)
        await ctx.send(embed=embed)

    # todo not needed
    # NOTE(review): the subcommand name "_delete" looks like collateral from a mass
    # rename (see the fixed bug below) - confirm whether it should be "remove"
    @cog_ext.cog_subcommand(
        base="mystic",
        base_description="Everything concerning Mystic's abandoned carry list. Tbf he said he tried ¯\_(ツ)_/¯",
        name="_delete",
        description="Remove a user from the list",
        options=[options_user(flavor_text="Requires elevated permissions")],
    )
    async def _remove(self, ctx: SlashContext, **kwargs):
        """/mystic _delete - remove a user from the list (admins and Mystic himself only)."""
        # allow mystic himself
        user = await get_user_obj_admin(ctx, kwargs, allowed_users=[211838266834550785])
        if not user:
            return
        with open("database/mysticlist.json", "r") as mlist:
            players = json.load(mlist)
        if len(player := list(filter(lambda muser: muser["id"] == user.id, players))) == 1:
            # remove the player. BUGFIX: this previously called `players._delete(player[0])`,
            # which always raised AttributeError - python lists only have `remove()`
            players.remove(player[0])
            with open("commands/mysticlog.log", "a") as mlog:
                mlog.write(f"\n{ctx.author.name} removed {user.name}")
            with open("database/mysticlist.json", "w+") as mlist:
                json.dump(players, mlist)
            embed = embed_message("Mystic List", f"Removed {user.name} from the mystic list, it now has:")
            embed.add_field(name="Users", value=self.names(players), inline=True)
            await ctx.send(embed=embed)
            return
        await ctx.send(embed=embed_message("Mystic List", f"User {user.name} was not found in the player list"))
class RankCommands(commands.Cog):
    """Clan leaderboard commands (/rank)."""
    def __init__(self, client):
        self.client = client
        # armor stat name -> stat hash (presumably Bungie manifest hashes - TODO confirm)
        self.stats = {
            "mobility": 2996146975,
            "resilience": 392767087,
            "recovery": 1943323491,
            "discipline": 1735777505,
            "intellect": 144602215,
            "strength": 4244567218,
        }
    @cog_ext.cog_slash(
        name="rank",
        description="Display Destiny 2 leaderboard for clanmates",
        options=[
            create_option(
                name="leaderboard",
                description="The name of the leaderboard you want to see",
                option_type=3,
                required=True,
                choices=[
                    create_choice(name="Join-Date of this Discord Server", value="discordjoindate"),
                    create_choice(name="Roles Earned on this Discord Server", value="roles"),
                    create_choice(name="Total Playtime", value="totaltime"),
                    create_choice(name="Max. Power Level", value="maxpower"),
                    create_choice(name="Vault Space Used", value="vaultspace"),
                    create_choice(name="Orbs of Power Generated", value="orbs"),
                    create_choice(name="Melee Kills", value="meleekills"),
                    create_choice(name="Super Kills", value="superkills"),
                    create_choice(name="Grenade Kills", value="grenadekills"),
                    create_choice(name="Deaths", value="deaths"),
                    create_choice(name="Suicides", value="suicides"),
                    create_choice(name="Kills", value="kills"),
                    create_choice(name="Raids Done", value="raids"),
                    create_choice(name="Raid Time", value="raidtime"),
                    create_choice(name="Grandmaster Nightfalls Done", value="gm"),
                    create_choice(name="Weapon Kills", value="weapon"),
                    create_choice(name="Weapon Precision Kills", value="weaponprecision"),
                    create_choice(name="% Weapon Precision Kills", value="weaponprecisionpercent"),
                    create_choice(name="Enhancement Cores", value="enhancementcores"),
                    create_choice(name="Forges Done", value="forges"),
                    # create_choice(
                    #     name="AFK Forges Done",
                    #     value="afkforges"
                    # ),
                    create_choice(name="Active Triumph Score", value="activetriumphs"),
                    create_choice(name="Legacy Triumph Score", value="legacytriumphs"),
                    create_choice(name="Triumph Score", value="triumphs"),
                    # create_choice(
                    #     name="Laurels collected",
                    #     value="laurels"
                    # ),
                ],
            ),
            create_option(
                name="arg",
                description="Depending on which leaderboard you want to see, you might need to add an additional argument",
                option_type=3,
                required=False,
            ),
            create_option(
                name="reverse",
                description="Default: 'False' - If you want to flip the sorting",
                option_type=5,
                required=False,
            ),
            options_user(),
        ],
    )
    async def _rank(self, ctx: SlashContext, *args, **kwargs):
        """/rank - show a clan leaderboard for the chosen stat, highlighting the requesting user."""
        if not ctx.deferred:
            await ctx.defer()
        if args:
            print(f"Got unexpected args: {args}")
        user = await get_user_obj(ctx, kwargs)
        leaderboard = kwargs["leaderboard"]
        item_name = None
        item_hashes = None
        # if a weapon leaderboard is asked for, the "arg" option must name the weapon
        if leaderboard in ["weapon", "weaponprecision", "weaponprecisionpercent"]:
            if "arg" in kwargs:
                item_name, item_hashes = await searchForItem(ctx, kwargs["arg"])
                if not item_name:
                    return
            # send error message and exit
            else:
                await ctx.send(
                    hidden=True,
                    embed=embed_message(
                        f"Error",
                        f"Please specify a weapon in the command argument `arg`",
                    ),
                )
                return
        # calculate the leaderboard
        if embed := (
            await self._handle_users(
                leaderboard,
                user.display_name,
                ctx.guild,
                item_hashes,
                item_name,
                reverse=kwargs["reverse"] if "reverse" in kwargs else False,
            )
        ):
            await ctx.send(embed=embed)
        else:
            # NOTE(review): the embed is passed positionally (as content) here, not as
            # `embed=` like everywhere else - confirm this is intended
            await ctx.send(embed_message("Error", "Failed handling users"))
async def _handle_users(self, stat, display_name, guild, extra_hash, extra_name, reverse=False):
# init DF. "stat_sort" is only here, since I want to save numbers fancy (1,000,000) and that is a string and not an int so sorting wont work
data = pd.DataFrame(columns=["member", "stat", "stat_sort"])
# loop through the clan members
clan_members = (await get_json_from_url(f"https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/")).content[
"Response"
]["results"]
results = await asyncio.gather(
*[self._handle_user(stat, member, guild, extra_hash, extra_name) for member in clan_members]
)
if len(results) < 1:
return embed_message("Error", "No users found")
sort_by_ascending = None
for ret in results:
# add user to DF
if ret:
data = data.append(
{"member": ret[0], "stat": ret[1], "stat_sort": ret[2]},
ignore_index=True,
)
# the flavor text of the leaderboard, fe "Top Clanmembers by D2 Total Time Logged In" for totaltime
leaderboard_text = ret[3]
# the flavor text the stat will have, fe. "hours" for totaltime
stat_text = ret[4]
# for some stats lower might be better
sort_by_ascending = ret[5]
if data.empty:
return embed_message("Error", "No data found")
if reverse:
sort_by_ascending = not sort_by_ascending
# sort and prepare DF
data.sort_values(by=["stat_sort"], inplace=True, ascending=sort_by_ascending)
data.reset_index(drop=True, inplace=True)
# calculate the data for the embed
ranking = []
emoji = self.client.get_emoji(enter_emoji_id)
found = False
for index, row in data.iterrows():
if len(ranking) < 12:
# setting a flag if user is in list
if row["member"] == display_name:
found = True
ranking.append(
write_line(
index + 1,
f"""**{row["member"]}**""",
stat_text,
row["stat"],
emoji,
)
)
else:
ranking.append(write_line(index + 1, row["member"], stat_text, row["stat"], emoji))
# looping through rest until original user is found
elif (len(ranking) >= 12) and (not found):
# adding only this user
if row["member"] == display_name:
ranking.append("...")
ranking.append(write_line(index + 1, row["member"], stat_text, row["stat"], emoji))
break
else:
break
# make and return embed
return embed_message(leaderboard_text, "\n".join(ranking))
async def _handle_user(self, stat, member, guild, extra_hash, extra_name):
    """Collect the requested leaderboard stat for one clan member.

    Args:
        stat: which leaderboard to compute (e.g. "kills", "raids", "roles").
        member: clan member entry from the Bungie GroupV2 API (needs
            member["destinyUserInfo"]["membershipId"]).
        guild: the discord guild used to resolve the member's display name.
        extra_hash / extra_name: weapon hash and display name, only used by
            the "weapon*" stats.

    Returns:
        [display_name, formatted_value, sortable_value, leaderboard_text,
        stat_text, sort_by_ascending], or None when the member cannot be
        resolved to a registered discord user (or the stat is unknown).
    """
    destiny_player = await DestinyPlayer.from_destiny_id(int(member["destinyUserInfo"]["membershipId"]))
    if not (destiny_player and await destiny_player.has_token()):
        return None
    sort_by_ascending = False
    # catch people that are in the clan but not in discord, shouldn't happen tho
    discord_member = destiny_player.get_discord_member(guild)
    if not discord_member:
        return None
    name = discord_member.display_name
    # Simple lifetime PvE stats all share the same lookup shape.
    # BUG FIX: "kills" used to query the "suicides" stat (copy-paste error).
    simple_pve_stats = {
        "orbs": ("orbsDropped", "Top Clanmembers by PvE Orbs Generated", "Orbs"),
        "meleekills": ("weaponKillsMelee", "Top Clanmembers by D2 PvE Meleekills", "Kills"),
        "superkills": ("weaponKillsSuper", "Top Clanmembers by D2 PvE Superkills", "Kills"),
        "grenadekills": ("weaponKillsGrenade", "Top Clanmembers by D2 PvE Grenadekills", "Kills"),
        "deaths": ("deaths", "Top Clanmembers by D2 PvE Deaths", "Deaths"),
        "suicides": ("suicides", "Top Clanmembers by D2 PvE Suicides", "Suicides"),
        "kills": ("kills", "Top Clanmembers by D2 PvE Kills", "Kills"),
    }
    # Lifetime triumph scores only differ in the json key and flavor word.
    triumph_stats = {
        "activetriumphs": ("activeScore", "Active"),
        "legacytriumphs": ("legacyScore", "Legacy"),
        "triumphs": ("lifetimeScore", "Lifetime"),
    }
    if stat in simple_pve_stats:
        stat_name, leaderboard_text, stat_text = simple_pve_stats[stat]
        result_sort = await destiny_player.get_stat_value(stat_name, stat_category="pve")
        result = f"{result_sort:,}"
    elif stat in triumph_stats:
        key, flavor = triumph_stats[stat]
        leaderboard_text = f"Top Clanmembers by D2 {flavor} Triumph Score"
        stat_text = "Score"
        result_sort = (await destiny_player.get_triumphs())["profileRecords"]["data"][key]
        result = f"{result_sort:,}"
    elif stat == "discordjoindate":
        sort_by_ascending = True
        leaderboard_text = "Top Clanmembers by Discord Join Date"
        stat_text = "Date"
        result_sort = discord_member.joined_at
        result = discord_member.joined_at.strftime("%d/%m/%Y, %H:%M")
    elif stat == "roles":
        sort_by_ascending = True
        leaderboard_text = "Top Clanmembers by Discord Roles Earned"
        stat_text = "Roles missing"
        earned_roles = {role.name for role in discord_member.roles}
        missing_roles = []
        missing_roles_legacy = []
        # loop through the requirement dict and count roles the user is missing
        for topic in requirementHashes:
            for role, role_data in requirementHashes[topic].items():
                if role in earned_roles:
                    continue
                # owning a superior ("replaced_by") role counts as owning this one
                if any(replacement in earned_roles for replacement in role_data.get("replaced_by", [])):
                    continue
                if "deprecated" not in role_data:
                    missing_roles.append(role)
                else:
                    missing_roles_legacy.append(role)
        result_sort = len(set(missing_roles))
        result = f"{result_sort:,} ({len(set(missing_roles_legacy)):,} Legacy Roles)"
    elif stat == "totaltime":
        leaderboard_text = "Top Clanmembers by D2 Total Time Logged In"
        stat_text = "Total"
        # lifetime seconds played, rendered as "days, H:M:S" via timedelta
        result_sort = await destiny_player.get_stat_value("secondsPlayed")
        result = str(datetime.timedelta(seconds=result_sort))
    elif stat == "maxpower":
        # TODO: compute the real max power efficiently (previous attempt was
        # commented out); placeholder values until then.
        # BUG FIX: this branch used to leave leaderboard_text / stat_text
        # unassigned, raising UnboundLocalError at the return below.
        leaderboard_text = "Top Clanmembers by D2 Maximum Reported Power"
        stat_text = "Power"
        result_sort = 0
        result = "0"
    elif stat == "vaultspace":
        sort_by_ascending = True
        leaderboard_text = "Top Clanmembers by D2 Vaultspace Used"
        stat_text = "Used Space"
        result_sort = len(await destiny_player.get_inventory_bucket())
        result = f"{result_sort:,}"
    elif stat == "raids":
        leaderboard_text = "Top Clanmembers by D2 Total Raid Completions"
        stat_text = "Total"
        result_sort = await getClearCount(destiny_player.destiny_id, mode=4)
        result = f"{result_sort:,}"
    elif stat == "raidtime":
        leaderboard_text = "Top Clanmembers by D2 Total Raid Time"
        stat_text = "Hours"
        # seconds played in raid activities, converted to full hours
        result_sort = int(
            (await self._add_activity_stats(destiny_player, raidHashes, "activitySecondsPlayed")) / 60 / 60
        )
        result = f"{result_sort:,}"
    elif stat in ("forges", "afkforges"):
        stat_text = "Total"
        # a run with zero kills counts as farmed / AFK
        completed_runs = 0
        farmed_runs = 0
        for _, kills in await getForges(destiny_player.destiny_id):
            if kills > 0:
                completed_runs += 1
            else:
                farmed_runs += 1
        result = f"{completed_runs:,} + {farmed_runs:,} AFK runs"
        if stat == "forges":
            leaderboard_text = "Top Clanmembers by D2 Forge Completions"
            result_sort = completed_runs
        else:
            leaderboard_text = "Top Clanmembers by D2 AFK Forge Completions"
            result_sort = farmed_runs
    elif stat == "enhancementcores":
        leaderboard_text = "Top Clanmembers by D2 Total Enhancement Cores"
        stat_text = "Total"
        result_sort = 0
        # sum cores (itemHash 3853748946) over the vault and inventory bucket
        for items in (
            await destiny_player.get_inventory_bucket(),
            await destiny_player.get_inventory_bucket(bucket=1469714392),
        ):
            for item in items:
                if item["itemHash"] == 3853748946:
                    result_sort += item["quantity"]
        result = f"{result_sort:,}"
    elif stat == "weapon":
        leaderboard_text = f"Top Clanmembers by {extra_name} Kills"
        stat_text = "Kills"
        result_sort, _ = await destiny_player.get_weapon_stats(extra_hash)
        result = f"{result_sort:,}"
    elif stat == "weaponprecision":
        leaderboard_text = f"Top Clanmembers by {extra_name} Precision Kills"
        stat_text = "Kills"
        _, result_sort = await destiny_player.get_weapon_stats(extra_hash)
        result = f"{result_sort:,}"
    elif stat == "weaponprecisionpercent":
        leaderboard_text = f"Top Clanmembers by {extra_name} % Precision Kills"
        stat_text = "Kills"
        kills, prec_kills = await destiny_player.get_weapon_stats(extra_hash)
        result_sort = prec_kills / kills if kills != 0 else 0
        result = f"{round(result_sort * 100, 2)}%"
    elif stat == "gm":
        leaderboard_text = "Top Clanmembers by D2 Grandmaster Nightfall Completions"
        stat_text = "Total"
        result_sort = await getClearCount(destiny_player.destiny_id, activityHashes=gmHashes)
        result = f"{result_sort:,}"
    elif stat == "laurels":
        leaderboard_text = "Top Clanmembers by Laurels collected in S13"
        stat_text = "Count"
        result_sort = await destiny_player.get_metric_value("473272243")
        if not result_sort:
            result_sort = 0
        result = f"{result_sort:,}"
    else:
        # unknown stat name
        return None
    return [
        name,
        result,
        result_sort,
        leaderboard_text,
        stat_text,
        sort_by_ascending,
    ]
async def _add_activity_stats(self, destiny_player: DestinyPlayer, hashes, stat):
    """Sum *stat* over all of the player's activities whose hash is in *hashes*.

    Args:
        destiny_player: the player whose characters are scanned.
        hashes: iterable of hash groups (each an iterable of activity hashes).
        stat: key into the aggregate stats "values" dict, e.g.
            "activitySecondsPlayed".

    Returns:
        The integer sum; characters with missing or malformed activity data
        contribute nothing (best effort, mirrors the old behavior).
    """
    # flatten the hash groups once instead of rescanning them per activity
    # (also avoids the old shadowing of the builtin `hash`)
    wanted_hashes = {activity_hash for group in hashes for activity_hash in group}
    result_sort = 0
    chars = await destiny_player.get_character_info()
    for character_id in chars:
        aggregate_stats = await destiny_player.get_character_activity_stats(character_id)
        try:
            for activity in aggregate_stats["activities"]:
                if activity["activityHash"] in wanted_hashes:
                    result_sort += int(activity["values"][stat]["basic"]["value"])
        except (KeyError, TypeError):
            # narrowed from a bare `except Exception`: skip characters whose
            # activity payload lacks the expected keys
            pass
    return result_sort
async def _get_highest_item_light_level(self, items):
max_power = 0
for item in items:
if item["lightlevel"] > max_power:
max_power = item["lightlevel"]
return max_power
def _sort_gear_by_slot(self, items):
helmet = [] # 3448274439
gauntlet = [] # 3551918588
chest = [] # 14239492
leg = [] # 20886954
class_item = [] # 1585787867
kinetic = [] # 1498876634
energy = [] # 2465295065
power = [] # 953998645
for item in items:
if item["bucketHash"] == 3448274439:
helmet.append(item)
elif item["bucketHash"] == 3551918588:
gauntlet.append(item)
elif item["bucketHash"] == 14239492:
chest.append(item)
elif item["bucketHash"] == 20886954:
leg.append(item)
elif item["bucketHash"] == 1585787867:
class_item.append(item)
elif item["bucketHash"] == 1498876634:
kinetic.append(item)
elif item["bucketHash"] == 2465295065:
energy.append(item)
elif item["bucketHash"] == 953998645:
power.append(item)
return [helmet, gauntlet, chest, leg, class_item, kinetic, energy, power]
# TODO: delete this helper — it appears unused
def _add_stats(self, stat_json, stat, scope="all"):
result_sort = 0
if scope == "all":
result_sort = int(stat_json["mergedAllCharacters"]["merged"]["allTime"][stat]["basic"]["value"])
try:
result_sort += int(stat_json["mergedDeletedCharacters"]["merged"]["allTime"][stat]["basic"]["value"])
except:
pass
elif scope == "pve":
result_sort = int(stat_json["mergedAllCharacters"]["results"]["allPvE"]["allTime"][stat]["basic"]["value"])
try:
result_sort += int(
stat_json["mergedDeletedCharacters"]["results"]["allPvE"]["allTime"][stat]["basic"]["value"]
)
except:
pass
elif scope == "pvp":
result_sort = int(stat_json["mergedAllCharacters"]["results"]["allPvP"]["allTime"][stat]["basic"]["value"])
try:
result_sort += int(
stat_json["mergedDeletedCharacters"]["results"]["allPvP"]["allTime"][stat]["basic"]["value"]
)
except:
pass
return result_sort
class WeaponCommands(commands.Cog):
    """Slash commands around weapon statistics (/weapon, /topweapons, /meta)."""

    def __init__(self, client):
        # the bot client; used later to resolve emojis and clan members
        self.client = client
@cog_ext.cog_slash(
    name="weapon",
    description="Shows weapon stats for the specified weapon with in-depth customisation",
    options=[
        create_option(
            name="weapon",
            description="The name of the weapon you want to see stats for",
            option_type=3,
            required=True,
        ),
        create_option(
            name="stat",
            description="Which stat you want to see for the weapon",
            option_type=3,
            required=False,
            choices=[
                create_choice(name="Kills (default)", value="kills"),
                create_choice(name="Precision Kills", value="precisionkills"),
                create_choice(name="% Precision Kills", value="precisionkillspercent"),
            ],
        ),
        create_option(
            name="graph",
            description="Default: 'False' - See a timeline of your weapon usage instead of an overview of key stats",
            option_type=5,
            required=False,
        ),
        create_option(
            name="class",
            description="You can restrict the class where the weapon stats count",
            option_type=3,
            required=False,
            choices=[
                create_choice(name="Warlock", value="2271682572"),
                create_choice(name="Hunter", value="671679327"),
                create_choice(name="Titan", value="3655393761"),
            ],
        ),
        create_option(
            name="starttime",
            description="Format: 'DD/MM/YY' - You can restrict the time from when the weapon stats start counting",
            option_type=3,
            required=False,
        ),
        create_option(
            name="endtime",
            description="Format: 'DD/MM/YY' - You can restrict the time up until which the weapon stats count",
            option_type=3,
            required=False,
        ),
        create_option(
            name="mode",
            description="You can restrict the game mode where the weapon stats count",
            option_type=3,
            required=False,
            choices=choices_mode,
        ),
        create_option(
            name="activityhash",
            description="You can restrict the activity where the weapon stats count (advanced)",
            option_type=4,
            required=False,
        ),
        options_user(),
    ],
)
async def _weapon(self, ctx: SlashContext, **kwargs):
    """Show stats for one weapon: either a text summary embed or, with
    graph=True, a bar chart of per-day usage saved and sent as a file."""
    user = await get_user_obj(ctx, kwargs)
    destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
    if not destiny_player:
        return
    # get the shared, validated params; all None if validation failed
    (
        stat,
        graph,
        character_class,
        mode,
        activity_hash,
        starttime,
        endtime,
    ) = await self._compute_params(ctx, kwargs)
    if not stat:
        return
    # resolve the weapon name to its item-hash variants
    weapon_name, weapon_hashes = await searchForItem(ctx, kwargs["weapon"])
    if not weapon_name:
        return
    # _update user db
    await destiny_player.update_activity_db()
    # get the char class if that is asked for
    charID = await destiny_player.get_character_id_by_class(character_class) if character_class else None
    # build the filter kwargs; None values are stripped below
    kwargs = {
        "characterID": charID,
        "mode": mode,
        "activityID": activity_hash,
        "start": starttime,
        "end": endtime,
    }
    # loop through every variant of the weapon and add that together
    result = []
    for entry in weapon_hashes:
        result.extend(
            await getWeaponInfo(
                destiny_player.destiny_id,
                entry,
                **{k: v for k, v in kwargs.items() if v is not None},
            )
        )
    # throw error if no weapon
    if not result:
        await ctx.send(embed=embed_message("Error", f"No weapon stats found for {weapon_name}"))
        return
    # text summary path
    if not graph:
        # aggregate kills / precision kills and track the best single activity
        kills = 0
        precision_kills = 0
        max_kills = 0
        max_kills_id = None
        for instanceID, uniqueweaponkills, uniqueweaponprecisionkills in result:
            kills += uniqueweaponkills
            precision_kills += uniqueweaponprecisionkills
            if uniqueweaponkills > max_kills:
                max_kills = uniqueweaponkills
                max_kills_id = instanceID
        percent_precision_kills = precision_kills / kills if kills else 0
        avg_kills = kills / len(result)
        # look up when / where the best activity happened
        res = await getPgcrActivity(max_kills_id)
        max_kills_date = res[3]
        max_kills_mode = (await getDestinyDefinition("DestinyActivityModeDefinition", res[5]))[2]
        max_kills_name = (await getDestinyDefinition("DestinyActivityDefinition", res[2]))[2]
        # make and post embed
        embed = embed_message(f"{weapon_name} stats for {user.display_name}")
        embed.add_field(name="Total Kills", value=f"**{kills:,}**", inline=True)
        embed.add_field(
            name="Total Precision Kills",
            value=f"**{precision_kills:,}**",
            inline=True,
        )
        embed.add_field(
            name="% Precision Kills",
            value=f"**{round(percent_precision_kills * 100, 2)}%**",
            inline=True,
        )
        embed.add_field(
            name="Average Kills",
            value=f"**{round(avg_kills, 2)}**\nIn {len(result)} Activities",
            inline=True,
        )
        embed.add_field(
            name="Maximum Kills",
            value=f"**{max_kills:,}**\nIn Activity ID: {max_kills_id}\n{max_kills_mode} - {max_kills_name}\nOn: {max_kills_date.strftime('%d/%m/%y')}",
            inline=True,
        )
        await ctx.send(embed=embed)
    # graph path
    else:
        # swap the instance id for the activity's time and sort ascending
        weapon_hashes = []
        for instanceID, uniqueweaponkills, uniqueweaponprecisionkills in result:
            instance_time = (await getPgcrActivity(instanceID))[3]
            weapon_hashes.append((instance_time, uniqueweaponkills, uniqueweaponprecisionkills))
        weapon_hashes = sorted(weapon_hashes, key=lambda x: x[0])
        # accumulate one row per calendar day into the DF
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
        # should migrate to pd.concat; confirm the pinned pandas version
        df = pd.DataFrame(columns=["datetime", "statistic"])
        name = ""
        statistic1 = 0  # kills (or the chosen stat's numerator)
        statistic2 = 0  # precision kills, only used for the percent stat
        time = weapon_hashes[0][0]
        for (
            instance_time,
            uniqueweaponkills,
            uniqueweaponprecisionkills,
        ) in weapon_hashes:
            if instance_time.date() == time.date():
                # same day: keep accumulating
                if stat == "kills":
                    statistic1 += uniqueweaponkills
                    name = "Kills"
                elif stat == "precisionkills":
                    statistic1 += uniqueweaponprecisionkills
                    name = "Precision Kills"
                elif stat == "precisionkillspercent":
                    statistic1 += uniqueweaponkills
                    statistic2 += uniqueweaponprecisionkills
                    name = "% Precision Kills"
                time = instance_time
            else:
                # new day: flush the finished day into the DF
                entry = {
                    "datetime": time.date(),
                    "statistic": statistic2 / statistic1 if stat == "precisionkillspercent" else statistic1,
                }
                df = df.append(entry, ignore_index=True)
                # start accumulating the new day
                if stat == "kills":
                    statistic1 = uniqueweaponkills
                    name = "Kills"
                elif stat == "precisionkills":
                    statistic1 = uniqueweaponprecisionkills
                    name = "Precision Kills"
                elif stat == "precisionkillspercent":
                    statistic1 = uniqueweaponkills
                    statistic2 = uniqueweaponprecisionkills
                    name = "% Precision Kills"
                time = instance_time
        # flush the last (still open) day
        entry = {
            "datetime": time,
            "statistic": statistic2 / statistic1 if stat == "precisionkillspercent" else statistic1,
        }
        df = df.append(entry, ignore_index=True)
        # convert to correct column types
        df["datetime"] = pd.to_datetime(df["datetime"])
        df["statistic"] = pd.to_numeric(df["statistic"])
        # building the graph
        # Create figure and plot space
        fig, ax = plt.subplots(figsize=(20, 10))
        ax.yaxis.grid(True)
        # filling bar chart
        ax.bar(df["datetime"], df["statistic"], color="#45b6fe")
        # Set title and labels for axes
        ax.set_title(
            f"{weapon_name} stats for {user.display_name}",
            fontweight="bold",
            size=30,
            pad=20,
        )
        ax.set_xlabel("Date", fontsize=20)
        ax.set_ylabel(name, fontsize=20)
        # saving file
        title = "weapon.png"
        plt.savefig(title)
        # sending them the file
        await ctx.send(file=discord.File(title))
        # clean up the temp image
        os.remove(title)
@cog_ext.cog_slash(
    name="topweapons",
    description="Shows your top weapon ranking with in-depth customisation",
    options=[
        create_option(
            name="weapon",
            description="If you want a specific weapon to be included on the ranking",
            option_type=3,
            required=False,
        ),
        create_option(
            name="stat",
            description="Which stat you want to see for the weapon ranking",
            option_type=3,
            required=False,
            choices=[
                create_choice(name="Kills (default)", value="kills"),
                create_choice(name="Precision Kills", value="precisionkills"),
                create_choice(name="% Precision Kills", value="precisionkillspercent"),
            ],
        ),
        create_option(
            name="class",
            description="You can restrict the class where the weapon stats count",
            option_type=3,
            required=False,
            choices=[
                create_choice(name="Warlock", value="2271682572"),
                create_choice(name="Hunter", value="671679327"),
                create_choice(name="Titan", value="3655393761"),
            ],
        ),
        create_option(
            name="expansion",
            description="You can restrict the expansion (usually a year) to look at",
            option_type=3,
            required=False,
            choices=[
                create_choice(name=expansion[1], value=f"{expansion[0]},{expansion[1]}")
                for expansion in expansion_dates
            ],
        ),
        create_option(
            name="season",
            description="You can restrict the season to look at",
            option_type=3,
            required=False,
            choices=[create_choice(name=season[1], value=f"{season[0]},{season[1]}") for season in season_dates],
        ),
        create_option(
            name="starttime",
            description="Format: 'DD/MM/YY' - You can restrict the time from when the weapon stats start counting",
            option_type=3,
            required=False,
        ),
        create_option(
            name="endtime",
            description="Format: 'DD/MM/YY' - You can restrict the time up until which the weapon stats count",
            option_type=3,
            required=False,
        ),
        create_option(
            name="mode",
            description="You can restrict the game mode where the weapon stats count",
            option_type=3,
            required=False,
            choices=choices_mode,
        ),
        create_option(
            name="activityhash",
            description="You can restrict the activity where the weapon stats count (advanced)",
            option_type=4,
            required=False,
        ),
        options_user(),
    ],
)
async def _topweapons(self, ctx: SlashContext, **kwargs):
    """Show the user's top 8 weapons per slot, optionally highlighting one
    specific weapon (it is appended after the top 8 if it didn't make it)."""
    user = await get_user_obj(ctx, kwargs)
    destiny_player = await DestinyPlayer.from_discord_id(user.id, ctx=ctx)
    if not destiny_player:
        return
    # get the shared, validated params (graph is unused here)
    (
        stat,
        _,
        character_class,
        mode,
        activity_hash,
        starttime,
        endtime,
    ) = await self._compute_params(ctx, kwargs)
    if not stat:
        return
    # get the real weapon name if that param is given
    weapon_name = None
    if "weapon" in kwargs:
        weapon_name, weapon_id = await searchForItem(ctx, kwargs["weapon"])
        weapon_id = weapon_id[0]
        if not weapon_name:
            return
    # might take a sec
    if not ctx.deferred:
        await ctx.defer()
    # _update user db
    await destiny_player.update_activity_db()
    # get the char class if that is asked for
    charID = await destiny_player.get_character_id_by_class(character_class) if character_class else None
    # build the filter kwargs; None values are stripped below
    kwargs = {
        "characterID": charID,
        "mode": mode,
        "activityID": activity_hash,
        "start": starttime,
        "end": endtime,
    }
    result = await getTopWeapons(
        destiny_player.destiny_id,
        **{k: v for k, v in kwargs.items() if v is not None},
    )
    # loop through all weapons and divide them into kinetic / energy / power
    weapons_by_slot = {
        "Kinetic": [],
        "Energy": [],
        "Power": [],
    }
    for weapon in result:
        # weapon tuple layout (inferred from usage): [0]=id, [1]=kills,
        # [2]=precision kills, [3]=name, [4]=slot hash — confirm in getTopWeapons
        if stat == "kills":
            statistic_data = weapon[1]
            statistic_visual = f"{statistic_data:,}"
        elif stat == "precisionkills":
            statistic_data = weapon[2]
            statistic_visual = f"{statistic_data:,}"
        else:  # precisionkillspercent
            # NOTE(review): weapon[1] / weapon[2] is kills / precision kills,
            # the inverse of _weapon's precision percent — confirm intended
            statistic_data = weapon[1] / weapon[2] if weapon[2] != 0 else 0
            statistic_visual = f"{round(statistic_data * 100, 2)}%"
        weapons_by_slot[translateWeaponSlot(weapon[4])].append(
            {
                "weapon_id": weapon[0],
                "weapon_name": weapon[3],
                "weapon_stat": statistic_data,
                "weapon_stat_visual": statistic_visual,
            }
        )
    # prepare embed
    embed = embed_message(
        f"Top Weapons for {user.display_name}",
        footer=f"""Date: {starttime.strftime("%d/%m/%Y")} - {endtime.strftime("%d/%m/%Y")}""",
    )
    emoji = self.client.get_emoji(enter_emoji_id)
    # no highlight weapon requested -> nothing to search for
    found = False if weapon_name else True
    for slot, weapons in weapons_by_slot.items():
        # sort the slot, best stat first
        sorted_weapons = sorted(weapons, key=lambda x: x["weapon_stat"], reverse=True)
        # loop through the weapons
        i = 0
        max_weapons = 8
        ranking = []
        for weapon in sorted_weapons:
            i += 1
            if len(ranking) < max_weapons:
                # bold the requested weapon if it is inside the top list
                if weapon_name == weapon["weapon_name"]:
                    found = True
                    ranking.append(
                        write_line(
                            i,
                            f"""**[{weapon["weapon_name"]}](https://www.light.gg/db/items/{weapon["weapon_id"]})**""",
                            stat.capitalize(),
                            weapon["weapon_stat_visual"],
                            emoji,
                        )
                    )
                else:
                    ranking.append(
                        write_line(
                            i,
                            f"""[{weapon["weapon_name"]}](https://www.light.gg/db/items/{weapon["weapon_id"]})""",
                            stat.capitalize(),
                            weapon["weapon_stat_visual"],
                            emoji,
                        )
                    )
            # looping through rest until the requested weapon is found
            elif (len(ranking) >= max_weapons) and (not found):
                # adding only this name, after a "..." separator
                if weapon_name == weapon["weapon_name"]:
                    ranking.append("...")
                    ranking.append(
                        write_line(
                            i,
                            f"""[{weapon["weapon_name"]}](https://www.light.gg/db/items/{weapon["weapon_id"]})""",
                            stat.capitalize(),
                            weapon["weapon_stat_visual"],
                            emoji,
                        )
                    )
                    found = True
                    break
            else:
                break
        # write that info in an embed field
        embed.add_field(name=slot, value="\n".join(ranking), inline=True)
    # write a message in the embed, since it is not in there
    if not found:
        embed.description = f"No stats found for `{weapon_name}`, here are your top weapons anyways"
    # post embed
    await ctx.send(embed=embed)
async def _compute_params(self, ctx, kwargs):
    """Extract and validate the arguments shared by the /weapon commands.

    Returns a 7-tuple (stat, graph, character_class, mode, activity_hash,
    starttime, endtime). On any validation failure an error message has
    already been sent to the user and every element of the tuple is None.
    """
    failure = (None,) * 7
    stat = kwargs.get("stat", "kills")
    graph = bool(kwargs.get("graph", False))
    character_class = int(kwargs["class"]) if "class" in kwargs else None
    mode = int(kwargs["mode"]) if "mode" in kwargs else 0
    try:
        activity_hash = int(kwargs["activityhash"]) if "activityhash" in kwargs else None
    except ValueError:
        await ctx.send(
            hidden=True,
            embed=embed_message("Error", "The argument `activityhash` must be a number"),
        )
        return failure
    # the three time selectors are mutually exclusive
    if not check_if_mutually_exclusive(["expansion", "season", ["starttime", "endtime"]], kwargs):
        await ctx.send(
            hidden=True,
            embed=embed_message("Error", "You can only specify one time parameter"),
        )
        return failure
    # make sure the times are valid (helpers report errors themselves)
    if "starttime" in kwargs:
        starttime = await verify_time_input(ctx, kwargs["starttime"])
    else:
        starttime = datetime.datetime.min
    if not starttime:
        return failure
    if "endtime" in kwargs:
        endtime = await verify_time_input(ctx, kwargs["endtime"])
    else:
        endtime = datetime.datetime.now()
    if not endtime:
        return failure
    # expansion / season names translate into concrete datetime bounds
    dummy_starttime, dummy_endtime = convert_expansion_or_season_dates(kwargs)
    if dummy_starttime:
        starttime, endtime = dummy_starttime, dummy_endtime
    return stat, graph, character_class, mode, activity_hash, starttime, endtime
@cog_ext.cog_slash(
    name="meta",
    description="Displays most used weapons by clanmembers (Default: in the last 30 days)",
    options=[
        create_option(
            name="class",
            description="You can restrict the class where the weapon stats count",
            option_type=3,
            required=False,
            choices=[
                create_choice(name="Warlock", value="2271682572"),
                create_choice(name="Hunter", value="671679327"),
                create_choice(name="Titan", value="3655393761"),
            ],
        ),
        create_option(
            name="expansion",
            description="You can restrict the expansion (usually a year) to look at",
            option_type=3,
            required=False,
            choices=[
                create_choice(name=expansion[1], value=f"{expansion[0]},{expansion[1]}")
                for expansion in expansion_dates
            ],
        ),
        create_option(
            name="season",
            description="You can restrict the season to look at",
            option_type=3,
            required=False,
            choices=[create_choice(name=season[1], value=f"{season[0]},{season[1]}") for season in season_dates],
        ),
        create_option(
            name="starttime",
            description="Format: 'DD/MM/YY' - You can restrict the time from when the weapon stats start counting",
            option_type=3,
            required=False,
        ),
        create_option(
            name="endtime",
            description="Format: 'DD/MM/YY' - You can restrict the time up until which the weapon stats count",
            option_type=3,
            required=False,
        ),
        create_option(
            name="mode",
            description="You can restrict the game mode (Default: Everything)",
            option_type=3,
            required=False,
            choices=choices_mode,
        ),
        create_option(
            name="activityhash",
            description="You can restrict the activity (advanced)",
            option_type=4,
            required=False,
        ),
    ],
)
async def _meta(self, ctx: SlashContext, **kwargs):
    """Aggregate weapon kills over all clan members and show the top 8
    weapons per slot (default window: the last 30 days)."""
    # kills per weapon id, partitioned by weapon slot
    weapons_by_slot = {
        "Kinetic": {},
        "Energy": {},
        "Power": {},
    }
    # weapon id -> human readable name, for the output links
    weapons_by_id = {}
    # set default values for the args
    character_class = int(kwargs["class"]) if "class" in kwargs else None
    mode = int(kwargs["mode"]) if "mode" in kwargs else 0
    try:
        activity_hash = int(kwargs["activityhash"]) if "activityhash" in kwargs else None
    except ValueError:
        await ctx.send(
            hidden=True,
            embed=embed_message(f"Error", f"The argument `activityhash` must be a number"),
        )
        return
    # parse the three different time arguments, since they are mutually exclusive
    if not check_if_mutually_exclusive(["expansion", "season", ["starttime", "endtime"]], kwargs):
        await ctx.send(
            hidden=True,
            embed=embed_message(f"Error", f"You can only specify one time parameter"),
        )
        return
    # if given, make sure the times are valid; default window is 30 days back
    starttime = (
        await verify_time_input(ctx, kwargs["starttime"])
        if "starttime" in kwargs
        else datetime.datetime.now() - datetime.timedelta(days=30)
    )
    if not starttime:
        return
    endtime = await verify_time_input(ctx, kwargs["endtime"]) if "endtime" in kwargs else datetime.datetime.now()
    if not endtime:
        return
    # convert expansion dates to datetimes
    dummy_starttime, dummy_endtime = convert_expansion_or_season_dates(kwargs)
    if dummy_starttime:
        starttime = dummy_starttime
        endtime = dummy_endtime
    # might take a sec
    await ctx.defer()
    # gather all members' stats concurrently
    clan_members = await getClanMembers(self.client)
    result = await asyncio.gather(
        *[
            self._handle_user(
                await DestinyPlayer.from_destiny_id(destinyID),
                mode,
                activity_hash,
                starttime,
                endtime,
                character_class,
            )
            for destinyID in clan_members
        ]
    )
    for clan_member in result:
        if clan_member is not None:
            for weapon in clan_member:
                translated_weapon_slot = translateWeaponSlot(weapon[4])
                try:
                    # accumulate kills onto the existing per-slot counter
                    weapons_by_slot[translated_weapon_slot].update(
                        {weapon[0]: weapons_by_slot[translated_weapon_slot][weapon[0]] + weapon[1]}
                    )
                except KeyError:
                    # first sighting of this weapon in this slot
                    weapons_by_slot[translated_weapon_slot].update({weapon[0]: weapon[1]})
                weapons_by_id.update({weapon[0]: weapon[3]})
    # prepare embed
    embed = embed_message(
        "Clanmember Weapon Meta",
        footer=f"Date: {starttime.strftime('%d/%m/%Y')} - {endtime.strftime('%d/%m/%Y')}",
    )
    # loop through the slots and write the text
    emoji = self.client.get_emoji(enter_emoji_id)
    for slot, weapons in weapons_by_slot.items():
        # sort by kills and only keep the top 8 weapons
        sorted_weapons = dict(sorted(weapons.items(), key=lambda x: x[1], reverse=True)[:8])
        # loop through the top
        slot_text = []
        i = 1
        for weapon_id, weapon_kills in sorted_weapons.items():
            text = write_line(
                i,
                f"[{weapons_by_id[weapon_id]}](https://www.light.gg/db/items/{weapon_id})",
                "Kills",
                f"{weapon_kills:,}",
                emoji,
            )
            slot_text.append(text)
            i += 1
        # write that info in an embed field
        embed.add_field(name=slot, value="\n".join(slot_text), inline=True)
    # post embed
    await ctx.send(embed=embed)
async def _handle_user(
    self,
    destiny_player: Optional[DestinyPlayer],
    mode,
    activity_hash,
    starttime,
    endtime,
    character_class,
):
    """Return one clan member's top-weapon rows for the given filters.

    Returns None when no destiny player could be resolved — the caller
    (_meta) already filters None results.
    """
    # BUG FIX: the parameter is Optional (DestinyPlayer.from_destiny_id can
    # yield None) but was dereferenced unconditionally -> AttributeError
    if destiny_player is None:
        return None
    # get character id if asked for
    charID = await destiny_player.get_character_id_by_class(character_class) if character_class else None
    # restrict the weapon query to whatever filters were supplied
    kwargs = {
        "characterID": charID,
        "mode": mode,
        "activityID": activity_hash,
        "start": starttime,
        "end": endtime,
    }
    return await getTopWeapons(
        destiny_player.destiny_id,
        **{k: v for k, v in kwargs.items() if v is not None},
    )
class TournamentCommands(commands.Cog):
    """Slash commands for running in-house PvP tournaments."""

    def __init__(self, client):
        self.client = client
        # the discord user who opened the current tournament registration
        self.creator = None
@cog_ext.cog_subcommand(
    base="tournament",
    base_description="Everything you need for in-house PvP tournaments",
    name="_insert",
    description="Opens up registration. Can only be used if no other tournament is currently running",
)
async def _create(self, ctx: SlashContext):
    """Open tournament registration, unless one is already running."""
    # check if a tournament already exists
    message = await get_persistent_message_or_channel(self.client, "tournament", ctx.guild.id)
    if message:
        await ctx.send(
            hidden=True,
            embed=embed_message(
                f"Error",
                f"A tournament already exists. \nPlease wait until it is completed and then try again or ask a member of staff to _delete it",
            ),
        )
        return
    # get the tournament channel
    channel = (await get_persistent_message_or_channel(self.client, "tournamentChannel", ctx.guild.id)).channel
    # make registration message (typo fix: "startet" -> "started")
    embed = embed_message(
        "Registration",
        f"{ctx.author.display_name} started a tournament!\nTo enter it, please react accordingly",
    )
    await make_persistent_message(
        self.client,
        "tournament",
        ctx.guild.id,
        channel.id,
        reaction_id_list=tournament,
        message_embed=embed,
    )
    # to remember who started the tournament, we set ctx.author as the message author
    self.creator = ctx.author
    # let user know
    await ctx.send(
        embed=embed_message(
            "Success",
            f"Registration for the tournament has started, visit {channel.mention} to join the fun!",
        )
    )
@cog_ext.cog_subcommand(
    base="tournament",
    base_description="Everything you need for in-house PvP tournaments",
    name="start",
    description="Starts the tournament. Can only be used by the user who used '/tournament _insert' or an Admin",
)
async def _start(self, ctx: SlashContext):
    """Start a registered tournament, run it, and announce the winner."""
    # check if a tournament registration exists
    message = await get_persistent_message_or_channel(self.client, "tournament", ctx.guild.id)
    if not message:
        await ctx.send(
            hidden=True,
            embed=embed_message(
                f"Error",
                f"You need to start the registration by using `/tournament _insert` first",
            ),
        )
        return
    # check if author has permissions to start
    # NOTE(review): message.author is presumably the bot that posted the
    # persistent message, not the creator stored in self.creator — confirm
    if not (message.author == ctx.author) and not (await has_elevated_permissions(ctx.author, ctx.guild)):
        await ctx.send(
            hidden=True,
            embed=embed_message(
                f"Error",
                f"Only admins and the tournament creator can start the tournament",
            ),
        )
        return
    # check that at least two people (3, since bot counts too) have reacted and get the users
    for reaction in message.reactions:
        if reaction.emoji.id in tournament:
            if reaction.count < 3:
                await ctx.send(
                    hidden=True,
                    embed=embed_message(f"Error", f"At least two people need to sign up"),
                )
                return
            participants = []
            async for user in reaction.users():
                if not user.bot:
                    participants.append(user)
    # start the tournament and wait for it to play out
    await ctx.send(embed=embed_message("Success", "The tournament is now starting"))
    winner = await startTournamentEvents(self.client, message, message.channel, participants)
    # delete the registration message
    channel = message.channel
    await delete_persistent_message(message, "tournament", ctx.guild.id)
    # announce winner
    embed = embed_message("We have a winner", f"Congratulation {winner.mention}!")
    msg = await channel.send(embed=embed)
    # wait 10 mins and then delete the announcement
    await asyncio.sleep(60 * 10)
    # BUG FIX: discord.py messages are deleted with .delete();
    # msg._delete() does not exist and raised AttributeError
    await msg.delete()
@cog_ext.cog_subcommand(
base="tournament",
base_description="Everything you need for in-house PvP tournaments",
name="_delete",
description="Delete the tournament. Can only be used by the user who used '/tournament _insert' or an Admin",
)
async def _delete(self, ctx: SlashContext):
# check if tourn exists
message = await get_persistent_message_or_channel(self.client, "tournament", ctx.guild.id)
if not message:
await ctx.send(
hidden=True,
embed=embed_message(f"Error", f"There is no tournament to _delete"),
)
return
# check if author has permissions to start
if not (message.author == ctx.author) and not (await has_elevated_permissions(ctx.author, ctx.guild)):
await ctx.send(
hidden=True,
embed=embed_message(
f"Error",
f"Only admins and the tournament creator can _delete the tournament",
),
)
return
# _delete msg
await delete_persistent_message(message, "tournament", ctx.guild.id)
await ctx.send(embed=embed_message("Success", "The tournament has been deleted"))
def setup(client):
    """Entry point used by discord.py's extension loader: register every command cog."""
    cog_classes = (
        DestinyCommands,
        MysticCommands,
        RankCommands,
        WeaponCommands,
        ClanActivitiesCommands,
        TournamentCommands,
    )
    for cog_cls in cog_classes:
        client.add_cog(cog_cls(client))
| [
"discord_slash.utils.manage_components.create_select_option",
"os.remove",
"ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input",
"ElevatorBot.backendNetworking.dataLoading.searchForItem",
"ElevatorBot.backendNetworking.miscFunctions.check_if_mutually_exclusive",
"ElevatorBot.backendNetw... | [((9385, 9488), 'discord_slash.cog_ext.cog_slash', 'cog_ext.cog_slash', ([], {'name': '"""poptimeline"""', 'description': '"""Shows the Destiny 2 steam population timeline"""'}), "(name='poptimeline', description=\n 'Shows the Destiny 2 steam population timeline')\n", (9402, 9488), False, 'from discord_slash import cog_ext, SlashContext, ComponentContext\n'), ((35688, 35894), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""mystic"""', 'base_description': '"""Everything concerning Mystic\'s abandoned carry list. Tbf he said he tried ¯\\\\_(ツ)_/¯"""', 'name': '"""list"""', 'description': '"""Displays the current list"""'}), '(base=\'mystic\', base_description=\n "Everything concerning Mystic\'s abandoned carry list. Tbf he said he tried ¯\\\\_(ツ)_/¯"\n , name=\'list\', description=\'Displays the current list\')\n', (35710, 35894), False, 'from discord_slash import cog_ext, SlashContext, ComponentContext\n'), ((89038, 89282), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""tournament"""', 'base_description': '"""Everything you need for in-house PvP tournaments"""', 'name': '"""_insert"""', 'description': '"""Opens up registration. Can only be used if no other tournament is currently running"""'}), "(base='tournament', base_description=\n 'Everything you need for in-house PvP tournaments', name='_insert',\n description=\n 'Opens up registration. Can only be used if no other tournament is currently running'\n )\n", (89060, 89282), False, 'from discord_slash import cog_ext, SlashContext, ComponentContext\n'), ((90788, 91041), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""tournament"""', 'base_description': '"""Everything you need for in-house PvP tournaments"""', 'name': '"""start"""', 'description': '"""Starts the tournament. 
Can only be used by the user who used \'/tournament _insert\' or an Admin"""'}), '(base=\'tournament\', base_description=\n \'Everything you need for in-house PvP tournaments\', name=\'start\',\n description=\n "Starts the tournament. Can only be used by the user who used \'/tournament _insert\' or an Admin"\n )\n', (90810, 91041), False, 'from discord_slash import cog_ext, SlashContext, ComponentContext\n'), ((93247, 93502), 'discord_slash.cog_ext.cog_subcommand', 'cog_ext.cog_subcommand', ([], {'base': '"""tournament"""', 'base_description': '"""Everything you need for in-house PvP tournaments"""', 'name': '"""_delete"""', 'description': '"""Delete the tournament. Can only be used by the user who used \'/tournament _insert\' or an Admin"""'}), '(base=\'tournament\', base_description=\n \'Everything you need for in-house PvP tournaments\', name=\'_delete\',\n description=\n "Delete the tournament. Can only be used by the user who used \'/tournament _insert\' or an Admin"\n )\n', (93269, 93502), False, 'from discord_slash import cog_ext, SlashContext, ComponentContext\n'), ((9701, 9731), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (9713, 9731), True, 'import matplotlib.pyplot as plt\n'), ((12018, 12057), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {'bbox_inches': '"""tight"""'}), "(title, bbox_inches='tight')\n", (12029, 12057), True, 'import matplotlib.pyplot as plt\n'), ((12107, 12154), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Destiny 2 - Steam Player Count"""'], {}), "('Destiny 2 - Steam Player Count')\n", (12120, 12154), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((12171, 12190), 'discord.File', 'discord.File', (['title'], {}), '(title)\n', (12183, 12190), False, 'import discord\n'), ((12356, 12372), 'os.remove', 'os.remove', (['title'], {}), '(title)\n', (12365, 12372), False, 'import os\n'), ((18606, 18674), 
'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{user.display_name}\'s Seasonal Challenges - {week}"""'], {}), '(f"{user.display_name}\'s Seasonal Challenges - {week}")\n', (18619, 18674), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((30762, 30781), 'numpy.array', 'np.array', (['data_temp'], {}), '(data_temp)\n', (30770, 30781), True, 'import numpy as np\n'), ((31222, 31231), 'pyvis.network.Network', 'Network', ([], {}), '()\n', (31229, 31231), False, 'from pyvis.network import Network\n'), ((32754, 32770), 'os.remove', 'os.remove', (['title'], {}), '(title)\n', (32763, 32770), False, 'import os\n'), ((36087, 36166), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Mystic List"""', 'f"""The following users are currently in the list:"""'], {}), "('Mystic List', f'The following users are currently in the list:')\n", (36100, 36166), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((37268, 37354), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Mystic List"""', 'f"""Added {user.name} to the mystic list, it now has:"""'], {}), "('Mystic List',\n f'Added {user.name} to the mystic list, it now has:')\n", (37281, 37354), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((44033, 44086), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['member', 'stat', 'stat_sort']"}), "(columns=['member', 'stat', 'stat_sort'])\n", (44045, 44086), True, 'import pandas as pd\n'), ((81435, 81476), 'ElevatorBot.backendNetworking.miscFunctions.convert_expansion_or_season_dates', 'convert_expansion_or_season_dates', (['kwargs'], {}), '(kwargs)\n', (81468, 81476), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((85667, 85708), 
'ElevatorBot.backendNetworking.miscFunctions.convert_expansion_or_season_dates', 'convert_expansion_or_season_dates', (['kwargs'], {}), '(kwargs)\n', (85700, 85708), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((90038, 90170), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Registration"""', 'f"""{ctx.author.display_name} startet a tournament!\nTo enter it, please react accordingly"""'], {}), '(\'Registration\',\n f"""{ctx.author.display_name} startet a tournament!\nTo enter it, please react accordingly"""\n )\n', (90051, 90170), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((93018, 93088), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""We have a winner"""', 'f"""Congratulation {winner.mention}!"""'], {}), "('We have a winner', f'Congratulation {winner.mention}!')\n", (93031, 93088), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((6539, 6564), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (6551, 6564), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((6596, 6643), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (6625, 6643), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((7623, 7672), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['season[0]', '"""%Y-%m-%d"""'], {}), "(season[0], '%Y-%m-%d')\n", (7649, 7672), False, 'import datetime\n'), ((9616, 9642), 'ElevatorBot.database.database.get_d2_steam_player_info', 'get_d2_steam_player_info', ([], {}), '()\n', (9640, 9642), False, 
'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((10162, 10210), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dates[0]', '"""%Y-%m-%d"""'], {}), "(dates[0], '%Y-%m-%d')\n", (10188, 10210), False, 'import datetime\n'), ((10712, 10760), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dates[0]', '"""%Y-%m-%d"""'], {}), "(dates[0], '%Y-%m-%d')\n", (10738, 10760), False, 'import datetime\n'), ((11364, 11412), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dates[0]', '"""%Y-%m-%d"""'], {}), "(dates[0], '%Y-%m-%d')\n", (11390, 11412), False, 'import datetime\n'), ((12330, 12347), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (12343, 12347), False, 'import asyncio\n'), ((12852, 12877), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (12864, 12877), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((12909, 12956), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (12938, 12956), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((15434, 15459), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (15446, 15459), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((15491, 15538), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (15520, 15538), False, 
'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((15664, 15690), 'ElevatorBot.backendNetworking.dataTransformation.getSeasonalChallengeInfo', 'getSeasonalChallengeInfo', ([], {}), '()\n', (15688, 15690), False, 'from ElevatorBot.backendNetworking.dataTransformation import getSeasonalChallengeInfo\n'), ((19989, 20020), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj_admin', 'get_user_obj_admin', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (20007, 20020), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((20092, 20139), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (20121, 20139), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((20325, 20412), 'ElevatorBot.backendNetworking.authfunctions.getSpiderMaterials', 'getSpiderMaterials', (['destiny_player.discord_id', 'destiny_player.destiny_id', 'anyCharID'], {}), '(destiny_player.discord_id, destiny_player.destiny_id,\n anyCharID)\n', (20343, 20412), False, 'from ElevatorBot.backendNetworking.authfunctions import getSpiderMaterials\n'), ((25327, 25352), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (25339, 25352), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((25384, 25431), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (25413, 25431), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((26199, 26224), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (26211, 
26224), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((26256, 26303), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (26285, 26303), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((27099, 27124), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (27111, 27124), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((27156, 27203), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (27185, 27203), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((28968, 28993), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (28980, 28993), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((29025, 29072), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (29054, 29072), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((29553, 29576), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29574, 29576), False, 'import datetime\n'), ((29741, 29768), 'ElevatorBot.backendNetworking.dataLoading.getClanMembers', 'getClanMembers', (['self.client'], {}), '(self.client)\n', (29755, 29768), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((33289, 33316), 
'collections.Counter', 'Counter', (['destinyIDs_friends'], {}), '(destinyIDs_friends)\n', (33296, 33316), False, 'from collections import Counter\n'), ((33567, 33654), 'ElevatorBot.database.database.getActivityHistory', 'getActivityHistory', (['destinyID'], {'mode': 'mode', 'start_time': 'start_time', 'end_time': 'end_time'}), '(destinyID, mode=mode, start_time=start_time, end_time=\n end_time)\n', (33585, 33654), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((34009, 34048), 'ElevatorBot.database.database.getPgcrActivitiesUsersStats', 'getPgcrActivitiesUsersStats', (['instanceID'], {}), '(instanceID)\n', (34036, 34048), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((36053, 36069), 'json.load', 'json.load', (['mlist'], {}), '(mlist)\n', (36062, 36069), False, 'import json\n'), ((36725, 36792), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj_admin', 'get_user_obj_admin', (['ctx', 'kwargs'], {'allowed_users': '[211838266834550785]'}), '(ctx, kwargs, allowed_users=[211838266834550785])\n', (36743, 36792), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((36917, 36933), 'json.load', 'json.load', (['mlist'], {}), '(mlist)\n', (36926, 36933), False, 'import json\n'), ((37225, 37250), 'json.dump', 'json.dump', (['players', 'mlist'], {}), '(players, mlist)\n', (37234, 37250), False, 'import json\n'), ((37921, 37988), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj_admin', 'get_user_obj_admin', (['ctx', 'kwargs'], {'allowed_users': 
'[211838266834550785]'}), '(ctx, kwargs, allowed_users=[211838266834550785])\n', (37939, 37988), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((38113, 38129), 'json.load', 'json.load', (['mlist'], {}), '(mlist)\n', (38122, 38129), False, 'import json\n'), ((38554, 38644), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Mystic List"""', 'f"""Removed {user.name} from the mystic list, it now has:"""'], {}), "('Mystic List',\n f'Removed {user.name} from the mystic list, it now has:')\n", (38567, 38644), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((42517, 42542), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (42529, 42542), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((44495, 44535), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Error"""', '"""No users found"""'], {}), "('Error', 'No users found')\n", (44508, 44535), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((45231, 45270), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Error"""', '"""No data found"""'], {}), "('Error', 'No data found')\n", (45244, 45270), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((63681, 63706), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (63693, 63706), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((63738, 63785), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (63767, 
63785), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((64191, 64227), 'ElevatorBot.backendNetworking.dataLoading.searchForItem', 'searchForItem', (['ctx', "kwargs['weapon']"], {}), "(ctx, kwargs['weapon'])\n", (64204, 64227), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((66272, 66333), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{weapon_name} stats for {user.display_name}"""'], {}), "(f'{weapon_name} stats for {user.display_name}')\n", (66285, 66333), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((67857, 67904), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['datetime', 'statistic']"}), "(columns=['datetime', 'statistic'])\n", (67869, 67904), True, 'import pandas as pd\n'), ((70073, 70103), 'pandas.to_datetime', 'pd.to_datetime', (["df['datetime']"], {}), "(df['datetime'])\n", (70087, 70103), True, 'import pandas as pd\n'), ((70134, 70164), 'pandas.to_numeric', 'pd.to_numeric', (["df['statistic']"], {}), "(df['statistic'])\n", (70147, 70164), True, 'import pandas as pd\n'), ((70264, 70294), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (70276, 70294), True, 'import matplotlib.pyplot as plt\n'), ((70826, 70844), 'matplotlib.pyplot.savefig', 'plt.savefig', (['title'], {}), '(title)\n', (70837, 70844), True, 'import matplotlib.pyplot as plt\n'), ((70975, 70991), 'os.remove', 'os.remove', (['title'], {}), '(title)\n', (70984, 70991), False, 'import os\n'), ((74245, 74270), 'ElevatorBot.backendNetworking.slashCommandFunctions.get_user_obj', 'get_user_obj', (['ctx', 'kwargs'], {}), '(ctx, kwargs)\n', (74257, 74270), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((74302, 74349), 
'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_discord_id', 'DestinyPlayer.from_discord_id', (['user.id'], {'ctx': 'ctx'}), '(user.id, ctx=ctx)\n', (74331, 74349), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((80561, 80652), 'ElevatorBot.backendNetworking.miscFunctions.check_if_mutually_exclusive', 'check_if_mutually_exclusive', (["['expansion', 'season', ['starttime', 'endtime']]", 'kwargs'], {}), "(['expansion', 'season', ['starttime', 'endtime'\n ]], kwargs)\n", (80588, 80652), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((81238, 81261), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (81259, 81261), False, 'import datetime\n'), ((84850, 84941), 'ElevatorBot.backendNetworking.miscFunctions.check_if_mutually_exclusive', 'check_if_mutually_exclusive', (["['expansion', 'season', ['starttime', 'endtime']]", 'kwargs'], {}), "(['expansion', 'season', ['starttime', 'endtime'\n ]], kwargs)\n", (84877, 84941), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((85511, 85534), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (85532, 85534), False, 'import datetime\n'), ((85950, 85977), 'ElevatorBot.backendNetworking.dataLoading.getClanMembers', 'getClanMembers', (['self.client'], {}), '(self.client)\n', (85964, 85977), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((89415, 89489), 'ElevatorBot.backendNetworking.persistentMessages.get_persistent_message_or_channel', 'get_persistent_message_or_channel', (['self.client', '"""tournament"""', 'ctx.guild.id'], {}), "(self.client, 'tournament', ctx.guild.id)\n", (89448, 89489), False, 'from 
ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((90208, 90338), 'ElevatorBot.backendNetworking.persistentMessages.make_persistent_message', 'make_persistent_message', (['self.client', '"""tournament"""', 'ctx.guild.id', 'channel.id'], {'reaction_id_list': 'tournament', 'message_embed': 'embed'}), "(self.client, 'tournament', ctx.guild.id, channel.id,\n reaction_id_list=tournament, message_embed=embed)\n", (90231, 90338), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((91165, 91239), 'ElevatorBot.backendNetworking.persistentMessages.get_persistent_message_or_channel', 'get_persistent_message_or_channel', (['self.client', '"""tournament"""', 'ctx.guild.id'], {}), "(self.client, 'tournament', ctx.guild.id)\n", (91198, 91239), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((92749, 92823), 'ElevatorBot.backendNetworking.tournament.startTournamentEvents', 'startTournamentEvents', (['self.client', 'message', 'message.channel', 'participants'], {}), '(self.client, message, message.channel, participants)\n', (92770, 92823), False, 'from ElevatorBot.backendNetworking.tournament import startTournamentEvents\n'), ((92912, 92974), 'ElevatorBot.backendNetworking.persistentMessages.delete_persistent_message', 'delete_persistent_message', (['message', '"""tournament"""', 'ctx.guild.id'], {}), "(message, 'tournament', ctx.guild.id)\n", (92937, 92974), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((93190, 93212), 'asyncio.sleep', 'asyncio.sleep', (['(60 * 10)'], {}), '(60 * 10)\n', (93203, 93212), False, 'import asyncio\n'), ((93627, 93701), 
'ElevatorBot.backendNetworking.persistentMessages.get_persistent_message_or_channel', 'get_persistent_message_or_channel', (['self.client', '"""tournament"""', 'ctx.guild.id'], {}), "(self.client, 'tournament', ctx.guild.id)\n", (93660, 93701), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((94367, 94429), 'ElevatorBot.backendNetworking.persistentMessages.delete_persistent_message', 'delete_persistent_message', (['message', '"""tournament"""', 'ctx.guild.id'], {}), "(message, 'tournament', ctx.guild.id)\n", (94392, 94429), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((8148, 8204), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['next_season_date', '"""%Y-%m-%d"""'], {}), "(next_season_date, '%Y-%m-%d')\n", (8174, 8204), False, 'import datetime\n'), ((6429, 6443), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (6441, 6443), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((13682, 13761), 'ElevatorBot.database.database.getDestinyDefinition', 'getDestinyDefinition', (['"""DestinyActivityDefinition"""', "data['directorActivityHash']"], {}), "('DestinyActivityDefinition', data['directorActivityHash'])\n", (13702, 13761), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((12511, 12637), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""activity"""', 'description': '"""The type of the activity"""', 'option_type': '(3)', 'required': '(True)', 'choices': 'choices_mode'}), "(name='activity', 
description='The type of the activity',\n option_type=3, required=True, choices=choices_mode)\n", (12524, 12637), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((12742, 12756), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (12754, 12756), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((15302, 15316), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (15314, 15316), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((17661, 17787), 'discord_slash.utils.manage_components.wait_for_component', 'manage_components.wait_for_component', (['(select_ctx.bot if select_ctx else ctx.bot)'], {'components': 'select_components', 'timeout': '(60)'}), '(select_ctx.bot if select_ctx else ctx.\n bot, components=select_components, timeout=60)\n', (17697, 17787), False, 'from discord_slash.utils import manage_components\n'), ((19861, 19875), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (19873, 19875), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((25186, 25200), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (25198, 25200), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((26065, 26079), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (26077, 26079), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((26965, 26979), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (26977, 26979), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((29251, 29294), 
'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['starttime']"], {}), "(ctx, kwargs['starttime'])\n", (29268, 29294), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((29349, 29372), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (29370, 29372), False, 'import datetime\n'), ((29375, 29401), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(7)'}), '(days=7)\n', (29393, 29401), False, 'import datetime\n'), ((29483, 29524), 'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['endtime']"], {}), "(ctx, kwargs['endtime'])\n", (29500, 29524), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((27936, 28065), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""mode"""', 'description': '"""You can restrict the game mode"""', 'option_type': '(3)', 'required': '(False)', 'choices': 'choices_mode'}), "(name='mode', description='You can restrict the game mode',\n option_type=3, required=False, choices=choices_mode)\n", (27949, 28065), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((28170, 28355), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""starttime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the start (lower cutoff). Note: Can break for long timespan"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'starttime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the start (lower cutoff). 
Note: Can break for long timespan"\n , option_type=3, required=False)\n', (28183, 28355), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((28438, 28585), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""endtime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the end (higher cutoff)"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'endtime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the end (higher cutoff)",\n option_type=3, required=False)\n', (28451, 28585), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((28669, 28739), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {'flavor_text': '"""The name of the user you want to highlight"""'}), "(flavor_text='The name of the user you want to highlight')\n", (28681, 28739), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((36552, 36609), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {'flavor_text': '"""Requires elevated permissions"""'}), "(flavor_text='Requires elevated permissions')\n", (36564, 36609), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((38507, 38532), 'json.dump', 'json.dump', (['players', 'mlist'], {}), '(players, mlist)\n', (38516, 38532), False, 'import json\n'), ((37745, 37802), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {'flavor_text': '"""Requires elevated permissions"""'}), "(flavor_text='Requires elevated permissions')\n", (37757, 37802), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((41796, 41970), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""arg"""', 'description': '"""Depending on which leaderboard you want to see, you might need 
to add an additional argument"""', 'option_type': '(3)', 'required': '(False)'}), "(name='arg', description=\n 'Depending on which leaderboard you want to see, you might need to add an additional argument'\n , option_type=3, required=False)\n", (41809, 41970), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((42053, 42188), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""reverse"""', 'description': '"""Default: \'False\' - If you want to flip the sorting"""', 'option_type': '(5)', 'required': '(False)'}), '(name=\'reverse\', description=\n "Default: \'False\' - If you want to flip the sorting", option_type=5,\n required=False)\n', (42066, 42188), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((42272, 42286), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (42284, 42286), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((65951, 65980), 'ElevatorBot.database.database.getPgcrActivity', 'getPgcrActivity', (['max_kills_id'], {}), '(max_kills_id)\n', (65966, 65980), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((61102, 61233), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""weapon"""', 'description': '"""The name of the weapon you want to see stats for"""', 'option_type': '(3)', 'required': '(True)'}), "(name='weapon', description=\n 'The name of the weapon you want to see stats for', option_type=3,\n required=True)\n", (61115, 61233), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((61818, 61992), 'discord_slash.utils.manage_commands.create_option', 
'create_option', ([], {'name': '"""graph"""', 'description': '"""Default: \'False\' - See a timeline of your weapon usage instead of an overview of key stats"""', 'option_type': '(5)', 'required': '(False)'}), '(name=\'graph\', description=\n "Default: \'False\' - See a timeline of your weapon usage instead of an overview of key stats"\n , option_type=5, required=False)\n', (61831, 61992), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((62551, 62727), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""starttime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time from when the weapon stats start counting"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'starttime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the time from when the weapon stats start counting"\n , option_type=3, required=False)\n', (62564, 62727), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((62810, 62980), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""endtime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'endtime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"\n , option_type=3, required=False)\n', (62823, 62980), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((63063, 63226), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""mode"""', 'description': '"""You can restrict the game mode where the weapon stats count"""', 'option_type': '(3)', 'required': '(False)', 'choices': 'choices_mode'}), "(name='mode', description=\n 'You can restrict the game mode where the weapon stats count',\n option_type=3, required=False, 
choices=choices_mode)\n", (63076, 63226), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((63326, 63485), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""activityhash"""', 'description': '"""You can restrict the activity where the weapon stats count (advanced)"""', 'option_type': '(4)', 'required': '(False)'}), "(name='activityhash', description=\n 'You can restrict the activity where the weapon stats count (advanced)',\n option_type=4, required=False)\n", (63339, 63485), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((63569, 63583), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (63581, 63583), False, 'from ElevatorBot.static.slashCommandOptions import choices_mode, options_stat, options_user\n'), ((74841, 74877), 'ElevatorBot.backendNetworking.dataLoading.searchForItem', 'searchForItem', (['ctx', "kwargs['weapon']"], {}), "(ctx, kwargs['weapon'])\n", (74854, 74877), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((71155, 71298), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""weapon"""', 'description': '"""If you want a specific weapon to be included on the ranking"""', 'option_type': '(3)', 'required': '(False)'}), "(name='weapon', description=\n 'If you want a specific weapon to be included on the ranking',\n option_type=3, required=False)\n", (71168, 71298), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((73111, 73287), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""starttime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time from when the weapon stats start counting"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'starttime\', description=\n "Format: \'DD/MM/YY\' - 
You can restrict the time from when the weapon stats start counting"\n , option_type=3, required=False)\n', (73124, 73287), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((73370, 73540), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""endtime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'endtime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"\n , option_type=3, required=False)\n', (73383, 73540), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((73623, 73786), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""mode"""', 'description': '"""You can restrict the game mode where the weapon stats count"""', 'option_type': '(3)', 'required': '(False)', 'choices': 'choices_mode'}), "(name='mode', description=\n 'You can restrict the game mode where the weapon stats count',\n option_type=3, required=False, choices=choices_mode)\n", (73636, 73786), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((73886, 74045), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""activityhash"""', 'description': '"""You can restrict the activity where the weapon stats count (advanced)"""', 'option_type': '(4)', 'required': '(False)'}), "(name='activityhash', description=\n 'You can restrict the activity where the weapon stats count (advanced)',\n option_type=4, required=False)\n", (73899, 74045), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((74129, 74143), 'ElevatorBot.static.slashCommandOptions.options_user', 'options_user', ([], {}), '()\n', (74141, 74143), False, 'from ElevatorBot.static.slashCommandOptions import 
choices_mode, options_stat, options_user\n'), ((80952, 80995), 'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['starttime']"], {}), "(ctx, kwargs['starttime'])\n", (80969, 80995), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((81168, 81209), 'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['endtime']"], {}), "(ctx, kwargs['endtime'])\n", (81185, 81209), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((85210, 85253), 'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['starttime']"], {}), "(ctx, kwargs['starttime'])\n", (85227, 85253), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((85308, 85331), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (85329, 85331), False, 'import datetime\n'), ((85334, 85361), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(30)'}), '(days=30)\n', (85352, 85361), False, 'import datetime\n'), ((85441, 85482), 'ElevatorBot.backendNetworking.slashCommandFunctions.verify_time_input', 'verify_time_input', (['ctx', "kwargs['endtime']"], {}), "(ctx, kwargs['endtime'])\n", (85458, 85482), False, 'from ElevatorBot.backendNetworking.slashCommandFunctions import get_user_obj, get_user_obj_admin, verify_time_input\n'), ((87685, 87819), 'ElevatorBot.backendNetworking.miscFunctions.write_line', 'write_line', (['i', 'f"""[{weapons_by_id[weapon_id]}](https://www.light.gg/db/items/{weapon_id})"""', '"""Kills"""', 'f"""{weapon_kills:,}"""', 'emoji'], {}), "(i,\n f'[{weapons_by_id[weapon_id]}](https://www.light.gg/db/items/{weapon_id})',\n 'Kills', f'{weapon_kills:,}', emoji)\n", (87695, 87819), 
False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((83059, 83235), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""starttime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time from when the weapon stats start counting"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'starttime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the time from when the weapon stats start counting"\n , option_type=3, required=False)\n', (83072, 83235), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((83318, 83488), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""endtime"""', 'description': '"""Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"""', 'option_type': '(3)', 'required': '(False)'}), '(name=\'endtime\', description=\n "Format: \'DD/MM/YY\' - You can restrict the time up until which the weapon stats count"\n , option_type=3, required=False)\n', (83331, 83488), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((83571, 83727), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""mode"""', 'description': '"""You can restrict the game mode (Default: Everything)"""', 'option_type': '(3)', 'required': '(False)', 'choices': 'choices_mode'}), "(name='mode', description=\n 'You can restrict the game mode (Default: Everything)', option_type=3,\n required=False, choices=choices_mode)\n", (83584, 83727), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((83827, 83953), 'discord_slash.utils.manage_commands.create_option', 'create_option', ([], {'name': '"""activityhash"""', 'description': '"""You can restrict the activity (advanced)"""', 
'option_type': '(4)', 'required': '(False)'}), "(name='activityhash', description=\n 'You can restrict the activity (advanced)', option_type=4, required=False)\n", (83840, 83953), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((89894, 89980), 'ElevatorBot.backendNetworking.persistentMessages.get_persistent_message_or_channel', 'get_persistent_message_or_channel', (['self.client', '"""tournamentChannel"""', 'ctx.guild.id'], {}), "(self.client, 'tournamentChannel', ctx.\n guild.id)\n", (89927, 89980), False, 'from ElevatorBot.backendNetworking.persistentMessages import get_persistent_message_or_channel, make_persistent_message, delete_persistent_message\n'), ((8271, 8294), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8292, 8294), False, 'import datetime\n'), ((8739, 8760), 'ElevatorBot.database.database.getTimePlayed', 'getTimePlayed', ([], {}), '(**args)\n', (8752, 8760), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((10313, 10339), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (10331, 10339), False, 'import datetime\n'), ((10870, 10896), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (10888, 10896), False, 'import datetime\n'), ((11522, 11548), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (11540, 11548), False, 'import datetime\n'), ((14529, 14584), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': "player['timePlayedSeconds']"}), "(seconds=player['timePlayedSeconds'])\n", (14547, 14584), False, 'import datetime\n'), ((25665, 25780), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{user.display_name}\'s Stat Info"""', 'f"""Your 
`{kwargs[\'name\']}` stat is currently at **{stat:,}**"""'], {}), '(f"{user.display_name}\'s Stat Info",\n f"Your `{kwargs[\'name\']}` stat is currently at **{stat:,}**")\n', (25678, 25780), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((26561, 26680), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{user.display_name}\'s PvE Stat Info"""', 'f"""Your `{kwargs[\'name\']}` stat is currently at **{stat:,}**"""'], {}), '(f"{user.display_name}\'s PvE Stat Info",\n f"Your `{kwargs[\'name\']}` stat is currently at **{stat:,}**")\n', (26574, 26680), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((27461, 27580), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{user.display_name}\'s PvP Stat Info"""', 'f"""Your `{kwargs[\'name\']}` stat is currently at **{stat:,}**"""'], {}), '(f"{user.display_name}\'s PvP Stat Info",\n f"Your `{kwargs[\'name\']}` stat is currently at **{stat:,}**")\n', (27474, 27580), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((31730, 31744), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (31742, 31744), False, 'import os\n'), ((32369, 32568), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""{user.display_name}\'s Friends"""', 'f"""Click the download button below and open the file with your browser to view your Network"""', 'f"""The file may load for a while, that\'s normal."""'], {}), '(f"{user.display_name}\'s Friends",\n f\'Click the download button below and open the file with your browser to view your Network\'\n , f"The file may load for a while, that\'s normal.")\n', (32382, 32568), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((32701, 32720), 'discord.File', 'discord.File', (['title'], {}), '(title)\n', (32713, 32720), False, 'import discord\n'), ((38813, 38899), 
'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Mystic List"""', 'f"""User {user.name} was not found in the player list"""'], {}), "('Mystic List',\n f'User {user.name} was not found in the player list')\n", (38826, 38899), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((42849, 42882), 'ElevatorBot.backendNetworking.dataLoading.searchForItem', 'searchForItem', (['ctx', "kwargs['arg']"], {}), "(ctx, kwargs['arg'])\n", (42862, 42882), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((43718, 43765), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Error"""', '"""Failed handling users"""'], {}), "('Error', 'Failed handling users')\n", (43731, 43765), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((66053, 66114), 'ElevatorBot.database.database.getDestinyDefinition', 'getDestinyDefinition', (['"""DestinyActivityModeDefinition"""', 'res[5]'], {}), "('DestinyActivityModeDefinition', res[5])\n", (66073, 66114), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((66155, 66212), 'ElevatorBot.database.database.getDestinyDefinition', 'getDestinyDefinition', (['"""DestinyActivityDefinition"""', 'res[2]'], {}), "('DestinyActivityDefinition', res[2])\n", (66175, 66212), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((86545, 86575), 'ElevatorBot.backendNetworking.dataLoading.translateWeaponSlot', 'translateWeaponSlot', (['weapon[4]'], {}), '(weapon[4])\n', (86564, 86575), False, 
'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((90610, 90733), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Success"""', 'f"""Registration for the tournament has started, visit {channel.mention} to join the fun!"""'], {}), "('Success',\n f'Registration for the tournament has started, visit {channel.mention} to join the fun!'\n )\n", (90623, 90733), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((91649, 91696), 'ElevatorBot.backendNetworking.miscFunctions.has_elevated_permissions', 'has_elevated_permissions', (['ctx.author', 'ctx.guild'], {}), '(ctx.author, ctx.guild)\n', (91673, 91696), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((92666, 92724), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Success"""', '"""The tournament is now starting"""'], {}), "('Success', 'The tournament is now starting')\n", (92679, 92724), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((94014, 94061), 'ElevatorBot.backendNetworking.miscFunctions.has_elevated_permissions', 'has_elevated_permissions', (['ctx.author', 'ctx.guild'], {}), '(ctx.author, ctx.guild)\n', (94038, 94061), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((94460, 94519), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Success"""', '"""The tournament has been deleted"""'], {}), "('Success', 'The tournament has been deleted')\n", (94473, 94519), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((13418, 13526), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Error"""', 
'"""Couldn\'t find any data for that mode. If you think this is an error DM me"""'], {}), '(\'Error\',\n "Couldn\'t find any data for that mode. If you think this is an error DM me"\n )\n', (13431, 13526), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((13959, 14018), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': "data['activityDurationSeconds']"}), "(seconds=data['activityDurationSeconds'])\n", (13977, 14018), False, 'import datetime\n'), ((44158, 44237), 'ElevatorBot.networking.network.get_json_from_url', 'get_json_from_url', (['f"""https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/"""'], {}), "(f'https://www.bungie.net/Platform/GroupV2/{CLANID}/Members/')\n", (44175, 44237), False, 'from ElevatorBot.networking.network import get_json_from_url\n'), ((45926, 46002), 'ElevatorBot.backendNetworking.miscFunctions.write_line', 'write_line', (['(index + 1)', 'f"""**{row[\'member\']}**"""', 'stat_text', "row['stat']", 'emoji'], {}), '(index + 1, f"**{row[\'member\']}**", stat_text, row[\'stat\'], emoji)\n', (45936, 46002), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((46253, 46320), 'ElevatorBot.backendNetworking.miscFunctions.write_line', 'write_line', (['(index + 1)', "row['member']", 'stat_text', "row['stat']", 'emoji'], {}), "(index + 1, row['member'], stat_text, row['stat'], emoji)\n", (46263, 46320), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((49409, 49448), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'result_sort'}), '(seconds=result_sort)\n', (49427, 49448), False, 'import datetime\n'), ((65202, 65268), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['"""Error"""', 'f"""No weapon stats found 
for {weapon_name}"""'], {}), "('Error', f'No weapon stats found for {weapon_name}')\n", (65215, 65268), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((67561, 67588), 'ElevatorBot.database.database.getPgcrActivity', 'getPgcrActivity', (['instanceID'], {}), '(instanceID)\n', (67576, 67588), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((70914, 70933), 'discord.File', 'discord.File', (['title'], {}), '(title)\n', (70926, 70933), False, 'import discord\n'), ((76400, 76430), 'ElevatorBot.backendNetworking.dataLoading.translateWeaponSlot', 'translateWeaponSlot', (['weapon[4]'], {}), '(weapon[4])\n', (76419, 76430), False, 'from ElevatorBot.backendNetworking.dataLoading import searchForItem, getClanMembers, translateWeaponSlot\n'), ((80728, 80795), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""You can only specify one time parameter"""'], {}), "(f'Error', f'You can only specify one time parameter')\n", (80741, 80795), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((85017, 85084), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""You can only specify one time parameter"""'], {}), "(f'Error', f'You can only specify one time parameter')\n", (85030, 85084), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((89589, 89751), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""A tournament already exists. \nPlease wait until it is completed and then try again or ask a member of staff to _delete it"""'], {}), '(f\'Error\',\n f"""A tournament already exists. 
\nPlease wait until it is completed and then try again or ask a member of staff to _delete it"""\n )\n', (89602, 89751), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((91343, 91446), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""You need to start the registration by using `/tournament _insert` first"""'], {}), "(f'Error',\n f'You need to start the registration by using `/tournament _insert` first')\n", (91356, 91446), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((91778, 91873), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""Only admins and the tournament creator can start the tournament"""'], {}), "(f'Error',\n f'Only admins and the tournament creator can start the tournament')\n", (91791, 91873), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((93805, 93866), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""There is no tournament to _delete"""'], {}), "(f'Error', f'There is no tournament to _delete')\n", (93818, 93866), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((94143, 94240), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""Only admins and the tournament creator can _delete the tournament"""'], {}), "(f'Error',\n f'Only admins and the tournament creator can _delete the tournament')\n", (94156, 94240), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((6131, 6183), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Everything"""', 'value': '"""Everything"""'}), "(name='Everything', value='Everything')\n", (6144, 6183), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((6205, 6251), 
'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Warlock"""', 'value': '"""Warlock"""'}), "(name='Warlock', value='Warlock')\n", (6218, 6251), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((6273, 6317), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Hunter"""', 'value': '"""Hunter"""'}), "(name='Hunter', value='Hunter')\n", (6286, 6317), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((6339, 6381), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Titan"""', 'value': '"""Titan"""'}), "(name='Titan', value='Titan')\n", (6352, 6381), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((16034, 16107), 'discord_slash.utils.manage_components.create_select_option', 'manage_components.create_select_option', ([], {'emoji': '"""📅"""', 'label': 'week', 'value': 'week'}), "(emoji='📅', label=week, value=week)\n", (16072, 16107), False, 'from discord_slash.utils import manage_components\n'), ((43096, 43181), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""Please specify a weapon in the command argument `arg`"""'], {}), "(f'Error',\n f'Please specify a weapon in the command argument `arg`')\n", (43109, 43181), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((39623, 39702), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Join-Date of this Discord Server"""', 'value': '"""discordjoindate"""'}), "(name='Join-Date of this Discord Server', value='discordjoindate')\n", (39636, 39702), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((39724, 39796), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Roles Earned on this Discord Server"""', 'value': 
'"""roles"""'}), "(name='Roles Earned on this Discord Server', value='roles')\n", (39737, 39796), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((39818, 39873), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Total Playtime"""', 'value': '"""totaltime"""'}), "(name='Total Playtime', value='totaltime')\n", (39831, 39873), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((39895, 39951), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Max. Power Level"""', 'value': '"""maxpower"""'}), "(name='Max. Power Level', value='maxpower')\n", (39908, 39951), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((39973, 40031), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Vault Space Used"""', 'value': '"""vaultspace"""'}), "(name='Vault Space Used', value='vaultspace')\n", (39986, 40031), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40053, 40112), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Orbs of Power Generated"""', 'value': '"""orbs"""'}), "(name='Orbs of Power Generated', value='orbs')\n", (40066, 40112), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40134, 40187), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Melee Kills"""', 'value': '"""meleekills"""'}), "(name='Melee Kills', value='meleekills')\n", (40147, 40187), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40209, 40262), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Super Kills"""', 'value': '"""superkills"""'}), "(name='Super Kills', value='superkills')\n", (40222, 40262), False, 'from 
discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40284, 40341), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Grenade Kills"""', 'value': '"""grenadekills"""'}), "(name='Grenade Kills', value='grenadekills')\n", (40297, 40341), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40363, 40407), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Deaths"""', 'value': '"""deaths"""'}), "(name='Deaths', value='deaths')\n", (40376, 40407), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40429, 40477), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Suicides"""', 'value': '"""suicides"""'}), "(name='Suicides', value='suicides')\n", (40442, 40477), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40499, 40541), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Kills"""', 'value': '"""kills"""'}), "(name='Kills', value='kills')\n", (40512, 40541), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40563, 40610), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Raids Done"""', 'value': '"""raids"""'}), "(name='Raids Done', value='raids')\n", (40576, 40610), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40632, 40681), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Raid Time"""', 'value': '"""raidtime"""'}), "(name='Raid Time', value='raidtime')\n", (40645, 40681), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40703, 40764), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Grandmaster Nightfalls Done"""', 'value': 
'"""gm"""'}), "(name='Grandmaster Nightfalls Done', value='gm')\n", (40716, 40764), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40786, 40836), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Weapon Kills"""', 'value': '"""weapon"""'}), "(name='Weapon Kills', value='weapon')\n", (40799, 40836), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40858, 40927), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Weapon Precision Kills"""', 'value': '"""weaponprecision"""'}), "(name='Weapon Precision Kills', value='weaponprecision')\n", (40871, 40927), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((40949, 41027), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""% Weapon Precision Kills"""', 'value': '"""weaponprecisionpercent"""'}), "(name='% Weapon Precision Kills', value='weaponprecisionpercent')\n", (40962, 41027), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((41049, 41114), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Enhancement Cores"""', 'value': '"""enhancementcores"""'}), "(name='Enhancement Cores', value='enhancementcores')\n", (41062, 41114), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((41136, 41185), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Forges Done"""', 'value': '"""forges"""'}), "(name='Forges Done', value='forges')\n", (41149, 41185), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((41363, 41429), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Active Triumph Score"""', 'value': '"""activetriumphs"""'}), "(name='Active Triumph Score', 
value='activetriumphs')\n", (41376, 41429), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((41451, 41517), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Legacy Triumph Score"""', 'value': '"""legacytriumphs"""'}), "(name='Legacy Triumph Score', value='legacytriumphs')\n", (41464, 41517), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((41539, 41592), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Triumph Score"""', 'value': '"""triumphs"""'}), "(name='Triumph Score', value='triumphs')\n", (41552, 41592), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((46609, 46676), 'ElevatorBot.backendNetworking.miscFunctions.write_line', 'write_line', (['(index + 1)', "row['member']", 'stat_text', "row['stat']", 'emoji'], {}), "(index + 1, row['member'], stat_text, row['stat'], emoji)\n", (46619, 46676), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((61543, 61595), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Kills (default)"""', 'value': '"""kills"""'}), "(name='Kills (default)', value='kills')\n", (61556, 61595), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((61617, 61678), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Precision Kills"""', 'value': '"""precisionkills"""'}), "(name='Precision Kills', value='precisionkills')\n", (61630, 61678), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((61700, 61770), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""% Precision Kills"""', 'value': '"""precisionkillspercent"""'}), "(name='% 
Precision Kills', value='precisionkillspercent')\n", (61713, 61770), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((62316, 62365), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Warlock"""', 'value': '"""2271682572"""'}), "(name='Warlock', value='2271682572')\n", (62329, 62365), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((62387, 62434), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Hunter"""', 'value': '"""671679327"""'}), "(name='Hunter', value='671679327')\n", (62400, 62434), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((62456, 62503), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Titan"""', 'value': '"""3655393761"""'}), "(name='Titan', value='3655393761')\n", (62469, 62503), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((71616, 71668), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Kills (default)"""', 'value': '"""kills"""'}), "(name='Kills (default)', value='kills')\n", (71629, 71668), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((71690, 71751), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Precision Kills"""', 'value': '"""precisionkills"""'}), "(name='Precision Kills', value='precisionkills')\n", (71703, 71751), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((71773, 71843), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""% Precision Kills"""', 'value': '"""precisionkillspercent"""'}), "(name='% Precision Kills', value='precisionkillspercent')\n", (71786, 71843), False, 'from discord_slash.utils.manage_commands import create_option, 
create_choice\n'), ((72132, 72181), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Warlock"""', 'value': '"""2271682572"""'}), "(name='Warlock', value='2271682572')\n", (72145, 72181), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((72203, 72250), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Hunter"""', 'value': '"""671679327"""'}), "(name='Hunter', value='671679327')\n", (72216, 72250), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((72272, 72319), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Titan"""', 'value': '"""3655393761"""'}), "(name='Titan', value='3655393761')\n", (72285, 72319), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((72615, 72687), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': 'expansion[1]', 'value': 'f"""{expansion[0]},{expansion[1]}"""'}), "(name=expansion[1], value=f'{expansion[0]},{expansion[1]}')\n", (72628, 72687), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((72991, 73054), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': 'season[1]', 'value': 'f"""{season[0]},{season[1]}"""'}), "(name=season[1], value=f'{season[0]},{season[1]}')\n", (73004, 73054), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((80311, 80383), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""The argument `activityhash` must be a number"""'], {}), "(f'Error', f'The argument `activityhash` must be a number')\n", (80324, 80383), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((84641, 84713), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', 
(['f"""Error"""', 'f"""The argument `activityhash` must be a number"""'], {}), "(f'Error', f'The argument `activityhash` must be a number')\n", (84654, 84713), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((82080, 82129), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Warlock"""', 'value': '"""2271682572"""'}), "(name='Warlock', value='2271682572')\n", (82093, 82129), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((82151, 82198), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Hunter"""', 'value': '"""671679327"""'}), "(name='Hunter', value='671679327')\n", (82164, 82198), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((82220, 82267), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': '"""Titan"""', 'value': '"""3655393761"""'}), "(name='Titan', value='3655393761')\n", (82233, 82267), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((82563, 82635), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': 'expansion[1]', 'value': 'f"""{expansion[0]},{expansion[1]}"""'}), "(name=expansion[1], value=f'{expansion[0]},{expansion[1]}')\n", (82576, 82635), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((82939, 83002), 'discord_slash.utils.manage_commands.create_choice', 'create_choice', ([], {'name': 'season[1]', 'value': 'f"""{season[0]},{season[1]}"""'}), "(name=season[1], value=f'{season[0]},{season[1]}')\n", (82952, 83002), False, 'from discord_slash.utils.manage_commands import create_option, create_choice\n'), ((9222, 9255), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'value'}), '(seconds=value)\n', (9240, 9255), False, 'import datetime\n'), ((14813, 14875), 
'ElevatorBot.backendNetworking.miscFunctions.get_emoji', 'get_emoji', (['self.client', "self.classes[player['characterClass']]"], {}), "(self.client, self.classes[player['characterClass']])\n", (14822, 14875), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((14947, 14996), 'ElevatorBot.backendNetworking.miscFunctions.get_emoji', 'get_emoji', (['self.client', 'light_level_icon_emoji_id'], {}), '(self.client, light_level_icon_emoji_id)\n', (14956, 14996), False, 'from ElevatorBot.backendNetworking.miscFunctions import get_emoji, write_line, has_elevated_permissions, check_if_mutually_exclusive, convert_expansion_or_season_dates\n'), ((30988, 31028), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_destiny_id', 'DestinyPlayer.from_destiny_id', (['destinyID'], {}), '(destinyID)\n', (31017, 31028), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((86093, 86133), 'ElevatorBot.backendNetworking.destinyPlayer.DestinyPlayer.from_destiny_id', 'DestinyPlayer.from_destiny_id', (['destinyID'], {}), '(destinyID)\n', (86122, 86133), False, 'from ElevatorBot.backendNetworking.destinyPlayer import DestinyPlayer\n'), ((92295, 92358), 'ElevatorBot.backendNetworking.formating.embed_message', 'embed_message', (['f"""Error"""', 'f"""At least two people need to sign up"""'], {}), "(f'Error', f'At least two people need to sign up')\n", (92308, 92358), False, 'from ElevatorBot.backendNetworking.formating import embed_message\n'), ((7231, 7252), 'ElevatorBot.database.database.getTimePlayed', 'getTimePlayed', ([], {}), '(**args)\n', (7244, 7252), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((7305, 
7334), 'ElevatorBot.database.database.getTimePlayed', 'getTimePlayed', ([], {'mode': '(7)'}), '(**args, mode=7)\n', (7318, 7334), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((7387, 7416), 'ElevatorBot.database.database.getTimePlayed', 'getTimePlayed', ([], {'mode': '(5)'}), '(**args, mode=5)\n', (7400, 7416), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((52767, 52815), 'ElevatorBot.database.database.getClearCount', 'getClearCount', (['destiny_player.destiny_id'], {'mode': '(4)'}), '(destiny_player.destiny_id, mode=4)\n', (52780, 52815), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((53442, 53478), 'ElevatorBot.database.database.getForges', 'getForges', (['destiny_player.destiny_id'], {}), '(destiny_player.destiny_id)\n', (53451, 53478), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((53909, 53945), 'ElevatorBot.database.database.getForges', 'getForges', (['destiny_player.destiny_id'], {}), '(destiny_player.destiny_id)\n', (53918, 53945), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, 
getClearCount, get_d2_steam_player_info, getTimePlayed\n'), ((56769, 56834), 'ElevatorBot.database.database.getClearCount', 'getClearCount', (['destiny_player.destiny_id'], {'activityHashes': 'gmHashes'}), '(destiny_player.destiny_id, activityHashes=gmHashes)\n', (56782, 56834), False, 'from ElevatorBot.database.database import getForges, getLastActivity, getDestinyDefinition, getWeaponInfo, getPgcrActivity, getTopWeapons, getActivityHistory, getPgcrActivitiesUsersStats, getClearCount, get_d2_steam_player_info, getTimePlayed\n')] |
# -*- coding: utf-8 -*-
import pandas
import torch
# import horovod.torch as hvd
import time
import numpy as np
import sklearn
import deepctr_torch as deepctr
import argparse
# Command-line configuration for the training run.
parser = argparse.ArgumentParser()
# Path to the (Criteo-style) CSV training data; required.
parser.add_argument('--data', required=True)
parser.add_argument('--optimizer', default='Adagrad', choices=['Adagrad'])
parser.add_argument('--model', default="DeepFM", choices=["WDL", 'DeepFM', 'XDeepFM'])
# NOTE(review): --embedding_dim is parsed but never referenced below — confirm intent.
parser.add_argument('--embedding_dim', default=9, type=int)
parser.add_argument('--batch_size', default=4096, type=int)
parser.add_argument('--epochs', default=2, type=int)
# When set, wrap the model with onnxruntime's ORTModule for training.
parser.add_argument('--onnx', action='store_true')
# NOTE(review): --cpu is parsed but ignored; device is hard-coded to 'cuda' below.
parser.add_argument('--cpu', action='store_true')
args = parser.parse_args()
# hvd.init()
# if args.cpu:
#     device = 'cpu'
# else:
#     # torch.cuda.set_device(hvd.local_rank())
#     device = 'cuda:{}'.format(hvd.local_rank())
device = 'cuda'
def train_model(model, x, y, batch_size, epochs=1, optimizer=torch.optim.Adagrad):
    """Train ``model`` with summed binary cross-entropy over mini-batches.

    Args:
        model: network mapping a float feature matrix to probabilities in [0, 1].
        x: list of 1-D arrays/Series, one per feature column; stacked column-wise.
        y: array of binary labels.
        batch_size: mini-batch size for the DataLoader.
        epochs: number of passes over the data.
        optimizer: an instantiated torch optimizer (despite the class default).
    """
    # Stack the per-feature columns into a single (num_rows, num_features) matrix.
    feature_matrix = np.concatenate([np.expand_dims(col, 1) for col in x], axis=-1)
    dataset = torch.utils.data.TensorDataset(
        torch.from_numpy(feature_matrix), torch.from_numpy(y)
    )
    loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size)
    loss_func = torch.nn.functional.binary_cross_entropy
    for epoch in range(epochs):
        start_time = time.time()
        epoch_loss = 0.0
        for batch_x, batch_y in loader:
            # `device` is a module-level global set from the CLI section above.
            batch_x = batch_x.to(device).float()
            batch_y = batch_y.to(device).float()
            predictions = model(batch_x).to(device).squeeze()
            optimizer.zero_grad()
            loss = loss_func(predictions, batch_y.squeeze(), reduction='sum')
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
        epoch_time = int(time.time() - start_time)
        print('Epoch {0}/{1}'.format(epoch + 1, epochs))
        print("{0}s - loss: {1: .4f}".format(epoch_time, epoch_loss))
if __name__ == "__main__":
    data = pandas.read_csv(args.data)
    num_lines = data.shape[0]
    # Keep a whole number of batches (single-process run; horovod sharding disabled).
    num_local_lines = num_lines // args.batch_size * args.batch_size
    local_start = 0
    local_end = local_start + num_local_lines
    print("num_lines:%d, num_local_lines:%d" % (num_lines, num_local_lines))
    print("local_start:%d, local_end:%d" % (local_start, local_end))
    target = ['label']
    # Criteo layout: 13 dense (I1..I13) and 26 sparse (C1..C26) features.
    dense_features = ['I' + str(i) for i in range(1, 14)]
    sparse_features = ['C' + str(i) for i in range(1, 27)]
    print(data.columns)
    feature_columns = []
    for name in sparse_features:
        feature_columns.append(deepctr.inputs.SparseFeat(name, data[name].max() + 1, dtype='int64'))
    for name in dense_features:
        feature_columns.append(deepctr.inputs.DenseFeat(name, 1, dtype='float32'))
    train = data.iloc[local_start:local_end]
    train_model_input = {name: train[name] for name in sparse_features + dense_features}
    if args.model == 'WDL':
        fc_sizes = (512, 256, 128, 32)
    elif args.model in {'DeepFM', 'XDeepFM'}:
        # BUG FIX: was {'DeepFM', 'xDeepFM'} — the argparse choice is spelled
        # 'XDeepFM', so that model fell through to the unknown-model branch and
        # later crashed with NameError on fc_sizes.
        fc_sizes = (400, 400, 400)
    else:
        # BUG FIX: was a bare print that let execution continue without fc_sizes.
        raise ValueError("unknown model " + args.model)
    # getattr is safer and clearer than eval() for looking up the model class.
    model_class = getattr(deepctr.models, args.model)
    model = model_class(feature_columns, feature_columns, device=device,
                        task='binary', dnn_hidden_units=fc_sizes, l2_reg_linear=0, l2_reg_embedding=0)
    x = [train_model_input[name] for name in model.feature_index]
    if args.onnx:
        from onnxruntime.training.ortmodule import ORTModule
        model = ORTModule(model)
    optimizer = torch.optim.Adagrad(model.parameters())
    train_model(model, x, train[target].values,
                batch_size=args.batch_size, epochs=args.epochs, optimizer=optimizer)
| [
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"pandas.read_csv",
"onnxruntime.training.ortmodule.ORTModule",
"deepctr_torch.inputs.DenseFeat",
"numpy.expand_dims",
"time.time",
"torch.utils.data.TensorDataset",
"numpy.concatenate",
"torch.from_numpy"
] | [((185, 210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (208, 210), False, 'import argparse\n'), ((1085, 1104), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (1101, 1104), False, 'import torch\n'), ((1129, 1165), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['x', 'y'], {}), '(x, y)\n', (1159, 1165), False, 'import torch\n'), ((1185, 1262), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'train_tensor_data', 'batch_size': 'batch_size'}), '(dataset=train_tensor_data, batch_size=batch_size)\n', (1212, 1262), False, 'import torch\n'), ((2319, 2345), 'pandas.read_csv', 'pandas.read_csv', (['args.data'], {}), '(args.data)\n', (2334, 2345), False, 'import pandas\n'), ((981, 1006), 'numpy.expand_dims', 'np.expand_dims', (['tensor', '(1)'], {}), '(tensor, 1)\n', (995, 1006), True, 'import numpy as np\n'), ((1049, 1075), 'numpy.concatenate', 'np.concatenate', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (1063, 1075), True, 'import numpy as np\n'), ((1373, 1384), 'time.time', 'time.time', ([], {}), '()\n', (1382, 1384), False, 'import time\n'), ((3933, 3949), 'onnxruntime.training.ortmodule.ORTModule', 'ORTModule', (['model'], {}), '(model)\n', (3942, 3949), False, 'from onnxruntime.training.ortmodule import ORTModule\n'), ((3197, 3247), 'deepctr_torch.inputs.DenseFeat', 'deepctr.inputs.DenseFeat', (['name', '(1)'], {'dtype': '"""float32"""'}), "(name, 1, dtype='float32')\n", (3221, 3247), True, 'import deepctr_torch as deepctr\n'), ((2023, 2034), 'time.time', 'time.time', ([], {}), '()\n', (2032, 2034), False, 'import time\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import json
import pathlib
from typing import Dict, Tuple
import numpy as np
import skimage.metrics
import torch
def get_user_dir() -> pathlib.Path:
    """Return the activemri settings directory under the current working directory."""
    return pathlib.Path.cwd().joinpath(".activemri")
def maybe_create_datacache_dir() -> pathlib.Path:
    """Return the data-cache directory, creating it (and parents) if needed.

    Fix: the old ``is_dir()``/``mkdir()`` pair failed with FileNotFoundError when
    the parent ``.activemri`` directory did not exist yet, and had a check-then-act
    race. ``mkdir(parents=True, exist_ok=True)`` handles both atomically.
    """
    datacache_dir = get_user_dir() / "__datacache__"
    datacache_dir.mkdir(parents=True, exist_ok=True)
    return datacache_dir
def get_defaults_json() -> Tuple[Dict[str, str], str]:
    """Load the user's defaults.json, creating it with empty values if absent.

    Returns:
        (content, path): the settings dict and the settings file path as a string.
    """
    defaults_path = get_user_dir() / "defaults.json"
    if pathlib.Path.exists(defaults_path):
        with defaults_path.open("r", encoding="utf-8") as fp:
            content = json.load(fp)
    else:
        # First run: create the settings directory and write an empty template.
        defaults_path.parents[0].mkdir(exist_ok=True)
        content = {"data_location": "", "saved_models_dir": ""}
        with defaults_path.open("w", encoding="utf-8") as fp:
            json.dump(content, fp)
    return content, str(defaults_path)
return content, str(defaults_path)
def import_object_from_str(classname: str):
    """Import and return the object named by a dotted path, e.g. ``"pkg.mod.Obj"``.

    Fix: the original assigned ``the_object`` from ``rsplit`` and then immediately
    overwrote it with an equivalent ``split(".")[-1]`` — the dead reassignment is
    removed.
    """
    module_name, object_name = classname.rsplit(".", 1)
    module = importlib.import_module(module_name)
    return getattr(module, object_name)
def compute_ssim(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
    """Per-sample structural similarity between two batches, as float32.

    Each sample's data_range is taken from the corresponding target in ``ys``.
    """
    scores = []
    for x, y in zip(xs, ys):
        x_np = x.cpu().numpy()
        y_np = y.cpu().numpy()
        scores.append(
            skimage.metrics.structural_similarity(
                x_np, y_np, data_range=y_np.max()
            )
        )
    return np.array(scores, dtype=np.float32)
def compute_psnr(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
    """Per-sample peak signal-to-noise ratio between two batches, as float32.

    Each sample's data_range is taken from the corresponding target in ``ys``.
    """
    scores = []
    for x, y in zip(xs, ys):
        x_np = x.cpu().numpy()
        y_np = y.cpu().numpy()
        scores.append(
            skimage.metrics.peak_signal_noise_ratio(
                x_np, y_np, data_range=y_np.max()
            )
        )
    return np.array(scores, dtype=np.float32)
def compute_mse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
    """Per-sample mean squared error, averaged over all non-batch dimensions."""
    diff = ys.cpu().numpy() - xs.cpu().numpy()
    non_batch_axes = tuple(range(1, diff.ndim))
    return np.mean(diff ** 2, axis=non_batch_axes)
def compute_nmse(xs: torch.Tensor, ys: torch.Tensor) -> np.ndarray:
    """Per-sample normalized MSE: ||y - x||^2 / ||y||^2, as float32."""
    xs_np = xs.cpu().numpy()
    ys_np = ys.cpu().numpy()
    scores = [
        np.linalg.norm(y - x) ** 2 / np.linalg.norm(y) ** 2
        for x, y in zip(xs_np, ys_np)
    ]
    return np.array(scores, dtype=np.float32)
def compute_mse_torch(xs: torch.Tensor, ys: torch.Tensor) -> torch.Tensor:
    """Per-sample mean squared error as a torch tensor of shape ``(batch,)``."""
    reduce_dims = tuple(range(1, xs.dim()))
    return ((ys - xs) ** 2).mean(dim=reduce_dims)
def compute_nmse_torch(xs: torch.Tensor, ys: torch.Tensor) -> torch.Tensor:
    """Per-sample normalized MSE: ||y - x||^2 / ||y||^2, as a torch tensor."""
    reduce_dims = tuple(range(1, xs.dim()))
    numerator = torch.linalg.norm(ys - xs, dim=reduce_dims) ** 2
    denominator = torch.linalg.norm(ys, dim=reduce_dims) ** 2
    return numerator / denominator
from torch import nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
    """Structural similarity computed with uniform-window convolutions.

    ``forward`` returns the mean of the SSIM map for each batch element
    (a tensor of shape ``(batch,)``).
    """

    def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
        """
        Args:
            win_size: side length of the square averaging window.
            k1: stabilizing constant for the luminance term.
            k2: stabilizing constant for the contrast term.
        """
        super().__init__()
        self.win_size = win_size
        self.k1, self.k2 = k1, k2
        num_pix = win_size ** 2
        # Uniform averaging kernel, registered as a buffer so it follows .to(device).
        self.register_buffer("w", torch.ones(1, 1, win_size, win_size) / num_pix)
        # Bias-correction factor for the windowed (co)variances.
        self.cov_norm = num_pix / (num_pix - 1)

    def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
        assert isinstance(self.w, torch.Tensor)
        data_range = data_range[:, None, None, None]
        c1 = (self.k1 * data_range) ** 2
        c2 = (self.k2 * data_range) ** 2
        # Windowed means and raw second moments.
        mu_x = F.conv2d(X, self.w)
        mu_y = F.conv2d(Y, self.w)
        mxx = F.conv2d(X * X, self.w)
        myy = F.conv2d(Y * Y, self.w)
        mxy = F.conv2d(X * Y, self.w)
        # Bias-corrected windowed variances and covariance.
        var_x = self.cov_norm * (mxx - mu_x * mu_x)
        var_y = self.cov_norm * (myy - mu_y * mu_y)
        cov_xy = self.cov_norm * (mxy - mu_x * mu_y)
        numerator = (2 * mu_x * mu_y + c1) * (2 * cov_xy + c2)
        denominator = (mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2)
        ssim_map = numerator / denominator
        non_batch_dims = tuple(range(1, len(X.shape)))
        return ssim_map.mean(dim=non_batch_dims)
# Shared module-level SSIM instance, reused (and re-homed) across calls.
SSIM = SSIMLoss()


def compute_ssim_torch(xs: torch.Tensor, ys: torch.Tensor) -> torch.Tensor:
    """Per-sample SSIM via the shared module-level SSIM instance.

    The shared instance is moved to the inputs' device before use; each
    sample's data_range is the max value of the corresponding target.
    """
    global SSIM
    SSIM = SSIM.to(xs.device)
    peak_values = torch.stack([y.max() for y in ys], dim=0)
    return SSIM(xs, ys, data_range=peak_values.detach())
def compute_psnr_torch(xs: torch.Tensor, ys: torch.Tensor) -> torch.Tensor:
    """Per-sample PSNR in decibels, using each target's max value as the peak."""
    mse = compute_mse_torch(xs, ys)
    peak = torch.stack([y.max() for y in ys], dim=0)
    return 10 * torch.log10(peak ** 2 / mse)
def compute_gaussian_nll_loss(reconstruction, target, logvar):
    """Element-wise Gaussian negative log-likelihood (up to an additive constant).

    Returns 0.5 * ((target - reconstruction)^2 / var + log var) with
    var = exp(logvar), element-wise.

    Fixes:
    - ``reduce=False`` is a long-deprecated flag; use ``reduction='none'``.
    - The old comment claimed variance is clipped to [0.0001, 0.1]; the clamp
      [-9.2, 1.609] actually bounds variance to [~1e-4, ~5].
    """
    l2 = F.mse_loss(reconstruction, target, reduction="none")
    # Clip logvar to keep variance in roughly [1e-4, 5], for numerical stability.
    logvar = logvar.clamp(min=-9.2, max=1.609)
    one_over_var = torch.exp(-logvar)
    assert len(l2) == len(logvar)
    return 0.5 * (one_over_var * l2 + logvar)
| [
"torch.mean",
"pathlib.Path.exists",
"json.dump",
"json.load",
"torch.ones",
"torch.stack",
"importlib.import_module",
"torch.nn.functional.mse_loss",
"torch.nn.functional.conv2d",
"torch.exp",
"numpy.array",
"numpy.linalg.norm",
"torch.linalg.norm",
"torch.log10",
"pathlib.Path.cwd"
] | [((1329, 1364), 'importlib.import_module', 'importlib.import_module', (['the_module'], {}), '(the_module)\n', (1352, 1364), False, 'import importlib\n'), ((1740, 1773), 'numpy.array', 'np.array', (['ssims'], {'dtype': 'np.float32'}), '(ssims, dtype=np.float32)\n', (1748, 1773), True, 'import numpy as np\n'), ((2112, 2145), 'numpy.array', 'np.array', (['psnrs'], {'dtype': 'np.float32'}), '(psnrs, dtype=np.float32)\n', (2120, 2145), True, 'import numpy as np\n'), ((2641, 2674), 'numpy.array', 'np.array', (['nmses'], {'dtype': 'np.float32'}), '(nmses, dtype=np.float32)\n', (2649, 2674), True, 'import numpy as np\n'), ((2804, 2840), 'torch.mean', 'torch.mean', (['((ys - xs) ** 2)'], {'dim': 'dims'}), '((ys - xs) ** 2, dim=dims)\n', (2814, 2840), False, 'import torch\n'), ((4799, 4829), 'torch.stack', 'torch.stack', (['data_range'], {'dim': '(0)'}), '(data_range, dim=0)\n', (4810, 4829), False, 'import torch\n'), ((5060, 5090), 'torch.stack', 'torch.stack', (['data_range'], {'dim': '(0)'}), '(data_range, dim=0)\n', (5071, 5090), False, 'import torch\n'), ((5222, 5270), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['reconstruction', 'target'], {'reduce': '(False)'}), '(reconstruction, target, reduce=False)\n', (5232, 5270), True, 'import torch.nn.functional as F\n'), ((5415, 5433), 'torch.exp', 'torch.exp', (['(-logvar)'], {}), '(-logvar)\n', (5424, 5433), False, 'import torch\n'), ((406, 424), 'pathlib.Path.cwd', 'pathlib.Path.cwd', ([], {}), '()\n', (422, 424), False, 'import pathlib\n'), ((756, 790), 'pathlib.Path.exists', 'pathlib.Path.exists', (['defaults_path'], {}), '(defaults_path)\n', (775, 790), False, 'import pathlib\n'), ((3989, 4008), 'torch.nn.functional.conv2d', 'F.conv2d', (['X', 'self.w'], {}), '(X, self.w)\n', (3997, 4008), True, 'import torch.nn.functional as F\n'), ((4040, 4059), 'torch.nn.functional.conv2d', 'F.conv2d', (['Y', 'self.w'], {}), '(Y, self.w)\n', (4048, 4059), True, 'import torch.nn.functional as F\n'), ((4077, 4100), 
'torch.nn.functional.conv2d', 'F.conv2d', (['(X * X)', 'self.w'], {}), '(X * X, self.w)\n', (4085, 4100), True, 'import torch.nn.functional as F\n'), ((4115, 4138), 'torch.nn.functional.conv2d', 'F.conv2d', (['(Y * Y)', 'self.w'], {}), '(Y * Y, self.w)\n', (4123, 4138), True, 'import torch.nn.functional as F\n'), ((4153, 4176), 'torch.nn.functional.conv2d', 'F.conv2d', (['(X * Y)', 'self.w'], {}), '(X * Y, self.w)\n', (4161, 4176), True, 'import torch.nn.functional as F\n'), ((5112, 5146), 'torch.log10', 'torch.log10', (['(data_range ** 2 / mse)'], {}), '(data_range ** 2 / mse)\n', (5123, 5146), False, 'import torch\n'), ((1007, 1028), 'json.dump', 'json.dump', (['content', 'f'], {}), '(content, f)\n', (1016, 1028), False, 'import json\n'), ((1122, 1134), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1131, 1134), False, 'import json\n'), ((2971, 3007), 'torch.linalg.norm', 'torch.linalg.norm', (['(ys - xs)'], {'dim': 'dims'}), '(ys - xs, dim=dims)\n', (2988, 3007), False, 'import torch\n'), ((3011, 3042), 'torch.linalg.norm', 'torch.linalg.norm', (['ys'], {'dim': 'dims'}), '(ys, dim=dims)\n', (3028, 3042), False, 'import torch\n'), ((2551, 2572), 'numpy.linalg.norm', 'np.linalg.norm', (['(y - x)'], {}), '(y - x)\n', (2565, 2572), True, 'import numpy as np\n'), ((2580, 2597), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (2594, 2597), True, 'import numpy as np\n'), ((3589, 3625), 'torch.ones', 'torch.ones', (['(1)', '(1)', 'win_size', 'win_size'], {}), '(1, 1, win_size, win_size)\n', (3599, 3625), False, 'import torch\n')] |
from jupyterworkflow.data import get_fremont_data
import pandas as pd
import numpy as np
def test_fremont_data():
    """Sanity-check the shape and index of the Fremont bridge dataset."""
    data = get_fremont_data()
    assert all(data.columns == ['Total', 'West', 'East'])
    assert isinstance(data.index, pd.DatetimeIndex)
    # BUG FIX: the comparison must be outside len(). The original
    # `len(np.unique(...) == 36)` took len() of a boolean array, which is
    # truthy for any non-empty index, so this assertion could never fail.
    # NOTE(review): confirm 36 is the intended count of distinct times-of-day
    # (hourly data would yield 24).
    assert len(np.unique(data.index.time)) == 36
| [
"jupyterworkflow.data.get_fremont_data",
"numpy.unique"
] | [((123, 141), 'jupyterworkflow.data.get_fremont_data', 'get_fremont_data', ([], {}), '()\n', (139, 141), False, 'from jupyterworkflow.data import get_fremont_data\n'), ((265, 291), 'numpy.unique', 'np.unique', (['data.index.time'], {}), '(data.index.time)\n', (274, 291), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
train_data_name = "../../data/ch08.train.npz"
test_data_name = "../../data/ch08.test.npz"
def TargetFunction(x):
    """Target curve: one period of sin(2*pi*x), with 2*pi approximated as 6.28."""
    return np.sin(6.28 * x)
def CreateSampleData(num_train, num_test):
    """Generate and persist noisy training samples and a clean test grid.

    Training inputs are uniform random in [0, 1) with noise in [-0.1, 0.1)
    added to the labels; test inputs are an evenly spaced grid with exact
    labels. Both are written as .npz files at the module-level paths.
    """
    train_x = np.random.random((num_train, 1))
    noise = (np.random.random((num_train, 1)) - 0.5) / 5
    np.savez(train_data_name, data=train_x, label=TargetFunction(train_x) + noise)
    test_x = np.linspace(0, 1, num_test).reshape(num_test, 1)
    np.savez(test_data_name, data=test_x, label=TargetFunction(test_x))
def GetSampleData():
    """Load the cached train/test sample files.

    Returns:
        (TrainData, TestData): the two loaded NpzFile objects.

    Raises:
        FileNotFoundError: if either .npz file has not been generated yet.

    Fixes:
    - Use logical `and` instead of bitwise `&` between the existence checks.
    - Fail loudly when files are missing; the original fell off the end and
      returned None, which crashed callers later on tuple unpacking.
    """
    Trainfile = Path(train_data_name)
    Testfile = Path(test_data_name)
    if not (Trainfile.exists() and Testfile.exists()):
        raise FileNotFoundError(
            "Sample data files not found; call CreateSampleData() first.")
    TrainData = np.load(Trainfile)
    TestData = np.load(Testfile)
    return TrainData, TestData
return TrainData, TestData
if __name__ == '__main__':
    # Generate 500 noisy training samples and a 100-point test grid,
    # then reload them from disk and visualize the training set.
    CreateSampleData(500, 100)
    TrainData, TestData = GetSampleData()
    plt.scatter(TrainData["data"], TrainData["label"], s=1, c='b')
    #plt.scatter(TestData["data"], TestData["label"], s=4, c='r')
    plt.show()
| [
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"pathlib.Path",
"numpy.sin",
"numpy.random.random",
"numpy.linspace",
"numpy.savez"
] | [((350, 366), 'numpy.sin', 'np.sin', (['(6.28 * x)'], {}), '(6.28 * x)\n', (356, 366), True, 'import numpy as np\n'), ((466, 498), 'numpy.random.random', 'np.random.random', (['(num_train, 1)'], {}), '((num_train, 1))\n', (482, 498), True, 'import numpy as np\n'), ((572, 616), 'numpy.savez', 'np.savez', (['train_data_name'], {'data': 'x1', 'label': 'y1'}), '(train_data_name, data=x1, label=y1)\n', (580, 616), True, 'import numpy as np\n'), ((728, 771), 'numpy.savez', 'np.savez', (['test_data_name'], {'data': 'x2', 'label': 'y2'}), '(test_data_name, data=x2, label=y2)\n', (736, 771), True, 'import numpy as np\n'), ((810, 831), 'pathlib.Path', 'Path', (['train_data_name'], {}), '(train_data_name)\n', (814, 831), False, 'from pathlib import Path\n'), ((847, 867), 'pathlib.Path', 'Path', (['test_data_name'], {}), '(test_data_name)\n', (851, 867), False, 'from pathlib import Path\n'), ((1131, 1193), 'matplotlib.pyplot.scatter', 'plt.scatter', (["TrainData['data']", "TrainData['label']"], {'s': '(1)', 'c': '"""b"""'}), "(TrainData['data'], TrainData['label'], s=1, c='b')\n", (1142, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1274), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1272, 1274), True, 'import matplotlib.pyplot as plt\n'), ((935, 953), 'numpy.load', 'np.load', (['Trainfile'], {}), '(Trainfile)\n', (942, 953), True, 'import numpy as np\n'), ((973, 990), 'numpy.load', 'np.load', (['Testfile'], {}), '(Testfile)\n', (980, 990), True, 'import numpy as np\n'), ((650, 677), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'num_test'], {}), '(0, 1, num_test)\n', (661, 677), True, 'import numpy as np\n'), ((529, 561), 'numpy.random.random', 'np.random.random', (['(num_train, 1)'], {}), '((num_train, 1))\n', (545, 561), True, 'import numpy as np\n')] |
from functools import reduce
class Node():
    """A single neuron in a fully-connected network.

    Holds its sigmoid activation (``output``) and backpropagated error
    (``delta``), plus the upstream/downstream connection objects. Connection
    objects are expected to carry ``weight`` plus node references; they are
    defined elsewhere in this project.
    """
    def __init__(self, layer_index, node_index):
        # Position of the node: which layer it belongs to and its index there.
        self.layer_index = layer_index
        self.node_index = node_index
        # Connections to the next layer (this node is their upstream end).
        self.downstream =[]
        # Connections from the previous layer (this node is their downstream end).
        self.upstream = []
        self.output = 0
        self.delta = 0
    def set_out_put(self, output):
        """Set the activation directly (used for input-layer nodes)."""
        self.output = output
    def append_downstream_connection(self, conn):
        """Register a connection leading to the next layer."""
        self.downstream.append(conn)
    def append_upstream_connection(self, conn):
        """Register a connection arriving from the previous layer."""
        self.upstream.append(conn)
    def calc_output(self):
        """Compute output = sigmoid(sum of upstream activations * weights)."""
        import numpy as np
        sigmoid = lambda x: 1/(1+np.exp(-x))
        output=reduce(lambda ret, cnn: ret+cnn.upstream_node.output* cnn.weight, self.upstream, 0)
        self.output = sigmoid(output)
    def calc_hidden_layer_delta(self):
        """Backpropagate delta for a hidden node from its downstream connections.

        NOTE(review): `cnn.downsream_node` looks like a typo of
        `downstream_node` — confirm against the Connection class's actual
        attribute name before changing it here.
        """
        downstream_delta = reduce(lambda ret, cnn: ret+cnn.downsream_node.delta*cnn.weight, self.downstream, 0.0)
        self.delta = self.output*(1-self.output)*downstream_delta
    def calc_output_layer_delta(self, label):
        """Compute delta for an output node from the true label."""
        self.delta = self.output*(1-self.output)*(label - self.output)
    def __str__(self):
        """Human-readable dump of the node state and both connection lists."""
        node_str = '%u-%u: output: %f delta: %f' %(self.layer_index, self.node_index, self.output, self.delta)
        downstream_str = reduce(lambda ret, conn: ret +'\n\t'+str(conn), self.downstream, '')
        upstream_str = reduce(lambda ret, conn: ret + '\n\t' +str(conn), self.upstream, '')
        return node_str +'\n\tdownstream:' +downstream_str +'\n\tupstream:' + upstream_str
| [
"functools.reduce",
"numpy.exp"
] | [((627, 718), 'functools.reduce', 'reduce', (['(lambda ret, cnn: ret + cnn.upstream_node.output * cnn.weight)', 'self.upstream', '(0)'], {}), '(lambda ret, cnn: ret + cnn.upstream_node.output * cnn.weight, self.\n upstream, 0)\n', (633, 718), False, 'from functools import reduce\n'), ((816, 911), 'functools.reduce', 'reduce', (['(lambda ret, cnn: ret + cnn.downsream_node.delta * cnn.weight)', 'self.downstream', '(0.0)'], {}), '(lambda ret, cnn: ret + cnn.downsream_node.delta * cnn.weight, self.\n downstream, 0.0)\n', (822, 911), False, 'from functools import reduce\n'), ((600, 610), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (606, 610), True, 'import numpy as np\n')] |
import poseconnect.utils
import poseconnect.defaults
import smc_kalman
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tqdm
from uuid import uuid4
import logging
import time
import itertools
import functools
import copy
logger = logging.getLogger(__name__)
def track_poses_3d(
    poses_3d,
    max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
    max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
    centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
    centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
    reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
    reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
    position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD,
    num_poses_per_track_min=poseconnect.defaults.TRACKING_NUM_POSES_PER_TRACK_MIN,
    progress_bar=poseconnect.defaults.PROGRESS_BAR,
    notebook=poseconnect.defaults.NOTEBOOK
):
    """Group 3D poses into tracks and label each pose with its track ID.

    Ingests the pose data, runs a fresh tracker over it, optionally drops
    tracks containing fewer than ``num_poses_per_track_min`` poses, and
    returns the poses inner-joined with their track assignments (poses
    belonging to a dropped track are therefore excluded).
    """
    poses_3d = poseconnect.utils.ingest_poses_3d(poses_3d)
    # Build the tracker from scratch over the full data set.
    tracker = update_pose_tracks_3d(
        poses_3d=poses_3d,
        pose_tracks_3d=None,
        max_match_distance=max_match_distance,
        max_iterations_since_last_match=max_iterations_since_last_match,
        centroid_position_initial_sd=centroid_position_initial_sd,
        centroid_velocity_initial_sd=centroid_velocity_initial_sd,
        reference_delta_t_seconds=reference_delta_t_seconds,
        reference_velocity_drift=reference_velocity_drift,
        position_observation_sd=position_observation_sd,
        progress_bar=progress_bar,
        notebook=notebook
    )
    if num_poses_per_track_min is not None:
        # Discard tracks with too few poses (in place on the tracker).
        tracker.filter(
            num_poses_min=num_poses_per_track_min,
            inplace=True
        )
    track_assignments = tracker.output_df()
    return poses_3d.join(track_assignments, how='inner')
def update_pose_tracks_3d(
    poses_3d,
    pose_tracks_3d=None,
    max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
    max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
    centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
    centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
    reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
    reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
    position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD,
    progress_bar=poseconnect.defaults.PROGRESS_BAR,
    notebook=poseconnect.defaults.NOTEBOOK
):
    """Create or extend a ``PoseTracks3D`` tracker with new 3D pose data.

    If ``pose_tracks_3d`` is None, a new tracker is seeded with the poses at
    the earliest timestamp and then updated with the remaining data;
    otherwise the existing tracker is updated with all of ``poses_3d``.
    Returns the (new or updated) tracker, or ``pose_tracks_3d`` unchanged
    when ``poses_3d`` is empty.
    """
    if len(poses_3d) == 0:
        return pose_tracks_3d
    if pose_tracks_3d is None:
        initial_timestamp = poses_3d['timestamp'].min()
        initial_pose_3d_ids = poses_3d.loc[
            poses_3d['timestamp'] == initial_timestamp
        ].index.values.tolist()
        initial_keypoint_coordinates_3d = poses_3d.loc[
            poses_3d['timestamp'] == initial_timestamp,
            'keypoint_coordinates_3d'
        ].values.tolist()
        initial_poses_3d = dict(zip(initial_pose_3d_ids, initial_keypoint_coordinates_3d))
        pose_tracks_3d = PoseTracks3D(
            timestamp=initial_timestamp,
            poses_3d=initial_poses_3d,
            # BUG FIX: these two tuning parameters were previously not
            # forwarded, so caller-supplied values were silently ignored
            # and the tracker fell back to its own defaults.
            max_match_distance=max_match_distance,
            max_iterations_since_last_match=max_iterations_since_last_match,
            centroid_position_initial_sd=centroid_position_initial_sd,
            centroid_velocity_initial_sd=centroid_velocity_initial_sd,
            reference_delta_t_seconds=reference_delta_t_seconds,
            reference_velocity_drift=reference_velocity_drift,
            position_observation_sd=position_observation_sd
        )
        # Seed poses are already in the tracker; feed it the rest.
        pose_tracks_3d.update_df(
            poses_3d=poses_3d.loc[poses_3d['timestamp'] != initial_timestamp],
            progress_bar=progress_bar,
            notebook=notebook
        )
    else:
        pose_tracks_3d.update_df(
            poses_3d=poses_3d,
            progress_bar=progress_bar,
            notebook=notebook
        )
    return pose_tracks_3d
def interpolate_pose_tracks_3d(
    poses_3d_with_tracks,
    frames_per_second=poseconnect.defaults.FRAMES_PER_SECOND
):
    """Fill in missing frames for every track in a set of tracked 3D poses.

    Each track is interpolated independently (see
    ``interpolate_pose_track``); the synthesized poses are appended to the
    original data and the result is returned sorted by timestamp.
    """
    poses_3d_with_tracks = poseconnect.utils.ingest_poses_3d_with_tracks(poses_3d_with_tracks)
    poses_3d_new_list = list()
    for pose_track_3d_id, pose_track in poses_3d_with_tracks.groupby('pose_track_3d_id'):
        poses_3d_new_track = interpolate_pose_track(
            pose_track,
            frames_per_second=frames_per_second
        )
        # Tag the interpolated rows with the track they belong to.
        poses_3d_new_track['pose_track_3d_id'] = pose_track_3d_id
        poses_3d_new_list.append(poses_3d_new_track)
    # BUG FIX: pd.concat raises ValueError on an empty list of objects, so
    # input containing no tracks previously crashed; return it (sorted,
    # as a copy) instead.
    if len(poses_3d_new_list) == 0:
        return poses_3d_with_tracks.sort_values('timestamp')
    poses_3d_new = pd.concat(poses_3d_new_list)
    poses_3d_with_tracks_interpolated = pd.concat((
        poses_3d_with_tracks,
        poses_3d_new
    ))
    poses_3d_with_tracks_interpolated.sort_values('timestamp', inplace=True)
    return poses_3d_with_tracks_interpolated
def interpolate_pose_track(
    pose_track_3d,
    frames_per_second=None
):
    """Synthesize poses for frames missing from a single pose track.

    Builds a regular time grid from the track's first to last timestamp at
    the given frame rate, linearly interpolates the 3D keypoint coordinates
    (time-weighted) at grid points not present in the input, and returns a
    DataFrame of ONLY the newly created poses, indexed by fresh
    ``pose_3d_id`` values with the timestamp as a column.

    Parameters
    ----------
    pose_track_3d : DataFrame with a 'timestamp' column (unique values) and
        a 'keypoint_coordinates_3d' column of (num_keypoints, 3) arrays.
    frames_per_second : int dividing 1000 evenly, or None to use
        ``poseconnect.defaults.FRAMES_PER_SECOND``.

    Raises
    ------
    ValueError
        For non-integer frame rates, frame periods with a fractional number
        of milliseconds, or duplicate timestamps in the input.
    """
    # Resolve the default lazily: the previous eager default froze
    # poseconnect.defaults.FRAMES_PER_SECOND at import time, so later
    # configuration changes were ignored.
    if frames_per_second is None:
        frames_per_second = poseconnect.defaults.FRAMES_PER_SECOND
    if not isinstance(frames_per_second, int):
        raise ValueError('Only integer frame rates currently supported')
    if not 1000 % frames_per_second == 0:
        raise ValueError('Only frame periods with integer number of milliseconds currently supported')
    frame_period_milliseconds = 1000//frames_per_second
    if pose_track_3d['timestamp'].duplicated().any():
        raise ValueError('Pose data for single pose track contains duplicate timestamps')
    pose_track_3d = pose_track_3d.copy()
    pose_track_3d.sort_values('timestamp', inplace=True)
    old_time_index = pd.DatetimeIndex(pose_track_3d['timestamp'])
    combined_time_index = pd.date_range(
        start=pose_track_3d['timestamp'].min(),
        end=pose_track_3d['timestamp'].max(),
        freq='{}ms'.format(frame_period_milliseconds),
        name='timestamp'
    )
    # Grid points that do not coincide with an observed pose.
    new_time_index = combined_time_index.difference(old_time_index)
    old_num_poses = len(old_time_index)
    combined_num_poses = len(combined_time_index)
    new_num_poses = len(new_time_index)
    # Flatten (num_keypoints, 3) arrays to one row per pose so pandas can
    # interpolate every coordinate column at once.
    keypoints_flattened_df = pd.DataFrame(
        np.stack(pose_track_3d['keypoint_coordinates_3d']).reshape((old_num_poses, -1)),
        index=old_time_index
    )
    keypoints_flattened_interpolated_df = keypoints_flattened_df.reindex(combined_time_index).interpolate(method='time')
    keypoints_flattened_interpolated_array = keypoints_flattened_interpolated_df.values
    keypoints_interpolated_array = keypoints_flattened_interpolated_array.reshape((combined_num_poses, -1, 3))
    keypoints_interpolated_array_unstacked = [keypoints_interpolated_array[i] for i in range(keypoints_interpolated_array.shape[0])]
    poses_3d_interpolated = pd.Series(
        keypoints_interpolated_array_unstacked,
        index=combined_time_index,
        name='keypoint_coordinates_3d'
    ).to_frame()
    # Keep only the synthesized frames and give each a fresh pose ID.
    poses_3d_new = poses_3d_interpolated.reindex(new_time_index)
    pose_3d_ids_new = [uuid4().hex for _ in range(len(poses_3d_new))]
    poses_3d_new['pose_3d_id'] = pose_3d_ids_new
    poses_3d_new = poses_3d_new.reset_index().set_index('pose_3d_id')
    return poses_3d_new
class PoseTracks3D:
    """Collection of 3D pose tracks updated by nearest-centroid matching.

    Maintains a set of active tracks (``PoseTrack3D`` instances, keyed by
    track ID) that are matched against incoming pose observations frame by
    frame; tracks that go unmatched for more than
    ``max_iterations_since_last_match`` iterations are moved to
    ``inactive_tracks``.
    """
    def __init__(
        self,
        timestamp,
        poses_3d,
        max_match_distance=poseconnect.defaults.TRACKING_MAX_MATCH_DISTANCE,
        max_iterations_since_last_match=poseconnect.defaults.TRACKING_MAX_ITERATIONS_SINCE_LAST_MATCH,
        centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
        centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
        reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
        reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
        position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD
    ):
        """Start one active track per pose observed at the initial timestamp.

        ``poses_3d`` maps pose_3d_id -> keypoint coordinate array.
        """
        self.max_match_distance = max_match_distance
        self.max_iterations_since_last_match = max_iterations_since_last_match
        self.centroid_position_initial_sd = centroid_position_initial_sd
        self.centroid_velocity_initial_sd = centroid_velocity_initial_sd
        self.reference_delta_t_seconds = reference_delta_t_seconds
        self.reference_velocity_drift = reference_velocity_drift
        self.position_observation_sd = position_observation_sd
        self.active_tracks = dict()
        self.inactive_tracks = dict()
        for pose_3d_id, keypoint_coordinates_3d in poses_3d.items():
            pose_track_3d = PoseTrack3D(
                timestamp=timestamp,
                pose_3d_id = pose_3d_id,
                keypoint_coordinates_3d=keypoint_coordinates_3d,
                centroid_position_initial_sd=self.centroid_position_initial_sd,
                centroid_velocity_initial_sd=self.centroid_velocity_initial_sd,
                reference_delta_t_seconds=self.reference_delta_t_seconds,
                reference_velocity_drift=self.reference_velocity_drift,
                position_observation_sd=self.position_observation_sd
            )
            self.active_tracks[pose_track_3d.pose_track_3d_id] = pose_track_3d
    def update_df(
        self,
        poses_3d,
        progress_bar=poseconnect.defaults.PROGRESS_BAR,
        notebook=poseconnect.defaults.NOTEBOOK
    ):
        """Feed a DataFrame of poses to the tracker, one timestamp at a time.

        ``poses_3d`` is indexed by pose_3d_id with 'timestamp' and
        'keypoint_coordinates_3d' columns. Timestamps are processed in
        ascending order; ``progress_bar``/``notebook`` select a tqdm wrapper.
        """
        timestamps = np.sort(poses_3d['timestamp'].unique())
        if progress_bar:
            if notebook:
                timestamp_iterator = tqdm.notebook.tqdm(timestamps)
            else:
                timestamp_iterator = tqdm.tqdm(timestamps)
        else:
            timestamp_iterator = timestamps
        for current_timestamp in timestamp_iterator:
            # Collect pose IDs and keypoint arrays for this frame into the
            # dict form that update() expects.
            current_pose_3d_ids = poses_3d.loc[
                poses_3d['timestamp'] == current_timestamp
            ].index.values.tolist()
            current_keypoint_coordinates_3d = poses_3d.loc[
                poses_3d['timestamp'] == current_timestamp,
                'keypoint_coordinates_3d'
            ].values.tolist()
            current_poses_3d = dict(zip(current_pose_3d_ids, current_keypoint_coordinates_3d))
            self.update(
                timestamp=current_timestamp,
                poses_3d=current_poses_3d
            )
    def update(
        self,
        timestamp,
        poses_3d
    ):
        """Advance all active tracks to ``timestamp``, then match and absorb
        the observations in ``poses_3d`` (dict pose_3d_id -> keypoints)."""
        self.predict(
            timestamp=timestamp
        )
        self.incorporate_observations(
            timestamp=timestamp,
            poses_3d=poses_3d
        )
    def predict(
        self,
        timestamp
    ):
        """Run the prediction step of every active track up to ``timestamp``."""
        for pose_track_3d in self.active_tracks.values():
            pose_track_3d.predict(timestamp)
    def incorporate_observations(
        self,
        timestamp,
        poses_3d
    ):
        """Match observations to tracks; update, expire, and spawn tracks.

        Matched tracks absorb their observation and reset their miss
        counter. Unmatched active tracks have their miss counter
        incremented and are retired to ``inactive_tracks`` once it exceeds
        ``max_iterations_since_last_match``. Each unmatched observation
        starts a brand-new track.
        """
        matches = self.match_observations_to_pose_tracks_3d(
            poses_3d=poses_3d
        )
        matched_pose_tracks_3d = set(matches.keys())
        matched_poses = set(matches.values())
        unmatched_pose_tracks_3d = set(self.active_tracks.keys()) - matched_pose_tracks_3d
        unmatched_poses = set(poses_3d.keys()) - matched_poses
        for pose_track_3d_id, pose_3d_id in matches.items():
            self.active_tracks[pose_track_3d_id].iterations_since_last_match = 0
            self.active_tracks[pose_track_3d_id].incorporate_observation(
                pose_3d_id = pose_3d_id,
                keypoint_coordinates_3d = poses_3d[pose_3d_id],
            )
        for pose_track_3d_id in unmatched_pose_tracks_3d:
            self.active_tracks[pose_track_3d_id].iterations_since_last_match += 1
            if self.active_tracks[pose_track_3d_id].iterations_since_last_match > self.max_iterations_since_last_match:
                self.inactive_tracks[pose_track_3d_id] = self.active_tracks.pop(pose_track_3d_id)
        for pose_3d_id in unmatched_poses:
            pose_track_3d = PoseTrack3D(
                timestamp=timestamp,
                pose_3d_id=pose_3d_id,
                keypoint_coordinates_3d=poses_3d[pose_3d_id],
                centroid_position_initial_sd=self.centroid_position_initial_sd,
                centroid_velocity_initial_sd=self.centroid_velocity_initial_sd,
                reference_delta_t_seconds=self.reference_delta_t_seconds,
                reference_velocity_drift=self.reference_velocity_drift,
                position_observation_sd=self.position_observation_sd
            )
            self.active_tracks[pose_track_3d.pose_track_3d_id] = pose_track_3d
    def match_observations_to_pose_tracks_3d(
        self,
        poses_3d
    ):
        """Return mutual-best matches as a dict pose_track_3d_id -> pose_3d_id.

        Builds a track-by-pose distance table (predicted track centroid vs
        NaN-ignoring observation centroid), masking out pairs farther apart
        than ``max_match_distance``, and keeps only pairs where the track is
        the pose's nearest track AND the pose is the track's nearest pose.
        """
        pose_track_3d_ids = self.active_tracks.keys()
        pose_3d_ids = poses_3d.keys()
        distances = pd.DataFrame(
            index = pose_track_3d_ids,
            columns = pose_3d_ids,
            dtype='float'
        )
        for pose_track_3d_id, pose_3d_id in itertools.product(pose_track_3d_ids, pose_3d_ids):
            # First three state components are the predicted centroid position.
            track_position = self.active_tracks[pose_track_3d_id].centroid_distribution.mean[:3]
            observation_position = np.nanmean(poses_3d[pose_3d_id], axis=0)
            distance = np.linalg.norm(
                np.subtract(
                    track_position,
                    observation_position
                )
            )
            if distance < self.max_match_distance:
                distances.loc[pose_track_3d_id, pose_3d_id] = distance
        best_track_for_each_pose = distances.idxmin(axis=0)
        best_pose_for_each_track = distances.idxmin(axis=1)
        # Intersection of (track, best pose) and (best track, pose) pairs
        # yields exactly the mutual-best assignments.
        matches = dict(
            set(zip(best_pose_for_each_track.index, best_pose_for_each_track.values)) &
            set(zip(best_track_for_each_pose.values, best_track_for_each_pose.index))
        )
        return matches
    def filter(
        self,
        num_poses_min=poseconnect.defaults.TRACKING_NUM_POSES_PER_TRACK_MIN,
        inplace=False
    ):
        """Drop tracks (active and inactive) with fewer than ``num_poses_min``
        poses. Operates on a deep copy unless ``inplace`` is True; returns
        the filtered tracker when not in place.

        Note: this method intentionally shadows the ``filter`` builtin name
        as part of the public API.
        """
        if not inplace:
            new_pose_tracks_3d = copy.deepcopy(self)
        else:
            new_pose_tracks_3d = self
        new_pose_tracks_3d.active_tracks = dict(filter(
            lambda key_value_tuple: key_value_tuple[1].num_poses() >= num_poses_min,
            new_pose_tracks_3d.active_tracks.items()
        ))
        new_pose_tracks_3d.inactive_tracks = dict(filter(
            lambda key_value_tuple: key_value_tuple[1].num_poses() >= num_poses_min,
            new_pose_tracks_3d.inactive_tracks.items()
        ))
        if not inplace:
            return new_pose_tracks_3d
    def extract_pose_tracks_3d(
        self,
        poses_3d
    ):
        """Inner-join ``poses_3d`` with this tracker's track assignments,
        preserving the input's index name."""
        input_index_name = poses_3d.index.name
        poses_3d_with_tracks = poses_3d.join(
            self.output_df(),
            how='inner'
        )
        poses_3d_with_tracks.index.name = input_index_name
        return poses_3d_with_tracks
    def output(self):
        """Return a dict pose_track_3d_id -> per-track output dict for all
        tracks (active and inactive)."""
        output = {pose_track_3d_id: pose_track_3d.output() for pose_track_3d_id, pose_track_3d in self.tracks().items()}
        return output
    def output_df(self):
        """Return a DataFrame mapping pose_3d_id (index) to pose_track_3d_id
        over all tracks (active and inactive)."""
        df = pd.concat(
            [pose_track_3d.output_df() for pose_track_3d in self.tracks().values()]
        )
        return df
    def tracks(self):
        """Return all tracks, active and inactive, in a single dict."""
        return {**self.active_tracks, **self.inactive_tracks}
    def plot_trajectories(
        self,
        pose_track_3d_ids,
        track_label_lookup=None,
        fig_width_inches=8.0,
        fig_height_inches=10.5,
        show=True
    ):
        """Plot x/y/z centroid trajectories of the given tracks on three
        stacked, time-shared axes.

        ``track_label_lookup`` maps track IDs to display labels; by default
        each track is labeled by the first two characters of its ID.
        """
        if track_label_lookup is None:
            track_label_lookup = {pose_track_3d_id: pose_track_3d_id[:2] for pose_track_3d_id in pose_track_3d_ids}
        fig, axes = plt.subplots(3, 1, sharex=True)
        for pose_track_3d_id in pose_track_3d_ids:
            for axis_index, axis_name in enumerate(['x', 'y', 'z']):
                self.tracks()[pose_track_3d_id].draw_trajectory(
                    axis_index=axis_index,
                    axis_name=axis_name,
                    axis_object=axes[axis_index],
                    track_label_lookup=track_label_lookup
                )
        axes[0].legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
        axes[2].set_xlabel('Time')
        fig.autofmt_xdate()
        fig.set_size_inches(fig_width_inches, fig_height_inches)
        if show:
            plt.show()
class PoseTrack3D:
    """Kalman-filter track of a single pose's centroid over time.

    The state vector is (x, y, z, vx, vy, vz): the pose centroid position
    plus a velocity estimate, propagated by a constant-velocity model (see
    ``constant_velocity_model``). The track records every pose_3d_id it
    absorbs and the full trajectory of its centroid distribution.
    """
    def __init__(
        self,
        timestamp,
        pose_3d_id,
        keypoint_coordinates_3d,
        centroid_position_initial_sd=poseconnect.defaults.TRACKING_CENTROID_POSITION_INITIAL_SD,
        centroid_velocity_initial_sd=poseconnect.defaults.TRACKING_CENTROID_VELOCITY_INITIAL_SD,
        reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
        reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
        position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD
    ):
        """Initialize the track from its first pose observation."""
        centroid_position = self._compute_centroid(keypoint_coordinates_3d)
        self.pose_track_3d_id = uuid4().hex
        self.initial_timestamp = timestamp
        self.latest_timestamp = timestamp
        self.pose_3d_ids = [pose_3d_id]
        # Initial state: observed centroid position with zero velocity;
        # independent (diagonal) uncertainties for position and velocity.
        self.centroid_distribution = smc_kalman.GaussianDistribution(
            mean=np.concatenate((centroid_position.reshape((3,)), np.repeat(0.0, 3))),
            covariance=np.diag(np.concatenate((
                np.repeat(centroid_position_initial_sd**2, 3),
                np.repeat(centroid_velocity_initial_sd**2, 3)
            )))
        )
        self.reference_delta_t_seconds = reference_delta_t_seconds
        self.reference_velocity_drift = reference_velocity_drift
        self.position_observation_sd = position_observation_sd
        self.iterations_since_last_match = 0
        # Per-timestamp history of the observed centroid (NaN when the track
        # went unmatched) and the filtered state mean/covariance.
        self.centroid_distribution_trajectory = {
            'timestamp': [self.latest_timestamp],
            'observed_centroid': [centroid_position],
            'mean': [self.centroid_distribution.mean],
            'covariance': [self.centroid_distribution.covariance]
        }

    @staticmethod
    def _compute_centroid(keypoint_coordinates_3d):
        """Validate a (num_keypoints, 3) array and return its NaN-ignoring
        centroid. Shared by __init__ and incorporate_observation (the
        validation logic was previously duplicated in both)."""
        keypoint_coordinates_3d = np.asarray(keypoint_coordinates_3d)
        if keypoint_coordinates_3d.ndim != 2:
            raise ValueError('Keypoint coordinate array should be two dimensional (Number of keypoints x 3)')
        return np.nanmean(keypoint_coordinates_3d, axis=0)

    def predict(
        self,
        timestamp
    ):
        """Propagate the centroid distribution forward to ``timestamp`` and
        append a prediction-only entry (observed centroid = NaN) to the
        trajectory history."""
        delta_t_seconds = (timestamp - self.latest_timestamp).total_seconds()
        self.centroid_distribution = self.centroid_distribution.predict(
            linear_gaussian_model=constant_velocity_model(
                delta_t_seconds=delta_t_seconds,
                reference_delta_t_seconds=self.reference_delta_t_seconds,
                reference_velocity_drift=self.reference_velocity_drift,
                position_observation_sd=self.position_observation_sd
            )
        )
        self.latest_timestamp = timestamp
        self.centroid_distribution_trajectory['timestamp'].append(self.latest_timestamp)
        self.centroid_distribution_trajectory['observed_centroid'].append(np.array([np.nan, np.nan, np.nan]))
        self.centroid_distribution_trajectory['mean'].append(self.centroid_distribution.mean)
        self.centroid_distribution_trajectory['covariance'].append(self.centroid_distribution.covariance)

    def incorporate_observation(
        self,
        pose_3d_id,
        keypoint_coordinates_3d
    ):
        """Fold a matched pose observation into the current state (Kalman
        update with delta_t=None, i.e. no additional time step) and overwrite
        the latest trajectory entry with the updated values."""
        centroid_position = self._compute_centroid(keypoint_coordinates_3d)
        self.pose_3d_ids.append(pose_3d_id)
        self.centroid_distribution = self.centroid_distribution.incorporate_observation(
            linear_gaussian_model=constant_velocity_model(
                delta_t_seconds=None,
                reference_delta_t_seconds=self.reference_delta_t_seconds,
                reference_velocity_drift=self.reference_velocity_drift,
                position_observation_sd=self.position_observation_sd
            ),
            observation_vector=centroid_position
        )
        self.centroid_distribution_trajectory['observed_centroid'][-1] = centroid_position
        self.centroid_distribution_trajectory['mean'][-1] = self.centroid_distribution.mean
        self.centroid_distribution_trajectory['covariance'][-1] = self.centroid_distribution.covariance

    def num_poses(
        self
    ):
        """Return the number of poses absorbed by this track."""
        return len(self.pose_3d_ids)

    def centroid_distribution_trajectory_df(self):
        """Return the trajectory history as a timestamp-indexed DataFrame
        with the state mean split into position and velocity columns."""
        df = pd.DataFrame({
            'timestamp': self.centroid_distribution_trajectory['timestamp'],
            'observed_centroid': self.centroid_distribution_trajectory['observed_centroid'],
            'position': [mean[:3] for mean in self.centroid_distribution_trajectory['mean']],
            'velocity': [mean[3:] for mean in self.centroid_distribution_trajectory['mean']],
            'covariance': self.centroid_distribution_trajectory['covariance']
        })
        df.set_index('timestamp', inplace=True)
        return df

    def output(self):
        """Return the track's time span and its list of pose_3d_ids."""
        output = {
            'start': pd.to_datetime(self.initial_timestamp).to_pydatetime(),
            'end': pd.to_datetime(self.latest_timestamp).to_pydatetime(),
            'pose_3d_ids': self.pose_3d_ids
        }
        return output

    def output_df(self):
        """Return a DataFrame mapping each pose_3d_id (index) to this
        track's ID."""
        df = pd.DataFrame([
            {'pose_3d_id': pose_id, 'pose_track_3d_id': self.pose_track_3d_id}
            for pose_id in self.pose_3d_ids
        ]).set_index('pose_3d_id')
        return df

    def plot_trajectory(
        self,
        track_label_lookup=None,
        fig_width_inches=8.0,
        fig_height_inches=10.5,
        show=True
    ):
        """Plot this track's x/y/z centroid trajectory on three stacked,
        time-shared axes."""
        if track_label_lookup is None:
            track_label_lookup = {self.pose_track_3d_id: self.pose_track_3d_id[:2]}
        fig, axes = plt.subplots(3, 1, sharex=True)
        for axis_index, axis_name in enumerate(['x', 'y', 'z']):
            self.draw_trajectory(
                axis_index=axis_index,
                axis_name=axis_name,
                axis_object=axes[axis_index],
                track_label_lookup=track_label_lookup
            )
        axes[0].legend(loc='upper left', bbox_to_anchor=(1.0, 1.0))
        axes[2].set_xlabel('Time')
        fig.autofmt_xdate()
        fig.set_size_inches(fig_width_inches, fig_height_inches)
        if show:
            plt.show()

    def draw_trajectory(
        self,
        axis_index,
        axis_name,
        axis_object,
        track_label_lookup=None
    ):
        """Draw one coordinate of the trajectory on an existing axis: a
        one-standard-deviation band around the filtered position plus the
        raw centroid observations."""
        if track_label_lookup is None:
            track_label_lookup = {self.pose_track_3d_id: self.pose_track_3d_id[:2]}
        df = self.centroid_distribution_trajectory_df()
        axis_object.fill_between(
            df.index,
            np.stack(df['position'])[:, axis_index] - np.sqrt(np.stack(df['covariance'])[:, axis_index, axis_index]),
            np.stack(df['position'])[:, axis_index] + np.sqrt(np.stack(df['covariance'])[:, axis_index, axis_index]),
            alpha = 0.4,
            label='Track {} confidence interval'.format(track_label_lookup[self.pose_track_3d_id])
        )
        axis_object.plot(
            df.index,
            np.stack(df['observed_centroid'])[:, axis_index],
            '.',
            label='Track {} observation'.format(track_label_lookup[self.pose_track_3d_id])
        )
        axis_object.set_ylabel('${}$ position (meters)'.format(axis_name))
def constant_velocity_model(
    delta_t_seconds,
    reference_delta_t_seconds=poseconnect.defaults.TRACKING_REFERENCE_DELTA_T_SECONDS,
    reference_velocity_drift=poseconnect.defaults.TRACKING_REFERENCE_VELOCITY_DRIFT,
    position_observation_sd=poseconnect.defaults.TRACKING_POSITION_OBSERVATION_SD
):
    """Build the linear-Gaussian constant-velocity model for a 6D state.

    The state is (x, y, z, vx, vy, vz). Velocity drift noise is scaled by
    the square root of the time step relative to the reference step. A
    ``delta_t_seconds`` of None means a pure observation update: both the
    time step and the drift are set to zero.
    """
    if delta_t_seconds is None:
        delta_t_seconds = 0.0
        velocity_drift = 0.0
    else:
        velocity_drift = reference_velocity_drift*np.sqrt(delta_t_seconds/reference_delta_t_seconds)
    # Identity plus the position-advanced-by-velocity coupling terms.
    transition_model = np.identity(6)
    transition_model[0, 3] = delta_t_seconds
    transition_model[1, 4] = delta_t_seconds
    transition_model[2, 5] = delta_t_seconds
    # Process noise acts on the velocity components only.
    transition_noise_covariance = np.diag(
        [0.0, 0.0, 0.0] + [velocity_drift**2]*3
    )
    # Only the position components are observed.
    observation_model = np.identity(6)[:3]
    observation_noise_covariance = np.diag([position_observation_sd**2]*3)
    return smc_kalman.LinearGaussianModel(
        transition_model=transition_model,
        transition_noise_covariance=transition_noise_covariance,
        observation_model=observation_model,
        observation_noise_covariance=observation_noise_covariance,
        control_model=None
    )
| [
"tqdm.notebook.tqdm",
"pandas.DatetimeIndex",
"numpy.nanmean",
"pandas.DataFrame",
"itertools.product",
"matplotlib.pyplot.subplots",
"pandas.concat",
"numpy.repeat",
"numpy.stack",
"copy.deepcopy",
"tqdm.tqdm",
"matplotlib.pyplot.show",
"numpy.asarray",
"pandas.to_datetime",
"pandas.Ser... | [((260, 287), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'import logging\n'), ((4869, 4897), 'pandas.concat', 'pd.concat', (['poses_3d_new_list'], {}), '(poses_3d_new_list)\n', (4878, 4897), True, 'import pandas as pd\n'), ((4937, 4984), 'pandas.concat', 'pd.concat', (['(poses_3d_with_tracks, poses_3d_new)'], {}), '((poses_3d_with_tracks, poses_3d_new))\n', (4946, 4984), True, 'import pandas as pd\n'), ((5825, 5869), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["pose_track_3d['timestamp']"], {}), "(pose_track_3d['timestamp'])\n", (5841, 5869), True, 'import pandas as pd\n'), ((12891, 12964), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'pose_track_3d_ids', 'columns': 'pose_3d_ids', 'dtype': '"""float"""'}), "(index=pose_track_3d_ids, columns=pose_3d_ids, dtype='float')\n", (12903, 12964), True, 'import pandas as pd\n'), ((13059, 13108), 'itertools.product', 'itertools.product', (['pose_track_3d_ids', 'pose_3d_ids'], {}), '(pose_track_3d_ids, pose_3d_ids)\n', (13076, 13108), False, 'import itertools\n'), ((15775, 15806), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True)\n', (15787, 15806), True, 'import matplotlib.pyplot as plt\n'), ((17063, 17098), 'numpy.asarray', 'np.asarray', (['keypoint_coordinates_3d'], {}), '(keypoint_coordinates_3d)\n', (17073, 17098), True, 'import numpy as np\n'), ((17283, 17326), 'numpy.nanmean', 'np.nanmean', (['keypoint_coordinates_3d'], {'axis': '(0)'}), '(keypoint_coordinates_3d, axis=0)\n', (17293, 17326), True, 'import numpy as np\n'), ((19531, 19566), 'numpy.asarray', 'np.asarray', (['keypoint_coordinates_3d'], {}), '(keypoint_coordinates_3d)\n', (19541, 19566), True, 'import numpy as np\n'), ((19751, 19794), 'numpy.nanmean', 'np.nanmean', (['keypoint_coordinates_3d'], {'axis': '(0)'}), '(keypoint_coordinates_3d, axis=0)\n', (19761, 19794), True, 'import numpy as np\n'), ((20744, 21164), 
'pandas.DataFrame', 'pd.DataFrame', (["{'timestamp': self.centroid_distribution_trajectory['timestamp'],\n 'observed_centroid': self.centroid_distribution_trajectory[\n 'observed_centroid'], 'position': [mean[:3] for mean in self.\n centroid_distribution_trajectory['mean']], 'velocity': [mean[3:] for\n mean in self.centroid_distribution_trajectory['mean']], 'covariance':\n self.centroid_distribution_trajectory['covariance']}"], {}), "({'timestamp': self.centroid_distribution_trajectory[\n 'timestamp'], 'observed_centroid': self.\n centroid_distribution_trajectory['observed_centroid'], 'position': [\n mean[:3] for mean in self.centroid_distribution_trajectory['mean']],\n 'velocity': [mean[3:] for mean in self.centroid_distribution_trajectory\n ['mean']], 'covariance': self.centroid_distribution_trajectory[\n 'covariance']})\n", (20756, 21164), True, 'import pandas as pd\n'), ((22074, 22105), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'sharex': '(True)'}), '(3, 1, sharex=True)\n', (22086, 22105), True, 'import matplotlib.pyplot as plt\n'), ((6937, 7049), 'pandas.Series', 'pd.Series', (['keypoints_interpolated_array_unstacked'], {'index': 'combined_time_index', 'name': '"""keypoint_coordinates_3d"""'}), "(keypoints_interpolated_array_unstacked, index=combined_time_index,\n name='keypoint_coordinates_3d')\n", (6946, 7049), True, 'import pandas as pd\n'), ((7175, 7182), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (7180, 7182), False, 'from uuid import uuid4\n'), ((13242, 13282), 'numpy.nanmean', 'np.nanmean', (['poses_3d[pose_3d_id]'], {'axis': '(0)'}), '(poses_3d[pose_3d_id], axis=0)\n', (13252, 13282), True, 'import numpy as np\n'), ((14127, 14146), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (14140, 14146), False, 'import copy\n'), ((16427, 16437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16435, 16437), True, 'import matplotlib.pyplot as plt\n'), ((17378, 17385), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (17383, 
17385), False, 'from uuid import uuid4\n'), ((19154, 19188), 'numpy.array', 'np.array', (['[np.nan, np.nan, np.nan]'], {}), '([np.nan, np.nan, np.nan])\n', (19162, 19188), True, 'import numpy as np\n'), ((22620, 22630), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22628, 22630), True, 'import matplotlib.pyplot as plt\n'), ((24072, 24124), 'numpy.sqrt', 'np.sqrt', (['(delta_t_seconds / reference_delta_t_seconds)'], {}), '(delta_t_seconds / reference_delta_t_seconds)\n', (24079, 24124), True, 'import numpy as np\n'), ((24263, 24514), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, delta_t_seconds, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0,\n delta_t_seconds, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, delta_t_seconds], [0.0,\n 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0, delta_t_seconds, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0,\n delta_t_seconds, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, delta_t_seconds], [0.0,\n 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, \n 0.0, 0.0, 0.0, 1.0]])\n', (24271, 24514), True, 'import numpy as np\n'), ((24623, 24887), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, velocity_drift ** 2, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, velocity_drift ** 2, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0,\n velocity_drift ** 2]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, velocity_drift ** 2, 0.0,\n 0.0], [0.0, 0.0, 0.0, 0.0, velocity_drift ** 2, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0, velocity_drift ** 2]])\n', (24631, 24887), True, 'import numpy as np\n'), ((24979, 25090), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0,\n 1.0, 0.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]])\n', (24987, 25090), 
True, 'import numpy as np\n'), ((25172, 25317), 'numpy.array', 'np.array', (['[[position_observation_sd ** 2, 0.0, 0.0], [0.0, position_observation_sd **\n 2, 0.0], [0.0, 0.0, position_observation_sd ** 2]]'], {}), '([[position_observation_sd ** 2, 0.0, 0.0], [0.0, \n position_observation_sd ** 2, 0.0], [0.0, 0.0, position_observation_sd **\n 2]])\n', (25180, 25317), True, 'import numpy as np\n'), ((6340, 6390), 'numpy.stack', 'np.stack', (["pose_track_3d['keypoint_coordinates_3d']"], {}), "(pose_track_3d['keypoint_coordinates_3d'])\n", (6348, 6390), True, 'import numpy as np\n'), ((9680, 9710), 'tqdm.notebook.tqdm', 'tqdm.notebook.tqdm', (['timestamps'], {}), '(timestamps)\n', (9698, 9710), False, 'import tqdm\n'), ((9766, 9787), 'tqdm.tqdm', 'tqdm.tqdm', (['timestamps'], {}), '(timestamps)\n', (9775, 9787), False, 'import tqdm\n'), ((13338, 13387), 'numpy.subtract', 'np.subtract', (['track_position', 'observation_position'], {}), '(track_position, observation_position)\n', (13349, 13387), True, 'import numpy as np\n'), ((21580, 21699), 'pandas.DataFrame', 'pd.DataFrame', (["[{'pose_3d_id': pose_id, 'pose_track_3d_id': self.pose_track_3d_id} for\n pose_id in self.pose_3d_ids]"], {}), "([{'pose_3d_id': pose_id, 'pose_track_3d_id': self.\n pose_track_3d_id} for pose_id in self.pose_3d_ids])\n", (21592, 21699), True, 'import pandas as pd\n'), ((23435, 23468), 'numpy.stack', 'np.stack', (["df['observed_centroid']"], {}), "(df['observed_centroid'])\n", (23443, 23468), True, 'import numpy as np\n'), ((21335, 21373), 'pandas.to_datetime', 'pd.to_datetime', (['self.initial_timestamp'], {}), '(self.initial_timestamp)\n', (21349, 21373), True, 'import pandas as pd\n'), ((21410, 21447), 'pandas.to_datetime', 'pd.to_datetime', (['self.latest_timestamp'], {}), '(self.latest_timestamp)\n', (21424, 21447), True, 'import pandas as pd\n'), ((23017, 23041), 'numpy.stack', 'np.stack', (["df['position']"], {}), "(df['position'])\n", (23025, 23041), True, 'import numpy as np\n'), 
((23135, 23159), 'numpy.stack', 'np.stack', (["df['position']"], {}), "(df['position'])\n", (23143, 23159), True, 'import numpy as np\n'), ((17651, 17668), 'numpy.repeat', 'np.repeat', (['(0.0)', '(3)'], {}), '(0.0, 3)\n', (17660, 17668), True, 'import numpy as np\n'), ((23067, 23093), 'numpy.stack', 'np.stack', (["df['covariance']"], {}), "(df['covariance'])\n", (23075, 23093), True, 'import numpy as np\n'), ((23185, 23211), 'numpy.stack', 'np.stack', (["df['covariance']"], {}), "(df['covariance'])\n", (23193, 23211), True, 'import numpy as np\n'), ((17736, 17783), 'numpy.repeat', 'np.repeat', (['(centroid_position_initial_sd ** 2)', '(3)'], {}), '(centroid_position_initial_sd ** 2, 3)\n', (17745, 17783), True, 'import numpy as np\n'), ((17799, 17846), 'numpy.repeat', 'np.repeat', (['(centroid_velocity_initial_sd ** 2)', '(3)'], {}), '(centroid_velocity_initial_sd ** 2, 3)\n', (17808, 17846), True, 'import numpy as np\n')] |
import random
import unittest
import os
import Bio.Seq
import Bio.Data.CodonTable
import pyoma.browser.db as pyomadb
import tables
import numpy
class DatabaseChecks(unittest.TestCase):
    """Consistency checks for a pyoma HDF5 database.

    The database path is taken from the PYOMA_DB2CHECK environment
    variable; the whole suite is skipped when it is not set.
    """
    @classmethod
    def setUpClass(cls):
        try:
            path = os.environ['PYOMA_DB2CHECK']
        except KeyError:
            raise unittest.SkipTest("No database specified in PYOMA_DB2CHECK")
        cls.db = pyomadb.Database(path)

    def translated_cdna_match_protein_sequence(self, cdna, prot):
        """Return True if *cdna* translates to *prot* under any NCBI codon table.

        'X' nucleotides are treated as 'N' (any base). At each position a
        translated 'X', a protein 'X', or a leading 'M' (alternative start
        codon) is accepted as matching.
        """
        cdna = cdna.replace('X', 'N')
        for tab in Bio.Data.CodonTable.generic_by_id.keys():
            tab_ok = True
            trans = Bio.Seq.translate(cdna, table=tab)
            # Translation length is table-independent, so a length mismatch
            # (more than a trailing stop codon's worth of slack) rules out
            # every table at once.
            if not 3 >= len(trans) - len(prot) >= 0:
                return False
            for pos, (trans_aa, prot_aa) in enumerate(zip(trans, prot)):
                if trans_aa == prot_aa or trans_aa == 'X' or prot_aa == 'X':
                    continue
                elif prot_aa == 'M' and pos == 0 and trans_aa != '*':
                    continue
                else:
                    tab_ok = False
                    break
            if tab_ok:
                return True
        # BUG FIX: previously fell off the end and implicitly returned None
        # when no codon table matched; return an explicit False instead.
        return False

    def test_cdna_and_protein_sequence_match(self):
        """test translated cdna sequence and protein sequence match.

        This is done for a random sample of 1000 entries"""
        SAMPLES = 1000
        nr_entries = self.db.id_resolver.max_entry_nr
        # BUG FIX: entry numbers start at 1; range(nr_entries + 1) could
        # draw the invalid entry number 0.
        for entry_nr in random.sample(range(1, nr_entries + 1), SAMPLES):
            with self.subTest(entry_nr=entry_nr):
                cdna = self.db.get_cdna(entry_nr).decode()
                prot = self.db.get_sequence(entry_nr).decode()
                self.assertTrue(self.translated_cdna_match_protein_sequence(cdna, prot))

    def test_increasing_offsets(self):
        """Sequence and cDNA buffer offsets must be strictly increasing."""
        entry_tab = self.db.get_hdf5_handle().get_node('/Protein/Entries')
        seq_off = -1
        cds_off = -1
        for row in entry_tab:
            self.assertLess(seq_off, row['SeqBufferOffset'], "SeqBufferOffset decreases in row {}: {} vs {}"
                            .format(row.nrow, seq_off, row['SeqBufferOffset']))
            # BUG FIX: the failure message previously reported seq_off here
            # instead of cds_off.
            self.assertLess(cds_off, row['CDNABufferOffset'], "CDNABufferOffset decreases in row {}: {} vs {}"
                            .format(row.nrow, cds_off, row['CDNABufferOffset']))
            seq_off = row['SeqBufferOffset']
            cds_off = row['CDNABufferOffset']

    def test_homeology_flag(self):
        """Known polyploid genomes must be flagged, known diploids must not."""
        genome_tab = self.db.get_hdf5_handle().get_node('/Genome')
        for g in (b'WHEAT', b'GOSHI', b'BRANA'):
            for row in genome_tab.read_where('UniProtSpeciesCode == g'):
                self.assertTrue(row['IsPolyploid'], "{} is not recorded as polyploid genome".format(g))
        for g in (b'YEAST', b'HUMAN', b'PLAF7', b'ARATH', b'MOUSE'):
            for row in genome_tab.read_where('UniProtSpeciesCode == g'):
                self.assertFalse(row['IsPolyploid'], "{} is recorded to be a ployploid genome".format(g))

    def test_synteny_scores_exist(self):
        """Polyploid genomes present in the database must have at least one
        computed local synteny conservation value."""
        for g in ('WHEAT', 'BRANA', 'GOSHI'):
            try:
                t = self.db.get_hdf5_handle().get_node('/PairwiseRelation/{}/within'.format(g))
            except tables.NoSuchNodeError:
                # if species does not exist, we skip - not all datasets will have these genomes
                continue
            syn_col = t.col('SyntenyConservationLocal')
            computed_pairs = numpy.where(syn_col >= 0)
            self.assertLess(0, len(computed_pairs[0]), "No synteny values computed for {}".format(g))
| [
"pyoma.browser.db.Database",
"numpy.where",
"unittest.SkipTest"
] | [((413, 435), 'pyoma.browser.db.Database', 'pyomadb.Database', (['path'], {}), '(path)\n', (429, 435), True, 'import pyoma.browser.db as pyomadb\n'), ((3453, 3478), 'numpy.where', 'numpy.where', (['(syn_col >= 0)'], {}), '(syn_col >= 0)\n', (3464, 3478), False, 'import numpy\n'), ((334, 394), 'unittest.SkipTest', 'unittest.SkipTest', (['"""No database specified in PYOMA_DB2CHECK"""'], {}), "('No database specified in PYOMA_DB2CHECK')\n", (351, 394), False, 'import unittest\n')] |
import png
import os
import sys
import pydicom
import numpy as np
# Directory layout: DICOM inputs under ./data, PNGs under ./output/pix2pix
ROOT_DIR = os.getcwd()
IN_DIR = os.path.join(ROOT_DIR, "data")
# bugfix: the original concatenated ROOT_DIR + "output/..." without a path
# separator, producing e.g. "/home/useroutput/pix2pix/inputs"
PNG_IN_DIR = os.path.join(ROOT_DIR, "output", "pix2pix", "inputs")
PNG_OUT_DIR = os.path.join(ROOT_DIR, "output", "pix2pix", "outputs")
# Substrings identifying feature vs. label DICOM filenames
INPUT = "Features"
OUTPUT = "Labels"
def convert(mri_path, mri_filename):
    """ Function to convert a DICOM MRI file to PNG

    Feature (INPUT) images are rescaled by their own maximum; label
    (OUTPUT) images are rescaled by a fixed factor of 2000, with the
    sentinel pixel value 5000.0 clamped to that factor first.

    @param mri_path: The absolute path to the dicom file
    @param mri_filename: The name of the file without the path
    @raises ValueError: if the filename contains neither INPUT nor OUTPUT
    """
    # Read Dicom file; 'with' guarantees the handle is closed
    with open(mri_path, "rb") as mri_file:
        ds = pydicom.read_file(mri_file)
    # Get dicom data
    shape = ds.pixel_array.shape
    # Convert to float to avoid overflow or underflow losses.
    image_2d = ds.pixel_array.astype(float)
    # Rescaling grey scale
    normalize_factor = 2000.0 if INPUT not in mri_filename else image_2d.max()
    if INPUT not in mri_filename:
        # clamp the 5000.0 sentinel down to the normalization factor
        image_2d[image_2d == 5000.0] = normalize_factor
    # vectorized rescale to 0..255 (replaces the per-pixel Python loop)
    image_2d = (image_2d / normalize_factor) * 255.0
    # Convert to uint8
    image_2d_scaled = np.uint8(image_2d)
    if INPUT in mri_filename:
        new_filename = mri_filename.replace(INPUT + '_', '')
        png_fn = os.path.join(PNG_IN_DIR, new_filename + '.png')
    elif OUTPUT in mri_filename:
        new_filename = mri_filename.replace(OUTPUT + '_', '')
        png_fn = os.path.join(PNG_OUT_DIR, new_filename + '.png')
    else:
        # bugfix: previously fell through with png_fn unbound (NameError)
        raise ValueError("filename {!r} matches neither {!r} nor {!r}"
                         .format(mri_filename, INPUT, OUTPUT))
    print('Writing {}...'.format(png_fn))
    # Create PNG file and write scaled pixels to it
    with open(png_fn, "wb") as png_file:
        w = png.Writer(shape[1], shape[0], greyscale=True)
        w.write(png_file, image_2d_scaled)
def getFiles(root_dir, files):
    """Collect all DICOM (.dcm) files below *root_dir*.

    os.walk already descends into every subdirectory, so the original's
    extra manual recursion into file-less directories was redundant (it
    re-walked subtrees; only the set() deduplication hid that) and has
    been removed.

    @param root_dir: directory tree to scan
    @param files: set receiving (absolute_path, name) tuples where name is
        "<grandparent>_<parent>_<basename-without-extension>"
    """
    for r, _dirs, filenames in os.walk(root_dir):
        for fn in filenames:
            if os.path.splitext(fn)[1] != '.dcm':
                continue
            curr_path = os.path.join(r, fn)
            # NOTE: splitting on '/' assumes POSIX-style paths
            parts = r.split('/')
            pre = parts[-2] + '_' + parts[-1] + '_'
            files.add((curr_path, pre + os.path.splitext(fn)[0]))
def main():
    """Convert every DICOM file found under IN_DIR to a PNG."""
    # Create png directories; exist_ok avoids the check-then-create race
    # of the original os.path.exists()/os.makedirs() pair
    os.makedirs(PNG_IN_DIR, exist_ok=True)
    os.makedirs(PNG_OUT_DIR, exist_ok=True)
    files = set()
    # Get all files recursively in dicom directory
    getFiles(IN_DIR, files)
    # Convert all dicom files to pngs
    for f, fn in files:
        convert(f, fn)


if __name__ == '__main__':
    main()
| [
"numpy.uint8",
"os.makedirs",
"pydicom.read_file",
"os.getcwd",
"os.walk",
"os.path.exists",
"os.path.splitext",
"png.Writer",
"os.path.join"
] | [((78, 89), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (87, 89), False, 'import os\n'), ((540, 567), 'pydicom.read_file', 'pydicom.read_file', (['mri_file'], {}), '(mri_file)\n', (557, 567), False, 'import pydicom\n'), ((1166, 1184), 'numpy.uint8', 'np.uint8', (['image_2d'], {}), '(image_2d)\n', (1174, 1184), True, 'import numpy as np\n'), ((1638, 1684), 'png.Writer', 'png.Writer', (['shape[1]', 'shape[0]'], {'greyscale': '(True)'}), '(shape[1], shape[0], greyscale=True)\n', (1648, 1684), False, 'import png\n'), ((1796, 1813), 'os.walk', 'os.walk', (['root_dir'], {}), '(root_dir)\n', (1803, 1813), False, 'import os\n'), ((1292, 1339), 'os.path.join', 'os.path.join', (['PNG_IN_DIR', "(new_filename + '.png')"], {}), "(PNG_IN_DIR, new_filename + '.png')\n", (1304, 1339), False, 'import os\n'), ((2304, 2330), 'os.path.exists', 'os.path.exists', (['PNG_IN_DIR'], {}), '(PNG_IN_DIR)\n', (2318, 2330), False, 'import os\n'), ((2340, 2363), 'os.makedirs', 'os.makedirs', (['PNG_IN_DIR'], {}), '(PNG_IN_DIR)\n', (2351, 2363), False, 'import os\n'), ((2375, 2402), 'os.path.exists', 'os.path.exists', (['PNG_OUT_DIR'], {}), '(PNG_OUT_DIR)\n', (2389, 2402), False, 'import os\n'), ((2412, 2436), 'os.makedirs', 'os.makedirs', (['PNG_OUT_DIR'], {}), '(PNG_OUT_DIR)\n', (2423, 2436), False, 'import os\n'), ((1450, 1498), 'os.path.join', 'os.path.join', (['PNG_OUT_DIR', "(new_filename + '.png')"], {}), "(PNG_OUT_DIR, new_filename + '.png')\n", (1462, 1498), False, 'import os\n'), ((1888, 1907), 'os.path.join', 'os.path.join', (['r', 'sn'], {}), '(r, sn)\n', (1900, 1907), False, 'import os\n'), ((2076, 2095), 'os.path.join', 'os.path.join', (['r', 'fn'], {}), '(r, fn)\n', (2088, 2095), False, 'import os\n'), ((2009, 2029), 'os.path.splitext', 'os.path.splitext', (['fn'], {}), '(fn)\n', (2025, 2029), False, 'import os\n')] |
from ScopeFoundry.data_browser import DataBrowserView
import pyqtgraph as pg
import numpy as np
import h5py
from collections import namedtuple, OrderedDict
import json
from qtpy import QtWidgets
from .scalebars import SEMScaleBar
# Descriptor for one acquisition channel found in a scan file:
#   type_     -- 'ai' (analog input) or 'ctr' (counter), set when populated
#   index     -- column index into the corresponding data array
#   phys_chan -- physical channel id string (e.g. 'ai0')
#   chan_name -- human-readable channel name from the file's settings
#   term      -- terminal name (for counters; analog channels reuse phys_chan)
AvailChan = namedtuple('AvailChan', ['type_', 'index', 'phys_chan',
                                     'chan_name', 'term'])
class SyncRasterScanH5(DataBrowserView):
    """DataBrowser view for synchronized raster-scan HDF5 files.

    Displays one frame/sub-frame of a selected ADC or counter channel as
    an image, with an SEM scale bar and an optional description overlay.
    """
    name = 'sync_raster_scan_h5'
    # measurement group names (under /measurement) this view can display
    supported_measurements = ['sem_sync_raster_scan',
                              'sync_raster_scan',
                              'hyperspec_cl']
    def setup(self):
        """Create the view's settings, Qt layout and image widgets."""
        self.settings.New('frame', dtype=int)
        self.settings.New('sub_frame', dtype=int)
        # channel choices are replaced with the file's actual channels
        # once a file is loaded (see on_change_data_filename)
        self.settings.New('channel', dtype=str,
                          choices=('ai0', 'ai1', 'ctr0', 'ctr1'))
        self.settings.New('auto_level', dtype=bool, initial=True)
        self.settings.New('show_description', dtype=bool, initial=False)
        self.ui = QtWidgets.QWidget()
        self.ui.setLayout(QtWidgets.QVBoxLayout())
        self.ui.layout().addWidget(self.settings.New_UI(), stretch=0)
        self.info_label = QtWidgets.QLabel()
        self.ui.layout().addWidget(self.info_label, stretch=0)
        self.imitem = pg.ImageItem()
        self.imview = pg.ImageView(imageItem=self.imitem)
        self.ui.layout().addWidget(self.imview, stretch=1)
        # redraw whenever any display-relevant setting changes
        for name in ['frame', 'sub_frame', 'channel', 'auto_level',
                     'show_description']:
            self.settings.get_lq(name).add_listener(self.update_display)
    def reset(self):
        """Close any open file and remove per-file overlay items."""
        if hasattr(self, 'dat'):
            self.dat.close()
        if hasattr(self, 'scalebar'):
            self.imview.getView().removeItem(self.scalebar)
            del self.scalebar
        if hasattr(self, 'desc_txt'):
            self.imview.getView().removeItem(self.desc_txt)
            del self.desc_txt
    def on_change_data_filename(self, fname):
        """Open *fname*, locate a supported measurement group and configure
        the frame/channel settings from the file's metadata.

        On any failure a placeholder image is shown, the error is reported
        in the status bar and the exception is re-raised.
        """
        self.reset()
        try:
            self.dat = h5py.File(fname)
            # find the first supported measurement group in the file;
            # all further setup runs inside the matching branch
            for meas_name in self.supported_measurements:
                node_name = 'measurement/' + meas_name
                if node_name in self.dat:
                    self.M = self.measurement = self.dat[node_name]
                    nframe, nsubframe, ny, self.nx, nadc_chan = self.M['adc_map'].shape
                    self.settings.frame.change_min_max(0, nframe-1)
                    self.settings.sub_frame.change_min_max(0, nsubframe-1)
                    sem_remcon = self.dat['hardware/sem_remcon/settings'].attrs
                    # effective magnification scaled by the horizontal span
                    # relative to a 20 (units per file convention) reference
                    self.mag = sem_remcon['magnification'] / \
                        (self.M['settings'].attrs['h_span']/20)
                    scanDAQ = self.dat['hardware/sync_raster_daq/settings'].attrs
                    # build phys_chan -> AvailChan map for analog inputs
                    # and counters declared in the DAQ settings
                    self.available_chan_dict = OrderedDict()
                    for i, phys_chan in enumerate(json.loads(scanDAQ['adc_channels'])):
                        self.available_chan_dict[phys_chan] = AvailChan(
                            # type, index, physical_chan, channel_name, terminal
                            'ai', i, phys_chan,
                            json.loads(scanDAQ['adc_chan_names'])[i], phys_chan)
                    for i, phys_chan in enumerate(json.loads(scanDAQ['ctr_channels'])):
                        self.available_chan_dict[phys_chan] = AvailChan(
                            # type, index, physical_chan, channel_name, terminal
                            'ctr', i, phys_chan,
                            json.loads(scanDAQ['ctr_chan_names'])[i],
                            json.loads(scanDAQ['ctr_chan_terms'])[i])
                    self.settings.channel.change_choice_list([
                        (" ".join([chan.chan_name, chan.phys_chan]), key)
                        for key, chan in self.available_chan_dict.items()])
                    desc = self.M['settings'].attrs['description']
                    self.desc_txt = pg.TextItem(text=desc)
                    self.update_display()
        except Exception as err:
            self.imview.setImage(np.zeros((10, 10)))
            self.databrowser.ui.statusbar.showMessage("failed to load %s:\n%s"
                                                      % (fname, err))
            raise(err)
    def update_display(self):
        """Redraw the image for the current frame/sub-frame/channel."""
        M = self.measurement
        ii = self.settings['frame']
        jj = self.settings['sub_frame']
        chan_name = self.settings['channel']
        chan = self.available_chan_dict[chan_name]
        nframe, nsubframe, ny, nx, nadc_chan = M['adc_map'].shape
        # if chan == 'ai0':
        #     im = M['adc_map'][ii, jj, :,:, 0]
        # elif chan == 'ai1':
        #     im = M['adc_map'][ii, jj, :,:, 1]
        # elif chan == 'ctr0':
        #     im = M['ctr_map'][ii, jj, :,:, 0]
        # elif chan == 'ctr1':
        #     im = M['ctr_map'][ii, jj, :,:, 1]
        # NOTE(review): the second test could be an elif; if chan.type_ is
        # ever neither 'ai' nor 'ctr', `im` stays unbound (NameError below)
        if chan.type_ == 'ai':
            im = M['adc_map'][ii, jj, :, :, chan.index]
        if chan.type_ == 'ctr':
            im = M['ctr_map'][ii, jj, :, :, chan.index]
        # transpose + vertical flip to match display orientation
        self.imitem.setImage(im.T[:, ::-1],
                             autoLevels=self.settings['auto_level'])
        if self.settings['auto_level']:
            # robust levels: clip display range to the 1st/99th percentile
            self.imview.setLevels(*np.percentile(im, (1, 99)))
        if hasattr(self, 'desc_txt'):
            if self.settings['show_description']:
                self.imview.getView().addItem(self.desc_txt)
            else:
                self.imview.getView().removeItem(self.desc_txt)
        # calculate full frame size based on Polaroid 545 width (11.4cm)
        if hasattr(self, 'scalebar'):
            self.imview.getView().removeItem(self.scalebar)
        self.scalebar = SEMScaleBar(mag=self.mag, num_px=self.nx)
        self.scalebar.setParentItem(self.imview.getView())
        self.scalebar.anchor((1, 1), (1, 1), offset=(-20, -20))
        self.imview.autoRange()
        # self.info_label.setText("{} plane {}={} um (index={})".format(
        #    plane, other_ax, self.dat[other_ax+'_array'][ii], ii))
| [
"h5py.File",
"json.loads",
"pyqtgraph.TextItem",
"qtpy.QtWidgets.QLabel",
"numpy.zeros",
"pyqtgraph.ImageView",
"qtpy.QtWidgets.QVBoxLayout",
"numpy.percentile",
"qtpy.QtWidgets.QWidget",
"collections.namedtuple",
"collections.OrderedDict",
"pyqtgraph.ImageItem"
] | [((244, 321), 'collections.namedtuple', 'namedtuple', (['"""AvailChan"""', "['type_', 'index', 'phys_chan', 'chan_name', 'term']"], {}), "('AvailChan', ['type_', 'index', 'phys_chan', 'chan_name', 'term'])\n", (254, 321), False, 'from collections import namedtuple, OrderedDict\n'), ((976, 995), 'qtpy.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (993, 995), False, 'from qtpy import QtWidgets\n'), ((1143, 1161), 'qtpy.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (1159, 1161), False, 'from qtpy import QtWidgets\n'), ((1247, 1261), 'pyqtgraph.ImageItem', 'pg.ImageItem', ([], {}), '()\n', (1259, 1261), True, 'import pyqtgraph as pg\n'), ((1284, 1319), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {'imageItem': 'self.imitem'}), '(imageItem=self.imitem)\n', (1296, 1319), True, 'import pyqtgraph as pg\n'), ((1022, 1045), 'qtpy.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1043, 1045), False, 'from qtpy import QtWidgets\n'), ((2011, 2027), 'h5py.File', 'h5py.File', (['fname'], {}), '(fname)\n', (2020, 2027), False, 'import h5py\n'), ((2757, 2770), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2768, 2770), False, 'from collections import namedtuple, OrderedDict\n'), ((3763, 3785), 'pyqtgraph.TextItem', 'pg.TextItem', ([], {'text': 'desc'}), '(text=desc)\n', (3774, 3785), True, 'import pyqtgraph as pg\n'), ((2814, 2849), 'json.loads', 'json.loads', (["scanDAQ['adc_channels']"], {}), "(scanDAQ['adc_channels'])\n", (2824, 2849), False, 'import json\n'), ((3145, 3180), 'json.loads', 'json.loads', (["scanDAQ['ctr_channels']"], {}), "(scanDAQ['ctr_channels'])\n", (3155, 3180), False, 'import json\n'), ((3887, 3905), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3895, 3905), True, 'import numpy as np\n'), ((5236, 5262), 'numpy.percentile', 'np.percentile', (['im', '(1, 99)'], {}), '(im, (1, 99))\n', (5249, 5262), True, 'import numpy as np\n'), ((3050, 3087), 'json.loads', 'json.loads', 
(["scanDAQ['adc_chan_names']"], {}), "(scanDAQ['adc_chan_names'])\n", (3060, 3087), False, 'import json\n'), ((3382, 3419), 'json.loads', 'json.loads', (["scanDAQ['ctr_chan_names']"], {}), "(scanDAQ['ctr_chan_names'])\n", (3392, 3419), False, 'import json\n'), ((3444, 3481), 'json.loads', 'json.loads', (["scanDAQ['ctr_chan_terms']"], {}), "(scanDAQ['ctr_chan_terms'])\n", (3454, 3481), False, 'import json\n')] |
# -*- coding: utf-8 -*-
"""
GripIt - UCF, Edge Base Gripper Implementation
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import super
from builtins import range
from builtins import int
from builtins import str
from future import standard_library
standard_library.install_aliases()
__author__ = '<NAME>'
__license__ = 'MIT'
__version__ = '1.1.20'
# Application Imports
import sys, math, collections, math, copy
import logging as log
import PyQt5.QtWidgets as QtWdgt
import PyQt5.QtGui as QtGui
import PyQt5.QtCore as QtCore
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import numpy as np
import cv2
from gripit.core.context import EdgeProcessingDetectContext, ExecutionMode
from gripit.core.point_cloud_render_element import SceneElement
from gripit.core.ros_image import ROSImage, OnTopicReived
from gripit.gui.ImageViewerQt import ImageViewerQt
from gripit.gui.FrameLayout import FrameLayout as CollapsableBox
from gripit.gui.QLabelSlider import QLabelSlider
# Set this number of the image index that needs to be procesed
# (fixme) add file loading if necessary
IMAGE_NUM = 0
# Default execution mode and datastore; both can be overridden by the
# command-line flags parsed in App.initArg (-n / -s / -m / --ros).
EXECUTION_MODE = ExecutionMode.DEVELOPMENT_ROS
DATA_STORE = "real"
class App(QtGui.QWidget):
edgeProcessorContext = None
currentImageModel = None
# Application Initializatoin
def __init__(self, app):
super().__init__()
global IMAGE_NUM
self.title = 'GripIt'
self.left = 10
self.top = 100
self.width = 1020
self.height = 640
self.isProcesed = False
self.processRuns = 0
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.initArg(app)
self.initEdgeProcessorContext(app)
self.initUI()
if (self.imageSelectBox.count() == 0) and (self.getContext().getMode() != ExecutionMode.DEVELOPMENT_ROS):
log.warning("Image datastore is empty.")
exit()
# load selected image else load first image in list
IMAGE_NUM = 0 if IMAGE_NUM == 0 else self.imageSelectBox.findData(str(IMAGE_NUM))
if IMAGE_NUM is -1: # If entered data is not present
log.warning("Invalid index entered.")
IMAGE_NUM = 0
self.imageSelectBox.currentIndexChanged.emit(IMAGE_NUM)
# self.initSidebarAttrbutes(self.currentImageModel, self.sideBarLayout)
# self.displayImage()
def initArg(self, app):
global IMAGE_NUM
global EXECUTION_MODE
global DATA_STORE
args = app.arguments()
#EXECUTION_MODE = ExecutionMode.DEVELOPMENT_ROS
DATA_STORE = "ros"
#return
for i in range(len(args)):
if args[i] == "-n":
IMAGE_NUM = args[i + 1]
elif args[i] == "-s":
DATA_STORE = args[i + 1]
elif args[i] == "-m":
if args[i+1] == "user":
EXECUTION_MODE = ExecutionMode.USER
elif args[i+1] == "developer":
EXECUTION_MODE = ExecutionMode.DEVELOPMENT
elif args[i] == "--ros":
EXECUTION_MODE = ExecutionMode.DEVELOPMENT_ROS
DATA_STORE = "ros"
    def initUI(self):
        """Initialize Program UI

        Builds the main grid layout: a sidebar tab widget (parameters in a
        scroll area) on the left and the image tab widget on the right.
        """
        log.info("Initializing program ui")
        self.tabWidget = QtGui.QTabWidget()
        self.tabWidget.setTabsClosable(False)
        self.tabWidget.setMovable(True)
        self.tabWidget.setMinimumWidth(self.width-280)
        # self.tabWidget.tabCloseRequested.connect(self.close_window_tab)
        appLayout = QtGui.QGridLayout(self)
        appLayout.setSpacing(4)
        self.sideBarTabWidget = QtGui.QTabWidget()
        sideBarWdgt = QtGui.QWidget()
        scroll = QtGui.QScrollArea()
        # scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.sideBarLayout = QtWdgt.QVBoxLayout()
        sideBarWdgt.setLayout(self.sideBarLayout)
        self.sideBarTabWidget.setMinimumWidth(320)
        self.sideBarTabWidget.setMinimumHeight(600)
        scroll.setWidget(sideBarWdgt)
        scroll.setWidgetResizable(True)
        scroll.setFixedHeight(450)
        wrapper, self.imageSelectBox = self.initImageListComboBox()
        # self.sideBarLayout.addWidget(wrapper)
        tLayout = QtWdgt.QVBoxLayout()
        tWdgt = QtGui.QWidget()
        tWdgt.setLayout(tLayout)
        tLayout.addWidget(wrapper)
        tLayout.addWidget(scroll)
        self.sideBarTabWidget.addTab(tWdgt, "Parameters")
        self.imageWidget = QtGui.QWidget()
        self.tabWidget.addTab(self.imageWidget, "Base Image")
        baseImageLayout = QtWdgt.QHBoxLayout(self.imageWidget)
        self.baseImage = ImageViewerQt()
        appLayout.addWidget(self.sideBarTabWidget, 0,0, 1, 1)
        appLayout.addWidget(self.tabWidget, 0, 1, 1, 1)
        baseImageLayout.addWidget(self.baseImage)
        # NOTE(review): loadTabUI(self, hidden=True) receives *self* as the
        # 'hidden' argument here; being truthy it acts like hidden=True,
        # but this looks unintended -- confirm and call self.loadTabUI().
        self.loadTabUI(self)
        self.show()
    # Returns a UI combobox which has a list of avvialable image in datastore
    def initImageListComboBox(self):
        """Build the "Images" group box (image/depth selection comboboxes,
        Save/Reset/Process buttons).

        @returns: (group box widget, color-image combobox)
        """
        imageDescriptors = self.edgeProcessorContext.listAvailableImages()
        comboBox = QtWdgt.QComboBox()
        for descriptor in imageDescriptors:
            # do not add depth map to color listings
            if ("depth" in descriptor[0]) and (self.getContext().getMode() == ExecutionMode.DEVELOPMENT_ROS):
                continue
            comboBox.addItem(descriptor[0], descriptor[1])
        def _imageSelectedEvent(val): # Image has been selected
            index = self.imageSelectBox.itemData(val)
            self.loadImageFile(index)
        if len(imageDescriptors) > 0:
            comboBox.currentIndexChanged.connect(_imageSelectedEvent)
        else:
            # placeholder entry when the datastore reports nothing
            comboBox.addItem("Color Topic Found", 0)
        qtComponentWrapper = QtWdgt.QGroupBox("Images")
        qtComponentLayout = QtWdgt.QGridLayout()
        qtComponentWrapper.setLayout(qtComponentLayout)
        qtComponentLayout.addWidget(comboBox, 0, 0, 1, 2)
        # # If ROS, add depth combobox option
        if (self.getContext().getMode() == ExecutionMode.DEVELOPMENT_ROS):
            dcomboBox = QtWdgt.QComboBox()
            self.dImageSelectBox = dcomboBox
            for descriptor in imageDescriptors:
                if "depth" in descriptor[0]:
                    dcomboBox.addItem(descriptor[0], descriptor[1])
            if len(imageDescriptors) == 0:
                dcomboBox.addItem("Depth Topic Available", 0)
            #def _imageSelectedEvent(val): # Image has been selected
            #    index = self.imageSelectBox.itemData(val)
            #    self.loadImageFile(index)
            #dcomboBox.currentIndexChanged.connect(_imageSelectedEvent)
            qtComponentLayout.addWidget(dcomboBox, 1, 0, 1, 2)
        saveButton = QtGui.QPushButton("Save")
        qtComponentLayout.addWidget(saveButton, 2, 0, 1, 1)
        def _saveImageParamsEvent(val):
            self.storeImageParameters(self.currentImageModel)
        saveButton.clicked.connect(_saveImageParamsEvent)
        resetButton = QtGui.QPushButton("Reset")
        def _resetImageParamsEvent(val):
            self.resetImageParameters(self.currentImageModel)
        resetButton.clicked.connect(_resetImageParamsEvent)
        qtComponentLayout.addWidget(resetButton, 2, 1, 1, 1)
        # Add process button
        self.processImgBtn = QtGui.QPushButton("Process")
        self.processImgBtn.clicked.connect(self.processImage)
        qtComponentLayout.addWidget(self.processImgBtn, 3, 0, 1, 2)
        return qtComponentWrapper, comboBox
def initEdgeProcessorContext(self, app):
log.info("Initializing program Context")
self.edgeProcessorContext = EdgeProcessingDetectContext.initializeContext(
dataStore=DATA_STORE,
_mode=EXECUTION_MODE)
    # event which loads ImageModel
    def loadImageFile(self, imageNumber):
        """Load the image currently selected in the combobox(es), rebuild
        the parameter sidebar and refresh the display.

        NOTE(review): the image identity is taken from the combobox text,
        not from *imageNumber* -- presumably the argument is the combobox
        item data; confirm whether it is still needed.
        """
        # remove all sidebar parameters except imageselect
        self.clearLayout(self.sideBarLayout)
        #get image numbers
        imageId = self.imageSelectBox.currentText()
        if self.getContext().getMode() == ExecutionMode.DEVELOPMENT_ROS:
            # in ROS mode the model needs both the color and depth topic
            dimgId = self.dImageSelectBox.currentText()
            imageId = (imageId, dimgId)
        self.currentImageModel = self.edgeProcessorContext.loadImage(imageId)
        self.initSidebarAttrbutes(self.currentImageModel, self.sideBarLayout)
        if self.edgeProcessorContext.getMode() == ExecutionMode.DEVELOPMENT_ROS:
            self.displayROSImage(self.currentImageModel, 0) # select rgb as default image
        else:
            self.displayImage()
def storeImageParameters(self, imageModel):
params = imageModel._imageAttributes
self.getContext().saveImageModelParameters(params, imageModel.getName())
# save auxiliary images
if imageModel.isProcessed():
# try to incorporate pointcloud to image storage
pointCloudImage = self.glWidget.getRenderedImage()
imageModel.addAuxiliaryImage("point_cloud", pointCloudImage)
auxImages = imageModel.auxiliary_images
for imageName in auxImages:
imageModel.saveAuxiliaryImage(imageName)
def resetImageParameters(self, imageModel):
self.getContext().resetImageModelParameters(imageModel)
self.imageSelectBox.currentIndexChanged.emit(self.imageSelectBox.currentIndex())
def clearLayout(self, layout):
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
self.clearLayout(child.layout())
def initSidebarAttrbutes(self, currentImageModel, parentLayout):
attributes = copy.deepcopy(currentImageModel._imageAttributes)
for key in attributes:
item = attributes[key]
if item['hidden']: continue
component = self.initImageAttributeUIComponents(key, item, currentImageModel)
parentLayout.addWidget(component)
# add stretch to window
parentLayout.addStretch(1)
# parentLayout.setSizeConstraint(parentLayout.SetMinAndMaxSize)
    def initImageAttributeUIComponents(self, key, item, imageModel):
        """Build the widget for one image attribute.

        Dispatches on item['type'] (INT, INT_RANGE, REAL, STRING, UI_GROUP);
        UI_GROUP recurses over its children.  Edits are written back to
        *imageModel* via setAttribute.

        @returns: the wrapping group box / collapsable box widget
        @raises RuntimeError: for an unknown attribute type
        """
        # qtComponentWrapper = CollapsableBox(item['label'])
        # qtComponentLayout = qtComponentWrapper.getContentLayout()
        qtComponentWrapper = QtWdgt.QGroupBox(item['label'])
        qtComponentLayout = QtWdgt.QVBoxLayout()
        qtComponentWrapper.setLayout(qtComponentLayout)
        qtComponent = None
        attributeType = item['type']
        # component callback (default; overridden per-type below)
        def _componentCallback(val):
            qtComponentWrapper.setTitle("{}:{}".format(item['label'], str(val)))
            imageModel.setAttribute(key, val)
        # If component is an integer
        if attributeType == 'INT':
            def _componentCallback(val):
                if val == '':
                    val = 0
                qtComponentWrapper.setTitle("{}:{}".format(item['label'], str(val)))
                imageModel.setAttribute(key, int(val))
            #define numeric text validator
            onlyInt = QtGui.QIntValidator()
            qtComponent = QtWdgt.QLineEdit()
            qtComponent.setValidator(onlyInt)
            qtComponent.setText("{}".format(item['value']))
            qtComponent.textEdited.connect(_componentCallback)
        elif attributeType == 'INT_RANGE':
            def _componentCallback(val):
                imageModel.setAttribute(key, int(val))
                qtComponentWrapper.setTitle("{}:{}".format(item['label'], str(val)))
            qtComponent = QtGui.QSlider(1)
            qtComponent.setRange(item['min'], item['max'])
            # qtComponent.setMaximum(item['max'])
            # qtComponent._setTickPosition(QtWdgt.QSlider.NoTicks)
            qtComponent.setTickInterval(int((item['max']-item['min'])/10))
            qtComponent.setValue(item['value'])
            qtComponent.valueChanged.connect(_componentCallback)
        elif attributeType == 'REAL':
            def _componentCallback(val):
                if val == '':
                    val = 0.0
                elif val[-1] == '.':
                    # pad a trailing '.' so float() accepts it
                    val = val + '0'
                try:
                    float(val)
                    qtComponentWrapper.setTitle("{}:{}".format(item['label'], str(val)))
                except ValueError:
                    log.warning("Non-numeric value added")
                    return
                imageModel.setAttribute(key, float(val))
            #define numeric text validator
            onlyInt = QtGui.QDoubleValidator()
            qtComponent = QtWdgt.QLineEdit()
            qtComponent.setValidator(onlyInt)
            qtComponent.setText("{}".format(item['value']))
            qtComponent.textEdited.connect(_componentCallback)
            qtComponent.textEdited.emit(qtComponent.text())
        elif attributeType == 'STRING':
            # NOTE(review): QtWidgets has no QText -- this branch would
            # raise AttributeError; confirm (QLineEdit/QTextEdit intended?)
            qtComponent = QtWdgt.QText()
        elif attributeType == 'UI_GROUP':
            qtComponentWrapper = CollapsableBox(item['label'])
            qtComponentLayout = qtComponentWrapper.getContentLayout()
            # qtComponent = QtWdgt.QGroupBox(item['label'])
            # qtComponent.setLayout(qtComponentLayout)
            for k in item['value']:
                i = item['value'][k]
                # if i['hidden'] == True: continue
                imageModel.setAttribute(k, i['value'])
                qtComponent = self.initImageAttributeUIComponents(k, i, imageModel)
                # qtComponent.setEnabled(True)
                qtComponentLayout.addWidget(qtComponent)
            # qtComponentWrapper.addStretch(1)
            return qtComponentWrapper
        else:
            raise RuntimeError("GUI Componenet not defined.")
        qtComponent.setEnabled(True)
        # Add button to interface
        qtComponentLayout.addWidget(qtComponent)
        qtComponentLayout.addStretch(1)
        # parentLayout.addWidget(qtComponentWrapper) could be deleted
        return qtComponentWrapper
def displayImage(self):
if self.currentImageModel is None:
raise RuntimeError("Unable to display image")
cv2Img = self.currentImageModel.getBaseRGBImage()
timage = QtGui.QImage(cv2Img.data, cv2Img.shape[1], cv2Img.shape[0], 3 * cv2Img.shape[1], QtGui.QImage.Format_RGB888)
self.baseImage.setImage(timage)
if self.currentImageModel.hasAttribute("crop_rectangle"):
crop_rect = self.currentImageModel.getAttribute("crop_rectangle")
self.baseImage.setCropRectangle(crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3])
    def displayROSImage(self, imageModel, index):
        """Subscribe the base viewer to a live ROS image stream.

        @param imageModel: model providing the RGB/depth topic wrappers
        @param index: 0 for the RGB stream, anything else for depth
            (depth rendering is still a TODO below)
        """
        if self.currentImageModel is None:
            raise RuntimeError("Unable to display image")
        # detach the previous subscription, if any
        try:
            self.rosImg.unregisterReceiveSignal()
        except AttributeError:
            self.rosImg = None
        if index == 0:
            self.rosImg = imageModel.getBaseRGBImage()
        else:
            self.rosImg = imageModel.getBaseDepthImage()
        signal = OnTopicReived()
        def _updateImage(cv2Img):
            # called for every frame received on the topic
            timage = None
            if index == 0:
                timage = QtGui.QImage(cv2Img.data, cv2Img.shape[1], cv2Img.shape[0], 3 * cv2Img.shape[1], QtGui.QImage.Format_RGB888)
            else:
                print("TODO")
                pass
                #timage = QtGui.QImage(cv2Img.data, cv2Img.shape[1], cv2Img.shape[0], 3*)
            self.baseImage.setImage(timage)
            # keep showing the stored crop rectangle until processing ran
            if imageModel.hasAttribute("crop_rectangle") and self.isProcesed == False:
                crop_rect = imageModel.getAttribute("crop_rectangle")
                self.baseImage.setCropRectangle(crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3])
        signal.receivedSignal.connect(_updateImage)
        self.rosImg.registerReceiveSignal(signal)
    def clearImageCache(self, imageModel):
        """Drop the model's cached processing results (forces a full re-run)."""
        imageModel.deleteCache()
    def getTabView(self, name):
        """Unimplemented stub; TODO: return the tab registered as *name*."""
        pass
    def loadTabUI(self, hidden=True):
        """Create the result tabs (Debug, Edge Pairs, Point Cloud) and
        their layouts; attach them to the tab widget only when *hidden*
        is falsy.

        NOTE(review): initUI calls this as self.loadTabUI(self), so
        *hidden* receives the App instance (truthy) -- confirm intent.
        """
        self.tabItems = collections.OrderedDict()
        # init contour tab
        debugTab = QtGui.QWidget()
        self.contourTabLayout = QtGui.QGridLayout(debugTab)
        self.tabItems["Debug"] = debugTab
        # init edge view tab
        edgeView = QtGui.QWidget()
        self.edgeViewLayout = QtGui.QGridLayout(edgeView)
        self.tabItems["Edge Pairs"] = edgeView
        # init point cloud view tab
        pointCloudView = QtGui.QWidget()
        self.pointCloudViewLayout = QtGui.QGridLayout(pointCloudView)
        self.tabItems["Point Cloud"] = pointCloudView
        self.loadPointCloudWidget(self.pointCloudViewLayout)
        # Display Tabs
        if hidden == False:
            for name in self.tabItems:
                item = self.tabItems[name]
                self.tabWidget.addTab(item, name)
def displayTab(self, name):
if name not in self.tabItems.keys():
raise RuntimeError("Tab name '{}' not defined.".format(name))
tab = self.tabItems[name]
if tab.parent() == self.tabWidget:
return
else:
self.tabWidget.addTab(tab, name)
def hideTab(self, name):
if name not in self.tabItems.keys():
raise RuntimeError("Tab name '{}' not defined.".format(name))
tab = self.tabItems[name]
self.tabWidget.removeTab(tab, name)
    def processImage(self):
        """Run (or re-run) edge processing on the current image and refresh
        all result views (debug segments, edge pairs, point cloud).

        Side effects: crops the model to the viewer's crop rectangle, sets
        self.isProcesed and relabels the process button.
        """
        currentlySelectedTabIndex = 0
        # ensure image has been selected
        if self.currentImageModel is None:
            raise RuntimeError("Unable to process Image: Image not loaded.")
        if self.isProcesed == True:
            log.info("Reprocessing Image.")
            self.clearImageCache(self.currentImageModel)
            # remember the active tab so re-processing keeps it selected
            currentlySelectedTabIndex = self.tabWidget.currentIndex()
        log.info("Processing image: {}".format(self.currentImageModel.name))
        self.processRuns = self.processRuns + 1
        # crop image
        trect = self.baseImage.getCropRectangle()
        self.currentImageModel.cropImage(trect.left(), trect.top(), trect.right(), trect.bottom())
        # routine for profiling
        # cProfile.runctx("self.currentImageModel.getLineData()", globals(), locals())
        # point cloud
        # NOTE(review): pointCloud is unused here -- presumably the call is
        # kept for its caching side effect; confirm
        pointCloud = self.currentImageModel.getPointCloudFromCrop()
        self.displayEdgeSegmentViewWidget(self.currentImageModel, self.contourTabLayout, self.isProcesed)
        self.displayEdgeInfoTab(self.currentImageModel, self.edgeViewLayout, self.isProcesed)
        self.displayEdgeAsPointCloud(self.currentImageModel)
        # itnitalize views by manually selecting first items
        if len(self.segmentList) > 0:
            self.segmentItemSelected(0)
            self.lineItemSelected(0)
        self.tabWidget.setCurrentIndex(currentlySelectedTabIndex)
        self.isProcesed = True
        self.processImgBtn.setText("Update Parameters")
        return
    def displayEdgeSegmentViewWidget(self, imageModel, segmentViewWrapper, retainIndex = False):
        """(Re)build the Debug tab: an image view of the cropped RGB/depth
        images with the detected contour segments overlaid, plus a combobox
        selecting which segment group to show.

        @param imageModel: processed image model providing getLineData()
        @param segmentViewWrapper: grid layout of the Debug tab
        @param retainIndex: keep the image view's current frame index when
            re-processing (True on every run after the first)
        """
        # reset the name -> [(segment_name, line_widget)] mapping;
        # try/except AttributeError distinguishes first run from re-run
        try:
            self.segmentList
        except AttributeError:
            self.segmentList = {}
        else:
            del self.segmentList
            self.segmentList = {}
        segmentListImageViewIndex = 0
        # recreate the image view (keeping the displayed frame on re-run)
        try:
            self.segmentListImageView
        except AttributeError:
            self.segmentListImageView = pg.ImageView(name="ImageView")
            segmentViewWrapper.addWidget(self.segmentListImageView, 0,1,10,10)
        else:
            if retainIndex:
                segmentListImageViewIndex = self.segmentListImageView.currentIndex
            del self.segmentListImageView
            self.segmentListImageView = pg.ImageView(name="ImageView")
            segmentViewWrapper.addWidget(self.segmentListImageView, 0,1,10,10)
        segmentSelectBoxIndex = 0
        # recreate the segment-group combobox
        try:
            self.segmentSelectBox
        except AttributeError:
            self.segmentSelectBox = QtWdgt.QComboBox()
            segmentViewWrapper.addWidget(self.segmentSelectBox, 0,0,1,1)
        else:
            if retainIndex: # initialize segmentslect box to 0 on parameter update
                segmentSelectBoxIndex = 0 #self.segmentSelectBox.currentIndex
            self.segmentSelectBox.currentIndexChanged.disconnect()
            segmentViewWrapper.removeWidget(self.segmentSelectBox)
            self.segmentSelectBox.deleteLater()
            del self.segmentSelectBox
            self.segmentSelectBox = QtWdgt.QComboBox()
            segmentViewWrapper.addWidget(self.segmentSelectBox, 0,0,1,1)
        rgbImage = imageModel.getCroppedRGBImage()
        depthImage = cv2.cvtColor(imageModel.getCroppedDepthImage(), cv2.COLOR_GRAY2RGB)
        # Obtain Line data from image object (data is cached after first run)
        lineData = imageModel.getLineData()
        processed_images = lineData["processed_images"]
        timage_list = [rgbImage, depthImage]
        #timage_list.append(processed_images)
        timg = np.stack(timage_list)
        self.segmentListImageView.setImage(timg)
        self.segmentSelectBox.addItem("Show All")
        self.segmentSelectBox.addItem("None")
        # Create Graphic object for each line (this is displayed on image)
        for s in range(len(lineData["segment_list"])):
            segments = lineData["segment_list"][s]
            segmentGroup = []
            segmentGroupName = "group-{}".format(s)
            # consecutive points form the line segments of this contour
            for index in range(len(segments)-1):
                segmentName = "seg-{}-{}".format(s, index)
                startPos = QtCore.QPointF(segments[index][0], segments[index][1])
                endPos = QtCore.QPointF(segments[index+1][0], segments[index+1][1])
                lineSegment = QtCore.QLineF(startPos, endPos)
                # random color per segment
                color = np.random.rand(1, 1, 3).flatten() * 256
                pen = QtGui.QPen()
                pen.setColor(QtGui.QColor(color[0], color[1], color[2]))
                lineWdgt = QtWdgt.QGraphicsLineItem()
                lineWdgt.setLine(lineSegment)
                lineWdgt.setPen(pen)
                # Add line widget to view item
                self.segmentListImageView.getView().addItem(lineWdgt)
                # hide line widget
                lineWdgt.hide()
                segmentGroup.append((segmentName, lineWdgt))
            self.segmentList[segmentGroupName] = segmentGroup
            self.segmentSelectBox.addItem(segmentGroupName)
        self.segmentSelectBox.currentIndexChanged.connect(self.segmentItemSelected)
        if retainIndex == True:
            self.segmentListImageView.setCurrentIndex(segmentListImageViewIndex)
            # self.segmentSelectBox.setCurrentIndex(segmentSelectBoxIndex)
        else:
            self.displayTab("Debug")
    def displayEdgeInfoTab(self, imageModel, viewWrapper, retainIndex):
        """Build (or rebuild) the "Edge Pairs" tab.

        Creates an image view that shows edge pairs drawn over the stacked
        RGB/depth images, a combo box to pick a single pair, and the
        shift/grip control buttons.

        :param imageModel: image model providing cropped images and line data
        :param viewWrapper: grid layout the widgets are added to
        :param retainIndex: when truthy, restore the previously displayed
            image index instead of switching to this tab
        """
        self.lineViews = {}
        self.currentLineItemSelected = None
        lineDataViewIndex = 0
        # Create the image view, or tear down and recreate it on a rebuild
        # (the old widget is detached and scheduled for deletion first).
        try:
            self.lineDataView
        except AttributeError:
            self.lineDataView = pg.ImageView(name="ImageView")
        else:
            if retainIndex:
                lineDataViewIndex = self.lineDataView.currentIndex
            viewWrapper.removeWidget(self.lineDataView)
            self.lineDataView.deleteLater()
            del self.lineDataView
            self.lineDataView = pg.ImageView(name="ImageView")
        lineSelectBoxIndex = 0
        # Same create-or-recreate dance for the combo box; the old signal
        # connection must be dropped before the widget is discarded.
        try:
            self.lineSelectBox
        except AttributeError:
            self.lineSelectBox = QtWdgt.QComboBox()
            self.lineSelectBox.addItem("All Line Pairs")
        else:
            self.lineSelectBox.currentIndexChanged.disconnect()
            if retainIndex:
                # NOTE(review): lineSelectBoxIndex is captured here but never
                # restored below — confirm whether the combo selection should
                # also survive a rebuild.
                lineSelectBoxIndex = self.lineSelectBox.currentIndex()
            viewWrapper.removeWidget(self.lineSelectBox)
            self.lineSelectBox.deleteLater()
            del self.lineSelectBox
            self.lineSelectBox = QtWdgt.QComboBox()
            self.lineSelectBox.addItem("All Line Pairs")
        rgbImage = imageModel.getCroppedRGBImage()
        # Expand the single-channel depth image to RGB so it can be stacked
        # with the RGB image into one multi-frame view.
        depthImage = cv2.cvtColor(imageModel.getCroppedDepthImage(), cv2.COLOR_GRAY2RGB)
        timg = np.stack((rgbImage, depthImage))
        self.lineDataView.setImage(timg)
        viewWrapper.addWidget(self.lineSelectBox, 0,0,1,1)
        viewWrapper.addWidget(self.lineDataView, 0,1,10,10)
        # Obtain Line data from image object (data is cached after first run)
        lineData = self.currentImageModel.getLineData()
        # Create Graphic object for each line (this is displayed on image)
        for edgePair in lineData["edge_pairs"]:
            lineViewPair = []
            for index in range(2):
                edge = edgePair[index]
                lineWdgt = QtWdgt.QGraphicsLineItem()
                lineWdgt.setLine(edge)
                lineWdgt.setPen(edge.getRenderData())
                self.lineDataView.addItem(lineWdgt)
                # Widgets start hidden; lineItemSelected() shows the selection.
                lineWdgt.hide()
                lineViewPair.append(lineWdgt)
            self.lineViews[edgePair.getID()] = (edgePair, lineViewPair)
            self.lineSelectBox.addItem(edgePair.getID())
        self.lineSelectBox.currentIndexChanged.connect(self.lineItemSelected)
        # Display points that are part of a line
        # Checkbox for displaying points
        displayContourPoints = QtWdgt.QCheckBox("Display Edge Points")
        viewWrapper.addWidget(displayContourPoints, 1,0,1,1)
        self.shiftEdgeBtn = QtGui.QPushButton("Shift Edges")
        # self.shiftEdgeBtn.clicked.connect(self.shiftEdge)
        self.shiftEdgeBtn.hide()
        viewWrapper.addWidget(self.shiftEdgeBtn, 2,0,1,1)
        self.processFaceBtn = QtGui.QPushButton("Grip Pair")
        self.processFaceBtn.clicked.connect(self.processFace)
        self.processFaceBtn.hide()
        viewWrapper.addWidget(self.processFaceBtn, 3,0,1,1)
        self.pointViewItems = []
        self.showEdgePoints = False
        def _showEdgePoints(enabled):
            # Checkbox state value 2 corresponds to Qt's "checked" state.
            if enabled == 2:
                self.showEdgePoints = True
            else:
                self.showEdgePoints = False
            # Re-render the current selection with the new point setting.
            self.lineItemSelected(self.lineSelectBox.currentIndex())
        displayContourPoints.stateChanged.connect(_showEdgePoints)
        if retainIndex == True:
            self.lineDataView.setCurrentIndex(lineDataViewIndex)
        else:
            self.displayTab("Edge Pairs")
def lineItemSelected(self, itemKey):
strVal = self.lineSelectBox.itemText(itemKey)
if strVal == "All Line Pairs": # Display all items
self.displayEdgesOnImage(self.currentImageModel, showPoints=self.showEdgePoints)
self.displayEdgeAsPointCloud(self.currentImageModel)
self.currentEdgePair = None
self.shiftEdgeBtn.hide()
self.processFaceBtn.hide()
else:
edgePair = self.lineViews[strVal][0]
self.currentEdgePair = edgePair
self.displayEdgeAsPointCloud(self.currentImageModel, edgePair)
self.displayEdgesOnImage(self.currentImageModel, edgePair, showPoints=self.showEdgePoints)
# self.shiftEdgeBtn.show()
self.processFaceBtn.show()
def segmentItemSelected(self, itemKey):
strVal = self.segmentSelectBox.itemText(itemKey)
segmentGroup = []
if strVal not in ("None", "Show All"):
segmentGroup = self.segmentList[strVal]
elif strVal == "Show All":
for name in self.segmentList:
segmentGroup = segmentGroup + self.segmentList[name]
vb = self.segmentListImageView.getView()
# ensure no segments are in the image veiw
for key in self.segmentList:
for segmentItem in self.segmentList[key]:
segmentItem[1].hide()
# draw all line segments
for segmentItem in segmentGroup:
segmentItem[1].show()
def processFace(self):
"""
Using ransac, calculates the normals for the currently selected edge pairs and displays a vectors
corresponding the orientatoin of the face.
"""
self.glWidget.processFace(self.currentEdgePair)
return
length = 10
width = 2
params = self.edgeProcessorContext.processFace(self.currentImageModel, self.currentEdgePair)
position = params[0]
eigenVectors = params[2].astype(float)
direction = np.asarray((eigenVectors[0][0], eigenVectors[0][1], eigenVectors[0][2]))
latitude = np.asarray((eigenVectors[1][0], eigenVectors[1][1], eigenVectors[1][2]))
normal = np.asarray((eigenVectors[2][0], eigenVectors[2][1], eigenVectors[2][2]))
posFinal = np.vstack([position, normal])
normalLine = gl.GLLinePlotItem(pos=np.vstack([
np.asarray([position[0], position[1], position[2]]),
np.asarray([normal[0]*length+position[0], normal[1]*length+position[1], normal[2]*length+position[2]])
]),
color=(1,1,1,1), mode="lines", width=width)
normalLine.setTransform(self.pointCloudViewModel.transform())
directionLine = gl.GLLinePlotItem(pos=np.vstack([
np.asarray([position[0], position[1], position[2]]),
np.asarray([direction[0]*length+position[0], direction[1]*length+position[1], direction[2]*length+position[2]])
]),
color=(1,0,0,1), mode="lines", width=width)
directionLine.setTransform(self.pointCloudViewModel.transform())
latitudeLine = gl.GLLinePlotItem(pos=np.vstack([
np.asarray([position[0], position[1], position[2]]),
np.asarray([latitude[0]*length+position[0], latitude[1]*length+position[1], latitude[2]*length+position[2]])
]),
color=(0,1,0,1), mode="lines", width=width)
latitudeLine.setTransform(self.pointCloudViewModel.transform())
# self.glWidget.addItem(axisItem)
self.glWidget.addItem(normalLine)
self.glWidget.addItem(latitudeLine)
self.glWidget.addItem(directionLine)
def displayEdgesOnImage(self, imageModel, edgePair = None, showPoints = False):
"""
Renders the currently selected edgepair on the processed image
"""
vb = self.lineDataView.getView()
lineData = imageModel.getLineData()
edgePairList = None
if edgePair == None:
edgePairList = lineData["edge_pairs"]
else:
edgePairList = (edgePair,)
for key in self.lineViews:
self.lineViews[key][1][0].hide()
self.lineViews[key][1][1].hide()
for pv in self.pointViewItems:
vb.removeItem(pv)
self.pointViewItems = []
for edgePair in edgePairList:
wdgPair = []
for m in range(2):
edge = edgePair[m]
lineWdgt = self.lineViews[edgePair.getID()][1][m]
lineWdgt.setLine(edge)
lineWdgt.setPen(edge.getRenderData())
if showPoints == False:
lineWdgt.show()
continue
else:
lineWdgt.hide()
pointList = edge.getEdgePointCloudIndexes()
size = 1
if showPoints == True:
for point in pointList:
pointView = QtWdgt.QGraphicsRectItem()
pointView.setRect(point[0]-size, point[1]-size, size, size)
pointView.setPen(edge.getRenderData())
# pointView.show()
vb.addItem(pointView)
self.pointViewItems.append(pointView)
def displayEdgeAsPointCloud(self, imageModel, edgePair=None):
log.info("Rendering Point Cloud.")
if edgePair is None:
lineData = imageModel.getLineData()
edgePairList = lineData["edge_pairs"]
else:
edgePairList = (edgePair,)
pointCloud = imageModel.getPointCloud()
self.glWidget.setData(
imageModel=imageModel,
edgePairList=edgePairList,
pointCloud=pointCloud,
context=self.getContext())
self.displayTab("Point Cloud")
def loadPointCloudWidget(self, viewWrapper):
""" Loads widget which will be used to display point cloud """
try:
self.glWidget
except AttributeError:
self.glWidget = SceneElement()
else:
self.glWidget = SceneElement()
viewWrapper.addWidget(self.glWidget, 0, 0, 10, 10)
def getContext(self):
return self.edgeProcessorContext
# application access point
def main():
    """Application entry point: create the Qt application and main window."""
    qtApp = QtGui.QApplication(sys.argv)
    window = App(qtApp)  # keep a reference so the window is not garbage collected
    sys.exit(qtApp.exec_())
if __name__ == '__main__':
    # Delegate to main() instead of duplicating its body inline.
    main()
| [
"gripit.gui.ImageViewerQt.ImageViewerQt",
"PyQt5.QtGui.QColor",
"PyQt5.QtWidgets.QGridLayout",
"future.standard_library.install_aliases",
"PyQt5.QtGui.QWidget",
"pyqtgraph.ImageView",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtCore.QLineF",
"PyQt5.QtGui.QScrollArea",
"builtins.range",
"gripit.core.... | [((368, 402), 'future.standard_library.install_aliases', 'standard_library.install_aliases', ([], {}), '()\n', (400, 402), False, 'from future import standard_library\n'), ((33435, 33463), 'PyQt5.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (33453, 33463), True, 'import PyQt5.QtGui as QtGui\n'), ((33546, 33574), 'PyQt5.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (33564, 33574), True, 'import PyQt5.QtGui as QtGui\n'), ((3428, 3463), 'logging.info', 'log.info', (['"""Initializing program ui"""'], {}), "('Initializing program ui')\n", (3436, 3463), True, 'import logging as log\n'), ((3489, 3507), 'PyQt5.QtGui.QTabWidget', 'QtGui.QTabWidget', ([], {}), '()\n', (3505, 3507), True, 'import PyQt5.QtGui as QtGui\n'), ((3751, 3774), 'PyQt5.QtGui.QGridLayout', 'QtGui.QGridLayout', (['self'], {}), '(self)\n', (3768, 3774), True, 'import PyQt5.QtGui as QtGui\n'), ((3840, 3858), 'PyQt5.QtGui.QTabWidget', 'QtGui.QTabWidget', ([], {}), '()\n', (3856, 3858), True, 'import PyQt5.QtGui as QtGui\n'), ((3881, 3896), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (3894, 3896), True, 'import PyQt5.QtGui as QtGui\n'), ((3915, 3934), 'PyQt5.QtGui.QScrollArea', 'QtGui.QScrollArea', ([], {}), '()\n', (3932, 3934), True, 'import PyQt5.QtGui as QtGui\n'), ((4037, 4057), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWdgt.QVBoxLayout', ([], {}), '()\n', (4055, 4057), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((4459, 4479), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWdgt.QVBoxLayout', ([], {}), '()\n', (4477, 4479), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((4496, 4511), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (4509, 4511), True, 'import PyQt5.QtGui as QtGui\n'), ((4709, 4724), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (4722, 4724), True, 'import PyQt5.QtGui as QtGui\n'), ((4813, 4849), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWdgt.QHBoxLayout', (['self.imageWidget'], {}), 
'(self.imageWidget)\n', (4831, 4849), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((4875, 4890), 'gripit.gui.ImageViewerQt.ImageViewerQt', 'ImageViewerQt', ([], {}), '()\n', (4888, 4890), False, 'from gripit.gui.ImageViewerQt import ImageViewerQt\n'), ((5320, 5338), 'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (5336, 5338), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((6010, 6036), 'PyQt5.QtWidgets.QGroupBox', 'QtWdgt.QGroupBox', (['"""Images"""'], {}), "('Images')\n", (6026, 6036), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((6065, 6085), 'PyQt5.QtWidgets.QGridLayout', 'QtWdgt.QGridLayout', ([], {}), '()\n', (6083, 6085), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((7026, 7051), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Save"""'], {}), "('Save')\n", (7043, 7051), True, 'import PyQt5.QtGui as QtGui\n'), ((7295, 7321), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Reset"""'], {}), "('Reset')\n", (7312, 7321), True, 'import PyQt5.QtGui as QtGui\n'), ((7605, 7633), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Process"""'], {}), "('Process')\n", (7622, 7633), True, 'import PyQt5.QtGui as QtGui\n'), ((7864, 7904), 'logging.info', 'log.info', (['"""Initializing program Context"""'], {}), "('Initializing program Context')\n", (7872, 7904), True, 'import logging as log\n'), ((7941, 8035), 'gripit.core.context.EdgeProcessingDetectContext.initializeContext', 'EdgeProcessingDetectContext.initializeContext', ([], {'dataStore': 'DATA_STORE', '_mode': 'EXECUTION_MODE'}), '(dataStore=DATA_STORE, _mode=\n EXECUTION_MODE)\n', (7986, 8035), False, 'from gripit.core.context import EdgeProcessingDetectContext, ExecutionMode\n'), ((10063, 10112), 'copy.deepcopy', 'copy.deepcopy', (['currentImageModel._imageAttributes'], {}), '(currentImageModel._imageAttributes)\n', (10076, 10112), False, 'import sys, math, collections, math, copy\n'), ((10722, 10753), 'PyQt5.QtWidgets.QGroupBox', 'QtWdgt.QGroupBox', (["item['label']"], {}), 
"(item['label'])\n", (10738, 10753), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((10782, 10802), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWdgt.QVBoxLayout', ([], {}), '()\n', (10800, 10802), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((14656, 14769), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['cv2Img.data', 'cv2Img.shape[1]', 'cv2Img.shape[0]', '(3 * cv2Img.shape[1])', 'QtGui.QImage.Format_RGB888'], {}), '(cv2Img.data, cv2Img.shape[1], cv2Img.shape[0], 3 * cv2Img.\n shape[1], QtGui.QImage.Format_RGB888)\n', (14668, 14769), True, 'import PyQt5.QtGui as QtGui\n'), ((15504, 15519), 'gripit.core.ros_image.OnTopicReived', 'OnTopicReived', ([], {}), '()\n', (15517, 15519), False, 'from gripit.core.ros_image import ROSImage, OnTopicReived\n'), ((16497, 16522), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (16520, 16522), False, 'import sys, math, collections, math, copy\n'), ((16570, 16585), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16583, 16585), True, 'import PyQt5.QtGui as QtGui\n'), ((16618, 16645), 'PyQt5.QtGui.QGridLayout', 'QtGui.QGridLayout', (['debugTab'], {}), '(debugTab)\n', (16635, 16645), True, 'import PyQt5.QtGui as QtGui\n'), ((16738, 16753), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16751, 16753), True, 'import PyQt5.QtGui as QtGui\n'), ((16784, 16811), 'PyQt5.QtGui.QGridLayout', 'QtGui.QGridLayout', (['edgeView'], {}), '(edgeView)\n', (16801, 16811), True, 'import PyQt5.QtGui as QtGui\n'), ((16922, 16937), 'PyQt5.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (16935, 16937), True, 'import PyQt5.QtGui as QtGui\n'), ((16974, 17007), 'PyQt5.QtGui.QGridLayout', 'QtGui.QGridLayout', (['pointCloudView'], {}), '(pointCloudView)\n', (16991, 17007), True, 'import PyQt5.QtGui as QtGui\n'), ((21499, 21520), 'numpy.stack', 'np.stack', (['timage_list'], {}), '(timage_list)\n', (21507, 21520), True, 'import numpy as np\n'), ((24705, 24737), 'numpy.stack', 'np.stack', (['(rgbImage, depthImage)'], {}), 
'((rgbImage, depthImage))\n', (24713, 24737), True, 'import numpy as np\n'), ((25880, 25919), 'PyQt5.QtWidgets.QCheckBox', 'QtWdgt.QCheckBox', (['"""Display Edge Points"""'], {}), "('Display Edge Points')\n", (25896, 25919), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((26017, 26049), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Shift Edges"""'], {}), "('Shift Edges')\n", (26034, 26049), True, 'import PyQt5.QtGui as QtGui\n'), ((26233, 26263), 'PyQt5.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Grip Pair"""'], {}), "('Grip Pair')\n", (26250, 26263), True, 'import PyQt5.QtGui as QtGui\n'), ((29006, 29078), 'numpy.asarray', 'np.asarray', (['(eigenVectors[0][0], eigenVectors[0][1], eigenVectors[0][2])'], {}), '((eigenVectors[0][0], eigenVectors[0][1], eigenVectors[0][2]))\n', (29016, 29078), True, 'import numpy as np\n'), ((29098, 29170), 'numpy.asarray', 'np.asarray', (['(eigenVectors[1][0], eigenVectors[1][1], eigenVectors[1][2])'], {}), '((eigenVectors[1][0], eigenVectors[1][1], eigenVectors[1][2]))\n', (29108, 29170), True, 'import numpy as np\n'), ((29188, 29260), 'numpy.asarray', 'np.asarray', (['(eigenVectors[2][0], eigenVectors[2][1], eigenVectors[2][2])'], {}), '((eigenVectors[2][0], eigenVectors[2][1], eigenVectors[2][2]))\n', (29198, 29260), True, 'import numpy as np\n'), ((29280, 29309), 'numpy.vstack', 'np.vstack', (['[position, normal]'], {}), '([position, normal])\n', (29289, 29309), True, 'import numpy as np\n'), ((32473, 32507), 'logging.info', 'log.info', (['"""Rendering Point Cloud."""'], {}), "('Rendering Point Cloud.')\n", (32481, 32507), True, 'import logging as log\n'), ((2007, 2047), 'logging.warning', 'log.warning', (['"""Image datastore is empty."""'], {}), "('Image datastore is empty.')\n", (2018, 2047), True, 'import logging as log\n'), ((2290, 2327), 'logging.warning', 'log.warning', (['"""Invalid index entered."""'], {}), "('Invalid index entered.')\n", (2301, 2327), True, 'import logging as log\n'), ((6354, 6372), 
'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (6370, 6372), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((11492, 11513), 'PyQt5.QtGui.QIntValidator', 'QtGui.QIntValidator', ([], {}), '()\n', (11511, 11513), True, 'import PyQt5.QtGui as QtGui\n'), ((11540, 11558), 'PyQt5.QtWidgets.QLineEdit', 'QtWdgt.QLineEdit', ([], {}), '()\n', (11556, 11558), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((18127, 18158), 'logging.info', 'log.info', (['"""Reprocessing Image."""'], {}), "('Reprocessing Image.')\n", (18135, 18158), True, 'import logging as log\n'), ((20196, 20226), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {'name': '"""ImageView"""'}), "(name='ImageView')\n", (20208, 20226), True, 'import pyqtgraph as pg\n'), ((20978, 20996), 'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (20994, 20996), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((23878, 23908), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {'name': '"""ImageView"""'}), "(name='ImageView')\n", (23890, 23908), True, 'import pyqtgraph as pg\n'), ((24472, 24490), 'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (24488, 24490), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((25223, 25231), 'builtins.range', 'range', (['(2)'], {}), '(2)\n', (25228, 25231), False, 'from builtins import range\n'), ((31445, 31453), 'builtins.range', 'range', (['(2)'], {}), '(2)\n', (31450, 31453), False, 'from builtins import range\n'), ((33242, 33256), 'gripit.core.point_cloud_render_element.SceneElement', 'SceneElement', ([], {}), '()\n', (33254, 33256), False, 'from gripit.core.point_cloud_render_element import SceneElement\n'), ((1446, 1453), 'builtins.super', 'super', ([], {}), '()\n', (1451, 1453), False, 'from builtins import super\n'), ((2201, 2215), 'builtins.str', 'str', (['IMAGE_NUM'], {}), '(IMAGE_NUM)\n', (2204, 2215), False, 'from builtins import str\n'), ((11978, 11994), 'PyQt5.QtGui.QSlider', 'QtGui.QSlider', (['(1)'], {}), '(1)\n', (11991, 11994), True, 'import 
PyQt5.QtGui as QtGui\n'), ((15633, 15746), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['cv2Img.data', 'cv2Img.shape[1]', 'cv2Img.shape[0]', '(3 * cv2Img.shape[1])', 'QtGui.QImage.Format_RGB888'], {}), '(cv2Img.data, cv2Img.shape[1], cv2Img.shape[0], 3 * cv2Img.\n shape[1], QtGui.QImage.Format_RGB888)\n', (15645, 15746), True, 'import PyQt5.QtGui as QtGui\n'), ((19879, 19909), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {'name': '"""ImageView"""'}), "(name='ImageView')\n", (19891, 19909), True, 'import pyqtgraph as pg\n'), ((20455, 20473), 'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (20471, 20473), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((22072, 22126), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['segments[index][0]', 'segments[index][1]'], {}), '(segments[index][0], segments[index][1])\n', (22086, 22126), True, 'import PyQt5.QtCore as QtCore\n'), ((22152, 22214), 'PyQt5.QtCore.QPointF', 'QtCore.QPointF', (['segments[index + 1][0]', 'segments[index + 1][1]'], {}), '(segments[index + 1][0], segments[index + 1][1])\n', (22166, 22214), True, 'import PyQt5.QtCore as QtCore\n'), ((22241, 22272), 'PyQt5.QtCore.QLineF', 'QtCore.QLineF', (['startPos', 'endPos'], {}), '(startPos, endPos)\n', (22254, 22272), True, 'import PyQt5.QtCore as QtCore\n'), ((22359, 22371), 'PyQt5.QtGui.QPen', 'QtGui.QPen', ([], {}), '()\n', (22369, 22371), True, 'import PyQt5.QtGui as QtGui\n'), ((22472, 22498), 'PyQt5.QtWidgets.QGraphicsLineItem', 'QtWdgt.QGraphicsLineItem', ([], {}), '()\n', (22496, 22498), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((23572, 23602), 'pyqtgraph.ImageView', 'pg.ImageView', ([], {'name': '"""ImageView"""'}), "(name='ImageView')\n", (23584, 23602), True, 'import pyqtgraph as pg\n'), ((24049, 24067), 'PyQt5.QtWidgets.QComboBox', 'QtWdgt.QComboBox', ([], {}), '()\n', (24065, 24067), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((25299, 25325), 'PyQt5.QtWidgets.QGraphicsLineItem', 'QtWdgt.QGraphicsLineItem', ([], {}), '()\n', (25323, 25325), 
True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((33185, 33199), 'gripit.core.point_cloud_render_element.SceneElement', 'SceneElement', ([], {}), '()\n', (33197, 33199), False, 'from gripit.core.point_cloud_render_element import SceneElement\n'), ((11059, 11067), 'builtins.str', 'str', (['val'], {}), '(val)\n', (11062, 11067), False, 'from builtins import str\n'), ((11417, 11425), 'builtins.int', 'int', (['val'], {}), '(val)\n', (11420, 11425), False, 'from builtins import int\n'), ((12223, 12260), 'builtins.int', 'int', (["((item['max'] - item['min']) / 10)"], {}), "((item['max'] - item['min']) / 10)\n", (12226, 12260), False, 'from builtins import int\n'), ((12967, 12991), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', ([], {}), '()\n', (12989, 12991), True, 'import PyQt5.QtGui as QtGui\n'), ((13018, 13036), 'PyQt5.QtWidgets.QLineEdit', 'QtWdgt.QLineEdit', ([], {}), '()\n', (13034, 13036), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((22401, 22443), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['color[0]', 'color[1]', 'color[2]'], {}), '(color[0], color[1], color[2])\n', (22413, 22443), True, 'import PyQt5.QtGui as QtGui\n'), ((11361, 11369), 'builtins.str', 'str', (['val'], {}), '(val)\n', (11364, 11369), False, 'from builtins import str\n'), ((11857, 11865), 'builtins.int', 'int', (['val'], {}), '(val)\n', (11860, 11865), False, 'from builtins import int\n'), ((13332, 13346), 'PyQt5.QtWidgets.QText', 'QtWdgt.QText', ([], {}), '()\n', (13344, 13346), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((29381, 29432), 'numpy.asarray', 'np.asarray', (['[position[0], position[1], position[2]]'], {}), '([position[0], position[1], position[2]])\n', (29391, 29432), True, 'import numpy as np\n'), ((29450, 29569), 'numpy.asarray', 'np.asarray', (['[normal[0] * length + position[0], normal[1] * length + position[1], normal\n [2] * length + position[2]]'], {}), '([normal[0] * length + position[0], normal[1] * length + position\n [1], normal[2] * length + position[2]])\n', 
(29460, 29569), True, 'import numpy as np\n'), ((29778, 29829), 'numpy.asarray', 'np.asarray', (['[position[0], position[1], position[2]]'], {}), '([position[0], position[1], position[2]])\n', (29788, 29829), True, 'import numpy as np\n'), ((29847, 29974), 'numpy.asarray', 'np.asarray', (['[direction[0] * length + position[0], direction[1] * length + position[1], \n direction[2] * length + position[2]]'], {}), '([direction[0] * length + position[0], direction[1] * length +\n position[1], direction[2] * length + position[2]])\n', (29857, 29974), True, 'import numpy as np\n'), ((30186, 30237), 'numpy.asarray', 'np.asarray', (['[position[0], position[1], position[2]]'], {}), '([position[0], position[1], position[2]])\n', (30196, 30237), True, 'import numpy as np\n'), ((30255, 30379), 'numpy.asarray', 'np.asarray', (['[latitude[0] * length + position[0], latitude[1] * length + position[1], \n latitude[2] * length + position[2]]'], {}), '([latitude[0] * length + position[0], latitude[1] * length +\n position[1], latitude[2] * length + position[2]])\n', (30265, 30379), True, 'import numpy as np\n'), ((32073, 32099), 'PyQt5.QtWidgets.QGraphicsRectItem', 'QtWdgt.QGraphicsRectItem', ([], {}), '()\n', (32097, 32099), True, 'import PyQt5.QtWidgets as QtWdgt\n'), ((11941, 11949), 'builtins.str', 'str', (['val'], {}), '(val)\n', (11944, 11949), False, 'from builtins import str\n'), ((13422, 13451), 'gripit.gui.FrameLayout.FrameLayout', 'CollapsableBox', (["item['label']"], {}), "(item['label'])\n", (13436, 13451), True, 'from gripit.gui.FrameLayout import FrameLayout as CollapsableBox\n'), ((22297, 22320), 'numpy.random.rand', 'np.random.rand', (['(1)', '(1)', '(3)'], {}), '(1, 1, 3)\n', (22311, 22320), True, 'import numpy as np\n'), ((12779, 12817), 'logging.warning', 'log.warning', (['"""Non-numeric value added"""'], {}), "('Non-numeric value added')\n", (12790, 12817), True, 'import logging as log\n'), ((12713, 12721), 'builtins.str', 'str', (['val'], {}), '(val)\n', (12716, 
12721), False, 'from builtins import str\n')] |
'''
Wrapper for gender recognition model
'''
import cv2
import mxnet as mx
import numpy as np
class GenderClassifier:
    '''
    Interface for gender classification model.
    Must implement the predict() method.
    predict() must accept an image (np.ndarray) as argument.
    '''
    def __init__(self):
        # Abstract base: direct instantiation is a programming error.
        raise NotImplementedError('NotImplementedError: attempted usage of abstract gender classifier')
    def predict(self, image: np.ndarray):
        # Subclasses must override this; the concrete implementation in this
        # module (SSRNet) returns a (gender, score) tuple.
        raise NotImplementedError('NotImplementedError: attempted usage of abstract gender classifier')
class SSRNet(GenderClassifier):
    '''
    Soft Stagewise Regression Network
    :param prefix: (str) prefix of path to model
    :param epoch: (int) training epoch at which model is saved
    :param input_height: (int) input height of the model
    :param input_width: (int) input width of the model
    '''
    def __init__(
        self,
        prefix: str,
        epoch: int,
        input_height: int = 64,
        input_width: int = 64
    ):
        # Inference always runs on CPU.
        self.context = mx.cpu()
        self.input_height = input_height
        self.input_width = input_width
        self.model = self.__load_model(prefix, epoch)
    def __load_model(self, prefix: str, epoch: int) -> 'mx.mod.Module':
        """Load the checkpoint and bind an inference-only module."""
        print('loading SSR-Net...')
        symbol, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
        # Truncate the graph at an internal layer instead of using the
        # checkpoint's final output.
        all_layers = symbol.get_internals()
        symbol = all_layers['_mulscalar16_output']
        module = mx.mod.Module(
            symbol=symbol,
            data_names=('data', 'stage_num0', 'stage_num1', 'stage_num2'),context=self.context,
            label_names = None,
        )
        # Bind with batch size 1; shapes must match __preprocess_image().
        module.bind(data_shapes=[
            ('data', (1, 3, self.input_height, self.input_width)),
            ('stage_num0', (1, 3)),
            ('stage_num1', (1, 3)),
            ('stage_num2', (1, 3)),
        ])
        module.set_params(arg_params, aux_params)
        print('loaded SSR-Net successfully\n')
        return module
    def __preprocess_image(self, image: np.ndarray) -> mx.io.DataBatch:
        """Resize and reshape an image into a single-sample data batch."""
        image = cv2.resize(image, (self.input_height, self.input_width))
        # Reverse the channel order — presumably BGR->RGB, since OpenCV
        # delivers BGR; TODO confirm against the training pipeline.
        image = image[:, :, ::-1]
        # HWC -> CHW, then prepend a batch dimension of 1.
        image = np.transpose(image, (2, 0, 1))
        input_blob = np.expand_dims(image, axis=0)
        data = mx.nd.array(input_blob)
        # The three extra [[0, 1, 2]] inputs feed the stage_num0..2 slots
        # bound in __load_model().
        return mx.io.DataBatch(data=(
            data,
            mx.nd.array([[0, 1, 2]]),
            mx.nd.array([[0, 1, 2]]),
            mx.nd.array([[0, 1, 2]]),
        ))
    def predict(self, image: np.ndarray) -> 'tuple[int, float]':
        '''
        Forward pass / inference
        :param image: (np.ndarray) input image
        :return: (tuple) gender, score
        '''
        data_batch = self.__preprocess_image(image)
        self.model.forward(data_batch, is_train=False)
        gender_score = float(self.model.get_outputs()[0].asnumpy()[0])
        # Scores above 0.5 are classified as gender 1, otherwise 0.
        gender = 1 if gender_score > 0.5 else 0
        return gender, gender_score
| [
"numpy.transpose",
"mxnet.mod.Module",
"numpy.expand_dims",
"mxnet.cpu",
"mxnet.nd.array",
"mxnet.model.load_checkpoint",
"cv2.resize"
] | [((1049, 1057), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (1055, 1057), True, 'import mxnet as mx\n'), ((1325, 1364), 'mxnet.model.load_checkpoint', 'mx.model.load_checkpoint', (['prefix', 'epoch'], {}), '(prefix, epoch)\n', (1349, 1364), True, 'import mxnet as mx\n'), ((1479, 1614), 'mxnet.mod.Module', 'mx.mod.Module', ([], {'symbol': 'symbol', 'data_names': "('data', 'stage_num0', 'stage_num1', 'stage_num2')", 'context': 'self.context', 'label_names': 'None'}), "(symbol=symbol, data_names=('data', 'stage_num0', 'stage_num1',\n 'stage_num2'), context=self.context, label_names=None)\n", (1492, 1614), True, 'import mxnet as mx\n'), ((2089, 2145), 'cv2.resize', 'cv2.resize', (['image', '(self.input_height, self.input_width)'], {}), '(image, (self.input_height, self.input_width))\n', (2099, 2145), False, 'import cv2\n'), ((2197, 2227), 'numpy.transpose', 'np.transpose', (['image', '(2, 0, 1)'], {}), '(image, (2, 0, 1))\n', (2209, 2227), True, 'import numpy as np\n'), ((2250, 2279), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2264, 2279), True, 'import numpy as np\n'), ((2295, 2318), 'mxnet.nd.array', 'mx.nd.array', (['input_blob'], {}), '(input_blob)\n', (2306, 2318), True, 'import mxnet as mx\n'), ((2388, 2412), 'mxnet.nd.array', 'mx.nd.array', (['[[0, 1, 2]]'], {}), '([[0, 1, 2]])\n', (2399, 2412), True, 'import mxnet as mx\n'), ((2426, 2450), 'mxnet.nd.array', 'mx.nd.array', (['[[0, 1, 2]]'], {}), '([[0, 1, 2]])\n', (2437, 2450), True, 'import mxnet as mx\n'), ((2464, 2488), 'mxnet.nd.array', 'mx.nd.array', (['[[0, 1, 2]]'], {}), '([[0, 1, 2]])\n', (2475, 2488), True, 'import mxnet as mx\n')] |
from typing import List
import modelkit
import numpy as np
class Vectorizer(modelkit.Model[List[str], List[int]]):
    """Map IMDB review tokens to integer ids via a vocabulary asset.

    Ids 0 and 1 are reserved (padding and out-of-vocabulary, respectively);
    vocabulary entries are numbered from 2 upwards.
    """

    CONFIGURATIONS = {
        "imdb_vectorizer": {"asset": "imdb/vectorizer:0.0[/vocabulary.txt]"}
    }
    TEST_CASES = [
        {"item": [], "result": []},
        {"item": [], "keyword_args": {"length": 10}, "result": [0] * 10},
        {"item": ["movie"], "result": [888]},
        {"item": ["unknown_token"], "result": []},
        {
            "item": ["unknown_token"],
            "keyword_args": {"drop_oov": False},
            "result": [1],
        },
        {"item": ["movie", "unknown_token", "scenes"], "result": [888, 1156]},
        {
            "item": ["movie", "unknown_token", "scenes"],
            "keyword_args": {"drop_oov": False},
            "result": [888, 1, 1156],
        },
        {
            "item": ["movie", "unknown_token", "scenes"],
            "keyword_args": {"length": 10},
            "result": [888, 1156, 0, 0, 0, 0, 0, 0, 0, 0],
        },
        {
            "item": ["movie", "unknown_token", "scenes"],
            "keyword_args": {"length": 10, "drop_oov": False},
            "result": [888, 1, 1156, 0, 0, 0, 0, 0, 0, 0],
        },
    ]

    def _load(self):
        # Token -> id map; ids start at 2 because 0/1 are reserved.
        self.vocabulary = {}
        with open(self.asset_path, "r", encoding="utf-8") as f:
            for i, k in enumerate(f):
                self.vocabulary[k.strip()] = i + 2
        # Unknown tokens fall back to the OOV id (1).
        self._vectorizer = np.vectorize(lambda x: self.vocabulary.get(x, 1))

    def _predict(self, tokens, length=None, drop_oov=True):
        """Vectorize *tokens* into a list of integer ids.

        :param tokens: list of string tokens
        :param length: if given, pad with 0 / truncate to exactly this length
        :param drop_oov: when True, out-of-vocabulary tokens are dropped
            instead of being emitted as id 1
        """
        # BUG FIX: dtype was np.int, which was deprecated in NumPy 1.20 and
        # removed in 1.24; the builtin int is the documented replacement.
        vectorized = (
            np.array(self._vectorizer(tokens), dtype=int)
            if tokens
            else np.array([], dtype=int)
        )
        if drop_oov and len(vectorized):
            vectorized = np.delete(vectorized, vectorized == 1)
        if not length:
            return vectorized.tolist()
        # BUG FIX: use an int buffer so the padded output contains ints, as
        # the declared List[int] output requires (np.zeros defaults to float).
        result = np.zeros(length, dtype=int)
        vectorized = vectorized[:length]
        result[: len(vectorized)] = vectorized
        return result.tolist()
| [
"numpy.array",
"numpy.zeros",
"numpy.delete"
] | [((1903, 1919), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (1911, 1919), True, 'import numpy as np\n'), ((1685, 1708), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (1693, 1708), True, 'import numpy as np\n'), ((1785, 1823), 'numpy.delete', 'np.delete', (['vectorized', '(vectorized == 1)'], {}), '(vectorized, vectorized == 1)\n', (1794, 1823), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import shutil
from functools import lru_cache
import numpy as np
from ncc.data.constants import DEFAULT_MAX_TARGET_POSITIONS
from ncc.utils.file_ops import file_io
from ncc.utils.path_manager import PathManager
from .mmap_indexed_dataset import MMapIndexedDatasetBuilder
from ..ncc_dataset import NccDataset
def index_file_path(prefix_path):
    """Return the path of the index (.idx) file for the given dataset prefix."""
    return '{}.idx'.format(prefix_path)
def seq_file_path(prefix_path):
    """Return the path of the sequence (.seq) file for the given dataset prefix."""
    return '{}.seq'.format(prefix_path)
class SeqIndexedDataset(NccDataset):
    """In-memory dataset over a flat int32 index file.

    On-disk format: an 8-byte magic header followed by raw int32 values,
    as written by SeqIndexedDatasetBuilder.
    """
    _HDR_MAGIC = b'SEQIDX\x00\x00'
    _dtype = np.int32

    def __init__(self, path):
        self.path = path
        self.read_data(path)

    def read_data(self, path):
        """Load every value from the .idx file at *path* into self._data."""
        with file_io.open(index_file_path(path), mode='rb') as stream:
            magic_test = stream.read(8)
            assert self._HDR_MAGIC == magic_test, (
                'Index file doesn\'t match expected format. '
                'Make sure that --dataset-impl is configured properly.'
            )
            buffer = stream.read()
            self._data = np.frombuffer(buffer, dtype=self._dtype)

    @lru_cache(maxsize=8)
    def __getitem__(self, i):
        return self._data[i]

    @staticmethod
    def exists(path):
        """Return True when the index file for *path* exists."""
        return PathManager.exists(index_file_path(path))

    def __len__(self):
        return len(self._data)

    def __eq__(self, other):
        assert self._HDR_MAGIC == other._HDR_MAGIC
        # BUG FIX: `self._data == other._data` is an elementwise numpy
        # comparison that returns an array, so `a == b` could not be used in
        # a boolean context; compare the arrays as a whole instead.
        return np.array_equal(self._data, other._data)

    # BUG FIX: defining __eq__ implicitly sets __hash__ to None, making
    # instances unhashable and breaking the lru_cache on __getitem__
    # (functools keys the cache on `self`); restore identity hashing.
    __hash__ = object.__hash__

    def truncate(self, start=0, end=None):
        """Keep only the values in [start, end)."""
        if end is None:
            end = len(self)
        self._data = self._data[start:end]

    def append(self, new_data):
        """Concatenate another SeqIndexedDataset's values onto this one."""
        self._data = np.concatenate([self._data, new_data._data])

    def clip(self, min_position=0, max_position=DEFAULT_MAX_TARGET_POSITIONS):
        """Clamp every stored value into [min_position, max_position]."""
        self._data = np.clip(self._data, min_position, max_position)
class SeqIndexedDatasetBuilder(object):
    # Writer for the SeqIndexedDataset on-disk format: magic header followed
    # by raw int values; values are buffered in memory and flushed by
    # finalize().
    # NOTE(review): this class is shadowed by the
    # SeqIndexedDatasetBuilder(MMapIndexedDatasetBuilder) definition further
    # down, so at module level it is dead code — confirm nothing binds it
    # before the redefinition.
    def __init__(self, out_file, dtype=SeqIndexedDataset._dtype):
        # Open the .idx file and write the magic header up front.
        self._data_file = file_io.open(index_file_path(out_file), 'wb')
        self._data_file.write(SeqIndexedDataset._HDR_MAGIC)
        self._data = []
        self._dtype = dtype
    def add_item(self, idx):
        # Buffer a single index value for the final write.
        self._data.append(idx)
    def merge_file_(self, another_file):
        # Append all values from another builder's index file (header is
        # validated, then the remaining bytes are decoded as self._dtype).
        with file_io.open(index_file_path(another_file), 'rb') as f:
            version = f.read(8)
            assert version == SeqIndexedDataset._HDR_MAGIC
            np_array = np.frombuffer(f.read(), dtype=self._dtype)
            self._data.extend(np_array.tolist())
    def finalize(self):
        # Flush the buffered values as raw bytes (C order) and close the file.
        np_array = np.array(self._data, dtype=self._dtype)
        self._data_file.write(np_array.tobytes(order='C'))
        self._data_file.close()
class SeqIndexedDatasetBuilder(MMapIndexedDatasetBuilder):
    # mmap-based builder variant; shadows the file-stream based
    # SeqIndexedDatasetBuilder defined immediately above.
    def merge_file_(self, another_file):
        """merge sub file(bin/idx) for multi-processing"""
        # Concatenate index
        # NOTE(review): SeqIndexedDataset defines no `Index` attribute in this
        # file -- this lookup would raise AttributeError; it likely should be
        # MMapIndexedDataset.Index from mmap_indexed_dataset. Confirm.
        index = SeqIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        for size in index.sizes:
            self._sizes.append(size)
        # Concatenate data
        with file_io.open(seq_file_path(another_file), 'rb') as f:
            # append sub-bin/idx files to 1st bin/idx file
            shutil.copyfileobj(f, self._data_file)
    def add_item(self, tensor):
        # Write each scalar of the (expanded) tensor as its own entry.
        for t in tensor[..., None]:
            # write an array
            np_array = np.array(t.numpy(), dtype=self._dtype)  # type transform
            # bin file
            self._data_file.write(np_array.tobytes(order='C'))  # write np.array into C stream
            # idx file
            self._sizes.append(np_array.size)
    def finalize(self, index_file):
        # assert len(self._sizes) > 0, Exception('{} {}'.format(self._data_file, self._sizes))
        # NOTE(review): same `SeqIndexedDataset.Index` concern as merge_file_.
        self._data_file.close()
        with SeqIndexedDataset.Index.writer(index_file, self._dtype) as index:
            index.write(self._sizes)
| [
"numpy.frombuffer",
"numpy.clip",
"numpy.array",
"functools.lru_cache",
"shutil.copyfileobj",
"numpy.concatenate"
] | [((1099, 1119), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': '(8)'}), '(maxsize=8)\n', (1108, 1119), False, 'from functools import lru_cache\n'), ((1647, 1691), 'numpy.concatenate', 'np.concatenate', (['[self._data, new_data._data]'], {}), '([self._data, new_data._data])\n', (1661, 1691), True, 'import numpy as np\n'), ((1793, 1840), 'numpy.clip', 'np.clip', (['self._data', 'min_position', 'max_position'], {}), '(self._data, min_position, max_position)\n', (1800, 1840), True, 'import numpy as np\n'), ((2555, 2594), 'numpy.array', 'np.array', (['self._data'], {'dtype': 'self._dtype'}), '(self._data, dtype=self._dtype)\n', (2563, 2594), True, 'import numpy as np\n'), ((1052, 1092), 'numpy.frombuffer', 'np.frombuffer', (['buffer'], {'dtype': 'self._dtype'}), '(buffer, dtype=self._dtype)\n', (1065, 1092), True, 'import numpy as np\n'), ((3225, 3263), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f', 'self._data_file'], {}), '(f, self._data_file)\n', (3243, 3263), False, 'import shutil\n')] |
from torch.utils.data import Dataset
from imgaug.augmentables.kps import Keypoint, KeypointsOnImage
import numpy as np
from Utils import *
import imgaug.augmenters as iaa
import imgaug.augmentables.kps
import cv2
import random
import yaml
import torchfile
import scipy.io
from pathlib import Path
class Database(Dataset):
    """Unified keypoint dataset over CelebA/MAFL, LS3D/300W-LP and Human3.6.

    ``preparedb()`` installs dataset-specific helper functions (image
    loading, bounding-box and groundtruth access) as instance attributes;
    the actual per-sample loading strategy is injected via
    ``function_for_dataloading`` and dispatched from ``__getitem__``.
    Note the unusual calling convention: the installed helpers are plain
    functions stored on the instance, so they are invoked as
    ``self.helper(self, ...)`` with ``self`` passed explicitly.
    """
    def __init__(self,dataset_name,number_of_channels,test=False,image_keypoints=None, function_for_dataloading=None,augmentations=None,use_box=False):
        """Configure the dataset.

        Args:
            dataset_name: one of 'CelebA', 'LS3D', 'Human3.6'.
            number_of_channels: number of keypoint heatmap channels.
            test: when True, evaluation splits are prepared.
            image_keypoints: optional dict mapping filename -> keypoint array;
                when given, self.files is restricted to its keys.
            function_for_dataloading: one of the get_* methods below, called
                from __getitem__.
            augmentations: imgaug augmenter applied during training loads.
            use_box: include 'bounding_box' in superpoint samples.
        """
        self.image_keypoints = image_keypoints
        self.number_of_channels=number_of_channels
        self.test=test
        self.use_box=use_box
        self.dataset_name=dataset_name
        # Installs the dataset-specific helpers and populates self.files.
        self.preparedb()
        self.function_for_dataloading = function_for_dataloading
        self.augmentations=augmentations
        # Fixed up-scaling augmenters used for multi-scale superpoint distillation.
        self.SuperpointScaleDistill1 = iaa.Affine(scale={"x": 1.3, "y": 1.3})
        self.SuperpointScaleDistill2 = iaa.Affine(scale={"x": 1.6, "y": 1.6})
    def __len__(self):
        # Number of samples in the active split.
        return len(self.files)
    def __getitem__(self, idx):
        # Delegate to the injected loading strategy.
        return self.function_for_dataloading(self,idx)
    def get_image_superpoint(self,idx):
        """Load sample *idx* as a grayscale tensor in [0, 1] (1 x H x W)."""
        name = self.files[idx]
        image =self.getimage_superpoint(self,name)
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image_gray = torch.from_numpy(np.expand_dims(image_gray, 0) / 255.0).float()
        if(self.use_box):
            bbox=self.getbox(self,name)
            bbox = torch.tensor(bbox)
            sample={'image_gray': image_gray, 'filename': name,'bounding_box':bbox}
            return sample
        sample = {'image_gray': image_gray, 'filename': name}
        return sample
    def get_image_superpoint_multiple_scales(self,idx):
        """Load sample *idx* at scales 1.0/1.3/1.6, stacked on channel dim."""
        name = self.files[idx]
        image =self.getimage_superpoint(self,name)
        image1 = self.SuperpointScaleDistill1(image=image)
        image2 = self.SuperpointScaleDistill2(image=image)
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image_gray = torch.from_numpy(np.expand_dims(image_gray, 0) / 255.0).float()
        image_gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
        image_gray1 = torch.from_numpy(np.expand_dims(image_gray1, 0) / 255.0).float()
        image_gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        image_gray2 = torch.from_numpy(np.expand_dims(image_gray2, 0) / 255.0).float()
        imagegrayfinal = torch.cat((image_gray, image_gray1, image_gray2,), dim=0)
        if(self.use_box):
            bbox=self.getbox(self,name)
            bbox = torch.tensor(bbox)
            sample={'image_gray': imagegrayfinal, 'filename': name,'bounding_box':bbox}
            return sample
        sample = {'image_gray': imagegrayfinal, 'filename': name}
        return sample
    def get_FAN_inference(self,idx):
        """Load sample *idx* as an RGB tensor (C x H x W, in [0, 1]) for FAN."""
        name = self.files[idx]
        image =self.getimage_FAN(self,name)
        image =torch.from_numpy(image / 255.0).permute(2, 0, 1).float()
        sample = {'image': image, 'filename': name}
        return sample
    def get_FAN_secondStep_evaluation(self,idx):
        """Load image plus groundtruth landmarks for evaluation."""
        name = self.files[idx]
        is_it_test_sample=bool(self.is_test_sample[idx])
        image =self.getimage_FAN(self,name,is_it_test_sample=is_it_test_sample)
        image =torch.from_numpy(image / 255.0).permute(2, 0, 1).float()
        groundtruth=torch.from_numpy(self.getGroundtruth(self,name,is_it_test_sample))
        sample = {'image': image, 'filename': name,'groundtruth':groundtruth,'is_it_test_sample':is_it_test_sample}
        return sample
    def get_FAN_secondStep_train(self, idx):
        """Training sample: augmented image + multi-channel Gaussian targets.

        Keypoints are stored at 1/4 image resolution; they are scaled up by 4
        before augmentation and back down afterwards.
        """
        name = self.files[idx]
        keypoints = self.image_keypoints[name]
        image,keypoints =self.getimage_FAN(self,name,self.augmentations, 4 * keypoints)
        keypoints = keypoints/4
        keypoints=keypoints.round()
        image = torch.from_numpy(image / 255.0).permute(2, 0, 1).float()
        # Boolean mask of which heatmap channels actually have a keypoint;
        # column 2 of `keypoints` holds the cluster/channel index.
        heatmaps_with_keypoints = torch.zeros(self.number_of_channels).bool()
        indeces = torch.from_numpy(keypoints[:, 2]).int().tolist()
        heatmaps_with_keypoints[indeces] = True
        shapegaussian = BuildMultiChannelGaussians(self.number_of_channels, keypoints.round())
        sample = {'image': image, 'GaussianShape': shapegaussian, 'heatmaps_with_keypoints': heatmaps_with_keypoints}
        return sample
    def get_FAN_firstStep_train(self, idx):
        """First-stage training sample: two augmented views plus up to 3000
        positive/negative keypoint correspondence pairs between them.

        Each pair row is [x1, y1, x2, y2, label]; unused rows stay at -1.
        """
        heatmapsize=64
        name1 = self.files[idx]
        keypoints1 = self.image_keypoints[name1]
        # Resample until an image with at least 9 detected keypoints is found.
        while(len(keypoints1)<9):
            idx = random.randint(0, len(self.files) - 1)
            name1 = self.files[idx]
            keypoints1 = self.image_keypoints[name1]
        image1 ,keypoints1=self.getimage_FAN(self,name1,self.augmentations,4*keypoints1)
        keypoints1 = keypoints1/4
        keypoints1=keypoints1.round()
        image1 = torch.from_numpy(image1 / 255.0).permute(2, 0, 1).float()
        #sample a different image or use the same image with probability 50%
        if (random.random() <0.5):
            idx2 = random.randint(0, len(self.files) - 1)
            name2 = self.files[idx2]
            keypoints2 = self.image_keypoints[name2]
            while(len(keypoints2)<9):
                idx2 = random.randint(0, len(self.files) - 1)
                name2 = self.files[idx2]
                keypoints2 = self.image_keypoints[name2]
            image2 ,keypoints2=self.getimage_FAN(self,name2,self.augmentations,4*keypoints2)
        else:
            name2 = self.files[idx]
            keypoints2 = self.image_keypoints[name2]
            image2 ,keypoints2=self.getimage_FAN(self,name2,self.augmentations,4*keypoints2)
            # NOTE(review): this branch rounds before the /4 below (the other
            # branch does not) -- confirm the asymmetry is intentional.
            keypoints2=keypoints2.round()
        keypoints2 = keypoints2/4
        keypoints2=keypoints2.round()
        image2 = torch.from_numpy(image2 / 255.0).permute(2, 0, 1).float()
        image = torch.cat((image1, image2))
        number_of_pairs=3000
        pairs = -1*np.ones((number_of_pairs, 5))
        pair_index = 0
        # positive pairs
        for i in range(len(keypoints1)):
            if(keypoints1[i, 2]==-1):continue
            indxes = keypoints2[:, 2] == keypoints1[i, 2]
            coord1 = keypoints1[i, :2]
            coord2 = keypoints2[indxes, :2]
            if (len(coord2) == 0): continue
            coord2 = coord2[0]
            # check that not of the coordinates are out of range cause of the augmentations
            if (sum(coord1 > heatmapsize-1) == 0 and sum(coord1 < 0) == 0) and (sum(coord2 > heatmapsize-1) == 0 and sum(coord2 < 0) == 0):
                # if(np.random.rand(1)[0]<0.75):
                if (pair_index >= number_of_pairs - 1): break
                pairs[pair_index, :2] = coord1
                pairs[pair_index, 2:4] = coord2
                pairs[pair_index, 4] = 1.0
                pair_index += 1
        # negative pairs
        for i in range(len(keypoints1)):
            clust=keypoints1[i, 2]
            if(clust==-1 or ((clust in keypoints2[:,2]) is False) ):continue
            coord1 = keypoints1[i, :2]
            coord2s=keypoints2[keypoints2[:,2]!=clust]
            # NOTE(review): `clust2` is read from keypoints2[j] while the
            # coordinates come from the filtered coord2s[j] -- the two indices
            # refer to different rows once any row was filtered out. Looks
            # like a bug; confirm against the training recipe before fixing.
            for j in range(len(coord2s)):
                clust2=keypoints2[j,2]
                if(clust2==-1 or (clust2 in keypoints1[:,2]) is False):continue
                coord2=coord2s[j,:2]
                if ((sum(coord1 > heatmapsize-2) == 0 and sum(coord1 < 0) == 0) and ( sum(coord2 > heatmapsize-2) == 0 and sum(coord2 < 0) == 0)):
                    if (pair_index >= number_of_pairs-1): break
                    pairs[pair_index, :2] = coord1
                    pairs[pair_index, 2:4] = coord2
                    pairs[pair_index, 4] = 0.0
                    pair_index += 1
        pairs=torch.from_numpy(pairs)
        gaussian1 = BuildGaussians(keypoints1)
        gaussian2 = BuildGaussians(keypoints2)
        gaussian=torch.cat((gaussian1.unsqueeze(0),gaussian2.unsqueeze(0)))
        sample = {'image': image, 'keypoints': pairs, 'keypointHeatmaps': gaussian}
        return sample
    def preparedb(self):
        """Install dataset-specific helpers on the instance and build the
        file list (self.files) for the selected dataset/split.

        The nested functions are assigned to instance attributes and later
        called as ``self.helper(self, ...)``.
        """
        # Resolve a dataset-relative image name to an absolute path.
        def GetFullImagePath(self,imagefile,istestsample=False):
            if self.dataset_name =='CelebA':
                return self.datapath+imagefile
            if self.dataset_name =='Human3.6':
                return self.imagepath+imagefile
            if self.dataset_name =='LS3D':
                if(istestsample):
                    return imagefile
                return self.datapath+imagefile
        if self.dataset_name in ['CelebA','LS3D']:
            # Expand the raw bounding box by a size-dependent margin, clipped
            # to the image extent; returns (minx, miny, maxx, maxy).
            def getFANBox(self,imagefile,W,H,is_test_sample=False):
                bbox = self.getbox(self,imagefile,is_test_sample)
                delta_x=1.2*bbox[2]-bbox[0]
                delta_y=2*bbox[3]-bbox[1]
                delta=0.5*(delta_x+delta_y)
                if(delta<20): tight_aux=8
                else: tight_aux=int(8*delta/100)
                minx=int(max(bbox[0]-tight_aux,0))
                miny=int(max(bbox[1]-tight_aux,0))
                maxx=int(min(bbox[2]+tight_aux,W-1))
                maxy=int(min(bbox[3]+tight_aux,H-1))
                return minx,miny,maxx,maxy
            # Map keypoints from original image coordinates into the 256x256
            # FAN crop. Mutates `keypoints` in place.
            def keypointsToFANResolution(self,imagefile,keypoints,W=None,H=None,is_test_sample=False):
                if(W is None or H is None):
                    W=self.W
                    H=self.H
                minx,miny,maxx,maxy=self.getFANBox(self,imagefile,W,H,is_test_sample)
                keypoints[:,0]=keypoints[:,0]-minx
                keypoints[:,1]=keypoints[:,1]-miny
                keypoints[:,0]=keypoints[:,0]*(256/(maxx-minx))
                keypoints[:,1]=keypoints[:,1]*(256/(maxy-miny))
                return keypoints
            # Inverse of keypointsToFANResolution (also in place).
            def keypointsToOriginalResolution(self,imagefile,keypoints):
                minx,miny,maxx,maxy=self.getFANBox(self,imagefile,self.W,self.H)
                keypoints[:,0]=keypoints[:,0]*((maxx-minx)/256)
                keypoints[:,1]=keypoints[:,1]*((maxy-miny)/256)
                keypoints[:,0]=keypoints[:,0]+minx
                keypoints[:,1]=keypoints[:,1]+miny
                return keypoints
            def getbox(self,imagefile,is_test_sample=False):
                bbox = self.boxes[imagefile].copy()
                return bbox
            # Derive a bounding box from the LS3D groundtruth landmarks.
            def getbox_fromlandmarks_ls3d_eval(self,imagefile,is_test_sample=False):
                try:
                    if(is_test_sample):
                        gt=torchfile.load(imagefile[:-4]+'.t7')
                    else:
                        gt_filename=self.GetFullImagePath(self,imagefile,is_test_sample)[:-4]+'.t7'
                        tempstring=gt_filename.split('/')
                        tempstring.insert(-2,'landmarks')
                        tempstring='/'.join(tempstring)
                        gt_filename=tempstring[:-3]+'_pts.mat'
                        gt=scipy.io.loadmat(gt_filename)['pts_3d']
                except:
                    # NOTE(review): bare except leaves `gt` undefined on
                    # failure, so the lines below would raise NameError.
                    pass
                bbox=[0,0,0,0]
                bbox[0]=int(min(gt[:,0]))
                bbox[1]=int(min(gt[:,1]))
                bbox[2]=int(max(gt[:,0]))
                bbox[3]=int(max(gt[:,1]))
                # Extend the box upward to include the forehead.
                bbox[1]=bbox[1]-(bbox[3]-bbox[1])/3
                return bbox
            def getGroundtruth_MALF(self,imagefile,is_test_sample):
                groundtruthpoints=self.groundtruth[imagefile]
                groundtruthpoints=self.keypointsToFANResolution(self,imagefile,groundtruthpoints,self.W,self.H)
                return groundtruthpoints
            def getGroundtruth_LS3D(self,imagefile,is_test_sample):
                image = cv2.cvtColor(cv2.imread(self.GetFullImagePath(self,imagefile,is_test_sample)), cv2.COLOR_BGR2RGB)
                if(is_test_sample):
                    groundtruthpoints=torchfile.load(imagefile[:-4]+'.t7')
                else:
                    gt_filename=self.GetFullImagePath(self,imagefile,is_test_sample)[:-4]+'.t7'
                    tempstring=gt_filename.split('/')
                    tempstring.insert(-2,'landmarks')
                    tempstring='/'.join(tempstring)
                    gt_filename=tempstring[:-3]+'_pts.mat'
                    groundtruthpoints=scipy.io.loadmat(gt_filename)['pts_3d']
                groundtruthpoints=self.keypointsToFANResolution(self,imagefile,groundtruthpoints,image.shape[1],image.shape[0],is_test_sample)
                return groundtruthpoints
            def getimage_superpoint(self,imagefile):
                image = cv2.cvtColor(cv2.imread(self.GetFullImagePath(self,imagefile,False)), cv2.COLOR_BGR2RGB)
                return image
            # Load an image, optionally augment it together with its keypoints,
            # then crop to the FAN box and resize to 256x256.
            def getimage_FAN(self,imagefile, augmentations=None, keypoints=None,is_it_test_sample=False):
                image = cv2.cvtColor(cv2.imread(self.GetFullImagePath(self,imagefile,is_it_test_sample)), cv2.COLOR_BGR2RGB)
                if(augmentations is not None):
                    keypoints_originalres=self.keypointsToOriginalResolution(self,imagefile,keypoints)
                    imgaug_keypoints = []
                    for i in range(len(keypoints)):
                        imgaug_keypoints.append(Keypoint(x=keypoints_originalres[i, 0], y=keypoints_originalres[i, 1]))
                    kpsoi = KeypointsOnImage(imgaug_keypoints, shape=image.shape)
                    image, keypoitns_aug = self.augmentations(image=image, keypoints=kpsoi)
                    keypoints_originalres = np.column_stack((keypoitns_aug.to_xy_array(), keypoints_originalres[:, 2:]))
                minx,miny,maxx,maxy=self.getFANBox(self,imagefile,image.shape[1],image.shape[0],is_it_test_sample)
                image=image[miny:maxy,minx:maxx,:]
                image=cv2.resize(image,dsize=(256,256))
                if(keypoints is not None):
                    augmentedkeypoints=self.keypointsToFANResolution(self,imagefile,keypoints_originalres,self.W,self.H)
                    return image,augmentedkeypoints
                return image
            # Publish the helpers on the instance.
            self.GetFullImagePath=GetFullImagePath
            self.keypointsToOriginalResolution=keypointsToOriginalResolution
            self.keypointsToFANResolution=keypointsToFANResolution
            self.getimage_superpoint=getimage_superpoint
            self.getimage_FAN=getimage_FAN
            self.getbox=getbox
            self.getFANBox=getFANBox
            if self.dataset_name == 'CelebA':
                #load CelebA paths
                with open('paths/main.yaml') as file:
                    paths = yaml.load(file, Loader=yaml.FullLoader)
                self.datapath = paths['CelebA_datapath']
                assert self.datapath!=None, "Path missing!! Update 'CelebA_datapath' on paths/main.yaml with path to CelebA images."
                assert Path(self.datapath).exists(), f'Specified path to CelebA images does not exists {self.datapath}'
                with open('data/CelebA/list_eval_partition.txt', 'r') as f:
                    CelebAImages = f.read().splitlines()
                assert len(list(Path(self.datapath).glob('*.jpg')))==len(CelebAImages), f"There are missing CelebA images from {self.datapath}. Please specify a path that includes all CelebA images"
                self.boxes=load_keypoints('data/CelebA/CelebABoundingBoxes.pickle')
                # Native CelebA image size (H x W).
                self.H=218
                self.W=178
                # Deferred split construction; called at the end of preparedb.
                def init(self):
                    if (self.test):
                        with open('data/CelebA/mafl_testing.txt', 'r') as f:
                            TestImages = f.read().splitlines()
                        with open('data/CelebA/mafl_training.txt', 'r') as f:
                            TrainImages = f.read().splitlines()
                        self.groundtruth=load_keypoints('data/CelebA/MaflGroundtruthLandmarks.pickle')
                        # First 1000 entries are train samples used for eval calibration.
                        self.files = TrainImages[:1000] + TestImages
                        self.is_test_sample = np.ones(len(self.files))
                        self.is_test_sample[:1000]=0
                        self.getGroundtruth=getGroundtruth_MALF
                    else:
                        with open('data/CelebA/list_eval_partition.txt', 'r') as f:
                            CelebAImages = f.read().splitlines()
                        # Partition flag '0' marks the training split.
                        CelebATrainImages=[f[:-2] for f in CelebAImages if f[-1]=='0']
                        with open('data/CelebA/mafl_testing.txt', 'r') as f:
                            MaflTestImages = f.read().splitlines()
                        CelebATrainImages=list(set(CelebATrainImages)-set(MaflTestImages))
                        self.files = CelebATrainImages
            if self.dataset_name == 'LS3D':
                self.boxes=load_keypoints('data/LS3D/300W_LPBoundingBoxes.pickle')
                with open('paths/main.yaml') as file:
                    paths = yaml.load(file, Loader=yaml.FullLoader)
                self.datapath = paths['300WLP_datapath']
                assert self.datapath!=None, "Path missing!! Update '300WLP_datapath' on paths/main.yaml with path to 300WLP images."
                self.path_to_LS3Dbalanced=paths['LS3Dbalanced_datapath']
                self.H=450
                self.W=450
                def init(self):
                    if (self.test):
                        assert self.path_to_LS3Dbalanced!=None, "Path missing!! Update 'LS3Dbalanced_datapath' on paths/main.yaml with path to LS3Dbalanced images."
                        self.getbox=getbox_fromlandmarks_ls3d_eval
                        self.getGroundtruth=getGroundtruth_LS3D
                        # NOTE(review): `glob` is not imported in this file;
                        # presumably it comes from `from Utils import *` -- verify.
                        testfiles = glob.glob(self.path_to_LS3Dbalanced + '/**/*.jpg', recursive=True)
                        trainfiles= list(self.boxes.keys())
                        self.files=trainfiles[:1000]+testfiles
                        self.is_test_sample = np.ones(len(self.files))
                        self.is_test_sample[:1000]=0
                    else:
                        self.files = list(self.boxes.keys())
        if self.dataset_name == 'Human3.6':
            # Apply an imgaug augmentation to a keypoint array (in place).
            def tranformKeypoints(self,keypoints,augmentation,imageshape):
                imgaug_keypoints = []
                for i in range(len(keypoints)):
                    imgaug_keypoints.append(Keypoint(x=keypoints[i, 0], y=keypoints[i, 1]))
                kpsoi = KeypointsOnImage(imgaug_keypoints, shape=imageshape)
                keypoitns_aug = augmentation(keypoints=kpsoi)
                if(isinstance(keypoints,np.ndarray)):
                    keypoints[:,:2] = keypoitns_aug.to_xy_array()
                else:
                    keypoints[:,:2] = torch.from_numpy(keypoitns_aug.to_xy_array())
                return keypoints
            def keypointsToFANResolution(self,imagefile,keypoints):
                return self.tranformKeypoints(self,keypoints,self.scaleToFANRes,(450,450))
            def keypointsToOriginalResolution(self,imagefile,keypoints):
                return self.tranformKeypoints(self,keypoints,self.scaleToOriginalRes,(256,256))
            def getbox(self,imagefile,is_test_sample=False):
                bbox = self.boxes[imagefile].copy()
                return bbox
            def getGroundtruth(self,imagefile,is_test_sample):
                groundtruthpoints=self.groundtruth[imagefile]
                groundtruthpoints=self.flipGroundtruths(self,groundtruthpoints)
                groundtruthpoints=self.keypointsToFANResolution(self,imagefile,groundtruthpoints)
                return groundtruthpoints
            def getimage_superpoint(self,imagefile):
                image = cv2.cvtColor(cv2.imread(self.GetFullImagePath(self,imagefile,False)), cv2.COLOR_BGR2RGB)
                return image
            # Human3.6 variant: scale augmentation instead of box cropping.
            def getimage_FAN(self,imagefile, augmentations=None, keypoints=None,is_it_test_sample=False):
                image = cv2.cvtColor(cv2.imread(self.GetFullImagePath(self,imagefile,is_it_test_sample)), cv2.COLOR_BGR2RGB)
                if(augmentations is not None):
                    keypoints_originalres=self.keypointsToOriginalResolution(self,imagefile,keypoints)
                    imgaug_keypoints = []
                    for i in range(len(keypoints)):
                        imgaug_keypoints.append(Keypoint(x=keypoints_originalres[i, 0], y=keypoints_originalres[i, 1]))
                    kpsoi = KeypointsOnImage(imgaug_keypoints, shape=image.shape)
                    image, keypoitns_aug = self.augmentations(image=image, keypoints=kpsoi)
                    keypoints_originalres = np.column_stack((keypoitns_aug.to_xy_array(), keypoints_originalres[:, 2:]))
                scaledImage=self.scaleToFANRes(image=image)
                if(keypoints is not None):
                    augmentedkeypoints=self.keypointsToFANResolution(self,imagefile,keypoints_originalres)
                    return scaledImage,augmentedkeypoints
                return scaledImage
            # Swap left/right landmark clusters when the pose is mirrored,
            # then re-sort by cluster index and drop the index column.
            def flipGroundtruths(self,keypoints):
                keypoints = np.concatenate( (keypoints,np.expand_dims(np.array(range(len(keypoints))), axis=1)), axis=1)
                matchedPart1 = np.array( [[1, 6], [25, 17], [18, 26], [27, 19], [20, 28], [29, 21], [30, 22], [31, 23], ])
                matchedPart2 = np.array( [[2, 7],[3, 8], [4, 9], [5, 10]])
                if (keypoints[1, 0] >keypoints[6, 0]):
                    for i in range(matchedPart1.shape[0]):
                        idx1, idx2 = matchedPart1[i]
                        temp = keypoints[idx1,2]
                        keypoints[idx1,2] = keypoints[idx2,2]
                        keypoints[idx2,2] = temp
                if (keypoints[2, 0] > keypoints[7, 0]):
                    for i in range(matchedPart2.shape[0]):
                        idx1, idx2 = matchedPart2[i]
                        temp = keypoints[idx1, 2]
                        keypoints[idx1, 2] = keypoints[idx2, 2]
                        keypoints[idx2, 2] = temp
                keypoints=keypoints[np.argsort(keypoints[:,2])]
                keypoints=keypoints[:,:2]
                return keypoints
            # Publish the helpers on the instance.
            self.flipGroundtruths=flipGroundtruths
            self.GetFullImagePath=GetFullImagePath
            self.tranformKeypoints=tranformKeypoints
            self.keypointsToOriginalResolution=keypointsToOriginalResolution
            self.keypointsToFANResolution=keypointsToFANResolution
            self.getimage_superpoint=getimage_superpoint
            self.getimage_FAN=getimage_FAN
            self.getbox=getbox
            self.getGroundtruth=getGroundtruth
            with open('paths/main.yaml') as file:
                paths = yaml.load(file, Loader=yaml.FullLoader)
            self.datapath = paths['Human_datapath']
            self.imagepath=self.datapath + 'images'
            try:
                self.boxes=load_keypoints(self.datapath+'HumanBoundingBoxes.pickle')
            except:
                filename=self.datapath+'HumanBoundingBoxes.pickle'
                raise Exception('File '+filename+' was not found ')
            try:
                self.groundtruth=load_keypoints(self.datapath+'GroundtruthKeypoints.pickle')
            except:
                filename=self.datapath+'GroundtruthKeypoints.pickle'
                raise Exception('File '+filename+' was not found ')
            # Forward/backward resamplers between native and FAN resolution.
            self.scaleToFANRes = iaa.Sequential([iaa.Affine(scale={"x": 1.4, "y": 1.4}), iaa.Resize(256)])
            self.scaleToOriginalRes = iaa.Sequential([iaa.Resize(450), iaa.Affine(scale={"x": 1/1.4, "y": 1/1.4})])
            def init(self):
                self.files = list(k for k in self.boxes.keys())
                if (self.test):
                    filestrain=[f for f in self.files if 'train' in f][::50]
                    filestest=[f for f in self.files if 'test' in f]
                    self.files=filestrain[:1000]+filestest
                    self.is_test_sample = np.ones(len(self.files))
                    self.is_test_sample[:1000]=0
                else:
                    self.files=[f for f in self.files if 'train' in f]
        # When explicit keypoints are supplied, restrict files to their keys;
        # otherwise build the split with the dataset-specific init above.
        if (self.image_keypoints is not None):
            self.files = list(self.image_keypoints.keys())
        else:
            init(self)
| [
"imgaug.augmentables.kps.Keypoint",
"yaml.load",
"cv2.cvtColor",
"torchfile.load",
"numpy.ones",
"imgaug.augmenters.Resize",
"numpy.expand_dims",
"numpy.argsort",
"imgaug.augmenters.Affine",
"random.random",
"pathlib.Path",
"numpy.array",
"imgaug.augmentables.kps.KeypointsOnImage",
"cv2.re... | [((835, 873), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1.3, 'y': 1.3}"}), "(scale={'x': 1.3, 'y': 1.3})\n", (845, 873), True, 'import imgaug.augmenters as iaa\n'), ((913, 951), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1.6, 'y': 1.6}"}), "(scale={'x': 1.6, 'y': 1.6})\n", (923, 951), True, 'import imgaug.augmenters as iaa\n'), ((1250, 1289), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1262, 1289), False, 'import cv2\n'), ((1957, 1996), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1969, 1996), False, 'import cv2\n'), ((2105, 2145), 'cv2.cvtColor', 'cv2.cvtColor', (['image1', 'cv2.COLOR_BGR2GRAY'], {}), '(image1, cv2.COLOR_BGR2GRAY)\n', (2117, 2145), False, 'import cv2\n'), ((2256, 2296), 'cv2.cvtColor', 'cv2.cvtColor', (['image2', 'cv2.COLOR_BGR2GRAY'], {}), '(image2, cv2.COLOR_BGR2GRAY)\n', (2268, 2296), False, 'import cv2\n'), ((5049, 5064), 'random.random', 'random.random', ([], {}), '()\n', (5062, 5064), False, 'import random\n'), ((6015, 6044), 'numpy.ones', 'np.ones', (['(number_of_pairs, 5)'], {}), '((number_of_pairs, 5))\n', (6022, 6044), True, 'import numpy as np\n'), ((13953, 13988), 'cv2.resize', 'cv2.resize', (['image'], {'dsize': '(256, 256)'}), '(image, dsize=(256, 256))\n', (13963, 13988), False, 'import cv2\n'), ((14778, 14817), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (14787, 14817), False, 'import yaml\n'), ((17035, 17074), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (17044, 17074), False, 'import yaml\n'), ((18516, 18568), 'imgaug.augmentables.kps.KeypointsOnImage', 'KeypointsOnImage', (['imgaug_keypoints'], {'shape': 'imageshape'}), '(imgaug_keypoints, shape=imageshape)\n', (18532, 18568), False, 'from imgaug.augmentables.kps import Keypoint, 
KeypointsOnImage\n'), ((21354, 21447), 'numpy.array', 'np.array', (['[[1, 6], [25, 17], [18, 26], [27, 19], [20, 28], [29, 21], [30, 22], [31, 23]]'], {}), '([[1, 6], [25, 17], [18, 26], [27, 19], [20, 28], [29, 21], [30, 22\n ], [31, 23]])\n', (21362, 21447), True, 'import numpy as np\n'), ((21479, 21522), 'numpy.array', 'np.array', (['[[2, 7], [3, 8], [4, 9], [5, 10]]'], {}), '([[2, 7], [3, 8], [4, 9], [5, 10]])\n', (21487, 21522), True, 'import numpy as np\n'), ((22895, 22934), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (22904, 22934), False, 'import yaml\n'), ((12002, 12040), 'torchfile.load', 'torchfile.load', (["(imagefile[:-4] + '.t7')"], {}), "(imagefile[:-4] + '.t7')\n", (12016, 12040), False, 'import torchfile\n'), ((13494, 13547), 'imgaug.augmentables.kps.KeypointsOnImage', 'KeypointsOnImage', (['imgaug_keypoints'], {'shape': 'image.shape'}), '(imgaug_keypoints, shape=image.shape)\n', (13510, 13547), False, 'from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n'), ((15034, 15053), 'pathlib.Path', 'Path', (['self.datapath'], {}), '(self.datapath)\n', (15038, 15053), False, 'from pathlib import Path\n'), ((20545, 20598), 'imgaug.augmentables.kps.KeypointsOnImage', 'KeypointsOnImage', (['imgaug_keypoints'], {'shape': 'image.shape'}), '(imgaug_keypoints, shape=image.shape)\n', (20561, 20598), False, 'from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n'), ((22238, 22265), 'numpy.argsort', 'np.argsort', (['keypoints[:, 2]'], {}), '(keypoints[:, 2])\n', (22248, 22265), True, 'import numpy as np\n'), ((23616, 23654), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1.4, 'y': 1.4}"}), "(scale={'x': 1.4, 'y': 1.4})\n", (23626, 23654), True, 'import imgaug.augmenters as iaa\n'), ((23656, 23671), 'imgaug.augmenters.Resize', 'iaa.Resize', (['(256)'], {}), '(256)\n', (23666, 23671), True, 'import imgaug.augmenters as iaa\n'), ((23728, 23743), 
'imgaug.augmenters.Resize', 'iaa.Resize', (['(450)'], {}), '(450)\n', (23738, 23743), True, 'import imgaug.augmenters as iaa\n'), ((23745, 23791), 'imgaug.augmenters.Affine', 'iaa.Affine', ([], {'scale': "{'x': 1 / 1.4, 'y': 1 / 1.4}"}), "(scale={'x': 1 / 1.4, 'y': 1 / 1.4})\n", (23755, 23791), True, 'import imgaug.augmenters as iaa\n'), ((1328, 1357), 'numpy.expand_dims', 'np.expand_dims', (['image_gray', '(0)'], {}), '(image_gray, 0)\n', (1342, 1357), True, 'import numpy as np\n'), ((2035, 2064), 'numpy.expand_dims', 'np.expand_dims', (['image_gray', '(0)'], {}), '(image_gray, 0)\n', (2049, 2064), True, 'import numpy as np\n'), ((2185, 2215), 'numpy.expand_dims', 'np.expand_dims', (['image_gray1', '(0)'], {}), '(image_gray1, 0)\n', (2199, 2215), True, 'import numpy as np\n'), ((2336, 2366), 'numpy.expand_dims', 'np.expand_dims', (['image_gray2', '(0)'], {}), '(image_gray2, 0)\n', (2350, 2366), True, 'import numpy as np\n'), ((10605, 10643), 'torchfile.load', 'torchfile.load', (["(imagefile[:-4] + '.t7')"], {}), "(imagefile[:-4] + '.t7')\n", (10619, 10643), False, 'import torchfile\n'), ((18444, 18490), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'keypoints[i, 0]', 'y': 'keypoints[i, 1]'}), '(x=keypoints[i, 0], y=keypoints[i, 1])\n', (18452, 18490), False, 'from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n'), ((13394, 13464), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'keypoints_originalres[i, 0]', 'y': 'keypoints_originalres[i, 1]'}), '(x=keypoints_originalres[i, 0], y=keypoints_originalres[i, 1])\n', (13402, 13464), False, 'from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n'), ((20445, 20515), 'imgaug.augmentables.kps.Keypoint', 'Keypoint', ([], {'x': 'keypoints_originalres[i, 0]', 'y': 'keypoints_originalres[i, 1]'}), '(x=keypoints_originalres[i, 0], y=keypoints_originalres[i, 1])\n', (20453, 20515), False, 'from imgaug.augmentables.kps import Keypoint, KeypointsOnImage\n'), ((15286, 15305), 
'pathlib.Path', 'Path', (['self.datapath'], {}), '(self.datapath)\n', (15290, 15305), False, 'from pathlib import Path\n')] |
from resnap.benchmark.structure import Structure
from pymatgen.io.lammps.outputs import parse_lammps_log
import numpy as np
import argparse
import os
import sys
import shutil
import time
import math
"""
This module implements a core class ElasticJob that support
lammps data/imput/log files i/o for calculating elastic
constant of Re .
"""
__author__ = "<NAME> and <NAME>"
__copyright__ = "Copyright 2020, Tingzheng Hou and <NAME>"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "May 3, 2020"
class ElasticJob:
    """Driver for LAMMPS elastic-constant calculations on Re.

    Supported job types:
      * ``make``    -- create the strained sub-directories ``s<i>_<eta%>``.
      * ``makerun`` -- create the sub-directories and submit each job.
      * ``getEF``   -- collect energies/stresses from finished runs.
    """

    def __init__(self, directory, job="make", timer=5, etamin=-0.008,
                 etamax=0.009, etastep=0.002):
        """
        Args:
            directory: working directory holding the template input files.
            job: one of "make", "makerun", "getEF".
            timer: seconds to sleep between consecutive sbatch submissions.
            etamin, etamax, etastep: strain grid passed to np.arange
                (etamax is exclusive).

        Raises:
            ValueError: if *job* is not a recognized job type.
        """
        valid = {"make", "makerun", "getEF"}
        if job not in valid:
            # BUG FIX: message previously read "must be of of %r".
            raise ValueError("Job type must be one of %r." % valid)
        self.eta = np.arange(etamin, etamax, etastep)
        self.f_tensor = self.get_f_tensor()
        self.timer = timer
        self.job = job
        self.directory = directory

    def do_job(self):
        """Dispatch on self.job: harvest results, or build (and optionally
        submit) one strained run directory per (mode, eta) combination."""
        os.chdir(self.directory)
        if self.job == "getEF":
            self.get_data()
        # BUG FIX: this previously compared against 'makenrun', which is not
        # a valid job type ('makerun' per __init__ and the CLI choices), so
        # submission jobs were silently never built.
        elif self.job == 'make' or self.job == 'makerun':
            for i in range(7):
                for j in range(len(self.eta)):
                    strdir = "s" + str(i + 1) + "_%.2f" % (self.eta[j] * 100)
                    os.mkdir(strdir)  # Make directory si_j
                    # copy relevant files to new directory
                    shutil.copy("Reunit.dat", strdir)
                    shutil.copy("Re_3.snapcoeff", strdir)
                    shutil.copy("Re_3.snapparam", strdir)
                    shutil.copy("input.lmp", strdir)
                    shutil.copy("submit", strdir)
                    os.chdir(strdir)
                    # Apply the deformation gradient for this (mode, eta).
                    cell = Structure("LAMMPSdat", "Reunit.dat")
                    cell.addstrain(self.f_tensor[i][j])
                    cell.write_lammps_data("Reunit.dat", "Re")
                    # submit the job
                    if self.job == 'makerun':
                        time.sleep(self.timer)
                        os.system('sbatch submit')
                    os.chdir('..')

    def get_data(self):
        """Harvest total energy and the six stress components from the final
        thermo line of each run's log.lammps, then write them to text files."""
        energy = []
        for m in range(7):
            energy_i = []
            stress = []
            for n in range(len(self.eta)):
                strdir = "s" + str(m + 1) + "_%.2f" % (self.eta[n] * 100)
                os.chdir(strdir)
                df = parse_lammps_log('log.lammps')
                last = df[0].iloc[-1, :]
                energy_i.append(float(last[["TotEng"]]))
                stress.append([float(last[[col]]) for col in
                               ("Pxx", "Pyy", "Pzz", "Pxy", "Pxz", "Pyz")])
                os.chdir('..')
            self.write_stress(m, stress)
            energy.append(energy_i)
        self.write_energy(energy)

    def write_stress(self, m, stress):
        """Write the stress table for strain mode *m* to file ``stress_<m+1>``.

        Args:
            m: zero-based strain-mode index.
            stress: one 6-component row [pxx, pyy, pzz, pxy, pxz, pyz]
                per eta value.
        """
        # Context manager guarantees the file is closed even on error
        # (original code leaked the handle on exceptions).
        with open("stress_" + str(m + 1), "w") as fs:
            fs.write("eta range: ")
            for j in range(len(self.eta)):
                fs.write("%.5f " % self.eta[j])
            fs.write("\n")
            fs.write("pxx pyy pzz pxy pxz pyz\n")
            for j in range(len(self.eta)):
                for k in range(6):
                    fs.write("%.5f " % stress[j][k])
                fs.write("\n")

    def write_energy(self, energy):
        """Write all collected energies (one row per strain mode) to
        file ``energy_data``.

        Args:
            energy: list of per-mode lists, each with one energy per eta.
        """
        with open("energy_data", "w") as fe:
            fe.write("eta range: \n")
            for m in range(len(self.eta)):
                fe.write("%.5f " % self.eta[m])
            fe.write("\n")
            fe.write("energy (eV): \n")
            for m in range(len(energy)):
                for n in range(len(self.eta)):
                    fe.write("%.5f " % energy[m][n])
                fe.write("\n")

    def get_f_tensor(self):
        """Build the 7 families of deformation-gradient matrices, one 3x3
        matrix per eta value.

        Modes 1-4 are normal (stretch) deformations with the marked diagonal
        entries set to sqrt(2*eta + 1); modes 5-7 are volume-conserving shear
        deformations with one diagonal entry sqrt(1 - 4*eta^2) and one
        off-diagonal entry 2*eta.

        Returns:
            list of 7 lists of 3x3 numpy arrays.
        """
        # (Refactor of 7 copy-pasted loops into data-driven construction;
        # output is numerically identical.)
        normal_modes = [
            [(0, 0)],            # mode 1: xx stretch
            [(0, 0), (1, 1)],    # mode 2: xx + yy stretch
            [(2, 2)],            # mode 3: zz stretch
            [(1, 1), (2, 2)],    # mode 4: yy + zz stretch
        ]
        shear_modes = [
            ((2, 2), (1, 2)),    # mode 5: yz shear
            ((2, 2), (0, 2)),    # mode 6: xz shear
            ((1, 1), (0, 1)),    # mode 7: xy shear
        ]
        f = []
        for diag_entries in normal_modes:
            mode = []
            for eta in self.eta:
                fi = np.eye(3)
                stretch = math.sqrt(2 * eta + 1)
                for r, c in diag_entries:
                    fi[r][c] = stretch
                mode.append(fi)
            f.append(mode)
        for (dr, dc), (sr, sc) in shear_modes:
            mode = []
            for eta in self.eta:
                fi = np.eye(3)
                fi[dr][dc] = math.sqrt(1 - 4 * (eta ** 2))
                fi[sr][sc] = 2 * eta
                mode.append(fi)
            f.append(mode)
        return f
def main(args):
    """Parse the command-line options, build an ElasticJob and run it.

    Args:
        args (list of str): command-line arguments (without the
            program name).
    """
    parser = argparse.ArgumentParser()
    # -d DIRECTORY -j {make,makerun,getEF} -t TIMER -min ETAMIN
    # -max ETAMAX -step ETASTEP
    parser.add_argument("-d", "--directory", type=str,
                        default=os.getcwd(), help="Working directory")
    parser.add_argument("-j", "--job", choices=['make', 'makerun', 'getEF'],
                        default="make", help="Job type")
    parser.add_argument("-t", "--timer", type=int, default=5,
                        help="Job submission interval")
    parser.add_argument("-min", "--etamin", type=float, default=-0.008,
                        help="eta min")
    parser.add_argument("-max", "--etamax", type=float, default=0.009,
                        help="eta max")
    parser.add_argument("-step", "--etastep", type=float, default=0.002,
                        help="eta step")
    opts = parser.parse_args(args)
    print("Working dir: ", opts.directory)
    print("Job type: ", opts.job)
    print("Timer: ", opts.timer)
    print("eta: ", opts.etamin, opts.etamax, opts.etastep)
    job_instance = ElasticJob(opts.directory,
                              job=opts.job,
                              timer=opts.timer,
                              etamin=opts.etamin,
                              etamax=opts.etamax,
                              etastep=opts.etastep)
    job_instance.do_job()
    print("Job done.")
if __name__ == '__main__':
    # Entry point: forward the CLI arguments (minus the program name)
    # to main().
    main(sys.argv[1:])
| [
"os.mkdir",
"argparse.ArgumentParser",
"math.sqrt",
"os.getcwd",
"os.system",
"time.sleep",
"numpy.arange",
"pymatgen.io.lammps.outputs.parse_lammps_log",
"numpy.eye",
"resnap.benchmark.structure.Structure",
"os.chdir",
"shutil.copy"
] | [((5548, 5573), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5571, 5573), False, 'import argparse\n'), ((827, 861), 'numpy.arange', 'np.arange', (['etamin', 'etamax', 'etastep'], {}), '(etamin, etamax, etastep)\n', (836, 861), True, 'import numpy as np\n'), ((1022, 1046), 'os.chdir', 'os.chdir', (['self.directory'], {}), '(self.directory)\n', (1030, 1046), False, 'import os\n'), ((4128, 4137), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4134, 4137), True, 'import numpy as np\n'), ((4161, 4191), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4170, 4191), False, 'import math\n'), ((4302, 4311), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4308, 4311), True, 'import numpy as np\n'), ((4335, 4365), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4344, 4365), False, 'import math\n'), ((4385, 4415), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4394, 4415), False, 'import math\n'), ((4526, 4535), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4532, 4535), True, 'import numpy as np\n'), ((4559, 4589), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4568, 4589), False, 'import math\n'), ((4700, 4709), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4706, 4709), True, 'import numpy as np\n'), ((4733, 4763), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4742, 4763), False, 'import math\n'), ((4783, 4813), 'math.sqrt', 'math.sqrt', (['(2 * self.eta[i] + 1)'], {}), '(2 * self.eta[i] + 1)\n', (4792, 4813), False, 'import math\n'), ((4924, 4933), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4930, 4933), True, 'import numpy as np\n'), ((4957, 4992), 'math.sqrt', 'math.sqrt', (['(1 - 4 * self.eta[i] ** 2)'], {}), '(1 - 4 * self.eta[i] ** 2)\n', (4966, 4992), False, 'import math\n'), ((5140, 5149), 'numpy.eye', 'np.eye', (['(3)'], {}), 
'(3)\n', (5146, 5149), True, 'import numpy as np\n'), ((5173, 5208), 'math.sqrt', 'math.sqrt', (['(1 - 4 * self.eta[i] ** 2)'], {}), '(1 - 4 * self.eta[i] ** 2)\n', (5182, 5208), False, 'import math\n'), ((5356, 5365), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (5362, 5365), True, 'import numpy as np\n'), ((5389, 5424), 'math.sqrt', 'math.sqrt', (['(1 - 4 * self.eta[i] ** 2)'], {}), '(1 - 4 * self.eta[i] ** 2)\n', (5398, 5424), False, 'import math\n'), ((5784, 5795), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5793, 5795), False, 'import os\n'), ((2402, 2418), 'os.chdir', 'os.chdir', (['strdir'], {}), '(strdir)\n', (2410, 2418), False, 'import os\n'), ((2440, 2470), 'pymatgen.io.lammps.outputs.parse_lammps_log', 'parse_lammps_log', (['"""log.lammps"""'], {}), "('log.lammps')\n", (2456, 2470), False, 'from pymatgen.io.lammps.outputs import parse_lammps_log\n'), ((2993, 3007), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (3001, 3007), False, 'import os\n'), ((1342, 1358), 'os.mkdir', 'os.mkdir', (['strdir'], {}), '(strdir)\n', (1350, 1358), False, 'import os\n'), ((1461, 1494), 'shutil.copy', 'shutil.copy', (['"""Reunit.dat"""', 'strdir'], {}), "('Reunit.dat', strdir)\n", (1472, 1494), False, 'import shutil\n'), ((1515, 1552), 'shutil.copy', 'shutil.copy', (['"""Re_3.snapcoeff"""', 'strdir'], {}), "('Re_3.snapcoeff', strdir)\n", (1526, 1552), False, 'import shutil\n'), ((1573, 1610), 'shutil.copy', 'shutil.copy', (['"""Re_3.snapparam"""', 'strdir'], {}), "('Re_3.snapparam', strdir)\n", (1584, 1610), False, 'import shutil\n'), ((1631, 1663), 'shutil.copy', 'shutil.copy', (['"""input.lmp"""', 'strdir'], {}), "('input.lmp', strdir)\n", (1642, 1663), False, 'import shutil\n'), ((1684, 1713), 'shutil.copy', 'shutil.copy', (['"""submit"""', 'strdir'], {}), "('submit', strdir)\n", (1695, 1713), False, 'import shutil\n'), ((1735, 1751), 'os.chdir', 'os.chdir', (['strdir'], {}), '(strdir)\n', (1743, 1751), False, 'import os\n'), ((1780, 1816), 
'resnap.benchmark.structure.Structure', 'Structure', (['"""LAMMPSdat"""', '"""Reunit.dat"""'], {}), "('LAMMPSdat', 'Reunit.dat')\n", (1789, 1816), False, 'from resnap.benchmark.structure import Structure\n'), ((2140, 2154), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (2148, 2154), False, 'import os\n'), ((2045, 2067), 'time.sleep', 'time.sleep', (['self.timer'], {}), '(self.timer)\n', (2055, 2067), False, 'import time\n'), ((2092, 2118), 'os.system', 'os.system', (['"""sbatch submit"""'], {}), "('sbatch submit')\n", (2101, 2118), False, 'import os\n')] |
import torch
from torch._C import device
import torch.nn as nn
import numpy as np
import random
BATCH_SIZE = 256  # number of transitions per training minibatch
GAMMA = 0.99  # discount factor for future rewards
INITIAL_EPSILON = 1  # starting exploration rate of the epsilon-greedy policy
DECAY_RATE = 1  # multiplicative epsilon decay applied once epsilon <= 0.05
REPLAY_SIZE = 50000  # maximum number of transitions kept in the replay buffer
TAU = 0.02  # soft-update (Polyak) rate for the target network
TARGET_NETWORK_REPLACE_FREQ = 100  # target network update frequency
# Fix all RNG seeds for reproducibility.
np.random.seed(2)
torch.manual_seed(2)
random.seed(2)
class NET(nn.Module):
    """Q-network: an MLP mapping state_dim -> 512 -> 256 -> 128 ->
    action_dim with ReLU activations on the hidden layers.
    """

    def __init__(self, state_dim, action_dim):
        super(NET, self).__init__()
        # Layer names (lin1..lin4) are kept so saved state_dicts stay
        # compatible.
        self.lin1 = nn.Sequential(nn.Linear(state_dim, 512), nn.ReLU())
        self.lin2 = nn.Sequential(nn.Linear(512, 256), nn.ReLU())
        self.lin3 = nn.Sequential(nn.Linear(256, 128), nn.ReLU())
        self.lin4 = nn.Linear(128, action_dim)

    def forward(self, state):
        """Return the Q-values for `state` (cast to float32)."""
        x = state.to(torch.float32)
        for hidden in (self.lin1, self.lin2, self.lin3):
            x = hidden(x)
        return self.lin4(x)
class ReplayBuffer:
    """FIFO experience-replay buffer with uniform random sampling."""

    def __init__(self, buffer_size, batch_size):
        self.buffer = []
        self.max_size = buffer_size
        self.batch_size = batch_size

    def push(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest when full."""
        while len(self.buffer) >= self.max_size:
            self.buffer.pop(0)
        self.buffer.append((state, action, reward, next_state, done))

    def get_batches(self):
        """Sample batch_size transitions and stack each field into an
        ndarray.

        Returns:
            tuple of ndarray: (states, actions, rewards, next_states,
            dones).
        """
        batch = random.sample(self.buffer, self.batch_size)
        states, actions, rewards, next_states, dones = (
            np.array(field) for field in zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)
class DQN(object):
    """Deep Q-learning agent with a target network and soft updates.

    Holds an evaluation network (trained by gradient descent) and a
    target network that slowly tracks it via Polyak averaging with
    rate TAU. Actions are selected epsilon-greedily during training.

    Args:
        state_dim (int): dimension of the state vector.
        action_dim (int): number of discrete actions.
        isTrain (bool): if True, explore epsilon-greedily in
            egreedy_action; if False, always act greedily.
    """

    def __init__(self, state_dim, action_dim, isTrain):
        # Fall back to CPU when no CUDA device is available, so the
        # agent also runs on CPU-only machines (the previous
        # hard-coded "cuda:0" failed there).
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.replay_buffer = ReplayBuffer(REPLAY_SIZE, BATCH_SIZE)
        self.time_step = 0
        self.tau = TAU
        self.epsilon = INITIAL_EPSILON
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.IsTrain = isTrain
        # Networks, loss function and optimizer.
        self.eval_net, self.target_net = NET(
            self.state_dim, self.action_dim), NET(self.state_dim, self.action_dim)
        self.eval_net, self.target_net = self.eval_net.to(
            self.device), self.target_net.to(self.device)
        self.loss_fun = nn.MSELoss()
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=5e-5)
        self.LOSS = 0  # running loss sum, printed every 10000 steps

    def egreedy_action(self, state):
        """Select an action for `state` with an epsilon-greedy policy.

        Epsilon decays linearly by 4e-5 per call down to 0.05, then is
        multiplied by DECAY_RATE on every subsequent call.

        Args:
            state (ndarray): state vector of length state_dim.

        Returns:
            int: index of the chosen action.
        """
        state = torch.from_numpy(
            state).view(-1, self.state_dim).to(self.device)
        Q_next = self.target_net(state).detach()
        if self.epsilon > 0.05:
            self.epsilon = self.epsilon - 0.00004
        else:
            self.epsilon *= DECAY_RATE
        if self.IsTrain and random.random() <= self.epsilon:
            # Explore: uniform random action.
            return random.randint(0, self.action_dim - 1)
        else:
            # Exploit: greedy action w.r.t. the target network.
            Q_next = Q_next.cpu()  # move data from GPU to CPU
            return np.argmax(Q_next.numpy())

    def action(self, state):
        """Return the greedy action for `state` (no exploration)."""
        state = torch.from_numpy(state).to(self.device)
        Q_next = self.target_net(state).detach().cpu().numpy()
        return np.argmax(Q_next)

    def train(self):
        """Run one gradient step on a sampled minibatch and softly
        update the target network.
        """
        self.time_step += 1
        state_batch, action_batch, reward_batch, next_state_batch, done = \
            self.replay_buffer.get_batches()
        state_batch = torch.tensor(state_batch).to(self.device)
        # Reshape actions/rewards/done flags to (BATCH_SIZE, 1).
        action_batch = torch.tensor(action_batch).view(
            BATCH_SIZE, 1).to(self.device)
        reward_batch = torch.tensor(reward_batch).view(
            BATCH_SIZE, 1).to(self.device)
        next_state_batch = torch.tensor(next_state_batch).to(self.device)
        done = torch.tensor(done).view(BATCH_SIZE, 1).to(self.device)
        # Q-values of the actions actually taken, shape (BATCH_SIZE, 1).
        Q_eval = self.eval_net(state_batch).gather(
            1, action_batch)
        # Next-state Q-values from the target network; detached so no
        # gradient flows through the target.
        Q_next = self.target_net(next_state_batch).detach()
        # Bellman target; (1 - done) zeroes the bootstrap term at
        # episode ends.
        Q_target = reward_batch + \
            (1-done) * GAMMA * \
            Q_next.max(1)[0].view(BATCH_SIZE, 1)
        loss = self.loss_fun(Q_eval, Q_target)
        self.LOSS += loss.item()
        if (self.time_step + 1) % 10000 == 0:
            print(self.time_step + 1, "loss:", self.LOSS / 10000)
            self.LOSS = 0
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()  # execute back propagation for one step
        # Soft (Polyak) update of the target network towards eval_net.
        for target_param, param in zip(self.target_net.parameters(), self.eval_net.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.tau) + param.data * self.tau)

    def load(self, name):
        """Load weights from file `name` into both networks."""
        self.target_net.load_state_dict(
            torch.load(name, map_location=self.device))
        self.eval_net.load_state_dict(
            self.target_net.state_dict())

    def save_paramaters(self, name):
        """Save the target network's weights to file `name`."""
        torch.save(self.target_net.state_dict(), name)
        print("victor:", name)
| [
"torch.nn.MSELoss",
"numpy.random.seed",
"torch.nn.ReLU",
"random.randint",
"numpy.argmax",
"random.sample",
"torch.manual_seed",
"torch.load",
"random.random",
"random.seed",
"numpy.array",
"torch.nn.Linear",
"torch.device",
"torch.tensor",
"torch.from_numpy"
] | [((237, 254), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (251, 254), True, 'import numpy as np\n'), ((255, 275), 'torch.manual_seed', 'torch.manual_seed', (['(2)'], {}), '(2)\n', (272, 275), False, 'import torch\n'), ((276, 290), 'random.seed', 'random.seed', (['(2)'], {}), '(2)\n', (287, 290), False, 'import random\n'), ((768, 794), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'action_dim'], {}), '(128, action_dim)\n', (777, 794), True, 'import torch.nn as nn\n'), ((1561, 1604), 'random.sample', 'random.sample', (['self.buffer', 'self.batch_size'], {}), '(self.buffer, self.batch_size)\n', (1574, 1604), False, 'import random\n'), ((1630, 1668), 'numpy.array', 'np.array', (['[_[0] for _ in sample_batch]'], {}), '([_[0] for _ in sample_batch])\n', (1638, 1668), True, 'import numpy as np\n'), ((1694, 1732), 'numpy.array', 'np.array', (['[_[1] for _ in sample_batch]'], {}), '([_[1] for _ in sample_batch])\n', (1702, 1732), True, 'import numpy as np\n'), ((1758, 1796), 'numpy.array', 'np.array', (['[_[2] for _ in sample_batch]'], {}), '([_[2] for _ in sample_batch])\n', (1766, 1796), True, 'import numpy as np\n'), ((1826, 1864), 'numpy.array', 'np.array', (['[_[3] for _ in sample_batch]'], {}), '([_[3] for _ in sample_batch])\n', (1834, 1864), True, 'import numpy as np\n'), ((1880, 1918), 'numpy.array', 'np.array', (['[_[4] for _ in sample_batch]'], {}), '([_[4] for _ in sample_batch])\n', (1888, 1918), True, 'import numpy as np\n'), ((2162, 2184), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2174, 2184), False, 'import torch\n'), ((2808, 2820), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2818, 2820), True, 'import torch.nn as nn\n'), ((3630, 3647), 'numpy.argmax', 'np.argmax', (['Q_next'], {}), '(Q_next)\n', (3639, 3647), True, 'import numpy as np\n'), ((489, 514), 'torch.nn.Linear', 'nn.Linear', (['state_dim', '(512)'], {}), '(state_dim, 512)\n', (498, 514), True, 'import torch.nn as nn\n'), ((528, 537), 
'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (535, 537), True, 'import torch.nn as nn\n'), ((595, 614), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (604, 614), True, 'import torch.nn as nn\n'), ((628, 637), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (635, 637), True, 'import torch.nn as nn\n'), ((695, 714), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (704, 714), True, 'import torch.nn as nn\n'), ((728, 737), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (735, 737), True, 'import torch.nn as nn\n'), ((3318, 3356), 'random.randint', 'random.randint', (['(0)', '(self.action_dim - 1)'], {}), '(0, self.action_dim - 1)\n', (3332, 3356), False, 'import random\n'), ((5258, 5300), 'torch.load', 'torch.load', (['name'], {'map_location': 'self.device'}), '(name, map_location=self.device)\n', (5268, 5300), False, 'import torch\n'), ((3266, 3281), 'random.random', 'random.random', ([], {}), '()\n', (3279, 3281), False, 'import random\n'), ((3512, 3535), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (3528, 3535), False, 'import torch\n'), ((3827, 3852), 'torch.tensor', 'torch.tensor', (['state_batch'], {}), '(state_batch)\n', (3839, 3852), False, 'import torch\n'), ((4136, 4166), 'torch.tensor', 'torch.tensor', (['next_state_batch'], {}), '(next_state_batch)\n', (4148, 4166), False, 'import torch\n'), ((2976, 2999), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (2992, 2999), False, 'import torch\n'), ((3892, 3918), 'torch.tensor', 'torch.tensor', (['action_batch'], {}), '(action_batch)\n', (3904, 3918), False, 'import torch\n'), ((4012, 4038), 'torch.tensor', 'torch.tensor', (['reward_batch'], {}), '(reward_batch)\n', (4024, 4038), False, 'import torch\n'), ((4198, 4216), 'torch.tensor', 'torch.tensor', (['done'], {}), '(done)\n', (4210, 4216), False, 'import torch\n')] |
"""Module for global optimization using
the Neighbourhood algorithm.
"""
from pytomo.inversion.inversionresult import InversionResult
from pytomo.inversion import modelutils
from pytomo.inversion.umcutils import UniformMonteCarlo
from pytomo.inversion.umcutils import get_best_models, process_outputs
import pytomo.inversion.voronoi as voronoi
from pytomo import utilities
from dsmpy.seismicmodel import SeismicModel
from dsmpy.modelparameters import ModelParameters, ParameterType
from dsmpy.dataset import Dataset
from dsmpy.dsm import compute_models_parallel
from dsmpy.windowmaker import WindowMaker
from dsmpy.component import Component
import numpy as np
from mpi4py import MPI
import matplotlib.pyplot as plt
import time
import sys
import os
import glob
import warnings
class InputFile:
    """Input file for NeighbourAlgorithm (NA) inversion.

    Args:
        input_file (str): path of NA input file
    """

    # Keys grouped by how their (single) value token is parsed.
    _INT_KEYS = {'nspc', 'sampling_hz', 'n_mod', 'n_s', 'n_r', 'mode',
                 'verbose', 'seed'}
    _FLOAT_KEYS = {'tlen', 'freq', 'freq2', 'distance_min', 'distance_max',
                   't_before', 't_after', 'convergence_threshold'}
    _PATH_KEYS = {'result_path', 'stf_catalog'}

    def __init__(self, input_file):
        self.input_file = input_file

    def read(self):
        """Read from the input file into a dict.

        Returns:
            dict: input file parameters
        """
        params = {
            'verbose': 0,
            'filter_type': None,
            'seed': 42,
            'stf_catalog': None,
            'misfit_type': 'variance',
            'misfit_kwargs': None,
        }
        with open(self.input_file, 'r') as f:
            for line in f:
                if line.strip().startswith('#'):
                    continue
                key, value = self._parse_line(line)
                if key is not None:
                    params[key] = value
        if 'phases' in params:
            assert 'components' in params
            assert len(params['phases']) == len(params['components'])
        assert params['n_s'] % params['n_r'] == 0
        assert params['n_mod'] % params['n_s'] == 0
        return params

    def _parse_line(self, line):
        """Parse one non-comment line into a (key, value) pair;
        returns (None, None) and prints a warning for unknown keys."""
        tokens = line.strip().split()
        key, value = tokens[:2]
        if key in self._INT_KEYS:
            value_parsed = int(value)
        elif key in self._FLOAT_KEYS:
            value_parsed = float(value)
        elif key in self._PATH_KEYS:
            value_parsed = os.path.expanduser(value.strip())
        elif key == 'filter_type':
            value_parsed = value.strip().lower()
        elif key == 'misfit_type':
            value_parsed = value.strip()
        elif key == 'phases':
            value_parsed = [s.strip() for s in tokens[1:]]
        elif key == 'components':
            value_parsed = [Component.parse_component(s.strip())
                            for s in tokens[1:]]
        elif key == 'misfit_kwargs':
            value_parsed = {x.split(':')[0]: int(x.split(':')[1])
                            for x in tokens[1:]}
        else:
            print('Warning: key {} undefined. Ignoring.'.format(key))
            return None, None
        return key, value_parsed
class NeighbouhoodAlgorithm:
    """Implements the Neighbourhood Algorithm

    Args:
        dataset (Dataset): dataset.
        model_ref (SeismicModel): reference seismic model.
        model_params (ModelParameters): model parameters.
        range_dict (dict): range of sampled perturbations. Entries
            are of type ParameterType:ndarray of shape
            (n_nodes, 2).
        tlen (float): duration of the synthetics (in seconds)
            (better to be 2**n/10).
        nspc (int): number of frequency points in the synthetics
            (better to be 2**n).
        sampling_hz (int): sampling frequency of the synthetics.
        mode (int): computation mode. 0: both, 1: P-SV, 2: SH.
        n_mod (int): maximum number of models sampled.
        n_s (int): number of models at each step of the NA
            (must have n_mod % n_s = 0)
        n_r (int): number of best-fit models retained at each step of the NA
            (must have n_s % n_r = 0)
        phases (list of str): list of seismic phases.
        components (list of Component): seismic components.
        t_before (float): time (in seconds) before each phase arrival
            time when creating time windows.
        t_after (float): time (in seconds) after each phase arrival
            time when creating time windows.
        filter_type (str): 'bandpass' or 'lowpass'.
        freq (float): minimum frequency of the filter (in Hz)
        freq2 (float): maximum frequency of the filter (in Hz). Used
            only for bandpass filter.
        distance_min (float): minimum epicentral distance (in degree).
        distance_max (float): maximum epicentral distance (in degree).
        convergence_threshold (float): convergence threshold
        stf_catalog (str): path to a source time function catalog.
        result_path (str): path to the output folder.
        misfit_type (str): misfit used to select the best models.
            ('variance', 'corr', 'rolling_variance'),
            (default is 'variance')
        misfit_kwargs (dict): kwargs for the misfit function.
            Used for rolling_variance. See umcutils.rolling_variance.
        seed (int): seed for the random generator.
        verbose (int): verbosity level.
        comm (MPI_COMM_WORLD): MPI communicator.
    """

    def __init__(
            self, dataset, model_ref, model_params, range_dict, tlen, nspc,
            sampling_hz, mode, n_mod, n_s, n_r,
            phases, components, t_before, t_after, filter_type,
            freq, freq2, distance_min, distance_max,
            convergence_threshold,
            stf_catalog, result_path, misfit_type, misfit_kwargs,
            seed, verbose, comm):
        self.dataset = dataset
        self.model_ref = model_ref
        self.model_params = model_params
        self.range_dict = range_dict
        self.tlen = tlen
        self.nspc = nspc
        self.sampling_hz = sampling_hz
        self.mode = mode
        self.n_mod = n_mod
        self.n_s = n_s
        self.n_r = n_r
        self.phases = phases
        self.components = components
        self.t_before = t_before
        self.t_after = t_after
        self.filter_type = filter_type
        self.freq = freq
        self.freq2 = freq2
        self.distance_min = distance_min
        self.distance_max = distance_max
        self.convergence_threshold = convergence_threshold
        self.stf_catalog = stf_catalog
        self.misfit_type = misfit_type
        # Normalize None to an empty dict: InputFile defaults
        # misfit_kwargs to None, and _compute_one_step splats it as
        # '**self.misfit_kwargs', which fails on None.
        self.misfit_kwargs = misfit_kwargs if misfit_kwargs is not None else {}
        self.seed = seed
        self.verbose = verbose
        self.comm = comm
        # Dedicated RNG for the Gibbs-sampler draws in compute().
        self.rng_gibbs = np.random.default_rng(seed)
        # Rank 0 creates a fresh, timestamped output directory and
        # broadcasts its name to all ranks.
        if comm.Get_rank() == 0:
            out_dir = 'output_' + utilities.get_temporary_str()
            os.mkdir(out_dir)
        else:
            out_dir = None
        self.out_dir = comm.bcast(out_dir, root=0)
        self.result_path = os.path.join(self.out_dir, result_path)
        assert misfit_type in {'corr', 'variance', 'rolling_variance'}
        if misfit_type == 'rolling_variance':
            assert 'size' in misfit_kwargs and 'stride' in misfit_kwargs
@classmethod
def from_file(
cls, input_file, model_ref, model_params,
range_dict, dataset, comm):
"""Build a NeighbourhoodAlgorithm object from an input file and
key inputs.
Args:
input_file (str): path to an input file.
model_ref (SeismicModel): reference seismic model.
model_params (ModelParameters): model parameters.
range_dict (dict): range of sampled perturbations. Entries
are of type ParameterType:ndarray of shape
(n_nodes, 2).
dataset (Dataset): dataset
comm (MPI_COMM_WOLRD): MPI communicator
Returns:
NeighbourhoodAlgorithm: NeighbourhoodAlgorithm object
"""
params = InputFile(input_file).read()
tlen = params['tlen']
nspc = params['nspc']
sampling_hz = params['sampling_hz']
mode = params['mode']
n_mod = params['n_mod']
n_s = params['n_s']
n_r = params['n_r']
phases = params['phases']
components = params['components']
t_before = params['t_before']
t_after = params['t_after']
filter_type = params['filter_type']
freq = params['freq']
freq2 = params['freq2']
distance_min = params['distance_min']
distance_max = params['distance_max']
stf_catalog = params['stf_catalog']
result_path = params['result_path']
seed = params['seed']
verbose = params['verbose']
convergence_threshold = params['convergence_threshold']
misfit_type = params['misfit_type']
misfit_kwargs = params['misfit_kwargs']
# fix
dataset.tlen = tlen
dataset.nspc = nspc
return cls(
dataset, model_ref, model_params, range_dict, tlen, nspc,
sampling_hz, mode, n_mod, n_s, n_r,
phases, components, t_before, t_after, filter_type,
freq, freq2, distance_min, distance_max,
convergence_threshold,
stf_catalog, result_path, misfit_type, misfit_kwargs,
seed, verbose, comm)
def get_meta(self):
"""Return the meta parameters of the NeighbourhoodAlgorithm
object.
Returns:
dict: dict with meta parameters
"""
return dict(
range_dict=self.range_dict, tlen=self.tlen, nspc=self.nspc,
sampling_hz=self.sampling_hz, mode=self.mode, n_mod=self.n_mod,
n_s=self.n_s, n_r=self.n_r, phases=self.phases,
components=self.components, t_before=self.t_before,
t_after=self.t_after, filter_type=self.filter_type,
freq=self.freq, freq2=self.freq2,
distance_min=self.distance_min, distance_max=self.distance_max,
convergence_threshold=self.convergence_threshold,
stf_catalog=self.stf_catalog, result_path=self.result_path,
seed=self.seed, verbose=self.verbose, out_dir=self.out_dir,
misfit_type=self.misfit_type
)
@classmethod
def from_file_with_default(cls, input_file_path, dataset, comm):
"""Build a NeighbourhoodAlgorithm from an input file and
default values.
Args:
input_file_path (str): path of the input file
dataset (Dataset): dataset.
comm (COMM_WORLD): MPI communicator
Returns:
NeighbourhoodAlgorithm: NeighbourhoodAlgorithm object
"""
params = InputFile(input_file_path).read()
# define default model parameters
types = [ParameterType.VSH]
n_upper_mantle = 0
n_mtz = 0
n_lower_mantle = 0
n_dpp = 4
model_ref, model_params = modelutils.std_boxcar_mesh(
n_upper_mantle, n_mtz, n_lower_mantle, n_dpp, types,
verbose=params['verbose'])
# define default parameter ranges
range_dict = dict()
for param_type in types:
range_arr = np.empty(
(model_params._n_grd_params, 2), dtype='float')
range_arr[:, 0] = -0.5
range_arr[:, 1] = 0.5
range_dict[param_type] = range_arr
return cls.from_file(
input_file_path, model_ref, model_params,
range_dict, dataset, comm)
def _get_windows(self):
"""Compute the time windows.
Returns:
list of Window: time windows
"""
windows = []
for i in range(len(self.phases)):
windows_tmp = WindowMaker.windows_from_dataset(
self.dataset, 'ak135', [self.phases[i]],
[self.components[i]],
t_before=self.t_before, t_after=self.t_after)
windows += windows_tmp
windows = [
w for w in windows
if
self.distance_min <= w.get_epicentral_distance()
<= self.distance_max]
return windows
@staticmethod
def _get_points_for_voronoi(perturbations, range_dict, types):
"""Get the voronoi points. These are the perturbations points
scaled to their maximum range (range_dict).
Args:
perturbations (list of ndarray): list of model perturbations.
Return:
ndarray: voronoi points.
"""
scale_arr = np.hstack(
[range_dict[p][:, 1] - range_dict[p][:, 0]
for p in types])
points = np.array(perturbations)
points = np.true_divide(
points, scale_arr, out=np.zeros_like(points),
where=(scale_arr != 0))
return points
def _get_bounds_for_voronoi(self):
min_bounds = np.zeros(
self.model_params._n_grd_params * len(self.model_params._types),
dtype='float')
max_bounds = np.zeros(
self.model_params._n_grd_params * len(self.model_params._types),
dtype='float')
for itype in range(len(self.model_params._types)):
for igrd in range(self.model_params._n_grd_params):
i = igrd + itype * self.model_params._n_grd_params
min_bounds[i] = self.range_dict[
self.model_params._types[itype]][igrd, 0]
max_bounds[i] = self.range_dict[
self.model_params._types[itype]][igrd, 1]
# scale
if (max_bounds[i] - min_bounds[i]) != 0:
min_bounds[i] /= (max_bounds[i] - min_bounds[i])
max_bounds[i] /= (max_bounds[i] - min_bounds[i])
return min_bounds, max_bounds
    def _compute_one_step(
            self, umcutils, dataset, models, misfit_dict, perturbations,
            result, windows, comm):
        """Compute synthetics for `models` in parallel across MPI ranks
        and, on rank 0 only, evaluate their misfits against `dataset`
        and append models/misfits/perturbations to `result`.

        Args:
            umcutils (UniformMonteCarlo): sampler (not used in this
                method; kept in the signature for the callers).
            dataset (Dataset): observed data.
            models (list of SeismicModel): models to evaluate.
            perturbations (list of ndarray): perturbations that
                produced `models`.
            result (InversionResult): result container, updated in
                place on rank 0.
            windows (list of Window): time windows for the misfit.
            comm (MPI_COMM_WORLD): MPI communicator.
        """
        # TODO URGENT fix zero output when n_model % n_core != 0
        rank = comm.Get_rank()
        outputs = compute_models_parallel(
            dataset, models, self.tlen, self.nspc,
            self.sampling_hz, mode=self.mode,
            verbose=self.verbose)
        # Only rank 0 holds the gathered outputs, so only rank 0
        # computes the misfits and records the results.
        if rank == 0:
            misfit_dict = process_outputs(
                outputs, dataset, models, windows,
                self.freq, self.freq2, self.filter_type,
                **self.misfit_kwargs)
            result.add_result(models, misfit_dict, perturbations)
    def compute(self, comm, log=None):
        """Run the NA inversion.

        Args:
            comm (COMM_WORLD): MPI Communicator.
            log (str): path to a log file (default is None).

        Returns:
            InversionResult: inversion result object
        """
        rank = comm.Get_rank()
        if log is not None:
            log.write('Start running NA...\n')
        n_distinct_comp_phase = len(self.phases)
        free_indices = self.model_params.get_free_indices()
        print('free_indices: {}'.format(free_indices))
        # Total sampled models = n_pass * n_s: n_s new models per pass.
        n_pass = self.n_mod // self.n_s
        assert self.n_mod % self.n_s == 0
        if self.verbose > 1:
            print('n_pass={}'.format(n_pass))
        # Rank 0 draws the initial n_s models uniformly at random.
        if rank == 0:
            scale_arr = np.hstack(
                [self.range_dict[p][:, 1] - self.range_dict[p][:, 0]
                 for p in self.model_params._types])
            umcutils = UniformMonteCarlo(
                self.model_ref, self.model_params, self.range_dict,
                mesh_type='lininterp', seed=self.seed)
            models, perturbations = umcutils.sample_models(self.n_s)
        else:
            models = None
            perturbations = None
            umcutils = None
        self.range_dict = comm.bcast(self.range_dict, root=0)
        # Rank 0 filters the data and cuts it into phase windows; the
        # windowed dataset is then broadcast to all ranks.
        if rank == 0:
            if self.filter_type is not None:
                self.dataset.filter(self.freq, self.freq2, self.filter_type)
            windows = self._get_windows()
            npts_max = int((self.t_before + self.t_after) * self.sampling_hz)
            self.dataset.apply_windows(
                windows, n_distinct_comp_phase, npts_max, buffer=0.)
        else:
            windows = None
            self.dataset = None
        self.dataset = comm.bcast(self.dataset, root=0)
        result = InversionResult(
            self.dataset, windows, self.get_meta())
        if rank == 0:
            start_time = time.time_ns()
        # step 0
        self._compute_one_step(
            umcutils, self.dataset, models, perturbations, result,
            windows, comm)
        comm.Barrier()
        # steps 1,...,n_pass-1
        # NA resampling: each pass keeps the n_r best models so far and
        # draws n_s new models by a Gibbs-sampler walk inside each best
        # model's Voronoi cell.
        ipass = 1
        converged = False
        while (ipass < n_pass) and not converged:
            print(rank, ipass)
            if rank == 0:
                # indexing of points corresponds to that of
                # perturbations and of that of models
                points = NeighbouhoodAlgorithm._get_points_for_voronoi(
                    result.perturbations, self.range_dict,
                    self.model_params._types)
                min_bounds, max_bounds = self._get_bounds_for_voronoi()
                indices_best = get_best_models(
                    result.misfit_dict, self.n_r, self.misfit_type)
                models = []
                perturbations = []
                for imod in range(self.n_r):
                    ip = indices_best[imod]
                    # current_model = result.models[ip]
                    current_perturbations = np.array(result.perturbations[ip])
                    # Split the flat perturbation vector per parameter
                    # type (n_grd_params entries each).
                    value_dict = dict()
                    for i, param_type in enumerate(self.model_params._types):
                        value_dict[param_type] = current_perturbations[
                            (
                                i * self.model_params._n_grd_params)
                            :((
                                i + 1) * self.model_params._n_grd_params)]
                    n_step = int(self.n_s // self.n_r)
                    current_point = np.array(points[ip])
                    self.model_params.it = 0  # just to be sure
                    counter = 0
                    istep = 0
                    max_count = 3000
                    # Gibbs walk: perturb one model dimension at a time
                    # within the current Voronoi cell; max_count guards
                    # against a stalled sampler.
                    while istep < n_step and counter < max_count:
                        idim, itype, igrd = (
                            self.model_params.next())
                        self.model_params.it += 1
                        # NOTE(review): log is dereferenced here without
                        # a None check (cf. the 'if log:' guard below);
                        # compute(comm) with log=None would fail here on
                        # rank 0 — confirm and guard if needed.
                        log.write('{}'.format(free_indices))
                        log.write(
                            '{} {} {} {}\n'.format(
                                istep, idim, itype, igrd))
                        # calculate bound
                        points_free = points[:, free_indices]
                        current_point_free = np.array(
                            current_point[free_indices])
                        idim_free = np.where(free_indices == idim)[0][0]
                        # Compute the cell bound twice with different
                        # n_nearest values as a consistency check.
                        tmp_bounds1 = voronoi.implicit_find_bound_for_dim(
                            points_free, points_free[ip],
                            current_point_free, idim_free, n_nearest=1000,
                            min_bound=min_bounds[idim],
                            max_bound=max_bounds[idim], step_size=0.001,
                            n_step_max=1000, log=log)
                        tmp_bounds2 = voronoi.implicit_find_bound_for_dim(
                            points_free, points_free[ip],
                            current_point_free, idim_free, n_nearest=1500,
                            min_bound=min_bounds[idim],
                            max_bound=max_bounds[idim], step_size=0.001,
                            n_step_max=1000, log=log)
                        if not np.allclose(tmp_bounds1, tmp_bounds2):
                            print(tmp_bounds1)
                            print(tmp_bounds2)
                            warnings.warn(
                                '''Problems with finding bounds
                                of Voronoi cell.
                                Please increase n_nearest.\n
                                {}\n{}'''.format(tmp_bounds1, tmp_bounds2))
                        bounds = tmp_bounds2
                        lo, up = bounds
                        max_per = self.range_dict[
                            self.model_params._types[itype]][igrd, 1]
                        min_per = self.range_dict[
                            self.model_params._types[itype]][igrd, 0]
                        scale = max_per - min_per
                        # correct the Voronoi cell bounds to avoid
                        # sampling outside of the user-defined bounds
                        max_per_i = current_point[idim] + up
                        min_per_i = current_point[idim] + lo
                        if max_per_i > max_per / scale:
                            up = max_per / scale - current_point[idim]
                        if min_per_i < min_per / scale:
                            lo = min_per / scale - current_point[idim]
                        # Only accept a move when the cell width along
                        # this dimension exceeds the convergence
                        # threshold; otherwise retry (counter grows).
                        if (up - lo) > self.convergence_threshold:
                            per = self.rng_gibbs.uniform(lo, up, 1)[0]
                            # per = self._bi_triangle(lo, up)
                            value_dict[
                                self.model_params._types[itype]
                            ][igrd] += per * scale
                            per_arr = np.hstack(
                                [value_dict[p]
                                 for p in self.model_params._types]
                            )
                            log.write('{}\n'.format(per_arr))
                            new_model = self.model_ref.build_model(
                                umcutils.mesh, self.model_params,
                                value_dict)
                            models.append(new_model)
                            if log:
                                log.write(
                                    '{} {} {} {} {} {} {:.3f} '
                                    '{:.3f} {:.3f} {}\n'
                                    .format(rank, ipass, imod, istep, idim,
                                            per_arr, lo, up, per, scale))
                            current_point = np.true_divide(
                                per_arr, scale_arr,
                                out=np.zeros_like(per_arr),
                                where=(scale_arr != 0))
                            perturbations.append(per_arr)
                            istep += 1
                        counter += 1
            else:
                models = None
                counter = None
                max_count = None
            counter = comm.bcast(counter, root=0)
            max_count = comm.bcast(max_count, root=0)
            # counter == max_count means the Gibbs sampler stalled; in
            # that case skip the computation and treat it as converged.
            if counter < max_count:
                self._compute_one_step(
                    umcutils, self.dataset, models, perturbations,
                    result, windows, comm)
            # check convergence
            if rank == 0:
                if counter == max_count:
                    converged = True
                elif ipass > 2:
                    # TODO smooth or not smooth?
                    perturbations_diff = result.get_model_perturbations_diff(
                        self.n_r, scale=scale_arr, smooth=False, n_s=self.n_s)
                    perturbations_diff_free = perturbations_diff[
                        :, free_indices]
                    converged = (
                        # (perturbations_diff_free[-2:]
                        (perturbations_diff_free[-2 * self.n_s:]
                         <= self.convergence_threshold).all())
            converged = comm.bcast(converged, root=0)
            ipass += 1
            comm.Barrier()
        # Rank 0 saves the result and diagnostic figures.
        if rank == 0:
            end_time = time.time_ns()
            result.save(self.result_path)
            if self.verbose > 0:
                if log is not None:
                    log.write('Models and misfits computation done in {} s\n'
                              .format((end_time - start_time) * 1e-9))
                    log.write(
                        'Results saved to \'{}\''.format(self.result_path))
                else:
                    print('Models and misfits computation done in {} s'
                          .format((end_time - start_time) * 1e-9))
                    print('Results saved to \'{}\''.format(self.result_path))
            conv_curve_path = os.path.join(
                self.out_dir, 'convergence_curve.pdf')
            result.save_convergence_curve(
                conv_curve_path, scale_arr, free_indices, smooth=False)
            var_curve_path = os.path.join(
                self.out_dir, 'variance_curve.pdf')
            result.save_variance_curve(var_curve_path, smooth=False)
        return result
def _bi_triangle_cfd_inv(self, x, a, b):
aa = np.abs(a)
h = 2. / (aa + b)
if x < h * aa / 4.:
y = np.sqrt(x * aa / h) - aa
elif x < h * aa / 2.:
y = -np.sqrt(aa * aa / 2. - x * aa / h)
elif x < (h * aa / 2. + h * b / 4.):
y = np.sqrt(x * b / h - aa * b / 2.)
else:
y = -np.sqrt(b / h * (1 - x)) + b
return y
def _bi_triangle(self, a, b):
if a == b == 0:
return 0.
assert (a < b) and (a <= 0) and (b >= 0)
x_unif = self.rng_gibbs.uniform(0, 1, 1)[0]
x = self._bi_triangle_cfd_inv(x_unif, a, b)
return x
if __name__ == '__main__':
comm = MPI.COMM_WORLD
n_cores = comm.Get_size()
rank = comm.Get_rank()
dataset = None
na = NeighbouhoodAlgorithm.from_file_with_default(
sys.argv[1], dataset, comm)
log_path = os.path.join(
na.out_dir, 'log_{}'.format(rank))
log = open(log_path, 'w', buffering=1)
if rank == 0:
start_time = time.time_ns()
result = na.compute(comm, log)
if rank == 0:
end_time = time.time_ns()
print(
'NA finished in {} s'
.format((end_time - start_time) * 1e-9))
log.close()
if rank == 0:
fig, ax = result.plot_models(
types=[ParameterType.VSH], n_best=1,
color='black', label='best model')
_, ax = na.model_ref.plot(
types=[ParameterType.VSH], ax=ax,
color='gray', label='ref')
ax.set(
ylim=[3480, 4000],
xlim=[6.5, 8.])
ax.legend()
fig_path = os.path.join(
na.out_dir, 'inverted_models.pdf')
plt.savefig(
fig_path,
bbox_inches='tight')
plt.close(fig)
| [
"os.mkdir",
"numpy.abs",
"numpy.empty",
"numpy.allclose",
"numpy.random.default_rng",
"os.path.join",
"dsmpy.windowmaker.WindowMaker.windows_from_dataset",
"numpy.zeros_like",
"matplotlib.pyplot.close",
"pytomo.utilities.get_temporary_str",
"dsmpy.dsm.compute_models_parallel",
"pytomo.inversio... | [((7853, 7880), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (7874, 7880), True, 'import numpy as np\n'), ((8129, 8168), 'os.path.join', 'os.path.join', (['self.out_dir', 'result_path'], {}), '(self.out_dir, result_path)\n', (8141, 8168), False, 'import os\n'), ((12129, 12239), 'pytomo.inversion.modelutils.std_boxcar_mesh', 'modelutils.std_boxcar_mesh', (['n_upper_mantle', 'n_mtz', 'n_lower_mantle', 'n_dpp', 'types'], {'verbose': "params['verbose']"}), "(n_upper_mantle, n_mtz, n_lower_mantle, n_dpp,\n types, verbose=params['verbose'])\n", (12155, 12239), False, 'from pytomo.inversion import modelutils\n'), ((13733, 13804), 'numpy.hstack', 'np.hstack', (['[(range_dict[p][:, 1] - range_dict[p][:, 0]) for p in types]'], {}), '([(range_dict[p][:, 1] - range_dict[p][:, 0]) for p in types])\n', (13742, 13804), True, 'import numpy as np\n'), ((13847, 13870), 'numpy.array', 'np.array', (['perturbations'], {}), '(perturbations)\n', (13855, 13870), True, 'import numpy as np\n'), ((15240, 15363), 'dsmpy.dsm.compute_models_parallel', 'compute_models_parallel', (['dataset', 'models', 'self.tlen', 'self.nspc', 'self.sampling_hz'], {'mode': 'self.mode', 'verbose': 'self.verbose'}), '(dataset, models, self.tlen, self.nspc, self.\n sampling_hz, mode=self.mode, verbose=self.verbose)\n', (15263, 15363), False, 'from dsmpy.dsm import compute_models_parallel\n'), ((26296, 26305), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (26302, 26305), True, 'import numpy as np\n'), ((27285, 27299), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (27297, 27299), False, 'import time\n'), ((27374, 27388), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (27386, 27388), False, 'import time\n'), ((27899, 27946), 'os.path.join', 'os.path.join', (['na.out_dir', '"""inverted_models.pdf"""'], {}), "(na.out_dir, 'inverted_models.pdf')\n", (27911, 27946), False, 'import os\n'), ((27968, 28010), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['fig_path'], {'bbox_inches': '"""tight"""'}), "(fig_path, bbox_inches='tight')\n", (27979, 28010), True, 'import matplotlib.pyplot as plt\n'), ((28044, 28058), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (28053, 28058), True, 'import matplotlib.pyplot as plt\n'), ((7991, 8008), 'os.mkdir', 'os.mkdir', (['out_dir'], {}), '(out_dir)\n', (7999, 8008), False, 'import os\n'), ((12389, 12445), 'numpy.empty', 'np.empty', (['(model_params._n_grd_params, 2)'], {'dtype': '"""float"""'}), "((model_params._n_grd_params, 2), dtype='float')\n", (12397, 12445), True, 'import numpy as np\n'), ((12938, 13084), 'dsmpy.windowmaker.WindowMaker.windows_from_dataset', 'WindowMaker.windows_from_dataset', (['self.dataset', '"""ak135"""', '[self.phases[i]]', '[self.components[i]]'], {'t_before': 'self.t_before', 't_after': 'self.t_after'}), "(self.dataset, 'ak135', [self.phases[i]], [\n self.components[i]], t_before=self.t_before, t_after=self.t_after)\n", (12970, 13084), False, 'from dsmpy.windowmaker import WindowMaker\n'), ((15445, 15562), 'pytomo.inversion.umcutils.process_outputs', 'process_outputs', (['outputs', 'dataset', 'models', 'windows', 'self.freq', 'self.freq2', 'self.filter_type'], {}), '(outputs, dataset, models, windows, self.freq, self.freq2,\n self.filter_type, **self.misfit_kwargs)\n', (15460, 15562), False, 'from pytomo.inversion.umcutils import get_best_models, process_outputs\n'), ((16433, 16537), 'numpy.hstack', 'np.hstack', (['[(self.range_dict[p][:, 1] - self.range_dict[p][:, 0]) for p in self.\n model_params._types]'], {}), '([(self.range_dict[p][:, 1] - self.range_dict[p][:, 0]) for p in\n self.model_params._types])\n', (16442, 16537), True, 'import numpy as np\n'), ((16590, 16702), 'pytomo.inversion.umcutils.UniformMonteCarlo', 'UniformMonteCarlo', (['self.model_ref', 'self.model_params', 'self.range_dict'], {'mesh_type': '"""lininterp"""', 'seed': 'self.seed'}), "(self.model_ref, self.model_params, self.range_dict,\n 
mesh_type='lininterp', seed=self.seed)\n", (16607, 16702), False, 'from pytomo.inversion.umcutils import UniformMonteCarlo\n'), ((17605, 17619), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (17617, 17619), False, 'import time\n'), ((25213, 25227), 'time.time_ns', 'time.time_ns', ([], {}), '()\n', (25225, 25227), False, 'import time\n'), ((25865, 25916), 'os.path.join', 'os.path.join', (['self.out_dir', '"""convergence_curve.pdf"""'], {}), "(self.out_dir, 'convergence_curve.pdf')\n", (25877, 25916), False, 'import os\n'), ((26079, 26127), 'os.path.join', 'os.path.join', (['self.out_dir', '"""variance_curve.pdf"""'], {}), "(self.out_dir, 'variance_curve.pdf')\n", (26091, 26127), False, 'import os\n'), ((7949, 7978), 'pytomo.utilities.get_temporary_str', 'utilities.get_temporary_str', ([], {}), '()\n', (7976, 7978), False, 'from pytomo import utilities\n'), ((13939, 13960), 'numpy.zeros_like', 'np.zeros_like', (['points'], {}), '(points)\n', (13952, 13960), True, 'import numpy as np\n'), ((18366, 18429), 'pytomo.inversion.umcutils.get_best_models', 'get_best_models', (['result.misfit_dict', 'self.n_r', 'self.misfit_type'], {}), '(result.misfit_dict, self.n_r, self.misfit_type)\n', (18381, 18429), False, 'from pytomo.inversion.umcutils import get_best_models, process_outputs\n'), ((26376, 26395), 'numpy.sqrt', 'np.sqrt', (['(x * aa / h)'], {}), '(x * aa / h)\n', (26383, 26395), True, 'import numpy as np\n'), ((18704, 18738), 'numpy.array', 'np.array', (['result.perturbations[ip]'], {}), '(result.perturbations[ip])\n', (18712, 18738), True, 'import numpy as np\n'), ((19331, 19351), 'numpy.array', 'np.array', (['points[ip]'], {}), '(points[ip])\n', (19339, 19351), True, 'import numpy as np\n'), ((26448, 26483), 'numpy.sqrt', 'np.sqrt', (['(aa * aa / 2.0 - x * aa / h)'], {}), '(aa * aa / 2.0 - x * aa / h)\n', (26455, 26483), True, 'import numpy as np\n'), ((26544, 26577), 'numpy.sqrt', 'np.sqrt', (['(x * b / h - aa * b / 2.0)'], {}), '(x * b / h - aa * b / 2.0)\n', 
(26551, 26577), True, 'import numpy as np\n'), ((20088, 20125), 'numpy.array', 'np.array', (['current_point[free_indices]'], {}), '(current_point[free_indices])\n', (20096, 20125), True, 'import numpy as np\n'), ((20267, 20491), 'pytomo.inversion.voronoi.implicit_find_bound_for_dim', 'voronoi.implicit_find_bound_for_dim', (['points_free', 'points_free[ip]', 'current_point_free', 'idim_free'], {'n_nearest': '(1000)', 'min_bound': 'min_bounds[idim]', 'max_bound': 'max_bounds[idim]', 'step_size': '(0.001)', 'n_step_max': '(1000)', 'log': 'log'}), '(points_free, points_free[ip],\n current_point_free, idim_free, n_nearest=1000, min_bound=min_bounds[\n idim], max_bound=max_bounds[idim], step_size=0.001, n_step_max=1000,\n log=log)\n', (20302, 20491), True, 'import pytomo.inversion.voronoi as voronoi\n'), ((20658, 20882), 'pytomo.inversion.voronoi.implicit_find_bound_for_dim', 'voronoi.implicit_find_bound_for_dim', (['points_free', 'points_free[ip]', 'current_point_free', 'idim_free'], {'n_nearest': '(1500)', 'min_bound': 'min_bounds[idim]', 'max_bound': 'max_bounds[idim]', 'step_size': '(0.001)', 'n_step_max': '(1000)', 'log': 'log'}), '(points_free, points_free[ip],\n current_point_free, idim_free, n_nearest=1500, min_bound=min_bounds[\n idim], max_bound=max_bounds[idim], step_size=0.001, n_step_max=1000,\n log=log)\n', (20693, 20882), True, 'import pytomo.inversion.voronoi as voronoi\n'), ((21042, 21079), 'numpy.allclose', 'np.allclose', (['tmp_bounds1', 'tmp_bounds2'], {}), '(tmp_bounds1, tmp_bounds2)\n', (21053, 21079), True, 'import numpy as np\n'), ((22760, 22820), 'numpy.hstack', 'np.hstack', (['[value_dict[p] for p in self.model_params._types]'], {}), '([value_dict[p] for p in self.model_params._types])\n', (22769, 22820), True, 'import numpy as np\n'), ((26608, 26632), 'numpy.sqrt', 'np.sqrt', (['(b / h * (1 - x))'], {}), '(b / h * (1 - x))\n', (26615, 26632), True, 'import numpy as np\n'), ((20191, 20221), 'numpy.where', 'np.where', (['(free_indices == idim)'], 
{}), '(free_indices == idim)\n', (20199, 20221), True, 'import numpy as np\n'), ((23719, 23741), 'numpy.zeros_like', 'np.zeros_like', (['per_arr'], {}), '(per_arr)\n', (23732, 23741), True, 'import numpy as np\n'), ((3619, 3647), 'dsmpy.component.Component.parse_component', 'Component.parse_component', (['s'], {}), '(s)\n', (3644, 3647), False, 'from dsmpy.component import Component\n')] |
import numpy as np
def calc_optimal_grid(n):
# Try to distribute n images as best as possible
# For now: simple overestimation
sides = int(np.ceil(np.sqrt(n)))
return sides, sides
| [
"numpy.sqrt"
] | [((160, 170), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (167, 170), True, 'import numpy as np\n')] |
#!/usr/bin/python
# nlantau, 2021-11-01
import numpy as np
def snail(m):
f=[]
if len(m) < 1:
return [[]]
m = np.array(m)
f.extend(m[0])
while True:
if len(m[0]) < 1:
break
m = np.rot90(np.delete(m, 0, 0))
f.extend(m[0])
return f
a = [
[1,2,3],
[8,9,4],
[7,6,5]
]
print(snail(a))
| [
"numpy.array",
"numpy.delete"
] | [((131, 142), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (139, 142), True, 'import numpy as np\n'), ((244, 262), 'numpy.delete', 'np.delete', (['m', '(0)', '(0)'], {}), '(m, 0, 0)\n', (253, 262), True, 'import numpy as np\n')] |
import sys
sys.path.append("../src")
import numpy as np
import tensorflow as tf
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
import h5py
import os
from draw import DRAW
from counter import *
print("PID :", os.getpid())
def longform_latent(latent,):
longform_samples = np.zeros((latent.shape[1], latent.shape[0] * latent.shape[2]))
latent_dim = latent.shape[2]
for i, evts in enumerate(latent):
longform_samples[:, i * latent_dim : (i + 1) * latent_dim] = evts
return longform_samples
def compute_accuracy(X, y):
model = LogisticRegression(
solver="liblinear", multi_class="ovr", class_weight="balanced"
)
mlp = MLPClassifier(
max_iter=1000,
batch_size=10,
hidden_layer_sizes=(80, 30, 10),
early_stopping=True,
learning_rate_init=0.01,
)
mlp.fit(X, y)
model.fit(X, y)
logreg_score = np.average(cross_val_score(model, X, y, cv=4))
mlp_score = np.average(cross_val_score(mlp, X, y, cv=4))
return logreg_score, mlp_score
enc_size = 800
dec_size = 800
latent_dim = 3
batch_size = 86
all_data = np.load("../data/processed/all_0130.npy")
train_data = np.load("../data/processed/train.npy")
with h5py.File("../data/images.h5", "r") as fo:
train_targets = np.array(fo["train_targets"])[:-1]
# all_data = np.concatenate((train_data, test_data), axis=0)
treshold_value = 0.4
treshold_data = False
if treshold_data:
all_data[all_data < treshold_value] = 0
delta_range = np.linspace(0.8, 1.1, 6)
t_range = np.arange(3, 8)
# t_range = t_range[::-1]
N_range = np.arange(55, 95, 10)
N_range = N_range[::-1]
repeats = 2
epochs = 20
loss_record_shape = (len(delta_range), len(N_range), len(t_range), repeats, 2, epochs)
accuracy_record_shape = (len(delta_range), len(N_range), len(t_range), repeats, 2)
hyperparam_vals = np.array((delta_range, N_range, t_range, repeats, [0, 1], epochs))
loss_record = np.zeros(loss_record_shape)
classification_recod = np.zeros(accuracy_record_shape)
for i, delta in enumerate(delta_range):
delta_write = delta
delta_read = delta
for j, N in enumerate(N_range):
for k, T in enumerate(t_range):
for l in range(repeats):
read_N = N
write_N = N
array_delta_w = np.zeros((batch_size, 1))
array_delta_w.fill(delta_write)
array_delta_w = array_delta_w.astype(np.float32)
array_delta_r = np.zeros((batch_size, 1))
array_delta_r.fill(delta_read)
array_delta_r = array_delta_r.astype(np.float32)
attn_config = {
"read_N": read_N,
"write_N": write_N,
"write_N_sq": write_N ** 2,
"delta_w": array_delta_w,
"delta_r": array_delta_r,
}
mode_config = {"simulated_mode": False}
draw_model = DRAW(
T,
dec_size,
enc_size,
latent_dim,
batch_size,
all_data,
attn_config=attn_config,
mode_config=mode_config,
)
graph_kwds = {"initializer": tf.initializers.glorot_normal}
loss_kwds = {"reconst_loss": None}
draw_model.CompileModel(graph_kwds, loss_kwds)
opt = tf.train.AdamOptimizer
opt_args = [1e-2]
opt_kwds = {"beta1": 0.5}
draw_model.computeGradients(opt, opt_args, opt_kwds)
sess = tf.InteractiveSession()
data_dir = "../drawing"
model_dir = "../models"
lx, lz, = draw_model.train(
sess,
epochs,
data_dir,
model_dir,
earlystopping=False,
save_checkpoints=False,
)
if any(np.isnan(lx)) or any(np.isnan(lz)):
sess.close()
continue
print()
loss_record[i, j, k, l, 0] = lx
loss_record[i, j, k, l, 1] = lz
draw_model.X = train_data
latent_values, _, _ = draw_model.generateLatent(
sess, "../drawing", (train_data,), save=False
)
latent_values = longform_latent(latent_values[0])
accuracies = compute_accuracy(latent_values, train_targets)
classification_recod[i, j, k, l] = accuracies
print("--------------------")
print("delta: ", delta, " N : ", N, " T : ", T)
print("logreg_acc", accuracies[0], "mlp_acc", accuracies[1])
print("---------------------")
# draw_model.generateSamples("../drawing", "../drawing")
sess.close()
np.save("../loss_records/delta_opt_{}.npy".format(RUN_COUNT), loss_record)
np.save("../loss_records/hyperparam_vals_{}.npy".format(RUN_COUNT), hyperparam_vals)
np.save("../loss_records/accuracy_vals{}.npy".format(RUN_COUNT), classification_recod)
with open("counter.py", "w") as fo:
fo.write("RUN_COUNT = {}".format(RUN_COUNT + 1))
| [
"sys.path.append",
"numpy.load",
"h5py.File",
"os.getpid",
"draw.DRAW",
"sklearn.model_selection.cross_val_score",
"numpy.zeros",
"numpy.isnan",
"sklearn.linear_model.LogisticRegression",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"sklearn.neural_network.MLPClassifier",
"tensorflow.I... | [((12, 37), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (27, 37), False, 'import sys\n'), ((1227, 1268), 'numpy.load', 'np.load', (['"""../data/processed/all_0130.npy"""'], {}), "('../data/processed/all_0130.npy')\n", (1234, 1268), True, 'import numpy as np\n'), ((1282, 1320), 'numpy.load', 'np.load', (['"""../data/processed/train.npy"""'], {}), "('../data/processed/train.npy')\n", (1289, 1320), True, 'import numpy as np\n'), ((1609, 1633), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.1)', '(6)'], {}), '(0.8, 1.1, 6)\n', (1620, 1633), True, 'import numpy as np\n'), ((1645, 1660), 'numpy.arange', 'np.arange', (['(3)', '(8)'], {}), '(3, 8)\n', (1654, 1660), True, 'import numpy as np\n'), ((1697, 1718), 'numpy.arange', 'np.arange', (['(55)', '(95)', '(10)'], {}), '(55, 95, 10)\n', (1706, 1718), True, 'import numpy as np\n'), ((1959, 2025), 'numpy.array', 'np.array', (['(delta_range, N_range, t_range, repeats, [0, 1], epochs)'], {}), '((delta_range, N_range, t_range, repeats, [0, 1], epochs))\n', (1967, 2025), True, 'import numpy as np\n'), ((2041, 2068), 'numpy.zeros', 'np.zeros', (['loss_record_shape'], {}), '(loss_record_shape)\n', (2049, 2068), True, 'import numpy as np\n'), ((2092, 2123), 'numpy.zeros', 'np.zeros', (['accuracy_record_shape'], {}), '(accuracy_record_shape)\n', (2100, 2123), True, 'import numpy as np\n'), ((321, 332), 'os.getpid', 'os.getpid', ([], {}), '()\n', (330, 332), False, 'import os\n'), ((389, 451), 'numpy.zeros', 'np.zeros', (['(latent.shape[1], latent.shape[0] * latent.shape[2])'], {}), '((latent.shape[1], latent.shape[0] * latent.shape[2]))\n', (397, 451), True, 'import numpy as np\n'), ((671, 758), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""liblinear"""', 'multi_class': '"""ovr"""', 'class_weight': '"""balanced"""'}), "(solver='liblinear', multi_class='ovr', class_weight=\n 'balanced')\n", (689, 758), False, 'from sklearn.linear_model 
import LogisticRegression\n'), ((779, 905), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'max_iter': '(1000)', 'batch_size': '(10)', 'hidden_layer_sizes': '(80, 30, 10)', 'early_stopping': '(True)', 'learning_rate_init': '(0.01)'}), '(max_iter=1000, batch_size=10, hidden_layer_sizes=(80, 30, 10),\n early_stopping=True, learning_rate_init=0.01)\n', (792, 905), False, 'from sklearn.neural_network import MLPClassifier\n'), ((1327, 1362), 'h5py.File', 'h5py.File', (['"""../data/images.h5"""', '"""r"""'], {}), "('../data/images.h5', 'r')\n", (1336, 1362), False, 'import h5py\n'), ((1019, 1053), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['model', 'X', 'y'], {'cv': '(4)'}), '(model, X, y, cv=4)\n', (1034, 1053), False, 'from sklearn.model_selection import cross_val_score\n'), ((1082, 1114), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['mlp', 'X', 'y'], {'cv': '(4)'}), '(mlp, X, y, cv=4)\n', (1097, 1114), False, 'from sklearn.model_selection import cross_val_score\n'), ((1390, 1419), 'numpy.array', 'np.array', (["fo['train_targets']"], {}), "(fo['train_targets'])\n", (1398, 1419), True, 'import numpy as np\n'), ((2418, 2443), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2426, 2443), True, 'import numpy as np\n'), ((2590, 2615), 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), '((batch_size, 1))\n', (2598, 2615), True, 'import numpy as np\n'), ((3084, 3200), 'draw.DRAW', 'DRAW', (['T', 'dec_size', 'enc_size', 'latent_dim', 'batch_size', 'all_data'], {'attn_config': 'attn_config', 'mode_config': 'mode_config'}), '(T, dec_size, enc_size, latent_dim, batch_size, all_data, attn_config=\n attn_config, mode_config=mode_config)\n', (3088, 3200), False, 'from draw import DRAW\n'), ((3784, 3807), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (3805, 3807), True, 'import tensorflow as tf\n'), ((4176, 4188), 'numpy.isnan', 'np.isnan', (['lx'], {}), '(lx)\n', 
(4184, 4188), True, 'import numpy as np\n'), ((4197, 4209), 'numpy.isnan', 'np.isnan', (['lz'], {}), '(lz)\n', (4205, 4209), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Nov-20-20 16:16
# @Author : <NAME> (<EMAIL>)
# @RefLink : https://www.tensorflow.org/api_docs/python/tf/keras/metrics/Metric?version=nightly
import tensorflow as tf
class ClassTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
class BinaryTruePositives(tf.keras.metrics.Metric):
def __init__(self, name='binary_true_positives', **kwargs):
super(BinaryTruePositives, self).__init__(name=name, **kwargs)
self.true_positives = self.add_weight(name='tp', initializer='zeros')
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, tf.bool) # Equivalent to that threshold=0.5
y_pred = tf.cast(y_pred, tf.bool)
values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
values = tf.cast(values, self.dtype)
if sample_weight is not None:
sample_weight = tf.cast(sample_weight, self.dtype)
sample_weight = tf.broadcast_to(sample_weight, values.shape)
values = tf.multiply(values, sample_weight)
self.true_positives.assign_add(tf.reduce_sum(values))
def result(self):
return self.true_positives
def main():
import numpy as np
binary_true_positives = BinaryTruePositives()
y_true = np.asarray([1, 1, 0, 0])
y_pred = np.asarray([0.981, 1, 0, 0.6]) # tp, tp, tn, fn
binary_true_positives.update_state(y_true, y_pred)
result = binary_true_positives.result().numpy()
print(f"binary_true_positives: {result}")
if __name__ == "__main__":
main()
| [
"tensorflow.reduce_sum",
"numpy.asarray",
"tensorflow.cast",
"tensorflow.multiply",
"tensorflow.broadcast_to",
"tensorflow.equal"
] | [((2148, 2172), 'numpy.asarray', 'np.asarray', (['[1, 1, 0, 0]'], {}), '([1, 1, 0, 0])\n', (2158, 2172), True, 'import numpy as np\n'), ((2186, 2216), 'numpy.asarray', 'np.asarray', (['[0.981, 1, 0, 0.6]'], {}), '([0.981, 1, 0, 0.6])\n', (2196, 2216), True, 'import numpy as np\n'), ((576, 600), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.bool'], {}), '(y_true, tf.bool)\n', (583, 600), True, 'import tensorflow as tf\n'), ((618, 642), 'tensorflow.cast', 'tf.cast', (['y_pred', 'tf.bool'], {}), '(y_pred, tf.bool)\n', (625, 642), True, 'import tensorflow as tf\n'), ((741, 768), 'tensorflow.cast', 'tf.cast', (['values', 'self.dtype'], {}), '(values, self.dtype)\n', (748, 768), True, 'import tensorflow as tf\n'), ((1469, 1493), 'tensorflow.cast', 'tf.cast', (['y_true', 'tf.bool'], {}), '(y_true, tf.bool)\n', (1476, 1493), True, 'import tensorflow as tf\n'), ((1547, 1571), 'tensorflow.cast', 'tf.cast', (['y_pred', 'tf.bool'], {}), '(y_pred, tf.bool)\n', (1554, 1571), True, 'import tensorflow as tf\n'), ((1670, 1697), 'tensorflow.cast', 'tf.cast', (['values', 'self.dtype'], {}), '(values, self.dtype)\n', (1677, 1697), True, 'import tensorflow as tf\n'), ((676, 698), 'tensorflow.equal', 'tf.equal', (['y_true', '(True)'], {}), '(y_true, True)\n', (684, 698), True, 'import tensorflow as tf\n'), ((700, 722), 'tensorflow.equal', 'tf.equal', (['y_pred', '(True)'], {}), '(y_pred, True)\n', (708, 722), True, 'import tensorflow as tf\n'), ((835, 869), 'tensorflow.cast', 'tf.cast', (['sample_weight', 'self.dtype'], {}), '(sample_weight, self.dtype)\n', (842, 869), True, 'import tensorflow as tf\n'), ((898, 942), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['sample_weight', 'values.shape'], {}), '(sample_weight, values.shape)\n', (913, 942), True, 'import tensorflow as tf\n'), ((964, 998), 'tensorflow.multiply', 'tf.multiply', (['values', 'sample_weight'], {}), '(values, sample_weight)\n', (975, 998), True, 'import tensorflow as tf\n'), ((1038, 1059), 'tensorflow.reduce_sum', 
'tf.reduce_sum', (['values'], {}), '(values)\n', (1051, 1059), True, 'import tensorflow as tf\n'), ((1605, 1627), 'tensorflow.equal', 'tf.equal', (['y_true', '(True)'], {}), '(y_true, True)\n', (1613, 1627), True, 'import tensorflow as tf\n'), ((1629, 1651), 'tensorflow.equal', 'tf.equal', (['y_pred', '(True)'], {}), '(y_pred, True)\n', (1637, 1651), True, 'import tensorflow as tf\n'), ((1764, 1798), 'tensorflow.cast', 'tf.cast', (['sample_weight', 'self.dtype'], {}), '(sample_weight, self.dtype)\n', (1771, 1798), True, 'import tensorflow as tf\n'), ((1827, 1871), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['sample_weight', 'values.shape'], {}), '(sample_weight, values.shape)\n', (1842, 1871), True, 'import tensorflow as tf\n'), ((1893, 1927), 'tensorflow.multiply', 'tf.multiply', (['values', 'sample_weight'], {}), '(values, sample_weight)\n', (1904, 1927), True, 'import tensorflow as tf\n'), ((1967, 1988), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['values'], {}), '(values)\n', (1980, 1988), True, 'import tensorflow as tf\n')] |
import os
import glob
import numpy as np
import xarray as xr
from ..archive import archive
from ..core.remo_ds import preprocess as remo_preprocess
from ..core import codes
soil_temps = ['TS', 'TSL', 'TEMP2', 'TSN', 'TD3', 'TD4', 'TD5']
def open_mfdataset(files, use_cftime=True, parallel=True, data_vars='minimal', chunks="auto",
coords='minimal', compat='override', drop=None, **kwargs):
"""optimized function for opening large cf datasets.
based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
"""
def drop_all_coords(ds):
return ds.reset_coords(drop=True)
ds = xr.open_mfdataset(files, parallel=parallel, decode_times=False, combine='by_coords',
preprocess=drop_all_coords, decode_cf=False, chunks=chunks,
data_vars=data_vars, coords=coords, compat=compat, **kwargs)
return xr.decode_cf(ds, use_cftime=use_cftime)
def open_mfdataset2(files, use_cftime=True, parallel=True, data_vars='minimal', chunks="auto",
coords='minimal', compat='override', drop=None, **kwargs):
"""optimized function for opening large cf datasets.
based on https://github.com/pydata/xarray/issues/1385#issuecomment-561920115
"""
ds = xr.open_mfdataset(files, parallel=parallel, decode_times=True, combine='by_coords',
decode_cf=True, chunks=chunks, use_cftime=use_cftime,
data_vars=data_vars, coords=coords, compat=compat, **kwargs)
return ds#xr.decode_cf(ds, use_cftime=use_cftime)
class VariableSet():
def __init__(self, varnames, name=None, long_name=None):
self.varnames = varnames
def extract(self, ds):
res = xr.merge([ds[var] for var in self.varnames if var in ds])
res.attrs = ds.attrs
return res
def stack(self, ds):
return stack_variables(ds, self.varnames)
soil = VariableSet(soil_temps, name='soil_temperature', long_name='soil temperature')
def stack_variables(ds, varnames, name=None, long_name=None):
found = [ds[var] for var in varnames if var in ds]
dim = xr.DataArray(data=[var.name for var in found], dims='var', name='var')
stack = xr.concat(found, dim=dim)
if name is not None:
stack.name = name
if long_name is not None:
stack.attrs['long_name'] = long_name
return stack
def weighted_field_mean(ds, lon='rlon', lat='rlat', weights=None):
"""
function to compute area-weighted spatial means
"""
if weights is None:
weights = np.cos(np.deg2rad(ds[lat]))
return ds.weighted(weights).mean(dim=(lon, lat))
def seasonal_mean(da):
"""
Function to compute seasonal means with a simple groupby approach
"""
return da.groupby('time.season').mean(dim='time')
def height_correction(height1, height2):
"""returns height correction in m"""
return (height1 - height2) * 0.0065
class Dataset():
def __init__(self, mask='tas', **kwargs):
self.filenames = {}
self._mask_var = mask
for key, value in kwargs.items():
if not isinstance(value, list):
value = [value]
self.filenames[key] = value
self.ds = self._init_dataset(self.filenames)
def _init_dataset(self, filenames):
da = []
for var, filename in self.filenames.items():
da.append(xr.open_mfdataset(filename)[var])
ds = xr.merge(da).rename(self.inv_map)
ds["mask"] = xr.where(~np.isnan(ds[self._mask_var].isel(time=0)),1,0)
return ds
def _get_id(self, id):
return self.varmap[id]
@property
def mask(self):
return self.ds.mask
@property
def inv_map(self):
return {v: k for k, v in self.varmap.items()}
@property
def orog(self):
return xr.open_dataset(self.filenames['orog'][0])
def dataset(self, variables=None, mask=False):
if variables is None:
ds = self.ds
else:
if not isinstance(variables, list):
variables = [variables]
#names = [self._get_id(id) for id in variables]
ds = xr.merge([self.ds[v] for v in variables])
return ds
class CRU_TS4(Dataset):
varmap = {'tas': 'tmp',
'pr': 'pre',
'orog': 'topo'}
def __init__(self):
path = "/mnt/lustre02/work/ch0636/eddy/pool/obs/cru/CRU/TS4.04/original"
template = "cru_ts4.04.1901.2019.{variable}.dat.nc"
variables = ["tmp", "pre", "cld", "dtr", "frs", "pet"]
kwargs = {key: os.path.join(path, template.format(variable=key)) for key in variables}
Dataset.__init__(self,
**kwargs,
topo="/mnt/lustre02/work/ch0636/eddy/pool/obs/cru/CRU/TS4.04/original/cru404_c129.nc")
def inv_map(dict):
return {v: k for k, v in dict.items()}
def _glob_filenames(pattern):
filenames = glob.glob(pattern)
filenames.sort()
return filenames
def _get_dataarrays(filenames, drop=None, chunks="auto", **kwargs):
da = []
for v, f in filenames.items():
files = _glob_filenames(f)
if len(files) > 1:
ds = open_mfdataset2(files)
else:
ds = xr.open_dataset(files[0], chunks=chunks, **kwargs)
if drop is not None:
try:
ds.drop(drop)
#grid_mapping = cx.preprocessing.get_grid_mapping(ds)
#da.append(xr.merge(ds[v], grid_mapping))
except:
pass
da.append(ds)
return da
def create_dataset(filenames, drop=None, mask=None, varmap=None, **kwargs):
ds = xr.merge(_get_dataarrays(filenames, drop=drop, **kwargs),
compat='override')
if mask is not None:
ds["mask"] = xr.where(~np.isnan(ds[mask].isel(time=0)),1,0)
if varmap is not None:
ds = ds.rename(inv_map(varmap))
return ds
def cru_ts4(chunks="auto", **kwargs):
"""Returns CRU_TS4 dataset from DKRZ filesystem"""
varmap = {'tas': 'tmp',
'pr': 'pre',
'orog': 'topo'}
variables = ["tmp", "pre", "cld", "dtr", "frs", "pet"]
path = "/mnt/lustre02/work/ch0636/eddy/pool/obs/cru/CRU/TS4.04/original"
template = "cru_ts4.04.1901.2019.{variable}.dat.nc"
filenames = {key: os.path.join(path, template.format(variable=key)) for key in variables}
filenames['topo'] = "/mnt/lustre02/work/ch0636/eddy/pool/obs/cru/CRU/TS4.04/original/cru404_c129.nc"
return create_dataset(filenames, mask='tmp', drop="stn", varmap=varmap, chunks=chunks, **kwargs)
def eobs(version="v22.0e", chunks="auto", **kwargs):
"""Returns eobs dataset from DKRZ filesystem"""
varmap = {'tas': 'tg',
'pr': 'rr',
'tasmax': 'tx',
'tasmin': 'tn',
'rsds': 'qq',
'psl': 'pp',
'orog': 'elevation'}
variables = ["tg", "tx", "tn", "rr", "qq", "pp"]
path = "/mnt/lustre02/work/ch0636/eddy/pool/obs/eobs/{version}/original_025/day/var/{cf_name}/"
template = "{variable}_ens_mean_0.25deg_reg_{version}.nc"
filenames = {key: os.path.join(path, template).format(variable=key, version=version, cf_name=inv_map(varmap)[key])
for key in variables}
filenames['elevation'] = "/mnt/lustre02/work/ch0636/eddy/pool/obs/eobs/{version}/original_025/fx/orog/elev_ens_0.25deg_reg_{version}.nc".format(version=version)
return create_dataset(filenames, mask='tg', varmap=varmap, chunks=chunks, **kwargs)
#CRU_TS4 = Dataset(tas="/mnt/lustre02/work/ch0636/eddy/pool/obs/cru/CRU/TS4.04/original/cru_ts4.04.1901.2019.tmp.dat.nc")
def hyras(chunks="auto", **kwargs):
"""Returns hyras dataset from DKRZ filesystem."""
variables = ["tas", "pr", "tmax", "tmin", "hurs"]
path = "/mnt/lustre02/work/ch0636/eddy/pool/obs/HYRAS/{variable}"
template = "{variable}_hyras_5_*_v3.0_ocz.nc"
filenames = {key: os.path.join(path, template).format(variable=key)
for key in variables}
return create_dataset(filenames, mask="tas", chunks=chunks, **kwargs)
#def monthly(self, time_range=None):
# return self.archive.monthly(time_range=time_range)
def regrid(ds_src, ds_trg, method="bilinear", **kwargs):
import xesmf as xe
regridder = xe.Regridder(ds_src, ds_trg, method=method, **kwargs)
print(regridder)
return regridder(ds_src)
| [
"xarray.decode_cf",
"xesmf.Regridder",
"numpy.deg2rad",
"xarray.open_dataset",
"xarray.concat",
"xarray.merge",
"xarray.DataArray",
"glob.glob",
"xarray.open_mfdataset",
"os.path.join"
] | [((648, 862), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['files'], {'parallel': 'parallel', 'decode_times': '(False)', 'combine': '"""by_coords"""', 'preprocess': 'drop_all_coords', 'decode_cf': '(False)', 'chunks': 'chunks', 'data_vars': 'data_vars', 'coords': 'coords', 'compat': 'compat'}), "(files, parallel=parallel, decode_times=False, combine=\n 'by_coords', preprocess=drop_all_coords, decode_cf=False, chunks=chunks,\n data_vars=data_vars, coords=coords, compat=compat, **kwargs)\n", (665, 862), True, 'import xarray as xr\n'), ((911, 950), 'xarray.decode_cf', 'xr.decode_cf', (['ds'], {'use_cftime': 'use_cftime'}), '(ds, use_cftime=use_cftime)\n', (923, 950), True, 'import xarray as xr\n'), ((1288, 1495), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['files'], {'parallel': 'parallel', 'decode_times': '(True)', 'combine': '"""by_coords"""', 'decode_cf': '(True)', 'chunks': 'chunks', 'use_cftime': 'use_cftime', 'data_vars': 'data_vars', 'coords': 'coords', 'compat': 'compat'}), "(files, parallel=parallel, decode_times=True, combine=\n 'by_coords', decode_cf=True, chunks=chunks, use_cftime=use_cftime,\n data_vars=data_vars, coords=coords, compat=compat, **kwargs)\n", (1305, 1495), True, 'import xarray as xr\n'), ((2173, 2243), 'xarray.DataArray', 'xr.DataArray', ([], {'data': '[var.name for var in found]', 'dims': '"""var"""', 'name': '"""var"""'}), "(data=[var.name for var in found], dims='var', name='var')\n", (2185, 2243), True, 'import xarray as xr\n'), ((2256, 2281), 'xarray.concat', 'xr.concat', (['found'], {'dim': 'dim'}), '(found, dim=dim)\n', (2265, 2281), True, 'import xarray as xr\n'), ((5063, 5081), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (5072, 5081), False, 'import glob\n'), ((8469, 8522), 'xesmf.Regridder', 'xe.Regridder', (['ds_src', 'ds_trg'], {'method': 'method'}), '(ds_src, ds_trg, method=method, **kwargs)\n', (8481, 8522), True, 'import xesmf as xe\n'), ((1760, 1817), 'xarray.merge', 'xr.merge', (['[ds[var] for var 
in self.varnames if var in ds]'], {}), '([ds[var] for var in self.varnames if var in ds])\n', (1768, 1817), True, 'import xarray as xr\n'), ((3924, 3966), 'xarray.open_dataset', 'xr.open_dataset', (["self.filenames['orog'][0]"], {}), "(self.filenames['orog'][0])\n", (3939, 3966), True, 'import xarray as xr\n'), ((2611, 2630), 'numpy.deg2rad', 'np.deg2rad', (['ds[lat]'], {}), '(ds[lat])\n', (2621, 2630), True, 'import numpy as np\n'), ((4257, 4298), 'xarray.merge', 'xr.merge', (['[self.ds[v] for v in variables]'], {}), '([self.ds[v] for v in variables])\n', (4265, 4298), True, 'import xarray as xr\n'), ((5374, 5424), 'xarray.open_dataset', 'xr.open_dataset', (['files[0]'], {'chunks': 'chunks'}), '(files[0], chunks=chunks, **kwargs)\n', (5389, 5424), True, 'import xarray as xr\n'), ((3514, 3526), 'xarray.merge', 'xr.merge', (['da'], {}), '(da)\n', (3522, 3526), True, 'import xarray as xr\n'), ((7291, 7319), 'os.path.join', 'os.path.join', (['path', 'template'], {}), '(path, template)\n', (7303, 7319), False, 'import os\n'), ((8095, 8123), 'os.path.join', 'os.path.join', (['path', 'template'], {}), '(path, template)\n', (8107, 8123), False, 'import os\n'), ((3467, 3494), 'xarray.open_mfdataset', 'xr.open_mfdataset', (['filename'], {}), '(filename)\n', (3484, 3494), True, 'import xarray as xr\n')] |
"""Implemenets helpers for analtyical solution."""
import numpy as np
import torch
from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel
from gaussian_sharing.model.build import StructBiasModel
from struct_discovery.solver.hypergrad import implicit_function
from gaussian_sharing.data import helpers
def train_one_model(method, datasets):
assert method in ['no_share', 'oracle',
'brute_force', 'single_loop', 'double_loop']
train_dataset, val_dataset, test_dataset = datasets
if method == 'no_share':
A_hat, theta_hat = fit_no_share(train_dataset, val_dataset)
if method == 'oracle':
A_hat, theta_hat = fit_oracle(train_dataset, val_dataset)
if method == 'brute_force':
A_hat, theta_hat = fit_brute_force(train_dataset, val_dataset)
if method == 'single_loop':
best_in_sample_loss = np.inf
# Non-convex optimization, train with different random initialization.
num_init = 3
for k in range(num_init):
A_hat_k, theta_hat_k, in_sample_loss = fit_single_loop(
train_dataset, val_dataset)
if in_sample_loss < best_in_sample_loss:
best_in_sample_loss = in_sample_loss
A_hat = A_hat_k
theta_hat = theta_hat_k
if method == 'double_loop':
A_hat, theta_hat = fit_double_loop(train_dataset, val_dataset)
return A_hat, theta_hat
def fit_no_share(train_dataset, val_dataset):
train_data = train_dataset.data
val_data = val_dataset.data
kdim = train_data.shape[-1]
A_ind = np.eye(kdim)
train_val_data = np.concatenate([train_data, val_data], 0)
kdim = train_data.shape[-1]
model = AnalyticStructBiasModel(
kdim, train_val_data, A_init=A_ind, A_init_scale=10)
theta_hat = model.forward(torch.zeros([1, kdim]))
return A_ind, theta_hat.data.cpu().squeeze().numpy()
def fit_oracle(train_dataset, val_dataset):
A_gt = train_dataset.A_gt
train_data = train_dataset.data
val_data = val_dataset.data
train_val_data = np.concatenate([train_data, val_data], 0)
kdim = train_data.shape[-1]
model = AnalyticStructBiasModel(
kdim, train_val_data, A_init=A_gt, A_init_scale=10)
theta_hat = model.forward(torch.zeros([1, kdim]))
return A_gt, theta_hat.data.cpu().squeeze().numpy()
def fit_single_loop(train_dataset, val_dataset, config=None):
defaults = {'num_iter': 1000, 'outer_lr': 2e-2}
if config is not None:
pass # TODO: update with config.
num_iter = defaults['num_iter']
lr = defaults['outer_lr']
kdim = train_dataset.data.shape[-1]
model = AnalyticStructBiasModel(kdim, train_dataset.data)
hyper_optimizer = torch.optim.RMSprop(
model.hyper_parameters(), lr, weight_decay=1e-4)
torch_val_data = torch.from_numpy(val_dataset.data).float()
model.train()
model.cuda()
torch_val_data = torch_val_data.cuda()
for _ in range(num_iter):
y_pred = model.forward(torch_val_data)
AA = model.get_A()
loss_reg = 0
loss_reg += torch.trace(torch.sqrt(AA.T.mm(AA)))
loss_reg += -1*torch.sum(torch.log(AA+1e-6)*(AA+1e-6), -1).mean()
loss = model.total_val_loss(
y_pred, torch_val_data) + 1e-2*loss_reg
model.zero_grad()
loss.backward()
hyper_optimizer.step()
A_best_idx = model.get_A().data.cpu().numpy().argmax(-1)
A_best = np.zeros((kdim, kdim))
A_best[np.arange(0, kdim), A_best_idx] = 1
# Refit for the best-parameters.
train_data = train_dataset.data
val_data = val_dataset.data
train_val_data = np.concatenate([train_data, val_data], 0)
model = AnalyticStructBiasModel(
kdim, train_val_data, A_init=A_best, A_init_scale=10)
theta_hat = model.forward(torch.zeros([1, kdim]))[0]
return A_best, theta_hat.data.cpu().squeeze().numpy(), loss.item()
def fit_double_loop(train_dataset, val_dataset, config=None):
defaults = {'num_iter': 1000, 'num_inner': 10,
'outer_lr': 1e-2, 'inner_lr': 1e-4}
if config is not None:
pass # TODO: update with config.
num_iter = defaults['num_iter']
num_inner = defaults['num_inner']
lr = defaults['outer_lr']
inner_lr = defaults['inner_lr']
kdim = train_dataset.data.shape[-1]
model = StructBiasModel(kdim, train_dataset.data)
model_optimizer = torch.optim.Adam(model.model_parameters(), inner_lr)
hyper_optimizer = torch.optim.Adam(model.hyper_parameters(), lr)
torch_train_data = torch.from_numpy(train_dataset.data).float()
torch_val_data = torch.from_numpy(val_dataset.data).float()
model.train()
model.cuda()
torch_val_data = torch_val_data.cuda()
torch_train_data = torch_train_data.cuda()
for _ in range(num_iter):
# Inner loop.
for inner in range(num_inner):
y_pred = model.forward(torch_train_data)
inner_loss = model.total_loss(y_pred, torch_train_data)
model.zero_grad()
inner_loss.backward()
model_optimizer.step()
# Outer loop.
y_pred = model.forward(torch_val_data)
# Computes hypergradient.
model.zero_grad()
hyper_grad = implicit_function.compute_hypergrad(
(torch_train_data, torch_train_data),
(torch_val_data, torch_val_data),
model,
method='EXACT')
# Update hypergradient.
with torch.no_grad():
bidx = 0
for mm in model.hyper_parameters():
mm_size = mm.nelement()
eidx = bidx + mm_size
mm.grad = torch.reshape(
hyper_grad[bidx:eidx, :], mm.shape).clone()
hyper_optimizer.step()
A_best_idx = model.get_A().data.cpu().numpy().argmax(-1)
A_best = np.zeros((kdim, kdim))
A_best[np.arange(0, kdim), A_best_idx] = 1
# Refit for the best-parameters.
train_data = train_dataset.data
val_data = val_dataset.data
train_val_data = np.concatenate([train_data, val_data], 0)
model = AnalyticStructBiasModel(kdim, train_val_data, A_init=A_best)
theta_hat = model.forward(torch.zeros([1, kdim]))[0]
return A_best, theta_hat.data.cpu().squeeze().numpy()
def fit_brute_force(train_dataset, val_dataset):
# Search over all A.
train_data = train_dataset.data
val_data = val_dataset.data
train_val_data = np.concatenate([train_data, val_data], 0)
torch_val_data = torch.from_numpy(val_dataset.data).float()
kdim = train_data.shape[-1]
group = list(helpers.partition(list(range(0, kdim))))
min_val_loss = np.inf
best_A = None
best_theta = None
for curr_group in group:
A = np.zeros((kdim, kdim))
for cnum, cluster in enumerate(curr_group):
A[cnum, cluster] = 1
A = A.T
# Fit on train given A.
model = AnalyticStructBiasModel(kdim, train_data, A_init=A)
y_pred = model.forward(torch_val_data)
val_loss = model.total_val_loss(y_pred, torch_val_data)
AA = model.get_A()
loss_reg = 0
loss_reg += 1e-2*torch.trace(torch.sqrt(AA.T.mm(AA)))
val_loss += loss_reg
if min_val_loss > val_loss:
min_val_loss = val_loss
best_A = A
best_theta = y_pred[0:1]
# Refit using the best A.
model = AnalyticStructBiasModel(
kdim, train_val_data, A_init=best_A, A_init_scale=10.)
best_theta = model.forward(torch_val_data)[0:1]
return best_A, best_theta.data.cpu().squeeze().numpy()
| [
"numpy.zeros",
"struct_discovery.solver.hypergrad.implicit_function.compute_hypergrad",
"gaussian_sharing.model.build.StructBiasModel",
"numpy.arange",
"gaussian_sharing.model.analytical_model.AnalyticStructBiasModel",
"torch.zeros",
"numpy.eye",
"torch.reshape",
"torch.no_grad",
"torch.log",
"n... | [((1611, 1623), 'numpy.eye', 'np.eye', (['kdim'], {}), '(kdim)\n', (1617, 1623), True, 'import numpy as np\n'), ((1645, 1686), 'numpy.concatenate', 'np.concatenate', (['[train_data, val_data]', '(0)'], {}), '([train_data, val_data], 0)\n', (1659, 1686), True, 'import numpy as np\n'), ((1731, 1807), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_val_data'], {'A_init': 'A_ind', 'A_init_scale': '(10)'}), '(kdim, train_val_data, A_init=A_ind, A_init_scale=10)\n', (1754, 1807), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((2093, 2134), 'numpy.concatenate', 'np.concatenate', (['[train_data, val_data]', '(0)'], {}), '([train_data, val_data], 0)\n', (2107, 2134), True, 'import numpy as np\n'), ((2179, 2254), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_val_data'], {'A_init': 'A_gt', 'A_init_scale': '(10)'}), '(kdim, train_val_data, A_init=A_gt, A_init_scale=10)\n', (2202, 2254), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((2679, 2728), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_dataset.data'], {}), '(kdim, train_dataset.data)\n', (2702, 2728), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((3474, 3496), 'numpy.zeros', 'np.zeros', (['(kdim, kdim)'], {}), '((kdim, kdim))\n', (3482, 3496), True, 'import numpy as np\n'), ((3670, 3711), 'numpy.concatenate', 'np.concatenate', (['[train_data, val_data]', '(0)'], {}), '([train_data, val_data], 0)\n', (3684, 3711), True, 'import numpy as np\n'), ((3724, 3801), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_val_data'], {'A_init': 'A_best', 'A_init_scale': '(10)'}), '(kdim, train_val_data, A_init=A_best, A_init_scale=10)\n', (3747, 
3801), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((4369, 4410), 'gaussian_sharing.model.build.StructBiasModel', 'StructBiasModel', (['kdim', 'train_dataset.data'], {}), '(kdim, train_dataset.data)\n', (4384, 4410), False, 'from gaussian_sharing.model.build import StructBiasModel\n'), ((5875, 5897), 'numpy.zeros', 'np.zeros', (['(kdim, kdim)'], {}), '((kdim, kdim))\n', (5883, 5897), True, 'import numpy as np\n'), ((6072, 6113), 'numpy.concatenate', 'np.concatenate', (['[train_data, val_data]', '(0)'], {}), '([train_data, val_data], 0)\n', (6086, 6113), True, 'import numpy as np\n'), ((6126, 6186), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_val_data'], {'A_init': 'A_best'}), '(kdim, train_val_data, A_init=A_best)\n', (6149, 6186), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((6467, 6508), 'numpy.concatenate', 'np.concatenate', (['[train_data, val_data]', '(0)'], {}), '([train_data, val_data], 0)\n', (6481, 6508), True, 'import numpy as np\n'), ((7419, 7498), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_val_data'], {'A_init': 'best_A', 'A_init_scale': '(10.0)'}), '(kdim, train_val_data, A_init=best_A, A_init_scale=10.0)\n', (7442, 7498), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((1847, 1869), 'torch.zeros', 'torch.zeros', (['[1, kdim]'], {}), '([1, kdim])\n', (1858, 1869), False, 'import torch\n'), ((2294, 2316), 'torch.zeros', 'torch.zeros', (['[1, kdim]'], {}), '([1, kdim])\n', (2305, 2316), False, 'import torch\n'), ((5276, 5411), 'struct_discovery.solver.hypergrad.implicit_function.compute_hypergrad', 'implicit_function.compute_hypergrad', (['(torch_train_data, torch_train_data)', '(torch_val_data, torch_val_data)', 'model'], {'method': '"""EXACT"""'}), "((torch_train_data, torch_train_data), 
(\n torch_val_data, torch_val_data), model, method='EXACT')\n", (5311, 5411), False, 'from struct_discovery.solver.hypergrad import implicit_function\n'), ((6771, 6793), 'numpy.zeros', 'np.zeros', (['(kdim, kdim)'], {}), '((kdim, kdim))\n', (6779, 6793), True, 'import numpy as np\n'), ((6943, 6994), 'gaussian_sharing.model.analytical_model.AnalyticStructBiasModel', 'AnalyticStructBiasModel', (['kdim', 'train_data'], {'A_init': 'A'}), '(kdim, train_data, A_init=A)\n', (6966, 6994), False, 'from gaussian_sharing.model.analytical_model import AnalyticStructBiasModel\n'), ((2851, 2885), 'torch.from_numpy', 'torch.from_numpy', (['val_dataset.data'], {}), '(val_dataset.data)\n', (2867, 2885), False, 'import torch\n'), ((3508, 3526), 'numpy.arange', 'np.arange', (['(0)', 'kdim'], {}), '(0, kdim)\n', (3517, 3526), True, 'import numpy as np\n'), ((3841, 3863), 'torch.zeros', 'torch.zeros', (['[1, kdim]'], {}), '([1, kdim])\n', (3852, 3863), False, 'import torch\n'), ((4579, 4615), 'torch.from_numpy', 'torch.from_numpy', (['train_dataset.data'], {}), '(train_dataset.data)\n', (4595, 4615), False, 'import torch\n'), ((4645, 4679), 'torch.from_numpy', 'torch.from_numpy', (['val_dataset.data'], {}), '(val_dataset.data)\n', (4661, 4679), False, 'import torch\n'), ((5501, 5516), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5514, 5516), False, 'import torch\n'), ((5909, 5927), 'numpy.arange', 'np.arange', (['(0)', 'kdim'], {}), '(0, kdim)\n', (5918, 5927), True, 'import numpy as np\n'), ((6217, 6239), 'torch.zeros', 'torch.zeros', (['[1, kdim]'], {}), '([1, kdim])\n', (6228, 6239), False, 'import torch\n'), ((6530, 6564), 'torch.from_numpy', 'torch.from_numpy', (['val_dataset.data'], {}), '(val_dataset.data)\n', (6546, 6564), False, 'import torch\n'), ((5691, 5740), 'torch.reshape', 'torch.reshape', (['hyper_grad[bidx:eidx, :]', 'mm.shape'], {}), '(hyper_grad[bidx:eidx, :], mm.shape)\n', (5704, 5740), False, 'import torch\n'), ((3189, 3210), 'torch.log', 'torch.log', 
(['(AA + 1e-06)'], {}), '(AA + 1e-06)\n', (3198, 3210), False, 'import torch\n')] |
from collections import namedtuple
import pickle
import gym
import ptan
from ptan.agent import float32_preprocessor
import torch
import numpy as np
from util import PGN
GAMMA = 0.99
NUM_TRAJS = 100
EpisodeStep = namedtuple('EpisodeStep', field_names=['state', 'action', 'reward', 'next_state'])
Trajectory = namedtuple('Trajectory', field_names=['prob', 'episode_steps'])
if __name__ == '__main__':
env = gym.make('CartPole-v1')
net = PGN(env.observation_space.shape[0], env.action_space.n)
net.load_state_dict(torch.load('cartpole_expert.mod'))
net.eval()
agent = ptan.agent.PolicyAgent(net, apply_softmax=True, preprocessor=float32_preprocessor)
exp_source = ptan.experience.ExperienceSourceFirstLast(env, agent, gamma=GAMMA)
trajectories = []
for ep in range(NUM_TRAJS):
episode = []
qt = 1.0
for step_idx, exp in enumerate(exp_source):
probs = torch.softmax(net(float32_preprocessor(exp.state).view(1, -1)), dim=1)
probs = probs.squeeze()[int(exp.action)].item()
qt *= probs
episode.append(EpisodeStep(state=exp.state, action=int(exp.action), reward=exp.reward,
next_state=exp.last_state))
if exp.last_state is None:
break
print(np.prod())
trajectories.append(Trajectory(prob=qt, episode_steps=episode))
print(f'Number of trajectories: {len(trajectories)}')
with open('demonstrations.list.pkl', 'wb') as f:
pickle.dump(trajectories, f)
env.close()
| [
"pickle.dump",
"gym.make",
"ptan.agent.float32_preprocessor",
"torch.load",
"util.PGN",
"numpy.prod",
"collections.namedtuple",
"ptan.agent.PolicyAgent",
"ptan.experience.ExperienceSourceFirstLast"
] | [((228, 314), 'collections.namedtuple', 'namedtuple', (['"""EpisodeStep"""'], {'field_names': "['state', 'action', 'reward', 'next_state']"}), "('EpisodeStep', field_names=['state', 'action', 'reward',\n 'next_state'])\n", (238, 314), False, 'from collections import namedtuple\n'), ((325, 388), 'collections.namedtuple', 'namedtuple', (['"""Trajectory"""'], {'field_names': "['prob', 'episode_steps']"}), "('Trajectory', field_names=['prob', 'episode_steps'])\n", (335, 388), False, 'from collections import namedtuple\n'), ((430, 453), 'gym.make', 'gym.make', (['"""CartPole-v1"""'], {}), "('CartPole-v1')\n", (438, 453), False, 'import gym\n'), ((465, 520), 'util.PGN', 'PGN', (['env.observation_space.shape[0]', 'env.action_space.n'], {}), '(env.observation_space.shape[0], env.action_space.n)\n', (468, 520), False, 'from util import PGN\n'), ((610, 697), 'ptan.agent.PolicyAgent', 'ptan.agent.PolicyAgent', (['net'], {'apply_softmax': '(True)', 'preprocessor': 'float32_preprocessor'}), '(net, apply_softmax=True, preprocessor=\n float32_preprocessor)\n', (632, 697), False, 'import ptan\n'), ((711, 777), 'ptan.experience.ExperienceSourceFirstLast', 'ptan.experience.ExperienceSourceFirstLast', (['env', 'agent'], {'gamma': 'GAMMA'}), '(env, agent, gamma=GAMMA)\n', (752, 777), False, 'import ptan\n'), ((546, 579), 'torch.load', 'torch.load', (['"""cartpole_expert.mod"""'], {}), "('cartpole_expert.mod')\n", (556, 579), False, 'import torch\n'), ((1561, 1589), 'pickle.dump', 'pickle.dump', (['trajectories', 'f'], {}), '(trajectories, f)\n', (1572, 1589), False, 'import pickle\n'), ((1351, 1360), 'numpy.prod', 'np.prod', ([], {}), '()\n', (1358, 1360), True, 'import numpy as np\n'), ((966, 997), 'ptan.agent.float32_preprocessor', 'float32_preprocessor', (['exp.state'], {}), '(exp.state)\n', (986, 997), False, 'from ptan.agent import float32_preprocessor\n')] |
import sys
sys.path.insert(0, '..')
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from config.config import *
from utils.common_util import *
def fill_targets(row):
row.Target = np.array(row.Target.split(" ")).astype(np.int)
for num in row.Target:
name = LABEL_NAMES[int(num)]
row.loc[name] = 1
row.Target = ' '.join(np.sort(np.unique(row.Target)).astype(str).tolist())
return row
def generate_meta(meta_dir, fname, dataset='train'):
is_external = True if dataset == 'external' else False
label_df = pd.read_csv(opj(DATA_DIR, 'raw', fname))
for key in LABEL_NAMES.keys():
label_df[LABEL_NAMES[key]] = 0
meta_df = label_df.apply(fill_targets, axis=1)
meta_df[EXTERNAL] = is_external
if is_external:
meta_df[ANTIBODY] = meta_df[ID].apply(lambda x: x.split('_')[0])
clf = LabelEncoder()
meta_df[ANTIBODY_CODE] = clf.fit_transform(meta_df[ANTIBODY])
meta_df[ANTIBODY] = meta_df[ANTIBODY].astype(int)
meta_fname = opj(meta_dir, '%s_meta.csv' % dataset)
meta_df.to_csv(meta_fname, index=False)
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
meta_dir = opj(DATA_DIR, 'meta')
os.makedirs(meta_dir, exist_ok=True)
generate_meta(meta_dir, 'train.csv', dataset='train')
# https://www.kaggle.com/c/human-protein-atlas-image-classification/discussion/69984
generate_meta(meta_dir, 'HPAv18RBGY_wodpl.csv', dataset='external')
print('\nsuccess!')
| [
"numpy.unique",
"sys.path.insert",
"sklearn.preprocessing.LabelEncoder"
] | [((11, 35), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (26, 35), False, 'import sys\n'), ((895, 909), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (907, 909), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((395, 416), 'numpy.unique', 'np.unique', (['row.Target'], {}), '(row.Target)\n', (404, 416), True, 'import numpy as np\n')] |
import numba as nb
import numpy as np
import pandas as pd
from sid.shared import boolean_choices
from sid.time import get_date
from src.shared import from_epochs_to_timestamps
IS_POSITIVE_CASE = (
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
"""str: Condition for a positive test case.
The individual either ...
- knows that she is infectious.
- knows she is immune but still symptomatic.
- knows she is immune but 14 days since infection have not passed.
"""
def go_to_weekly_meeting(
states, params, group_col_name, day_of_week, seed # noqa: U100
):
"""Return who participates in a weekly meeting.
Args:
states (pandas.DataFrame): sid states DataFrame
params (pandas.DataFrame): DataFrame with two index levels,
subcategory and name.
group_col_name (str): name of the column identifying this contact model's
group column.
day_of_week (str): day of the week on which this model takes place.
Returns:
attends_meeting (pandas.Series): same index as states. 1 for workers that
go to the weekly meeting today.
"""
date = get_date(states)
day = date.day_name()
if day != day_of_week:
attends_meeting = pd.Series(data=False, index=states.index)
else:
attends_meeting = states[group_col_name] != -1
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
attends_meeting = reduce_contacts_on_condition(
attends_meeting,
states,
params.loc[(params_entry, params_entry), "value"],
condition,
is_recurrent=True,
)
return attends_meeting
def go_to_daily_work_meeting(states, params, seed): # noqa: U100
"""Return which people go to work.
Args:
states (pandas.DataFrame): sid states DataFrame
params (pandas.DataFrame): DataFrame with two index levels,
subcategory and name. has a "value" column that contains the probabilities
to the number of possible columns in the "name" index level.
Returns:
attends_work (pandas.Series): same index as states. 1 for workers that go to
work this period, 0 for everyone else.
"""
date = get_date(states)
day = date.day_name()
attends_work = (states["occupation"] == "working") & (
states["work_daily_group_id"] != -1
)
if day in ["Saturday", "Sunday"]:
attends_work = attends_work & states[f"work_{day.lower()}"]
else:
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
attends_work = reduce_contacts_on_condition(
attends_work,
states,
params.loc[(params_entry, params_entry), "value"],
condition,
is_recurrent=True,
)
return attends_work
def meet_daily_other_contacts(states, params, group_col_name, seed): # noqa: U100
attends_meeting = states[group_col_name] != -1
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
attends_meeting = reduce_contacts_on_condition(
attends_meeting,
states,
params.loc[(params_entry, params_entry), "value"],
condition,
is_recurrent=True,
)
return attends_meeting
def attends_educational_facility(states, params, id_column, seed): # noqa: U100
"""Indicate which children go to an educational facility.
Children go to an educational facility on weekdays.
During vacations, all children do not go to educational facilities.
Furthermore, there is a probability that children stay at home when they experience
symptoms or receive a positive test result.
Args:
states (pandas.DataFrame): The states given by sid.
params (pandas.DataFrame): DataFrame with three category levels,
id_column (str): name of the column in *states* that identifies
which pupils and adults belong to a group.
Returns:
attends_facility (pandas.Series): It is a series with the same index as states.
The values are one for children that go to the facility and zero for those
who do not.
"""
facility, _, _, digit = id_column.split("_")
model_name = f"educ_{facility}_{digit}"
date = get_date(states)
day = date.day_name()
if day in ["Saturday", "Sunday"]:
attends_facility = pd.Series(data=False, index=states.index)
else:
attends_facility = states[id_column] != -1
attends_facility = _pupils_having_vacations_do_not_attend(
attends_facility, states, params
)
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
attends_facility = reduce_contacts_on_condition(
attends_facility,
states,
params.loc[(model_name, params_entry, params_entry), "value"],
condition,
is_recurrent=True,
)
return attends_facility
def meet_hh_members(states, params, seed): # noqa: U100
"""Meet household members.
As single person households have unique household ids, everyone meets their
household unless they are symptomatic. In that case the sick household member
don't meet the others with a certain probability.
Args:
states (pandas.DataFrame): The states.
params (pandas.DataFrame): DataFrame with two index levels,
subcategory and name. has a "value" column that contains the probabilities
to the number of possible columns in the "name" index level.
"""
meet_hh = states["hh_model_group_id"] != -1
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
meet_hh = reduce_contacts_on_condition(
meet_hh,
states,
params.loc[(params_entry, params_entry), "value"],
condition,
is_recurrent=True,
)
return meet_hh
def meet_other_non_recurrent_contacts(states, params, seed):
"""Meet other non recurrent contacts.
Individuals in households with educ_workers, retired and children have
additional contacts during vacations.
"""
contacts = calculate_non_recurrent_contacts_from_empirical_distribution(
states=states,
params=params.loc["other_non_recurrent"],
seed=seed,
on_weekends=True,
query=None,
reduce_on_condition=False,
)
affected_in_case_of_vacation = _identify_ppl_affected_by_vacation(states)
date = get_date(states)
state_to_vacation = get_states_w_vacations(date, params)
potential_vacation_contacts = _draw_potential_vacation_contacts(
states, params, state_to_vacation, seed
)
vacation_contacts = potential_vacation_contacts.where(
affected_in_case_of_vacation, 0
)
contacts = contacts + vacation_contacts
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
contacts = reduce_contacts_on_condition(
contacts,
states,
params.loc[("other_non_recurrent", params_entry, params_entry), "value"],
condition,
is_recurrent=False,
)
contacts = contacts.astype(int)
return contacts
def _identify_ppl_affected_by_vacation(states):
affected_categories = ["school", "preschool", "nursery", "retired"]
has_school_vacation = (
states["occupation"].isin(affected_categories) | states["educ_worker"]
)
# ~60% of individuals are in a household where someone has school vacations
in_hh_with_vacation = has_school_vacation.groupby(states["hh_id"]).transform(np.any)
return in_hh_with_vacation
def _draw_potential_vacation_contacts(states, params, state_to_vacation, seed):
np.random.seed(seed)
fed_state_to_p_contact = {fed_state: 0 for fed_state in states["state"].unique()}
for fed_state, vacation in state_to_vacation.items():
loc = ("additional_other_vacation_contact", "probability", vacation)
fed_state_to_p_contact[fed_state] = params.loc[loc, "value"]
p_contact = states["state"].map(fed_state_to_p_contact.get)
vacation_contact = pd.Series(boolean_choices(p_contact), index=states.index)
vacation_contact = vacation_contact.astype(int)
return vacation_contact
def calculate_non_recurrent_contacts_from_empirical_distribution(
states, params, on_weekends, seed, query=None, reduce_on_condition=True
):
"""Draw how many non recurrent contacts each person will have today.
Args:
states (pandas.DataFrame): sid states DataFrame.
params (pandas.DataFrame): DataFrame with two index levels,
subcategory and name. has a "value" column that contains the probabilities
to the number of possible columns in the "name" index level.
on_weekends (bool or str): whether to meet on weekends or not. If it's a string
it's interpreted as the prefix of columns identifying who participates
in this contact model on weekends. Then, columns of the form
"{on_weekends}_saturday" and "{on_weekends}_sunday" must be in states.
query (str): query string to identify the subset of individuals to which this
contact model applies.
Returns:
contacts (pandas.Series): index is the same as states. values is the number of
contacts.
"""
date = get_date(states)
day = date.day_name()
contacts = pd.Series(0, index=states.index)
if not on_weekends and day in ["Saturday", "Sunday"]:
pass
else:
if isinstance(on_weekends, str) and day in ["Saturday", "Sunday"]:
participating_today = states[f"{on_weekends}_{day.lower()}"]
is_participating = states.eval(query) & participating_today
else:
if query is not None:
is_participating = states.eval(query)
else:
is_participating = pd.Series(True, index=states.index)
distribution = params.query("~subcategory.str.contains('multiplier')")["value"]
contacts[is_participating] = _draw_nr_of_contacts(
distribution=distribution,
is_participating=is_participating,
states=states,
seed=seed,
)
if reduce_on_condition:
for params_entry, condition in [
("symptomatic_multiplier", states["symptomatic"]),
("positive_test_multiplier", states["knows_currently_infected"]),
]:
contacts = reduce_contacts_on_condition(
contacts,
states,
params.loc[(params_entry, params_entry), "value"],
condition,
is_recurrent=False,
)
contacts = contacts.astype(float)
return contacts
def _draw_nr_of_contacts(distribution, is_participating, states, seed):
"""Draw the number of contacts for everyone in a is_participating.
Args:
distribution (pandas.Series): slice of the params DataFrame with
the distribution. The `subcategory` level of the index either
identifies the age group specific distribution or must be the
same for the whole slice. The `name` index level gives the support
and the values of the Series give the probabilities.
is_participating (pandas.Series): same index as states. True for the individuals
that participate in the current contact model, i.e. for which the
number of contacts should be drawn.
states (pandas.DataFrame): sid states DataFrame.
Returns:
nr_of_contacts (pandas.Series): Same index as the states, values are
the number of contacts for each person.
"""
is_age_varying = distribution.index.get_level_values("subcategory").nunique() > 1
if is_age_varying:
age_labels = [f"{i}-{i + 9}" for i in range(0, 71, 10)] + ["80-100"]
age_dtype = pd.CategoricalDtype(categories=age_labels, ordered=True)
age_group = states["age_group"].astype(age_dtype)
age_codes = age_group.cat.codes.to_numpy()
probs_df = distribution.unstack().reindex(age_labels).fillna(0)
support = probs_df.columns.to_numpy().astype(int)
probs = probs_df.to_numpy()
cum_probs = probs.cumsum(axis=1)
nr_of_contacts_arr = _draw_age_varying_nr_of_contacts_numba(
support=support,
cum_probs=cum_probs,
age_codes=age_codes,
is_participating=is_participating.to_numpy(),
seed=seed,
)
else:
np.random.seed(seed)
support = distribution.index.get_level_values("name").to_numpy()
probs = distribution.to_numpy()
nr_of_contacts_arr = np.where(
is_participating.to_numpy(),
np.random.choice(support, p=probs, size=len(states)),
0,
)
return pd.Series(nr_of_contacts_arr, index=states.index)
@nb.njit
def _draw_age_varying_nr_of_contacts_numba(
    support, cum_probs, age_codes, is_participating, seed
):
    """Sample a number of contacts per person from age-group specific CDFs.

    Args:
        support: 1d array with the possible numbers of contacts.
        cum_probs: 2d array with one row of cumulative probabilities over
            ``support`` per age group.
        age_codes: 1d int array mapping each person to a row of ``cum_probs``.
        is_participating: 1d bool array; non-participants keep 0 contacts.
        seed: seed for the (numba-internal) random number generator.

    """
    np.random.seed(seed)
    n_obs = len(age_codes)
    out = np.zeros(n_obs)
    for i in range(n_obs):
        if is_participating[i]:
            # inverse transform sampling from the person's age-group CDF
            out[i] = _fast_choice(support, cum_probs[age_codes[i]])
    return out
@nb.njit
def _fast_choice(arr, cdf):
    """Draw one element of ``arr`` according to the cumulative distribution ``cdf``.

    Inverse transform sampling: walk ``cdf`` until it first exceeds a uniform
    draw and return the corresponding element of ``arr``.
    """
    u = np.random.uniform(0, 1)
    i = 0
    highest_i = len(cdf) - 1
    # stop at the last index even if cdf does not reach u (guards against
    # rounding so the index never runs past the support)
    while cdf[i] < u and i < highest_i:
        i += 1
    return arr[i]
# -------------------------------------------------------------------------------------
def reduce_contacts_on_condition(contacts, states, multiplier, condition, is_recurrent):
    """Reduce contacts for the share of the population meeting a condition.

    Individuals who fulfill the condition and whose quarantine compliance
    exceeds the multiplier have their contacts set to zero (or False for
    recurrent models); everyone else keeps their contacts.

    Args:
        contacts (pandas.Series): The series with contacts.
        states (pandas.DataFrame): The states of one day passed by sid.
        multiplier (float): The share of people who maintain their contacts
            despite the condition.
        condition (str, numpy.ndarray or pandas.Series): Condition or boolean
            array or Series which defines the subset of individuals who
            potentially reduce their contacts.
        is_recurrent (bool): if True, contacts are replaced with False instead
            of 0.

    """
    if isinstance(condition, str):
        condition_holds = states.eval(condition)
    elif isinstance(condition, pd.Series):
        condition_holds = condition.to_numpy()
    elif isinstance(condition, np.ndarray):
        condition_holds = condition
    else:
        raise ValueError
    # people below the compliance threshold refuse to reduce their contacts
    refuses = states["quarantine_compliance"] <= multiplier
    keeps_contacts = refuses | ~condition_holds
    replacement = False if is_recurrent else 0
    return contacts.where(cond=keeps_contacts, other=replacement)
# =============================================================================
def _pupils_having_vacations_do_not_attend(attends_facility, states, params):
    """Make pupils stay away from school if their state has vacations."""
    out = attends_facility.copy(deep=True)
    current_date = get_date(states)
    vacation_states = get_states_w_vacations(current_date, params).keys()
    on_vacation = states.state.isin(vacation_states)
    # only flip those who would otherwise attend
    out.loc[out & on_vacation] = False
    return out
def get_states_w_vacations(date: pd.Timestamp, params: pd.DataFrame) -> dict:
    """Get states which currently have vacations for pupils.

    Returns:
        state_to_vacation_name (dict): keys are the states that have vacations
            on the current date. Values are the names of the vacation.

    """
    vacation_params = params.filter(like="ferien", axis=0).copy()
    if vacation_params.empty:
        raise ValueError("'params' does not contain any information about vacations.")

    # Dates are stored as epochs so that value can be a numeric column.
    vacation_params["value"] = from_epochs_to_timestamps(vacation_params["value"])
    grouped = vacation_params.groupby(vacation_params.index.names)["value"]
    intervals = grouped.first().unstack()

    latest_vacation_date = intervals["end"].max()
    assert (
        date <= latest_vacation_date
    ), f"Vacations are only known until {latest_vacation_date}"

    is_ongoing = (intervals["start"] <= date) & (date <= intervals["end"])
    return {state: name for name, state in is_ongoing[is_ongoing].index}
| [
"numpy.random.uniform",
"numpy.random.seed",
"sid.time.get_date",
"numpy.zeros",
"pandas.CategoricalDtype",
"src.shared.from_epochs_to_timestamps",
"pandas.Series",
"sid.shared.boolean_choices"
] | [((1205, 1221), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (1213, 1221), False, 'from sid.time import get_date\n'), ((2460, 2476), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (2468, 2476), False, 'from sid.time import get_date\n'), ((4781, 4797), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (4789, 4797), False, 'from sid.time import get_date\n'), ((7249, 7265), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (7257, 7265), False, 'from sid.time import get_date\n'), ((8597, 8617), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8611, 8617), True, 'import numpy as np\n'), ((10240, 10256), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (10248, 10256), False, 'from sid.time import get_date\n'), ((10298, 10330), 'pandas.Series', 'pd.Series', (['(0)'], {'index': 'states.index'}), '(0, index=states.index)\n', (10307, 10330), True, 'import pandas as pd\n'), ((13811, 13860), 'pandas.Series', 'pd.Series', (['nr_of_contacts_arr'], {'index': 'states.index'}), '(nr_of_contacts_arr, index=states.index)\n', (13820, 13860), True, 'import pandas as pd\n'), ((13981, 14001), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13995, 14001), True, 'import numpy as np\n'), ((14039, 14054), 'numpy.zeros', 'np.zeros', (['n_obs'], {}), '(n_obs)\n', (14047, 14054), True, 'import numpy as np\n'), ((14244, 14267), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (14261, 14267), True, 'import numpy as np\n'), ((16223, 16239), 'sid.time.get_date', 'get_date', (['states'], {}), '(states)\n', (16231, 16239), False, 'from sid.time import get_date\n'), ((17044, 17089), 'src.shared.from_epochs_to_timestamps', 'from_epochs_to_timestamps', (["vacations['value']"], {}), "(vacations['value'])\n", (17069, 17089), False, 'from src.shared import from_epochs_to_timestamps\n'), ((1301, 1342), 'pandas.Series', 'pd.Series', ([], {'data': 
'(False)', 'index': 'states.index'}), '(data=False, index=states.index)\n', (1310, 1342), True, 'import pandas as pd\n'), ((4889, 4930), 'pandas.Series', 'pd.Series', ([], {'data': '(False)', 'index': 'states.index'}), '(data=False, index=states.index)\n', (4898, 4930), True, 'import pandas as pd\n'), ((9005, 9031), 'sid.shared.boolean_choices', 'boolean_choices', (['p_contact'], {}), '(p_contact)\n', (9020, 9031), False, 'from sid.shared import boolean_choices\n'), ((12845, 12901), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', ([], {'categories': 'age_labels', 'ordered': '(True)'}), '(categories=age_labels, ordered=True)\n', (12864, 12901), True, 'import pandas as pd\n'), ((13494, 13514), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (13508, 13514), True, 'import numpy as np\n'), ((10788, 10823), 'pandas.Series', 'pd.Series', (['(True)'], {'index': 'states.index'}), '(True, index=states.index)\n', (10797, 10823), True, 'import pandas as pd\n')] |
"""
The :mod:'atml.measure' module contains a set of common evaluation measures for predictive machine learning tasks.
"""
# Author: <NAME> (<EMAIL>)
# License: BSD-3
import numpy
import scipy.integrate
tiny = numpy.finfo('float64').tiny
class Measure:
    """Base class for evaluation measures.

    A measure only records which predictive task it applies to
    (e.g. ``'classification'``); subclasses implement the actual scoring.
    """

    def __init__(self, task):
        """
        Parameters
        ----------
        task: string
            Name of the task type this measure evaluates.
        """
        self.task = task
class AUC(Measure):
    """
    Area under the ROC curve for a one-vs-rest binary view of one class.
    """
    def __init__(self, target_positive=0):
        """
        Parameters
        ----------
        target_positive: int
            Index of the class treated as the positive label.
        """
        super().__init__(task='classification')
        self.target_positive = target_positive
        self.name = 'area under the curve (class ' + str(self.target_positive+1) + 'vs rest)'
    def get_measure(self, s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
            Predicted scores; column ``target_positive`` is used.
        y: numpy.ndarray
            One-hot labels; column ``target_positive`` marks the positives.
        Returns
        ----------
        auc: float
        """
        # histogram 1 - score so that increasing bin index corresponds to
        # decreasing predicted positive score
        bin_edges = numpy.unique(1-s[:, self.target_positive])
        count_pos = numpy.histogram(1-s[y[:, self.target_positive] == 1,
                                            self.target_positive], bins=bin_edges, range=(0.0, 1.0))[0]
        count_neg = numpy.histogram(1-s[y[:, self.target_positive] != 1, self.target_positive],
                                      bins=bin_edges, range=(0.0, 1.0))[0]
        # avoid division by zero when one class is absent from the sample
        if numpy.sum(count_pos) == 0:
            count_pos[:] = 1.0
        if numpy.sum(count_neg) == 0:
            count_neg[:] = 1.0
        # empirical CDFs of positives (TPR) and negatives (FPR), padded to [0, 1]
        cdf_pos = numpy.hstack([0.0,
                                numpy.cumsum(count_pos) / numpy.sum(count_pos),
                                1.0])
        cdf_neg = numpy.hstack([0.0,
                                numpy.cumsum(count_neg) / numpy.sum(count_neg),
                                1.0])
        # trapezoidal integration of TPR over FPR.
        # NOTE(review): scipy.integrate.trapz was deprecated and removed in
        # recent SciPy releases in favour of scipy.integrate.trapezoid --
        # confirm the pinned SciPy version still provides it.
        auc = scipy.integrate.trapz(cdf_pos, cdf_neg)
        return auc
    @staticmethod
    def transform(m):
        """
        Identity transform (AUC is already oriented as higher-is-better).

        Parameters
        ----------
        m: float
        Returns
        ----------
        m_hat: float
        """
        m_hat = m
        return m_hat
class BAcc(Measure):
    """
    Binary accuracy: one-vs-rest accuracy for a single target class.
    """
    def __init__(self, target_positive=0):
        """
        Parameters
        ----------
        target_positive: int
            Index of the class treated as the positive label.
        """
        # BUGFIX: Measure.__init__ only accepts ``task``; the previous call
        # passed ``target_positive`` through and raised a TypeError, and
        # ``self.target_positive`` was never assigned.
        super().__init__(task='classification')
        self.target_positive = target_positive
        self.name = 'accuracy (class ' + str(self.target_positive+1) + 'vs rest)'
    def get_measure(self, s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
            Predicted scores, shape (n_samples, n_classes).
        y: numpy.ndarray
            One-hot labels, shape (n_samples, n_classes).
        Returns
        ----------
        bacc: float
            Fraction of samples where the binarized prediction matches.
        """
        n = numpy.shape(s)[0]
        s_bin = numpy.zeros((n, 2))
        y_bin = numpy.zeros((n, 2))
        # column 0: positive-class score/label, column 1: everything else
        s_bin[:, 0] = s[:, self.target_positive]
        s_bin[:, 1] = 1.0 - s_bin[:, 0]
        y_bin[:, 0] = y[:, self.target_positive]
        y_bin[:, 1] = 1.0 - y_bin[:, 0]
        bacc = numpy.mean(numpy.argmax(s_bin, axis=1) == numpy.argmax(y_bin, axis=1))
        return bacc
class F1(Measure):
    """
    F1 score (harmonic mean of precision and recall) for one class vs rest.
    """
    def __init__(self, target_positive=0):
        """
        Parameters
        ----------
        target_positive: int
            Index of the class treated as the positive label.
        """
        # BUGFIX: Measure.__init__ only accepts ``task``; the previous call
        # passed ``target_positive`` through and raised a TypeError, and
        # ``self.target_positive`` was never assigned.
        super().__init__(task='classification')
        self.target_positive = target_positive
        self.name = 'F1 score (class ' + str(self.target_positive+1) + 'vs rest)'
    def get_measure(self, s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
            Predicted scores, shape (n_samples, n_classes).
        y: numpy.ndarray
            One-hot labels, shape (n_samples, n_classes).
        Returns
        ----------
        f1: float
        """
        y_hat = numpy.argmax(s, axis=1)
        y_label = numpy.argmax(y, axis=1)
        TP = numpy.sum((y_hat == self.target_positive) * (y_label == self.target_positive))
        FP = numpy.sum((y_hat == self.target_positive) * (y_label != self.target_positive))
        FN = numpy.sum((y_hat != self.target_positive) * (y_label == self.target_positive))
        # degenerate case: the class appears in neither predictions nor labels;
        # fall back to pseudo-counts so the score is defined (yields 0.5)
        if (TP + FP + FN) == 0:
            TP = 1
            FP = 1
            FN = 1
        f1 = 2 * TP / (2 * TP + FP + FN)
        return f1
class Acc(Measure):
    """
    Multi-class accuracy: fraction of samples whose argmax prediction
    matches the argmax label.
    """
    def __init__(self):
        """
        """
        super().__init__(task='classification')
        self.name = 'accuracy'
    @staticmethod
    def get_measure(s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
        y: numpy.ndarray
        Returns
        ----------
        acc: float
        """
        predicted = numpy.argmax(s, axis=1)
        actual = numpy.argmax(y, axis=1)
        return numpy.mean(predicted == actual)
class BS(Measure):
    """
    Brier score: mean squared difference between predicted probabilities
    and one-hot labels (lower is better).
    """
    def __init__(self):
        """
        """
        super().__init__(task='classification')
        self.name = 'Brier score'
    @staticmethod
    def get_measure(s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
        y: numpy.ndarray
        Returns
        ----------
        bs: float
        """
        per_sample = ((s - y) ** 2).sum(axis=1)
        return per_sample.mean()
    @staticmethod
    def transform(m):
        """
        Map the score onto a higher-is-better scale in [0, 1].

        Parameters
        ----------
        m: float
        Returns
        ----------
        m_hat: float
        """
        return 1 - m / 2
class LL(Measure):
    """
    Logarithm loss (cross entropy), lower is better.
    """
    def __init__(self):
        """
        """
        super().__init__(task='classification')
        self.name = 'Log loss (cross entropy)'
    @staticmethod
    def get_measure(s, y):
        """
        Parameters
        ----------
        s: numpy.ndarray
            Predicted probabilities, shape (n_samples, n_classes).
        y: numpy.ndarray
            One-hot labels, shape (n_samples, n_classes).
        Returns
        ----------
        ll: float
        """
        # BUGFIX: clip without mutating the caller's array -- the previous
        # in-place assignment (s[s <= tiny] = tiny) modified the input.
        s = numpy.where(s <= tiny, tiny, s)
        ll = numpy.mean(numpy.sum(-numpy.log(s) * y, axis=1))
        return ll
| [
"numpy.sum",
"numpy.log",
"numpy.argmax",
"numpy.zeros",
"numpy.shape",
"numpy.finfo",
"numpy.histogram",
"numpy.cumsum",
"numpy.unique"
] | [((221, 243), 'numpy.finfo', 'numpy.finfo', (['"""float64"""'], {}), "('float64')\n", (232, 243), False, 'import numpy\n'), ((1229, 1273), 'numpy.unique', 'numpy.unique', (['(1 - s[:, self.target_positive])'], {}), '(1 - s[:, self.target_positive])\n', (1241, 1273), False, 'import numpy\n'), ((3039, 3058), 'numpy.zeros', 'numpy.zeros', (['(n, 2)'], {}), '((n, 2))\n', (3050, 3058), False, 'import numpy\n'), ((3076, 3095), 'numpy.zeros', 'numpy.zeros', (['(n, 2)'], {}), '((n, 2))\n', (3087, 3095), False, 'import numpy\n'), ((3996, 4019), 'numpy.argmax', 'numpy.argmax', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (4008, 4019), False, 'import numpy\n'), ((4039, 4062), 'numpy.argmax', 'numpy.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4051, 4062), False, 'import numpy\n'), ((4077, 4155), 'numpy.sum', 'numpy.sum', (['((y_hat == self.target_positive) * (y_label == self.target_positive))'], {}), '((y_hat == self.target_positive) * (y_label == self.target_positive))\n', (4086, 4155), False, 'import numpy\n'), ((4170, 4248), 'numpy.sum', 'numpy.sum', (['((y_hat == self.target_positive) * (y_label != self.target_positive))'], {}), '((y_hat == self.target_positive) * (y_label != self.target_positive))\n', (4179, 4248), False, 'import numpy\n'), ((4263, 4341), 'numpy.sum', 'numpy.sum', (['((y_hat != self.target_positive) * (y_label == self.target_positive))'], {}), '((y_hat != self.target_positive) * (y_label == self.target_positive))\n', (4272, 4341), False, 'import numpy\n'), ((1293, 1409), 'numpy.histogram', 'numpy.histogram', (['(1 - s[y[:, self.target_positive] == 1, self.target_positive])'], {'bins': 'bin_edges', 'range': '(0.0, 1.0)'}), '(1 - s[y[:, self.target_positive] == 1, self.target_positive\n ], bins=bin_edges, range=(0.0, 1.0))\n', (1308, 1409), False, 'import numpy\n'), ((1468, 1584), 'numpy.histogram', 'numpy.histogram', (['(1 - s[y[:, self.target_positive] != 1, self.target_positive])'], {'bins': 'bin_edges', 'range': '(0.0, 1.0)'}), '(1 - s[y[:, 
self.target_positive] != 1, self.target_positive\n ], bins=bin_edges, range=(0.0, 1.0))\n', (1483, 1584), False, 'import numpy\n'), ((1630, 1650), 'numpy.sum', 'numpy.sum', (['count_pos'], {}), '(count_pos)\n', (1639, 1650), False, 'import numpy\n'), ((1701, 1721), 'numpy.sum', 'numpy.sum', (['count_neg'], {}), '(count_neg)\n', (1710, 1721), False, 'import numpy\n'), ((3004, 3018), 'numpy.shape', 'numpy.shape', (['s'], {}), '(s)\n', (3015, 3018), False, 'import numpy\n'), ((5501, 5532), 'numpy.sum', 'numpy.sum', (['((s - y) ** 2)'], {'axis': '(1)'}), '((s - y) ** 2, axis=1)\n', (5510, 5532), False, 'import numpy\n'), ((3305, 3332), 'numpy.argmax', 'numpy.argmax', (['s_bin'], {'axis': '(1)'}), '(s_bin, axis=1)\n', (3317, 3332), False, 'import numpy\n'), ((3336, 3363), 'numpy.argmax', 'numpy.argmax', (['y_bin'], {'axis': '(1)'}), '(y_bin, axis=1)\n', (3348, 3363), False, 'import numpy\n'), ((4967, 4990), 'numpy.argmax', 'numpy.argmax', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (4979, 4990), False, 'import numpy\n'), ((4994, 5017), 'numpy.argmax', 'numpy.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (5006, 5017), False, 'import numpy\n'), ((1832, 1855), 'numpy.cumsum', 'numpy.cumsum', (['count_pos'], {}), '(count_pos)\n', (1844, 1855), False, 'import numpy\n'), ((1858, 1878), 'numpy.sum', 'numpy.sum', (['count_pos'], {}), '(count_pos)\n', (1867, 1878), False, 'import numpy\n'), ((1992, 2015), 'numpy.cumsum', 'numpy.cumsum', (['count_neg'], {}), '(count_neg)\n', (2004, 2015), False, 'import numpy\n'), ((2018, 2038), 'numpy.sum', 'numpy.sum', (['count_neg'], {}), '(count_neg)\n', (2027, 2038), False, 'import numpy\n'), ((6327, 6339), 'numpy.log', 'numpy.log', (['s'], {}), '(s)\n', (6336, 6339), False, 'import numpy\n')] |
import argparse as ap
import numpy as np
import os
"""
python match_labels.py --data_source_path /media/data_0/fra/gctd/Data_Sources/ramsey_nj/seed_data_source
"""
# Locate the 'labels' directory under the configured data source.
parser = ap.ArgumentParser()
parser.add_argument('--data_source_path',
                    default='C:/Users/Public/fra-gctd-project/Data_Sources/'
                            'ramsey_nj/seed_data_source')
args = parser.parse_args()
labels_dir_path = os.path.join(args.data_source_path, 'labels')
label_file_paths = [os.path.join(labels_dir_path, name)
                    for name in sorted(os.listdir(labels_dir_path))]
# Label files come in consecutive pairs; report every pair that disagrees.
num_mismatches = 0
for pair_start in range(0, len(label_file_paths), 2):
    first = np.load(label_file_paths[pair_start])
    second = np.load(label_file_paths[pair_start + 1])
    where_equal = np.equal(first, second)
    if not np.all(where_equal):
        num_mismatches += 1
        print('Index: {}, Name: {}, Mismatches: ({})'.format(
            pair_start, os.path.basename(label_file_paths[pair_start]),
            np.squeeze(np.argwhere(np.bitwise_not(where_equal)))))
print('Total mismatches: {}'.format(num_mismatches))
"numpy.load",
"argparse.ArgumentParser",
"os.path.basename",
"numpy.bitwise_not",
"numpy.equal",
"os.path.join",
"os.listdir",
"numpy.all"
] | [((175, 194), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {}), '()\n', (192, 194), True, 'import argparse as ap\n'), ((421, 466), 'os.path.join', 'os.path.join', (['args.data_source_path', '"""labels"""'], {}), "(args.data_source_path, 'labels')\n", (433, 466), False, 'import os\n'), ((487, 533), 'os.path.join', 'os.path.join', (['labels_dir_path', 'label_file_name'], {}), '(labels_dir_path, label_file_name)\n', (499, 533), False, 'import os\n'), ((692, 720), 'numpy.load', 'np.load', (['label_file_paths[i]'], {}), '(label_file_paths[i])\n', (699, 720), True, 'import numpy as np\n'), ((733, 765), 'numpy.load', 'np.load', (['label_file_paths[i + 1]'], {}), '(label_file_paths[i + 1])\n', (740, 765), True, 'import numpy as np\n'), ((781, 807), 'numpy.equal', 'np.equal', (['label_0', 'label_1'], {}), '(label_0, label_1)\n', (789, 807), True, 'import numpy as np\n'), ((822, 841), 'numpy.all', 'np.all', (['where_equal'], {}), '(where_equal)\n', (828, 841), True, 'import numpy as np\n'), ((584, 611), 'os.listdir', 'os.listdir', (['labels_dir_path'], {}), '(labels_dir_path)\n', (594, 611), False, 'import os\n'), ((955, 992), 'os.path.basename', 'os.path.basename', (['label_file_paths[i]'], {}), '(label_file_paths[i])\n', (971, 992), False, 'import os\n'), ((1023, 1050), 'numpy.bitwise_not', 'np.bitwise_not', (['where_equal'], {}), '(where_equal)\n', (1037, 1050), True, 'import numpy as np\n')] |
# Example for numObsInSurveyTimeOverlap
# <NAME> - Lehigh University
# Last edited : 10/21/2020
# Calculates number of observations during simultaneous windows of another survey.
# SurveyObsWin is the list of the survey observing window/inter-seasonal gap intervals. It should be in the format:
# SurveyObsWin = [ [YYYY-MM-DD, YYYY-MM-DD] , [YYYY-MM-DD, YYYY-MM-DD] , ... , [YYYY-MM-DD, YYYY-MM-DD] ]
import numpy as np
from astropy.time import Time
from rubin_sim.maf.metrics import BaseMetric
__all__ = ['numObsInSurveyTimeOverlapMetric']
class numObsInSurveyTimeOverlapMetric (BaseMetric):
    """Count observations that fall inside another survey's observing windows.

    ``SurveyObsWin`` is a list of [start, end] date pairs, each date given as
    a 'YYYY-MM-DD' string.
    """
    def __init__ (self, SurveyObsWin, TimeCol='observationStartMJD',metricName= 'numObsInSurveyTimeOverlapMetric', **kwargs):
        """
        Parameters
        ----------
        SurveyObsWin : list of [str, str]
            Observing window intervals as ['YYYY-MM-DD', 'YYYY-MM-DD'] pairs.
        TimeCol : str
            Name of the observation-time column (MJD).
        metricName : str
            Name reported for this metric.
        """
        self.TimeCol = TimeCol
        self.metricName = metricName
        self.SurveyObsWin = SurveyObsWin
        super(numObsInSurveyTimeOverlapMetric, self).__init__(col= TimeCol, metricName=metricName, **kwargs)
    def run (self, dataSlice, slicePoint=None):
        """Return the number of observations inside any of the windows.

        Interval edges are converted to MJD at midnight; comparisons are
        strict, so observations exactly on an edge are excluded.
        """
        N_Obs = 0
        for interval in self.SurveyObsWin :
            start_interval = Time(interval[0]+' 00:00:00')
            end_interval = Time(interval[1]+' 00:00:00')
            index = np.where ((dataSlice[self.TimeCol]> start_interval.mjd) & (dataSlice[self.TimeCol]<end_interval.mjd))[0]
            N_Obs = N_Obs + np.size(index)
        return N_Obs
| [
"astropy.time.Time",
"numpy.size",
"numpy.where"
] | [((1109, 1140), 'astropy.time.Time', 'Time', (["(interval[0] + ' 00:00:00')"], {}), "(interval[0] + ' 00:00:00')\n", (1113, 1140), False, 'from astropy.time import Time\n'), ((1166, 1197), 'astropy.time.Time', 'Time', (["(interval[1] + ' 00:00:00')"], {}), "(interval[1] + ' 00:00:00')\n", (1170, 1197), False, 'from astropy.time import Time\n'), ((1216, 1324), 'numpy.where', 'np.where', (['((dataSlice[self.TimeCol] > start_interval.mjd) & (dataSlice[self.TimeCol] <\n end_interval.mjd))'], {}), '((dataSlice[self.TimeCol] > start_interval.mjd) & (dataSlice[self.\n TimeCol] < end_interval.mjd))\n', (1224, 1324), True, 'import numpy as np\n'), ((1349, 1363), 'numpy.size', 'np.size', (['index'], {}), '(index)\n', (1356, 1363), True, 'import numpy as np\n')] |
import pysam
from math import ceil
import pandas as pd
import numpy as np
from collections import Counter
from scipy.stats import entropy
class Phaser:
    """Phase reads into clusters by recursively splitting on variants.

    Builds a splitting tree (``vTree``): starting from all reads, the
    splitter repeatedly proposes a (position, variant) that separates a
    subgroup until no group can be split further.  Leaves of the tree become
    the reported clusters; groups below the minimum size go to a noise bin.
    """
    def __init__(self,splitter,sampleName,
                 aggressive=False,log=None):
        self.sampleName = sampleName #sample name
        self.aggressive = aggressive #aggressively separate groups
        self.splitter = splitter #split generator
        self.log = log
        #################
        self.vTree = None
        self.pending = []
    def __repr__(self):
        if self.vTree:
            return str(self.vTree)
        else:
            return f'Phaser: {self.sampleName}'
    def run(self):
        '''build the tree, split all groups, then assign cluster numbers'''
        #initialize starting conditions
        if self.log:
            self.log.info('Initializing Tree')
        self._initGroups()
        #split groups
        if self.log:
            self.log.info('Splitting Groups')
        self._splitGroups()
        # identify residual node and dump in noise bin
        if self.log:
            self.log.info('Cleaning Up')
        #self._dumpResidual()
        #get clusters
        self._getClusters()
    def _initGroups(self):
        '''start splitting tree'''
        #init labeler
        self.labeler = self.indexer()
        #all reads
        rootLbl = next(self.labeler)
        rootGrp = self.splitter.readnames
        # BUGFIX: was ``pending=-True`` (== -1); truthiness was the same but
        # the literal was a typo
        root = vCluster(rootGrp,
                        rootLbl,
                        split=self.sampleName,
                        pending=True)
        #group tree
        if self.log:
            self.log.debug(f'Starting vTree with {root}. Minimum reads: {self.splitter.minCount}')
        self.vTree = vTree(root)
        #reads matching reference at all selected pos
        #refcall = self.sigVar.index[(self.sigVar == '.').all(axis=1)]
        self._updatePending()
        return
    def _updatePending(self):
        '''refresh the list of node labels that still need splitting'''
        self.pending = sorted([node.label
                               for node in self.vTree.nodes.values()
                               if node.pending],
                              reverse=True)
        if self.log:
            self.log.debug(f'Pending nodes: {self.pending}')
    def _splitGroups(self):
        '''repeatedly try to split pending nodes until none remain'''
        #func to det if big enough to split
        if self.aggressive:
            canSplit = (lambda t: len(t) > self.splitter.minCount)
        else:
            canSplit = (lambda t: len(t) >= 2*self.splitter.minCount)
        while len(self.pending):
            label = self.pending.pop()
            self.vTree[label].pending = False
            testSet = set(self.vTree[label].reads)
            subset,pos,vnt = self.splitter.split(testSet)
            if self.log and subset is not None:
                self.log.debug(f'{self.vTree[label]} split: {len(subset)},{pos},{vnt}')
            if subset is None:
                continue
                #self.vTree[label].pending = False
            else:
                #joined by variant
                newGroup = vCluster(subset,
                                   next(self.labeler),
                                   parent =label,
                                   split  =(pos,vnt),
                                   pending=canSplit(subset))
                self.vTree.append(newGroup)
                #leftovers
                complement = testSet.difference(subset)
                remainder = vCluster(complement,
                                    next(self.labeler),
                                    parent =label,
                                    split  =(pos,'.'),
                                    pending=canSplit(complement))
                self.vTree.append(remainder)
            self._updatePending()
        return
    def _isRefCall(self,lbl):
        '''True if the plurality call of the node's reads is reference at every position'''
        plrty = self.splitter.sigVar.reindex(self.vTree[lbl].reads)\
                    .apply(pd.Series.value_counts).fillna(0).idxmax()
        return (plrty.astype(str)== '.').all()
    def _getClusters(self):
        '''map of node label -> cluster'''
        #check for refcall -- all '.' variants for alignment method. passes through for dbg meth
        offset = 0
        self.node2cluster = {}
        isRefcall = self._isRefCall if hasattr(self.splitter,'refFasta') else (lambda lbl: False)
        for lbl in self.vTree.leaves:
            leaf = self.vTree[lbl]
            if len(leaf) < self.splitter.minCount:
                #dump in noise bin
                if self.log:
                    self.log.debug(f'{leaf} has fewer than {self.splitter.minCount} reads. Labelled as "noise"(-1)')
                self.vTree.noise.update(leaf.reads)
                leaf.setNoise()
            elif isRefcall(lbl):
                #set as first
                leaf.setRefCall()
                self.node2cluster[lbl] = offset
                offset += 1
        #check if refcall was found, else increase offset if not found but there is one
        if offset == 0 and hasattr(self.splitter,'refFasta') and not self.splitter.refFasta is None:
            offset += 1
        #clusters sorted by size, descending
        sortedLeaves = sorted(filter(lambda n: not (self.vTree[n]._isNoise or self.vTree[n]._isRefCall),
                                     self.vTree.leaves),
                              key=lambda n:-len(self.vTree[n]))
        self.node2cluster.update({node : idx + offset
                                  for idx,node in enumerate(sortedLeaves)})
        #update nodes
        for node,clust in self.node2cluster.items():
            self.vTree[node].set_cluster(clust)
        return
    def _getPlurality(self,vTable):
        '''most frequent value per column of a variant table'''
        return vTable.apply(pd.Series.value_counts)\
                     .fillna(0).idxmax()
    def summary(self):
        '''return summary of results in dataframe format'''
        #most common call by cluster/group
        # BUGFIX: sigVar lives on the splitter (see _isRefCall); ``self.sigVar``
        # was never assigned and raised AttributeError here.
        plurality = self.splitter.sigVar.groupby(self.clusterMap).apply(self._getPlurality)
        #remove columns with all refcall (.) or del-continue (*)
        varCols = plurality.columns[~plurality.isin(['.','*']).all(axis=0)]
        #new table
        summary = plurality[varCols].copy()
        countMap = Counter(self.clusterMap.values())
        summary['nReads'] = summary.index.map(countMap)
        summary.index = np.where(summary.index >= 0,
                                 'cluster' + summary.index.astype(str),
                                 'ResidualNoise')
        summary.index.name = 'Group'
        summary.columns.name = 'RefPos'
        return summary
    @property
    def clusterMap(self):
        '''dict of {readname:cluster}'''
        clustMap = {name:-1 for name in self.vTree.noise}
        for node,clust in self.node2cluster.items():
            clustMap.update({name:clust for name in self.vTree[node].reads})
        return clustMap
    def indexer(self,start=0):
        '''infinite label generator: start, start+1, ...'''
        i=start
        while True:
            yield i
            i+=1
class vCluster:
    """A node in the splitting tree: a labeled group of read names."""

    def __init__(self, reads, label, parent=None, split=None, pending=True):
        self.label = label
        self.reads = reads
        self.parent = parent     # label of the parent node, None for the root
        self.split = split       # (position, variant) that produced this group
        self.children = []
        self.pending = pending   # still eligible for further splitting
        # private bookkeeping set during cluster assignment
        self._isLeaf = False
        self._cluster = None
        self._isNoise = False
        self._isRefCall = False

    def set_cluster(self, cluster):
        """Mark this node as a leaf belonging to the given cluster."""
        self._cluster = cluster
        self._isLeaf = True

    def setNoise(self):
        """Flag this node's reads as residual noise."""
        self._isNoise = True

    def setRefCall(self):
        """Flag this node as the reference-call group (always cluster 0)."""
        self._isRefCall = True
        self._cluster = 0

    def clusterName(self):
        """Return a human-readable cluster identifier."""
        assert self._cluster is not None
        return f'cluster{self._cluster}_numreads{len(self.reads)}'

    def __len__(self):
        return len(self.reads)

    def __repr__(self):
        base = f'Node {self.label} ({len(self.reads)} reads)'
        if self._isNoise:
            return base + ' Noise'
        if self._isRefCall:
            return base + f' Cluster {self._cluster} (refcall)'
        if self._isLeaf and self._cluster is not None:
            return base + f' Cluster {self._cluster}'
        return base
class vTree:
    """Tree of read-group nodes produced by successive variant splits."""

    def __init__(self, root):
        if root.parent is not None:
            raise Phaser_Error('root parent must = None')
        self.root = root
        self.label = root.label
        self.size = len(root)
        self.leaves = [root.label]
        self.nodes = {root.label: root}
        self.noise = set()

    def append(self, other):
        """Attach a node under its parent and keep the leaf list current."""
        if other.parent is None or other.parent not in self.nodes:
            raise Phaser_Error('invalid parent')
        if other.split is None:
            raise Phaser_Error('please set split value')
        self.nodes[other.label] = other
        parent = self.nodes[other.parent]
        if not parent.children:
            # parent gains its first child and stops being a leaf
            self.leaves.remove(parent.label)
        self.leaves.append(other.label)
        parent.children.append(other.label)

    def getSplits(self, label):
        """Return the (pos, variant) splits on the path from the root to ``label``."""
        if label not in self.nodes:
            raise Phaser_Error('invalid label')
        splits = []
        current = label
        while current:
            node = self.nodes[current]
            splits.append(node.split)
            current = node.parent
        return splits[::-1]

    def isRefcall(self, label):
        """Identify refcall group -- reads that did not split out by any variants."""
        splits = self.getSplits(label)
        if not splits:
            return False
        return np.all([variant == '.' for _, variant in splits]) and not self[label].children

    def _splitStr(self, label):
        """Join splits for output."""
        splits = self.getSplits(label)
        splits.insert(0, 'all')
        return ' -> '.join(map(str, splits))

    def __getitem__(self, node):
        return self.nodes[node]

    def __iter__(self):
        yield from self.nodes.values()

    def __repr__(self):
        ordered = sorted(self.leaves)
        return '\n'.join(f'{str(self[lbl])} : {self._splitStr(lbl)}' for lbl in ordered)
class Phaser_Error(Exception):
    """Raised for invalid operations on the phasing tree."""
| [
"numpy.all"
] | [((9664, 9700), 'numpy.all', 'np.all', (["[(s == '.') for p, s in spl]"], {}), "([(s == '.') for p, s in spl])\n", (9670, 9700), True, 'import numpy as np\n')] |
import numpy as np
class Initializer():
    """Strategy interface for creating neural-network parameters."""

    def fill(self, layer_dims):
        """Build parameters for the given layer sizes; subclasses must override."""
        raise NotImplementedError()
class RandomInit(Initializer):
    """Small random normal weights (scaled by 0.01) and zero biases."""

    def fill(self, layer_dims):
        np.random.seed(1)
        parameters = {}
        num_layers = len(layer_dims)  # number of layers in the network
        for layer in range(1, num_layers):
            rows, cols = layer_dims[layer], layer_dims[layer - 1]
            parameters['W' + str(layer)] = np.random.randn(rows, cols) * 0.01
            parameters['b' + str(layer)] = np.zeros((rows, 1))
            assert (parameters['W' + str(layer)].shape == (rows, cols))
            assert (parameters['b' + str(layer)].shape == (rows, 1))
        return parameters
class HeInit(Initializer):
    """He initialization: normal weights scaled by sqrt(2 / fan_in), zero biases."""

    def fill(self, layer_dims):
        np.random.seed(1)
        parameters = {}
        num_layers = len(layer_dims)  # number of layers in the network
        for layer in range(1, num_layers):
            rows, cols = layer_dims[layer], layer_dims[layer - 1]
            parameters['W' + str(layer)] = np.random.randn(rows, cols) * np.sqrt(2 / cols)
            parameters['b' + str(layer)] = np.zeros((rows, 1))
            assert (parameters['W' + str(layer)].shape == (rows, cols))
            assert (parameters['b' + str(layer)].shape == (rows, 1))
        return parameters
class XavierInit(Initializer):
    """Xavier initialization: normal weights scaled by sqrt(1 / fan_in), zero biases."""

    def fill(self, layer_dims):
        np.random.seed(1)
        parameters = {}
        num_layers = len(layer_dims)  # number of layers in the network
        for layer in range(1, num_layers):
            rows, cols = layer_dims[layer], layer_dims[layer - 1]
            parameters['W' + str(layer)] = np.random.randn(rows, cols) * np.sqrt(1 / cols)
            parameters['b' + str(layer)] = np.zeros((rows, 1))
            assert (parameters['W' + str(layer)].shape == (rows, cols))
            assert (parameters['b' + str(layer)].shape == (rows, 1))
        return parameters
| [
"numpy.zeros",
"numpy.sqrt",
"numpy.random.seed",
"numpy.random.randn"
] | [((184, 201), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (198, 201), True, 'import numpy as np\n'), ((744, 761), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (758, 761), True, 'import numpy as np\n'), ((1335, 1352), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1349, 1352), True, 'import numpy as np\n'), ((455, 483), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (463, 483), True, 'import numpy as np\n'), ((1041, 1069), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (1049, 1069), True, 'import numpy as np\n'), ((1632, 1660), 'numpy.zeros', 'np.zeros', (['(layer_dims[l], 1)'], {}), '((layer_dims[l], 1))\n', (1640, 1660), True, 'import numpy as np\n'), ((359, 408), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (374, 408), True, 'import numpy as np\n'), ((919, 968), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (934, 968), True, 'import numpy as np\n'), ((971, 1001), 'numpy.sqrt', 'np.sqrt', (['(2 / layer_dims[l - 1])'], {}), '(2 / layer_dims[l - 1])\n', (978, 1001), True, 'import numpy as np\n'), ((1510, 1559), 'numpy.random.randn', 'np.random.randn', (['layer_dims[l]', 'layer_dims[l - 1]'], {}), '(layer_dims[l], layer_dims[l - 1])\n', (1525, 1559), True, 'import numpy as np\n'), ((1562, 1592), 'numpy.sqrt', 'np.sqrt', (['(1 / layer_dims[l - 1])'], {}), '(1 / layer_dims[l - 1])\n', (1569, 1592), True, 'import numpy as np\n')] |
"""Exponential distribution
"""
import numpy as np
from scipy.stats import expon
from xgboost_distribution.distributions.base import BaseDistribution
from xgboost_distribution.distributions.utils import check_is_ge_zero
class Exponential(BaseDistribution):
    """Exponential distribution with log score.

    Definition:

        f(x) = 1 / scale * e^(-x / scale)

    We reparameterize scale -> log(scale) = a to ensure scale >= 0. Gradient:

        d/da -log[f(x)] = d/da -log[1/e^a e^(-x / e^a)]
                        = 1 - x e^-a
                        = 1 - x / scale

    The Fisher information = 1 / scale^2; after reparameterization:

        1 / scale^2 = I ( d/d(scale) log(scale) )^2 = I ( 1 / scale )^2

    hence we find: I = 1.
    """

    @property
    def params(self):
        # single distribution parameter, learned on the log scale
        return ("scale",)

    def check_target(self, y):
        # the exponential distribution is supported on [0, inf)
        check_is_ge_zero(y)

    def gradient_and_hessian(self, y, params, natural_gradient=True):
        """Gradient and diagonal hessian of the negative log-likelihood."""
        (scale,) = self.predict(params)

        n_samples = len(y)
        grad = np.zeros(shape=(n_samples, 1))
        grad[:, 0] = 1 - y / scale

        if natural_gradient:
            # Fisher information is identically 1 (see docstring), so this
            # solve leaves grad numerically unchanged; the hessian is held
            # constant in the natural-gradient case.
            fisher_matrix = np.ones(shape=(n_samples, 1, 1))
            grad = np.linalg.solve(fisher_matrix, grad)
            hess = np.ones(shape=(n_samples, 1))
        else:
            hess = -(grad - 1)

        return grad, hess

    def loss(self, y, params):
        """Negative mean log-likelihood under the predicted scale."""
        scale = self.predict(params)
        neg_log_lik = -expon.logpdf(y, scale=scale).mean()
        return "ExponentialError", neg_log_lik

    def predict(self, params):
        """Map raw (log-scale) params to the distribution's scale parameter."""
        scale = np.exp(params)
        return self.Predictions(scale=scale)

    def starting_params(self, y):
        # initialize log(scale) from the sample mean of the targets
        return (np.log(np.mean(y)),)
| [
"scipy.stats.expon.logpdf",
"numpy.mean",
"numpy.exp",
"xgboost_distribution.distributions.utils.check_is_ge_zero",
"numpy.linalg.solve"
] | [((855, 874), 'xgboost_distribution.distributions.utils.check_is_ge_zero', 'check_is_ge_zero', (['y'], {}), '(y)\n', (871, 874), False, 'from xgboost_distribution.distributions.utils import check_is_ge_zero\n'), ((1620, 1637), 'numpy.exp', 'np.exp', (['log_scale'], {}), '(log_scale)\n', (1626, 1637), True, 'import numpy as np\n'), ((1218, 1254), 'numpy.linalg.solve', 'np.linalg.solve', (['fisher_matrix', 'grad'], {}), '(fisher_matrix, grad)\n', (1233, 1254), True, 'import numpy as np\n'), ((1741, 1751), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1748, 1751), True, 'import numpy as np\n'), ((1509, 1537), 'scipy.stats.expon.logpdf', 'expon.logpdf', (['y'], {'scale': 'scale'}), '(y, scale=scale)\n', (1521, 1537), False, 'from scipy.stats import expon\n')] |
import numpy

# Demo: cumulative (running) sum of an array, computed in place.
arrayIn = numpy.arange(6)
print(arrayIn)

# numpy.cumsum with out= writes the running sum back into the array in a
# single vectorized call, replacing the original element-by-element
# nditer read-write loop (the manual equivalent of cumsum).
numpy.cumsum(arrayIn, out=arrayIn)
print(arrayIn)
| [
"numpy.nditer",
"numpy.arange"
] | [((24, 39), 'numpy.arange', 'numpy.arange', (['(6)'], {}), '(6)\n', (36, 39), False, 'import numpy\n'), ((60, 105), 'numpy.nditer', 'numpy.nditer', (['arrayIn'], {'op_flags': "['readwrite']"}), "(arrayIn, op_flags=['readwrite'])\n", (72, 105), False, 'import numpy\n')] |
# --------------
# Importing header files
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.stats.weightstats import ztest
from scipy.stats import chi2_contingency
import warnings

warnings.filterwarnings('ignore')

# Sample size
sample_size = 2000

# z critical score for a 95% confidence level
z_critical = stats.norm.ppf(q=0.95)

# Chi-square critical value for a 95% confidence level;
# df = number of variable categories (in purpose) - 1
critical_value = stats.chi2.ppf(q=0.95, df=6)

# Reading file (`path` is expected to be defined by the hosting environment)
data = pd.read_csv(path)

# Code starts here
data_sample = data.sample(n=sample_size, random_state=0)
print(data_sample.shape)

# population mean
true_mean = data['installment'].mean()

# population standard deviation
# (the original computed this from the sample by mistake; the margin of
# error below needs the population spread)
true_std = data['installment'].std()

# sample mean
sample_mean = data_sample['installment'].mean()

# sample standard deviation
sample_std = data_sample['installment'].std()

# margin of error
margin_of_error = z_critical * (true_std / np.sqrt(sample_size))
print('Margin of error is', margin_of_error)
print('==' * 30)

# confidence interval
confidence_interval = [round(sample_mean - margin_of_error, 2),
                       round(sample_mean + margin_of_error, 2)]
print('Confidence Interval is ', confidence_interval)
print('==' * 30)
print('True mean is', true_mean)
print('==' * 30)

# Central Limit Theorem for the installment column: draw `numofsample`
# samples of each size in `samplesize` and collect their means.
print('Central Limit Theorem for installment column')
# number of samples per size
numofsample = 1000
# sample sizes to demonstrate
samplesize = [20, 50, 100]
array_1 = []
for _ in range(numofsample):
    for size in samplesize:
        # the original always sampled n=100, ignoring the size being iterated
        array_1.append(data['installment'].sample(n=size, replace=True).mean())

plt.hist(array_1, bins=100)
plt.xlabel('installment')
plt.ylabel('frequency')
plt.title('Histogram of Installment frequency')
plt.axvline(x=np.mean(array_1), color='r')
print('==' * 30)

# One-sided z-test: is the small-business interest rate larger than the
# overall mean interest rate?
print(' Performing the Z-test on int.rate')
data['int.rate'] = data['int.rate'].map(lambda x: x.split('%')[0])
data['int.rate'] = data['int.rate'].astype(float)
df_ = data[data['purpose'] == 'small_business']
value_ = data['int.rate'].mean()
z_statistic_1, p_value_1 = ztest(df_['int.rate'], value=value_, alternative='larger')
print('z-test is ', z_statistic_1)
print('==' * 30)
print('p-value is', p_value_1)
print('==' * 30)

# Two-sided z-test: do installments differ between paid and unpaid loans?
print('Performing the two-sided the z-test on installments')
installment_paid_mean = data[data['paid.back.loan'] == 'Yes']['installment']
installment_notpaid_mean = data[data['paid.back.loan'] == 'No']['installment']
z_statistic_2, p_value_2 = ztest(x2=installment_paid_mean, x1=installment_notpaid_mean,
                                 alternative='two-sided')
print('z-test is ', z_statistic_2)
print('==' * 30)
print('p-value is', p_value_2)
print('==' * 30)

# Chi-square test of independence between loan purpose and default
print('Performing chi-test on purpose and loan default')
yes = data[data['paid.back.loan'] == 'Yes']['purpose'].value_counts()
no = data[data['paid.back.loan'] == 'No']['purpose'].value_counts()
# Concatenating yes and no into a single contingency table
# (axis must be passed by keyword in current pandas)
observed = pd.concat([yes.transpose(), no.transpose()], axis=1, keys=['Yes', 'No'])
print(observed)
chi2, p, dof, ex = chi2_contingency(observed)
| [
"scipy.stats.norm.ppf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.hist",
"warnings.filterwarnings",
"pandas.read_csv",
"scipy.stats.chi2.ppf",
"numpy.mean",
"scipy.stats.chi2_contingency",
"statsmodels.stats.weightstats.ztest",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy... | [((319, 352), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (342, 352), False, 'import warnings\n'), ((420, 442), 'scipy.stats.norm.ppf', 'stats.norm.ppf', ([], {'q': '(0.95)'}), '(q=0.95)\n', (434, 442), True, 'import scipy.stats as stats\n'), ((485, 513), 'scipy.stats.chi2.ppf', 'stats.chi2.ppf', ([], {'q': '(0.95)', 'df': '(6)'}), '(q=0.95, df=6)\n', (499, 513), True, 'import scipy.stats as stats\n'), ((667, 684), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (678, 684), True, 'import pandas as pd\n'), ((1996, 2023), 'matplotlib.pyplot.hist', 'plt.hist', (['array_1'], {'bins': '(100)'}), '(array_1, bins=100)\n', (2004, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2027, 2052), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""installment"""'], {}), "('installment')\n", (2037, 2052), True, 'import matplotlib.pyplot as plt\n'), ((2054, 2077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (2064, 2077), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2126), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of Installment frequency"""'], {}), "('Histogram of Installment frequency')\n", (2088, 2126), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2522), 'statsmodels.stats.weightstats.ztest', 'ztest', (["df_['int.rate']"], {'value': 'value_', 'alternative': '"""larger"""'}), "(df_['int.rate'], value=value_, alternative='larger')\n", (2469, 2522), False, 'from statsmodels.stats.weightstats import ztest\n'), ((2867, 2957), 'statsmodels.stats.weightstats.ztest', 'ztest', ([], {'x2': 'installment_paid_mean', 'x1': 'installment_notpaid_mean', 'alternative': '"""two-sided"""'}), "(x2=installment_paid_mean, x1=installment_notpaid_mean, alternative=\n 'two-sided')\n", (2872, 2957), False, 'from statsmodels.stats.weightstats import ztest\n'), ((3411, 3437), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['observed'], {}), 
'(observed)\n', (3427, 3437), False, 'from scipy.stats import chi2_contingency\n'), ((1123, 1143), 'numpy.sqrt', 'np.sqrt', (['sample_size'], {}), '(sample_size)\n', (1130, 1143), True, 'import numpy as np\n'), ((2142, 2158), 'numpy.mean', 'np.mean', (['array_1'], {}), '(array_1)\n', (2149, 2158), True, 'import numpy as np\n')] |
#
# Copyright (c) Microsoft Corporation.
#
#
# Methods for compressing a network using an ensemble of interpolants
#
import sys
import numpy as np
from .model_mgr import DataModel
from .bayesitp import interpolant, params as itp_params
from .error import BadParameter,Error, filter_params
from .itp import output_category_predicate
def compress(name, **kwargs):
    """Cover the inputs of one output category with an interpolant ensemble.

    Loads the data model `name`, evaluates the output-category predicate
    over the (sampled) training set, then repeatedly computes an
    interpolant at layer `layers[-1]` for the inputs not yet covered,
    until every input satisfying the predicate is covered. Keyword
    arguments override the model's stored parameters (e.g. size, layers,
    category).

    Raises BadParameter if no non-empty 'layers' parameter is available.
    """
    data_model = DataModel()
    data_model.load(name)
    params = data_model.params.copy()
    params.update(kwargs)
    size = params.get('size', len(data_model.x_train))
    layers = params.get('layers', [])
    category = params.get('category', 0)
    if not layers:
        raise BadParameter('layers')
    l1 = layers[-1]
    data_model.set_sample_size(size)
    conc = output_category_predicate(data_model, category)
    print('conc: {}'.format(conc))
    # keep only the parameters the interpolant routine understands
    params = filter_params(params, itp_params)
    model = data_model._train_eval
    inputs = model.data
    # boolean mask of training inputs still needing coverage
    pred = conc.eval(data_model._train_eval)
    itps = []
    while pred.any():
        print('Remaining: {}'.format(np.count_nonzero(pred)))
        # restrict to the uncovered inputs and interpolate over them
        A = np.compress(pred, inputs, axis=0)
        itp, stats = interpolant(data_model, l1, A, conc, **params)
        itps.append(itp)
        # drop the inputs this interpolant already covers
        pred = np.logical_and(pred, np.logical_not(itp.eval(model)))
    print('Interpolants:')
    for itp in itps:
        print(itp)
def main():
    # Command-line entry point: the model name is the last argument; any
    # earlier "option=value" arguments are accepted per the usage string
    # but not parsed here -- compress is always called with size=20000.
    if len(sys.argv) <= 2:
        print ('usage: nnitp compress option=value ... model_name')
        exit(1)
    name = sys.argv[-1]
    try:
        # compress with a fixed training-sample size of 20000
        compress(name,size=20000)
    except Error as err:
        # report domain errors (e.g. BadParameter) and fail
        print (err)
        exit(1)

if __name__ == '__main__':
    main()
| [
"numpy.compress",
"numpy.count_nonzero"
] | [((1653, 1686), 'numpy.compress', 'np.compress', (['pred', 'inputs'], {'axis': '(0)'}), '(pred, inputs, axis=0)\n', (1664, 1686), True, 'import numpy as np\n'), ((1612, 1634), 'numpy.count_nonzero', 'np.count_nonzero', (['pred'], {}), '(pred)\n', (1628, 1634), True, 'import numpy as np\n'), ((1319, 1341), 'numpy.count_nonzero', 'np.count_nonzero', (['pred'], {}), '(pred)\n', (1335, 1341), True, 'import numpy as np\n')] |
from __future__ import print_function, division
import re
import sys
import os
import gensim.models
from gensim.test.utils import datapath, get_tmpfile
from gensim.scripts.glove2word2vec import glove2word2vec
import argparse
import json
import numpy as np
import scipy.sparse
import warnings
from sklearn.decomposition import PCA
if sys.version_info[0] < 3:
import io
open = io.open
else:
unicode = str
# CONSTANTS
BOLD = '\033[1m' # add this string to start printing in bold
END = '\033[0m' # add this string to start printing normally
BLUE = '\033[94m' # add this string to start printing in blue
RED = '\033[31m' # add this string to start printing in red
GREEN = '\033[92m' # add this string to start printing in green
"""
WordEmbedding Class definition
"""
class WordEmbedding:
    """A word embedding loaded from a word2vec-format file.

    Attributes:
        words: filtered vocabulary, in the order the model lists it
        vecs: float32 array, one (possibly normalized) vector per word
        index: word -> row index into `vecs`
        n: number of words
    """

    def __init__(self, fname, em_limit):
        """Load up to `em_limit` vectors from `fname`.

        A ".bin" extension selects word2vec binary format; anything else
        is read as text.
        """
        # info
        print("*** Reading data from " + fname)
        # check file extension: if .bin, read the binary format
        binary = fname[-3:] == "bin"
        # load model
        model = gensim.models.KeyedVectors.load_word2vec_format(fname, binary=binary, limit=em_limit)
        assert (model is not None)
        # filter words as specified in the paper and store in a list
        self.words = [w for w in model.vocab if self.word_filter(w)]
        print("Number of words: ", len(self.words))
        # extract the word vectors in the same order as self.words
        self.vecs = np.array([model[w] for w in self.words], dtype='float32')
        self.reindex()
        # normalize all vectors unless they are already (close to) unit norm
        norms = np.linalg.norm(self.vecs, axis=1)
        if max(norms) - min(norms) > 0.0001:
            self.normalize()

    def word_filter(self, word):
        """Return `word` if it passes the paper's vocabulary filter.

        Keeps lowercase words shorter than 20 characters that contain no
        digits or non-word characters; returns None otherwise.
        """
        if len(word) < 20 and word.islower() and not bool(re.search(r'\W|[0-9]', word)):
            return word

    def normalize(self):
        """Scale every vector to unit Euclidean norm, then reindex."""
        self.vecs /= np.linalg.norm(self.vecs, axis=1)[:, np.newaxis]
        self.reindex()

    def reindex(self):
        """Rebuild the word -> row lookup and the word count."""
        self.index = {w: i for i, w in enumerate(self.words)}
        self.n, _ = self.vecs.shape
        # all views of the vocabulary must agree
        assert self.n == len(self.words) == len(self.index)

    def v(self, word):
        """Return the vector for `word` (KeyError if absent)."""
        return self.vecs[self.index[word]]

    def diff(self, w_1, w_2):
        """Return the unit vector along v(w_1) - v(w_2)."""
        v_diff = self.v(w_1) - self.v(w_2)
        return v_diff / np.linalg.norm(v_diff)

    def save_w2v(self, filename, ext):
        """Write the embedding to `filename` in word2vec format.

        `ext` selects the payload encoding: "bin" for raw binary vectors
        (less memory, not human readable), "txt" for text.
        """
        with open(filename, 'wb') as fout:
            # header: token count and vector size, per the word2vec spec
            fout.write(to_utf8("%s %s\n" % self.vecs.shape))
            # store in sorted order: most frequent words at the top
            for i, word in enumerate(self.words):
                if ext == "bin":
                    # ndarray.tostring() was deprecated and removed in
                    # NumPy 2.0; tobytes() is the byte-identical replacement
                    fout.write(to_utf8(word) + b" " + self.vecs[i].tobytes())
                elif ext == "txt":
                    fout.write(to_utf8("%s %s\n" % (word, " ".join([str(j) for j in self.vecs[i]]))))
"""
Additional functions
"""
def doPCA(pairs, embedding, num_components = 10):
    """Fit a PCA on the pair-centered vectors of the given word pairs.

    Each pair contributes both of its word vectors, each shifted by the
    pair's midpoint, so the leading components capture the directions in
    which the pairs differ.
    """
    shifted = []
    for w_a, w_b in pairs:
        v_a, v_b = embedding.v(w_a), embedding.v(w_b)
        # midpoint of the pair
        midpoint = (v_a + v_b) / 2
        # both vectors, expressed relative to the midpoint
        shifted.append(v_a - midpoint)
        shifted.append(v_b - midpoint)
    # fit PCA on the stacked, centered vectors
    pca = PCA(n_components = num_components)
    pca.fit(np.array(shifted))
    return pca
def drop(u, v, s):
    """Remove from `u` its component along `v`, scaled by `s`.

    With s = 1/(v . v) this projects `u` onto the complement of `v`.
    """
    return u - u.dot(v) * s * v
def to_utf8(text, errors='strict', encoding='utf8'):
    """Convert a string (unicode or bytestring in `encoding`), to bytestring in utf8."""
    if not isinstance(text, unicode):
        # bytestring -> unicode round trip validates the input bytes
        text = unicode(text, encoding, errors=errors)
    return text.encode('utf8')
def debias(E, gender_specific_words, definitional, equalize, num_components):
    """Remove the gender subspace from embedding E, in place.

    E: WordEmbedding to modify.
    gender_specific_words: words whose vectors are left untouched.
    definitional: word pairs defining the gender direction(s).
    equalize: word pairs to re-center symmetrically about the axis.
    num_components: number of PCA components used as the gender subspace.
    """
    # gender axis: principal components of the pair-centered definitional vectors
    gender_subspace = doPCA(definitional, E, num_components).components_
    # remove the top 'num_components' gender directions
    for gender_direction in gender_subspace[0:num_components]:
        # projection scale factor 1/(d . d)
        scaling = 1/gender_direction.dot(gender_direction)
        # init mask of words to leave untouched
        marks = np.zeros(len(E.words), dtype=bool)
        # mark every gender-specific word present in the vocabulary
        for w in gender_specific_words:
            if w in E.index:
                marks[E.index[w]] = True
        i = 0
        for w in E.words:
            # project the gender component out of every unmarked vector
            if not marks[i]:
                E.vecs[i] = drop(E.vecs[i], gender_direction, scaling)
            i += 1
        # re-normalize after removing each direction
        E.normalize()
    # equalize pairs in lower, title and upper case
    # NOTE(review): the equalize step below uses the last gender_direction
    # and scaling left over from the loop above -- confirm this is the
    # intended behavior when num_components > 1.
    lower = map(lambda x : (x[0].lower(), x[1].lower()), equalize)
    title = map(lambda x : (x[0].title(), x[1].title()), equalize)
    upper = map(lambda x : (x[0].upper(), x[1].upper()), equalize)
    for candidates in [lower, title, upper]:
        for (a, b) in candidates:
            # only equalize pairs fully present in the vocabulary
            if (a in E.index and b in E.index):
                # midpoint of the pair with the gender component removed
                y = drop((E.v(a) + E.v(b)) / 2, gender_direction, scaling)
                # distance to move along the gender axis to restore unit norm
                z = np.sqrt(1 - np.linalg.norm(y)**2)
                # orient z so each word stays on its own side of the axis
                if (E.v(a) - E.v(b)).dot(gender_direction) < 0:
                    z = -z
                # place the pair symmetrically about the axis
                E.vecs[E.index[a]] = z * gender_direction + y
                E.vecs[E.index[b]] = -z * gender_direction + y
    E.normalize()
def project_professions(args, E, before=True):
    """Print the professions with the most extreme projections on a word axis.

    Active only when args.load_profs is set. The axis is defined by
    args.axis_profs as "word1-word2"; args.n_profs rows are printed for
    each end of the axis. `before` only changes the printed header.
    """
    # if professions are being projected
    if args.load_profs:
        # the two words defining the projection axis
        w_axis = args.axis_profs.split("-")
        # unit vector from word2 toward word1
        v_axis = E.diff(w_axis[0], w_axis[1])
        # professions sorted ascending by their projection onto the axis
        p_profs = sorted([(E.v(w).dot(v_axis), w) for w in args.profs if w in E.index])
        # number of projections to print, clamped to what is available
        num = args.n_profs
        if num > len(p_profs):
            num = len(p_profs)
        # lowest projections: the w_axis[1] end of the axis
        extreme_1 = p_profs[0:num]
        # highest projections: the w_axis[0] end, most extreme first
        extreme_2 = p_profs[-num:]
        extreme_2 = extreme_2[::-1]
        # header: red before debiasing, green after
        if before:
            print("%c%s%s%s%s" % ('\n', BOLD, RED, " Before debiasing", END))
        else:
            print("%c%s%s%s%s" % ('\n', BOLD, GREEN, " After debiasing", END))
        # column titles, one per end of the axis
        print(" %s%s%s %s\t%s %s%s" % (BOLD, BLUE, w_axis[0], "extreme".ljust(15), w_axis[1], "extreme", END))
        # column width used to align the two lists
        tab = len(w_axis[0]) + 15
        for i in range(len(extreme_1)):
            print("%s%d.%s %s\t%s" % (BOLD, i + 1, END, extreme_2[i][1].ljust(tab), extreme_1[i][1]))
        print("\n")
def main(args):
    """Load word lists and an embedding, debias it, and save both versions."""
    # read definitional pairs (these define the gender direction)
    with open(args.def_fn, "r") as f:
        defs = json.load(f)
    # read equalizing pairs
    with open(args.eq_fn, "r") as f:
        equalize_pairs = json.load(f)
    # read gender specific words (left untouched by debiasing)
    with open(args.g_words_fn, "r") as f:
        gender_specific_words = json.load(f)
    if args.load_profs:
        # read professions list; each entry's first element is the word
        with open(args.profs, "r") as f:
            professions = json.load(f)
        args.profs = [p[0] for p in professions]
    # create word embedding
    E = WordEmbedding(args.i_em, args.em_limit)
    # dump vectors prior to debiasing
    print("Saving biased vectors to file...")
    E.save_w2v(args.bias_o_em, args.o_ext)
    project_professions(args, E, True)
    # debias in place
    print("Debiasing...")
    debias(E, gender_specific_words, defs, equalize_pairs, args.n_comp)
    # dump debiased vectors to file
    print("Saving debiased vectors to file...")
    E.save_w2v(args.debias_o_em, args.o_ext)
    project_professions(args, E, False)
    print("Done!\n")
if __name__ == "__main__":
    # Command-line interface; every argument has a default matching the
    # repository's expected data layout.
    parser = argparse.ArgumentParser()
    # embedding input
    parser.add_argument("--em_limit", type=int, default=50000, help="number of words to load")
    parser.add_argument("--i_em", default="../embeddings/GoogleNews-vectors-negative300.bin", help="The name of the embedding")
    # word lists driving the debiasing
    parser.add_argument("--def_fn", help="JSON of definitional pairs", default="../data/definitional_pairs.json")
    parser.add_argument("--g_words_fn", help="File containing words not to neutralize (one per line)", default="../data/gender_specific_full.json")
    parser.add_argument("--eq_fn", help="JSON with equalizing pairs", default="../data/equalize_pairs.json")
    # optional profession projection report
    # NOTE(review): argparse's type=bool treats any non-empty string as
    # True, so "--load_profs False" still enables the report.
    parser.add_argument("--load_profs", type=bool, help="Flag for loading professions", default=False)
    parser.add_argument("--profs", help="JSON with list of professions", default="../data/professions.json")
    parser.add_argument("--axis_profs", help="Projection axis for professions. Examples: she-he, softball-football etc. Format is word1-word2", default="softball-football")
    parser.add_argument("--n_profs", type=int, help="Number of most extreme professions to print", default=5)
    # outputs
    parser.add_argument("--debias_o_em", help="Output debiased embeddings file", default="../embeddings/debiased.bin")
    parser.add_argument("--bias_o_em", help="Output bieased embeddings file", default="../embeddings/biased.bin")
    parser.add_argument("--o_ext", help="Extension of output file [txt, bin]", default="bin")
    parser.add_argument("--n_comp", type=int, help="number of components for PCA", default=10)
    args = parser.parse_args()
    # get rid of annoying warning
    warnings.filterwarnings("ignore")
    main(args)
| [
"json.load",
"argparse.ArgumentParser",
"warnings.filterwarnings",
"numpy.array",
"numpy.linalg.norm",
"sklearn.decomposition.PCA",
"re.search"
] | [((3649, 3681), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'num_components'}), '(n_components=num_components)\n', (3652, 3681), False, 'from sklearn.decomposition import PCA\n'), ((8021, 8046), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8044, 8046), False, 'import argparse\n'), ((9581, 9614), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (9604, 9614), False, 'import warnings\n'), ((1451, 1508), 'numpy.array', 'np.array', (['[model[w] for w in self.words]'], {'dtype': '"""float32"""'}), "([model[w] for w in self.words], dtype='float32')\n", (1459, 1508), True, 'import numpy as np\n'), ((1599, 1632), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vecs'], {'axis': '(1)'}), '(self.vecs, axis=1)\n', (1613, 1632), True, 'import numpy as np\n'), ((3706, 3722), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (3714, 3722), True, 'import numpy as np\n'), ((7101, 7113), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7110, 7113), False, 'import json\n'), ((7193, 7205), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7202, 7205), False, 'import json\n'), ((7302, 7314), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7311, 7314), False, 'import json\n'), ((2075, 2108), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vecs'], {'axis': '(1)'}), '(self.vecs, axis=1)\n', (2089, 2108), True, 'import numpy as np\n'), ((2651, 2673), 'numpy.linalg.norm', 'np.linalg.norm', (['v_diff'], {}), '(v_diff)\n', (2665, 2673), True, 'import numpy as np\n'), ((7416, 7428), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7425, 7428), False, 'import json\n'), ((1977, 2005), 're.search', 're.search', (['"""\\\\W|[0-9]"""', 'word'], {}), "('\\\\W|[0-9]', word)\n", (1986, 2005), False, 'import re\n'), ((5535, 5552), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (5549, 5552), True, 'import numpy as np\n')] |
# coding: utf-8
# Notebook export; the "In[..]" markers are the original cell numbers.

# In[1]:

"Get Packages"
import numpy as np  # numpy package
import pandas as pd  # pandas package
import matplotlib.pyplot as plt  # matplotlib for plotting
from sklearn import preprocessing  # preprocessing
# notebook-only magic; requires an IPython kernel to run
get_ipython().magic('matplotlib inline')

# In[11]:

"Get Data"
# load train/test and drop the row identifier column
train = pd.read_csv("train.csv")
train = train.drop(['ID'],axis=1)
test = pd.read_csv("test.csv")
test = test.drop(['ID'],axis=1)
# label column, and the full column list (still includes 'target')
target = train.target
featureNames = train.columns.values
# In[12]:
"Function to convert to hexavigesimal base"
def az_to_int(az, nanVal=None):
    """Interpret a string of letters as a base-26 number ('a'=1 ... 'z'=26).

    NaN inputs (detected via az != az) are returned unchanged unless a
    substitute nanVal is supplied.
    """
    if az != az:  # NaN is the only value that is not equal to itself
        return az if nanVal is None else nanVal
    # Horner's scheme over the hexavigesimal digits
    value = 0
    for ch in az:
        value = value * 26 + (ord(ch.lower()) - ord('a') + 1)
    return value
# In[13]:
"Prepare the data: combine, process, split"
test['target'] = -999
all_data = train.append(test)
# convert v22 to hexavigesimal
all_data.v22 = all_data.v22.apply(az_to_int)
for c in all_data.columns.values:
if all_data[c].dtype=='object':
all_data[c], tmpItter = all_data[c].factorize()
# replace all NA's with -1
all_data.fillna(-1, inplace=True)
# split the data
train = all_data[all_data['target']>-999]
test = all_data[all_data['target']==-999]
test = test.drop(['target'],axis=1)
# In[14]:
plt.rcParams['figure.max_open_warning']=300
nbins=20
# one figure per numeric feature: histograms (left) and CDFs (right)
for c in featureNames:
    if train[c].dtype != 'object' and c != 'target':
        # v22 has many more distinct values, so use finer bins
        if c=='v22':
            hbins = 100
        else:
            hbins = nbins
        fig=plt.figure(figsize=(14,4))
        ax1 = fig.add_subplot(1,2,1)
        # all non-NaN values of the feature
        dataset1 = train[c][~np.isnan(train[c])]
        # non-NaN values where the target is truthy
        # NOTE(review): `~np.isnan(train[c]) & train.target` relies on &
        # between a boolean and a numeric series -- confirm it acts as a
        # row mask with the pandas version in use.
        dataset2 = train[c][~np.isnan(train[c]) & train.target]
        # left plot: normalized histograms of both subsets
        hd = ax1.hist((dataset1, dataset2), bins=hbins, histtype='bar',normed=True,
             color=["blue", "red"],label=['all','target=1'])
        ax1.set_xlabel('Feature: '+c)
        ax1.set_xlim((-1,max(train[c])))
        # bin geometry shared by both histograms
        binwidth = hd[1][1]-hd[1][0]
        midpts = (hd[1][:-1]+hd[1][1:])/2
        # empirical CDFs accumulated from the histogram densities
        cdf_all= np.cumsum(hd[0][0])*binwidth
        cdf_ones = np.cumsum(hd[0][1])*binwidth
        # right plot: both CDFs plus their scaled difference
        ax2 = fig.add_subplot(1,2,2)
        ax2.set_ylim((0,1))
        ax2.set_xlim((0,nbins))
        ax2.plot(midpts,cdf_all,color='b')
        ax2.plot(midpts,cdf_ones,color='r')
        # difference magnified x10 and centered at 0.5 for visibility
        ax2.plot(midpts,0.5+10*(cdf_all-cdf_ones),color='k')
        ax2.grid()
        ax2.set_xlim((-1,max(train[c])))
        ax2.set_xlabel('cdfs plus cdf_diff*10+0.5')
        ax2.axhline(0.5,color='gray',linestyle='--')

# Plot Descriptions
# histogram plot
# Blue colour is all of the train data which is normalized.
# Red colour is all train data (normalized) where the target variable is one.
# NA's are -1 in this case.
# CDF Plots
# Blue colour is all of the train data which is normalized.
# Red colour is all train data (normalized) where the target variable is one.
# Black is the difference (see second last line, multiply by 10+0.5 for visualization)

# In[ ]:

# "Submission"
# submission = pd.DataFrame()
# submission["??"] = test["??"]
# submission["???????"] = State WHAT
# submission.to_csv('FILENAME.csv', index=False)

# In[ ]:

# In[ ]:

# In[ ]:

# In[16]:

# In[ ]:
| [
"pandas.read_csv",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"numpy.isnan"
] | [((292, 316), 'pandas.read_csv', 'pd.read_csv', (['"""train.csv"""'], {}), "('train.csv')\n", (303, 316), True, 'import pandas as pd\n'), ((358, 381), 'pandas.read_csv', 'pd.read_csv', (['"""test.csv"""'], {}), "('test.csv')\n", (369, 381), True, 'import pandas as pd\n'), ((1577, 1604), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(14, 4)'}), '(figsize=(14, 4))\n', (1587, 1604), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2152), 'numpy.cumsum', 'np.cumsum', (['hd[0][0]'], {}), '(hd[0][0])\n', (2142, 2152), True, 'import numpy as np\n'), ((2181, 2200), 'numpy.cumsum', 'np.cumsum', (['hd[0][1]'], {}), '(hd[0][1])\n', (2190, 2200), True, 'import numpy as np\n'), ((1680, 1698), 'numpy.isnan', 'np.isnan', (['train[c]'], {}), '(train[c])\n', (1688, 1698), True, 'import numpy as np\n'), ((1729, 1747), 'numpy.isnan', 'np.isnan', (['train[c]'], {}), '(train[c])\n', (1737, 1747), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.