code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import pandas as pd
import scipy.spatial as sp
import scipy.cluster.hierarchy as hc
from sklearn.metrics import silhouette_score
import numpy as np
from common import genome_pdist as gd
def automatic_cluster_species(Dist,seed_tresholds= [0.92,0.97],linkage_method='average'):
    """Cluster genomes into species, choosing the number of clusters that
    maximizes the silhouette score.

    The dendrogram is cut at both seed tresholds to bound the plausible
    cluster counts, every count in that range is scored, and the best one
    is used for the final flat clustering.

    Returns (Scores, labels): a per-count score table and the flat-cluster
    assignment for each genome.
    """
    condensed = sp.distance.squareform(Dist)
    linkage = hc.linkage(condensed, method=linkage_method)

    def n_clusters_at(treshold):
        # Number of flat clusters when cutting at distance 1 - treshold.
        return max(hc.fcluster(linkage, (1 - treshold), criterion='distance'))

    N_range = [n_clusters_at(t) for t in seed_tresholds]
    assert (N_range[1] - N_range[0]) < 60, "Need to evaluate more than 60 tresholds"
    assert ~np.isnan(N_range).any(), "N range is not defined"
    Scores = gd.evaluate_clusters_range(np.arange(min(N_range), max(N_range) + 1),
                                        Dist, linkage_method=linkage_method)
    if N_range[0] != N_range[1]:
        # Cut at the cluster count with the best silhouette score.
        best_N = Scores.Silhouette_score.idxmax()
        labels = hc.fcluster(linkage, best_N, criterion='maxclust')
    else:
        # Both seed tresholds agree; cut directly at the first one.
        labels = hc.fcluster(linkage, (1 - seed_tresholds[0]), criterion='distance')
    return Scores, labels
def treshold_based_clustering(Dist, treshold, linkage_method='average'):
    """Cluster genomes by cutting the hierarchical linkage at a fixed
    similarity treshold.

    Parameters
    ----------
    Dist : square distance matrix accepted by scipy's squareform
    treshold : float, must lie in the open interval (0.9, 1)
    linkage_method : method passed to scipy.cluster.hierarchy.linkage

    Returns
    -------
    (Scores, labels) : the score table for this single treshold and the
    flat-cluster label of every genome.

    Raises
    ------
    AssertionError if `treshold` is outside (0.9, 1).
    """
    # Bug fix: the original message was a plain string, so "{treshold}"
    # was printed literally; the f-prefix makes it interpolate.
    assert (treshold > 0.9) & (treshold < 1), \
        f"treshold should be between 0.9 and 1 or 'auto', treshold was {treshold}"
    linkage = hc.linkage(sp.distance.squareform(Dist), method=linkage_method)
    labels = hc.fcluster(linkage, (1 - treshold), criterion='distance')
    Scores = gd.evaluate_clusters_tresholds([treshold], Dist, linkage_method=linkage_method)
    return Scores, labels
if __name__=='__main__':
    # Snakemake entry point: the `snakemake` object is injected into the
    # module namespace by Snakemake's `script:` directive, not imported.
    linkage_method= snakemake.params.linkage_method
    treshold = snakemake.params.treshold
    quality_score_formula = snakemake.config['quality_score']
    # Genome quality table; the per-genome quality score is evaluated
    # from a formula given in the workflow config.
    Q= gd.load_quality(snakemake.input.quality)
    quality_score= Q.eval(quality_score_formula)
    # Distance = 1 - similarity.  NOTE(review): unobserved pairs are
    # filled with similarity 0.9 (distance 0.1) — confirm this fill value
    # is intended.
    M= gd.load_mummer(snakemake.input.dists)
    Dist= 1-gd.pairewise2matrix(M,fillna=0.9)
    # Either search for the silhouette-optimal cluster count ('auto') or
    # cut the dendrogram at the fixed treshold.
    if treshold=='auto':
        Scores,labels= automatic_cluster_species(Dist,linkage_method=linkage_method)
    else:
        Scores, labels = treshold_based_clustering(Dist,treshold,linkage_method=linkage_method)
    Scores.to_csv(snakemake.output.scores,sep='\t')
    # Map every genome to a species number; genomes absent from the
    # distance matrix get fresh singleton species numbers.
    mag2Species= pd.DataFrame(index=Q.index,columns=['SpeciesNr','Species'])
    mag2Species.index.name='genome'
    mag2Species.loc[Dist.index,'SpeciesNr']= labels
    speciesNr= labels.max()
    missing_species=mag2Species.SpeciesNr.isnull()
    mag2Species.loc[missing_species,'SpeciesNr']=np.arange(speciesNr+1,
                                                           speciesNr+1+missing_species.sum())
    print(f"Identified { mag2Species.SpeciesNr.max()} species")
    # Zero-padded species labels, e.g. 'sp007' when the max label has 3 digits.
    n_leading_zeros= len(str(max(labels)))
    format_int='sp{:0'+str(n_leading_zeros)+'d}'
    mag2Species['Species']=mag2Species.SpeciesNr.apply(format_int.format)
    # Representative genome per species = the one with the best quality score.
    mag2Species['Representative_Species']=gd.best_genome_from_table(mag2Species.Species,quality_score)
    mag2Species.to_csv(snakemake.output.cluster_file,sep='\t')
| [
"common.genome_pdist.load_mummer",
"common.genome_pdist.evaluate_clusters_tresholds",
"scipy.spatial.distance.squareform",
"numpy.isnan",
"common.genome_pdist.load_quality",
"pandas.DataFrame",
"common.genome_pdist.best_genome_from_table",
"common.genome_pdist.pairewise2matrix",
"scipy.cluster.hiera... | [((1362, 1418), 'scipy.cluster.hierarchy.fcluster', 'hc.fcluster', (['linkage', '(1 - treshold)'], {'criterion': '"""distance"""'}), "(linkage, 1 - treshold, criterion='distance')\n", (1373, 1418), True, 'import scipy.cluster.hierarchy as hc\n'), ((1429, 1508), 'common.genome_pdist.evaluate_clusters_tresholds', 'gd.evaluate_clusters_tresholds', (['[treshold]', 'Dist'], {'linkage_method': 'linkage_method'}), '([treshold], Dist, linkage_method=linkage_method)\n', (1459, 1508), True, 'from common import genome_pdist as gd\n'), ((1722, 1762), 'common.genome_pdist.load_quality', 'gd.load_quality', (['snakemake.input.quality'], {}), '(snakemake.input.quality)\n', (1737, 1762), True, 'from common import genome_pdist as gd\n'), ((1820, 1857), 'common.genome_pdist.load_mummer', 'gd.load_mummer', (['snakemake.input.dists'], {}), '(snakemake.input.dists)\n', (1834, 1857), True, 'from common import genome_pdist as gd\n'), ((2196, 2257), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'Q.index', 'columns': "['SpeciesNr', 'Species']"}), "(index=Q.index, columns=['SpeciesNr', 'Species'])\n", (2208, 2257), True, 'import pandas as pd\n'), ((2868, 2929), 'common.genome_pdist.best_genome_from_table', 'gd.best_genome_from_table', (['mag2Species.Species', 'quality_score'], {}), '(mag2Species.Species, quality_score)\n', (2893, 2929), True, 'from common import genome_pdist as gd\n'), ((309, 337), 'scipy.spatial.distance.squareform', 'sp.distance.squareform', (['Dist'], {}), '(Dist)\n', (331, 337), True, 'import scipy.spatial as sp\n'), ((412, 468), 'scipy.cluster.hierarchy.fcluster', 'hc.fcluster', (['linkage', '(1 - treshold)'], {'criterion': '"""distance"""'}), "(linkage, 1 - treshold, criterion='distance')\n", (423, 468), True, 'import scipy.cluster.hierarchy as hc\n'), ((861, 926), 'scipy.cluster.hierarchy.fcluster', 'hc.fcluster', (['linkage', '(1 - seed_tresholds[0])'], {'criterion': '"""distance"""'}), "(linkage, 1 - seed_tresholds[0], 
criterion='distance')\n", (872, 926), True, 'import scipy.cluster.hierarchy as hc\n'), ((1004, 1057), 'scipy.cluster.hierarchy.fcluster', 'hc.fcluster', (['linkage', 'N_species'], {'criterion': '"""maxclust"""'}), "(linkage, N_species, criterion='maxclust')\n", (1015, 1057), True, 'import scipy.cluster.hierarchy as hc\n'), ((1296, 1324), 'scipy.spatial.distance.squareform', 'sp.distance.squareform', (['Dist'], {}), '(Dist)\n', (1318, 1324), True, 'import scipy.spatial as sp\n'), ((1870, 1904), 'common.genome_pdist.pairewise2matrix', 'gd.pairewise2matrix', (['M'], {'fillna': '(0.9)'}), '(M, fillna=0.9)\n', (1889, 1904), True, 'from common import genome_pdist as gd\n'), ((647, 664), 'numpy.isnan', 'np.isnan', (['N_range'], {}), '(N_range)\n', (655, 664), True, 'import numpy as np\n')] |
#-------------------------------------------------------------------------------------------------------------------
# Packages & Settings
#-------------------------------------------------------------------------------------------------------------------
# General packages
import time
import sys
import os
import datetime
from glob import glob
import shutil
# Math and data structure packages
from scipy import stats
from scipy.optimize import curve_fit
from pylab import *
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.stats import spearmanr
# Writing Output
import pickle
exp_folder = '/home/rettenls/data/experiments/semeval/'
#-------------------------------------------------------------------------------------------------------------------
# Loading own Modules
#-------------------------------------------------------------------------------------------------------------------
import sys
sys.path.append("/home/rettenls/code/")
from lib.model import Model
from lib.trafo import Transformation
from lib.eval import print_nn_word, get_nn_list, get_cosine_similarity, get_pip_norm, get_ww_pip_norm
from lib.score import evaluate_analogy
from lib.operations import align, avg, align_list
from lib.util import get_filename
#-------------------------------------------------------------------------------------------------------------------
# Checking the Coordination File
#-------------------------------------------------------------------------------------------------------------------
def open_file(exp_folder, submission, task, language):
    """Open (for writing) the answer file of one submission/task/language,
    creating the target directory first when it does not exist yet."""
    answer_dir = '{}mixed_answers/{}/answer/{}'.format(exp_folder, submission, task)
    if not os.path.isdir(answer_dir):
        os.makedirs(answer_dir)
    return open(answer_dir + '/' + language, 'w')
def get_displacement(dist):
    """Return *dist* normalised to unit mean, as a new float array.

    Bug fix: the original aliased the input (`displacement = dist`) and
    then used `/=`, silently dividing the caller's array in place — in
    the main loop this mutated the cached `distributions[model]` arrays.
    A fresh array is returned instead; the input is left untouched.
    """
    displacement = np.asarray(dist, dtype=float) / np.mean(dist)
    return displacement
def gauss(x, mu, sigma, A):
    """Gaussian with amplitude A, mean mu and standard deviation sigma."""
    # np.exp is used explicitly instead of the bare `exp` that only
    # exists because of the fragile `from pylab import *` star import.
    return A * np.exp(-(x - mu) ** 2 / 2 / sigma ** 2)
def bimodal(x, mu1, sigma1, A1, mu2, sigma2, A2):
    """Sum of two Gaussians; the fit model used by fit_double_gauss."""
    first = gauss(x, mu1, sigma1, A1)
    second = gauss(x, mu2, sigma2, A2)
    return first + second
def fit_double_gauss(displacement, show = False):
    """Fit a mixture of two Gaussians to the histogram of *displacement*.

    A 100-bin histogram is fitted with `bimodal` via curve_fit; the
    initial guess places one component at the sample mean and a smaller
    second one at mean + std.  Both fitted components are drawn on the
    current figure, which is shown when *show* is True.

    Returns the 6 fitted parameters (mu1, sigma1, A1, mu2, sigma2, A2).
    """
    data = displacement
    y,x,_= plt.hist(data,100,alpha=.3,label='data')
    x=(x[1:]+x[:-1])/2 # for len(x)==len(y)
    # Initial guess for curve_fit: (mu1, sigma1, A1, mu2, sigma2, A2).
    expected=( np.mean(displacement),
                np.std(displacement),
                max(y),
                np.mean(displacement) + np.std(displacement),
                np.std(displacement),
                max(y) / 5)
    params,cov=curve_fit(bimodal,x,y,expected)
    # NOTE(review): `sigma` (parameter standard errors) is computed but
    # never used or returned; `sqrt`/`diag`/`legend` come from the
    # `from pylab import *` star import.
    sigma=sqrt(diag(cov))
    plt.plot(x,gauss(x,*params[:3]),color='red',lw=3,label='model_1')
    plt.plot(x,gauss(x,*params[3:]),color='blue',lw=3,label='model_2')
    legend()
    if show:
        plt.show()
    return params
# -------------------------------------------------------------------
# Main evaluation loop: for every language, combine the per-model
# displacement distributions of each submission, fit a two-Gaussian
# mixture, and write SemEval task1 (binary change) / task2 (ranking)
# answer files.
# NOTE(review): `corpora` and `max_run_num` are defined but never used.
# -------------------------------------------------------------------
languages = ['english', 'german', 'latin', 'swedish']
corpora = ['corpus1', 'corpus2']
submissions = ['fasttext_glove_word2vec', 'fasttext_glove', 'word2vec_glove',
               'fasttext_word2vec', 'fasttext', 'glove', 'word2vec']
max_run_num = 16
for language in languages:
    # Open Task File (one target word per line)
    task_file_name = exp_folder + 'tasks/targets/' + language + '/targets.txt'
    task_file = open(task_file_name, 'r')
    task_file_lines = task_file.readlines()
    task_file.close()
    # Read Eval Words from File
    eval_words = []
    for task_file_line in task_file_lines:
        word = task_file_line.split('\n')[0]
        eval_words.append(word)
    # Get Word List and Distributions from Disk (pickled by an earlier step)
    dist_folder = exp_folder + 'mixed_distributions/' + language + '/'
    distributions = pickle.load(open(dist_folder + 'results.pickle', 'rb'))
    word_list = pickle.load(open(dist_folder + 'words.pickle', 'rb'))
    for submission in submissions:
        # A submission name like 'fasttext_glove' lists the models to combine.
        models = submission.split('_')
        #if(len(models) == 2):
        #    continue
        # Sum the normalised displacement of every model in the ensemble.
        # NOTE(review): check whether get_displacement returns an alias of
        # distributions[model]; `+=` would then mutate the cached arrays.
        semantic_displacement = None
        for model in models:
            if semantic_displacement is None:
                semantic_displacement = get_displacement(distributions[model])
            else:
                semantic_displacement += get_displacement(distributions[model])
        # Fit the mixture and keep the component with the smaller mean
        # (the "stable words" mode); params are (mu1,s1,A1,mu2,s2,A2).
        fit_params = fit_double_gauss(semantic_displacement, True)
        if (fit_params[0] < fit_params[3]):
            mean = fit_params[0]
            std = fit_params[1]
        else:
            mean = fit_params[3]
            std = fit_params[4]
        # Calculate Results
        task1_result = list()
        task2_result = list()
        for word in eval_words:
            word_index = word_list.index(word)
            word_displacement = semantic_displacement[word_index]
            # Task 1: changed iff displacement exceeds mean + std of the
            # stable mode.
            if (word_displacement > mean + std):
                task1_result.append(1)
            else:
                task1_result.append(0)
            # Task 2: rank words by raw displacement.
            task2_result.append(word_displacement)
        # Open Files
        task1_file = open_file(exp_folder, submission, 'task1', language + '.txt')
        task2_file = open_file(exp_folder, submission, 'task2', language + '.txt')
        # Write to File (tab-separated: word <TAB> score)
        i = 0
        for word in eval_words:
            task1_res_line = word + '\t' + str(task1_result[i]) + '\n'
            task1_file.write(task1_res_line)
            task2_res_line = word + '\t' + str(task2_result[i]) + '\n'
            task2_file.write(task2_res_line)
            i += 1
        task1_file.close()
        task2_file.close()
        print('Completed Evaluation.')
        print('Language:', language)
        print('Models:', models)
        print('Evaluation:', len([x for x in task1_result if x == 1]), 'of', len(task1_result), 'have changed in meaning.')
print('') | [
"scipy.optimize.curve_fit",
"numpy.mean",
"matplotlib.pyplot.hist",
"os.makedirs",
"os.path.isdir",
"numpy.std",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((935, 974), 'sys.path.append', 'sys.path.append', (['"""/home/rettenls/code/"""'], {}), "('/home/rettenls/code/')\n", (950, 974), False, 'import sys\n'), ((1929, 1950), 'numpy.mean', 'np.mean', (['displacement'], {}), '(displacement)\n', (1936, 1950), True, 'import numpy as np\n'), ((2219, 2263), 'matplotlib.pyplot.hist', 'plt.hist', (['data', '(100)'], {'alpha': '(0.3)', 'label': '"""data"""'}), "(data, 100, alpha=0.3, label='data')\n", (2227, 2263), True, 'import matplotlib.pyplot as plt\n'), ((2481, 2515), 'scipy.optimize.curve_fit', 'curve_fit', (['bimodal', 'x', 'y', 'expected'], {}), '(bimodal, x, y, expected)\n', (2490, 2515), False, 'from scipy.optimize import curve_fit\n'), ((1688, 1709), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (1701, 1709), False, 'import os\n'), ((1713, 1732), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1724, 1732), False, 'import os\n'), ((2315, 2336), 'numpy.mean', 'np.mean', (['displacement'], {}), '(displacement)\n', (2322, 2336), True, 'import numpy as np\n'), ((2342, 2362), 'numpy.std', 'np.std', (['displacement'], {}), '(displacement)\n', (2348, 2362), True, 'import numpy as np\n'), ((2430, 2450), 'numpy.std', 'np.std', (['displacement'], {}), '(displacement)\n', (2436, 2450), True, 'import numpy as np\n'), ((2693, 2703), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2701, 2703), True, 'import matplotlib.pyplot as plt\n'), ((2380, 2401), 'numpy.mean', 'np.mean', (['displacement'], {}), '(displacement)\n', (2387, 2401), True, 'import numpy as np\n'), ((2404, 2424), 'numpy.std', 'np.std', (['displacement'], {}), '(displacement)\n', (2410, 2424), True, 'import numpy as np\n')] |
# //utils for periodic boundary conditions
# // Author: <NAME>
# // Date: 6.8.2021
# // Group: Rappel Group, UCSD
from numba import njit
import numpy as np
@njit
def pbc(x, L):
    """Wrap coordinate x back into the periodic box [0, L)."""
    if x >= L:
        return x - L
    if x < 0:
        return x + L
    return x
@njit
def sqdiff(x1, x2):
    """Squared difference (x1 - x2)**2."""
    d = x1 - x2
    return d * d
@njit
def min3(num1, num2, num3):
    """Smallest of three numbers."""
    mn = num1
    if num2 < mn:
        mn = num2
    if num3 < mn:
        mn = num3
    return mn
@njit
def min2(num1, num2):
    """Smaller of the two numbers (num1 wins ties)."""
    if num2 < num1:
        return num2
    return num1
@njit
def max2(num1, num2):
    """Larger of the two numbers (num1 wins ties)."""
    if num2 > num1:
        return num2
    return num1
@njit
def dist_pbc(x1, y1, x2, y2, L):
    """Euclidean distance between (x1, y1) and (x2, y2) under periodic
    boundary conditions with box length L: the closest periodic image is
    picked independently for each axis (minimum-image convention)."""
    dx_sq = min3(sqdiff(x1, x2), sqdiff(x1, x2 + L), sqdiff(x1, x2 - L))
    dy_sq = min3(sqdiff(y1, y2), sqdiff(y1, y2 + L), sqdiff(y1, y2 - L))
    return np.sqrt(dx_sq + dy_sq)
@njit
def subtract_pbc_1d(x1, x2, L):
    """Minimum-image signed difference x1 - x2 in a periodic box of length L."""
    best = x1 - x2
    shifted_up = best + L
    shifted_down = best - L
    if abs(shifted_up) < abs(best):
        best = shifted_up
    elif abs(shifted_down) < abs(best):
        best = shifted_down
    return best
| [
"numpy.sqrt"
] | [((1036, 1054), 'numpy.sqrt', 'np.sqrt', (['(xsq + ysq)'], {}), '(xsq + ysq)\n', (1043, 1054), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Created on Thu Dec 5 16:49:20 2019
# @author: arthurd
"""
FoRoute Module.
Visualize Map Matching routes on HTML maps.
"""
from matplotlib import collections as mc
import matplotlib.pyplot as plt
import osmnx as ox
import webbrowser
import folium
import numpy as np
import noiseplanet.matcher as matching
def linesProjection(track, track_corr):
    """Build, for every point, the segment joining the original position
    to its map-matched projection, as (lon, lat) endpoint pairs.

    Returns an empty list (after printing a warning) when the two tracks
    do not have the same length.
    """
    if len(track) != len(track_corr):
        print("\n>>> WARNING:",
              "\nAn error occured while drawing lines for each projection.",
              "\nPlease make sure the dimensions of your original track and corrected one are equals.")
        return []
    return [[(pt[1], pt[0]), (pt_corr[1], pt_corr[0])]
            for pt, pt_corr in zip(track, track_corr)]
def plot_graph(track, graph=None, track_corr=[],
               track_color="black", track_corr_color="darkcyan",
               route_color="black", route_corr_color="darkcyan",
               track_size=20, track_corr_size=20,
               track_marker="x", track_corr_marker="x",
               proj=False, proj_color="skyblue", proj_size=1, proj_alpha=1,
               route_corr=np.array([[None, None]]),
               route_size=4, route_corr_size=4, route_opacity=.6,
               title_fig="", title_color="#999999", title_fontweight="bold"
               ):
    """
    Draw a matplotlib figure of the map-matching result on top of the
    OSMnx street graph: the original track, the corrected (map-matched)
    track, both routes, and optionally one projection line per point.

    Parameters are styling knobs (colors, sizes, markers, title) for the
    two point sets, the two routes and the projection segments; `proj`
    toggles drawing the projection segments.

    NOTE(review): mutable default arguments ([] / np.array) are shared
    across calls; safe only as long as they are never mutated.

    Returns
    -------
    (fig, ax) : the matplotlib figure and axes produced by ox.plot_graph.
    """
    fig, ax = ox.plot_graph(graph, node_color="skyblue", node_alpha=.5, node_size=20, annotate=True, margin=0, show=False, close=False)
    plt.title(title_fig, color=title_color, fontweight=title_fontweight)
    # add track points
    # NOTE(review): track[:] is a plain copy, so track[:][1] is the single
    # point track[1], not a column — for per-column access this likely
    # should be track[:, 1] (numpy); confirm the intended input type.
    plt.scatter(track[:][1], track[:][0], s=track_size, marker=track_marker, color=track_color)
    ax.scatter(track_corr[:][1], track_corr[:][0], s=track_corr_size, marker=track_corr_marker, color=track_corr_color)
    # plot the route
    ax.plot(track[:][1], track[:][0], marker='x', linewidth=route_size, alpha=.7, color=route_color)
    ax.plot(route_corr[:][1], route_corr[:][0], linewidth=route_corr_size, alpha=.7, color=route_corr_color)
    #projection between the two tracks
    if proj:
        lines_proj_HMM = linesProjection(track, track_corr)
        lc = mc.LineCollection(lines_proj_HMM, colors=proj_color, alpha=proj_alpha, linewidths=proj_size)
        ax.add_collection(lc)
    return fig, ax
def plot_html(track, track_corr=[],
              track_color="black", track_corr_color="#CD473E",
              track_size=2, track_corr_size=2,
              route_corr=[],
              route_size=2, route_corr_size=2,
              route_color="black", route_corr_color="#CD473E",
              route_opacity=.7, route_corr_opacity=.7,
              proj=False, proj_color="#CD473E", proj_size=1, proj_alpha=.5,
              show_graph=False, graph=None,
              file_name="my_map.html", save=True
              ):
    """
    Build a folium/Leaflet HTML map of the map-matching result: the
    original GPS track, its corrected counterpart, optionally the
    projection segments between them and the underlying OSM road graph.

    Parameters mirror plot_graph: colors and sizes for both point sets
    and both routes; `proj` draws one line per (point, projection) pair;
    `show_graph`/`graph` overlays the street network (downloaded with
    matching.graph_from_track when not supplied); `file_name`/`save`
    control writing the HTML file and opening it in a browser tab.

    NOTE(review): mutable default arguments ([]) are shared across
    calls; safe only as long as they are never mutated.

    Returns
    -------
    folium.Map : the assembled map object.
    """
    med_lat = track[len(track)//2][0]
    med_lon = track[len(track)//2][1]
    # Load map centred on central coordinates
    my_map = folium.Map(location=[med_lat, med_lon], zoom_start=20)
    if show_graph or graph is not None:
        if graph is None:
            graph = matching.graph_from_track(track, network='drive')
        my_map = ox.plot_graph_folium(graph, popup_attribute='name', edge_width=1, edge_color='darkgrey')
    if proj:
        for i in range(len(track)):
            folium.PolyLine([(track[i][0], track[i][1]), (track_corr[i][0], track_corr[i][1])],
                            color=proj_color, weight=proj_size, opacity=proj_alpha).add_to(my_map)
    # If the route is given in input, plot both (original and corrected)
    if len(route_corr) > 0:
        # add lines
        folium.PolyLine(track, color=route_color, weight=route_size, opacity=route_opacity).add_to(my_map)
        folium.PolyLine(route_corr, color=route_corr_color, weight=route_corr_size, opacity=route_corr_opacity).add_to(my_map)
    # add dots
    for i in range(len(track)):
        folium.CircleMarker(location=[track[i][0], track[i][1]],
                            radius=track_size,
                            weight=1,
                            color=track_color,
                            fill=True,
                            fill_opacity=1).add_to(my_map)
    for i in range(len(track_corr)):
        folium.CircleMarker(location=[track_corr[i][0], track_corr[i][1]],
                            radius=track_corr_size,
                            weight=1,
                            color=track_corr_color,
                            fill=True,
                            fill_opacity=1).add_to(my_map)
    # add the OSM light grey background
    folium.TileLayer('cartodbpositron').add_to(my_map)
    # plot the legend in the HTML page
    legend_html = """
     <div style="position: fixed;
                 width: 210px;
                 top: 10px; right: 10px;
                 border: 2px solid lightgrey;
                 border-radius: 4px;
                 background-color: rgba(255, 255, 255, 0.85);
                 z-index:9999;
                 font-size: 15px; color: slategrey;
                 ">
         <span style="font-weight: bold">Legend</span>
         <br>
         Original Point
         <i class="fa fa-circle"
             style="float: right;
                 margin-right: 19px; margin-top: 4px;
                 color: black">
         </i>
         <br>
         Projected Point
         <i class="fa fa-circle"
             style="float: right;
                 margin-right: 19px; margin-top: 4px;
                 color: #CD473E">
         </i>
         <br>
         Projection
         <div class="line"
             style="float: right;
                 margin-right: 10px; margin-top: 10px;
                 width: 30px; height: 2px;
                 background-color: #CD473E">
         </div>
      </div>
     """
    my_map.get_root().html.add_child(folium.Element(legend_html))
    if save:
        my_map.save(file_name)
        # Plot in new tab
        webbrowser.open(file_name, new=2)  # open in new tab
    return my_map
| [
"osmnx.plot_graph",
"folium.Element",
"osmnx.plot_graph_folium",
"folium.TileLayer",
"webbrowser.open",
"folium.Map",
"matplotlib.collections.LineCollection",
"numpy.array",
"noiseplanet.matcher.graph_from_track",
"matplotlib.pyplot.scatter",
"folium.PolyLine",
"matplotlib.pyplot.title",
"fo... | [((1160, 1184), 'numpy.array', 'np.array', (['[[None, None]]'], {}), '([[None, None]])\n', (1168, 1184), True, 'import numpy as np\n'), ((3143, 3269), 'osmnx.plot_graph', 'ox.plot_graph', (['graph'], {'node_color': '"""skyblue"""', 'node_alpha': '(0.5)', 'node_size': '(20)', 'annotate': '(True)', 'margin': '(0)', 'show': '(False)', 'close': '(False)'}), "(graph, node_color='skyblue', node_alpha=0.5, node_size=20,\n annotate=True, margin=0, show=False, close=False)\n", (3156, 3269), True, 'import osmnx as ox\n'), ((3269, 3337), 'matplotlib.pyplot.title', 'plt.title', (['title_fig'], {'color': 'title_color', 'fontweight': 'title_fontweight'}), '(title_fig, color=title_color, fontweight=title_fontweight)\n', (3278, 3337), True, 'import matplotlib.pyplot as plt\n'), ((3366, 3461), 'matplotlib.pyplot.scatter', 'plt.scatter', (['track[:][1]', 'track[:][0]'], {'s': 'track_size', 'marker': 'track_marker', 'color': 'track_color'}), '(track[:][1], track[:][0], s=track_size, marker=track_marker,\n color=track_color)\n', (3377, 3461), True, 'import matplotlib.pyplot as plt\n'), ((6400, 6454), 'folium.Map', 'folium.Map', ([], {'location': '[med_lat, med_lon]', 'zoom_start': '(20)'}), '(location=[med_lat, med_lon], zoom_start=20)\n', (6410, 6454), False, 'import folium\n'), ((3935, 4031), 'matplotlib.collections.LineCollection', 'mc.LineCollection', (['lines_proj_HMM'], {'colors': 'proj_color', 'alpha': 'proj_alpha', 'linewidths': 'proj_size'}), '(lines_proj_HMM, colors=proj_color, alpha=proj_alpha,\n linewidths=proj_size)\n', (3952, 4031), True, 'from matplotlib import collections as mc\n'), ((6609, 6701), 'osmnx.plot_graph_folium', 'ox.plot_graph_folium', (['graph'], {'popup_attribute': '"""name"""', 'edge_width': '(1)', 'edge_color': '"""darkgrey"""'}), "(graph, popup_attribute='name', edge_width=1,\n edge_color='darkgrey')\n", (6629, 6701), True, 'import osmnx as ox\n'), ((9445, 9472), 'folium.Element', 'folium.Element', (['legend_html'], {}), '(legend_html)\n', 
(9459, 9472), False, 'import folium\n'), ((9553, 9586), 'webbrowser.open', 'webbrowser.open', (['file_name'], {'new': '(2)'}), '(file_name, new=2)\n', (9568, 9586), False, 'import webbrowser\n'), ((6542, 6591), 'noiseplanet.matcher.graph_from_track', 'matching.graph_from_track', (['track'], {'network': '"""drive"""'}), "(track, network='drive')\n", (6567, 6591), True, 'import noiseplanet.matcher as matching\n'), ((8067, 8102), 'folium.TileLayer', 'folium.TileLayer', (['"""cartodbpositron"""'], {}), "('cartodbpositron')\n", (8083, 8102), False, 'import folium\n'), ((7099, 7187), 'folium.PolyLine', 'folium.PolyLine', (['track'], {'color': 'route_color', 'weight': 'route_size', 'opacity': 'route_opacity'}), '(track, color=route_color, weight=route_size, opacity=\n route_opacity)\n', (7114, 7187), False, 'import folium\n'), ((7206, 7313), 'folium.PolyLine', 'folium.PolyLine', (['route_corr'], {'color': 'route_corr_color', 'weight': 'route_corr_size', 'opacity': 'route_corr_opacity'}), '(route_corr, color=route_corr_color, weight=route_corr_size,\n opacity=route_corr_opacity)\n', (7221, 7313), False, 'import folium\n'), ((7382, 7517), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': '[track[i][0], track[i][1]]', 'radius': 'track_size', 'weight': '(1)', 'color': 'track_color', 'fill': '(True)', 'fill_opacity': '(1)'}), '(location=[track[i][0], track[i][1]], radius=track_size,\n weight=1, color=track_color, fill=True, fill_opacity=1)\n', (7401, 7517), False, 'import folium\n'), ((7714, 7874), 'folium.CircleMarker', 'folium.CircleMarker', ([], {'location': '[track_corr[i][0], track_corr[i][1]]', 'radius': 'track_corr_size', 'weight': '(1)', 'color': 'track_corr_color', 'fill': '(True)', 'fill_opacity': '(1)'}), '(location=[track_corr[i][0], track_corr[i][1]], radius=\n track_corr_size, weight=1, color=track_corr_color, fill=True,\n fill_opacity=1)\n', (7733, 7874), False, 'import folium\n'), ((6787, 6931), 'folium.PolyLine', 'folium.PolyLine', 
(['[(track[i][0], track[i][1]), (track_corr[i][0], track_corr[i][1])]'], {'color': 'proj_color', 'weight': 'proj_size', 'opacity': 'proj_alpha'}), '([(track[i][0], track[i][1]), (track_corr[i][0], track_corr[\n i][1])], color=proj_color, weight=proj_size, opacity=proj_alpha)\n', (6802, 6931), False, 'import folium\n')] |
#!/usr/bin/env python
# modules
# ROS stuff and multithreading
import rospy
from geometry_msgs.msg import Twist, Pose2D, PoseStamped
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry, Path
from std_msgs.msg import Float32MultiArray
import tf
import numpy as np
import sys
from dynamic_reconfigure.server import Server as DRServer
from dynamic_reconfigure.client import Client as DRClient
from mobro.cfg import GainsConfig
static_fb = False
class dictToNamespace(object):
    """Thin wrapper exposing the keys of a mapping as object attributes
    (used for the dynamic-reconfigure gains dictionary)."""
    def __init__(self, adict):
        # Copy the mapping straight into the instance dictionary so each
        # key becomes an attribute.
        self.__dict__.update(adict)
def is_defined(v):
    """Return True when *v* is not None (i.e. the value was set)."""
    # Idiom fix: `v is not None` is the Pythonic and behaviorally
    # identical form of the original `type(v) != type(None)` check.
    return v is not None
def toPi(v):
    """Wrap an angle (radians) into the interval [-pi, pi)."""
    two_pi = 2 * np.pi
    return (v + np.pi) % two_pi - np.pi
class Traj:
    """Closed reference trajectory (a limacon-like polar curve) giving
    position, velocity and acceleration at any time t."""

    def __init__(self):
        # Curve parameters: base radius a, modulation b, angular rate w.
        self.a = 3
        self.b = 2
        self.w = .5

    def ref(self, t):
        """Return [p, v, a]: 2x1 np.matrix position, velocity and
        acceleration on the curve at time t."""
        wt = self.w * t
        c = np.cos(wt)
        s = np.sin(wt)
        pos = [(self.a + self.b * c) * c,
               (self.a + self.b * c) * s]
        vel = [-self.w * (self.a + 2 * self.b * c) * s,
               self.w * (self.a * c - 2 * self.b * s ** 2 + self.b)]
        acc = [self.w ** 2 * (-self.a * c + 4 * self.b * s ** 2 - 2 * self.b),
               -self.w ** 2 * (self.a + 4 * self.b * c) * s]
        return [np.matrix(vec).transpose() for vec in [pos, vel, acc]]
class Robot:
    """ROS node wrapper around a unicycle or bicycle (kinematic) robot.

    Subscribes to odometry (and, for the bicycle, joint states for the
    steering angle), accepts a manual goal from RViz, and publishes the
    velocity command, the current goal and a tracking-error vector.
    """
    def __init__(self, bike = True):
        # Pose estimate: 2x1 position matrix + heading + twist.
        self.xy = np.matrix([[0.],[0.]])
        self.theta = 0
        self.v = 0
        self.w = 0
        self.bike = bike
        rospy.Subscriber('odom', Odometry, self.odom_cb)
        if bike:
            # Bicycle model: steering angle beta, wheelbase L (m).
            rospy.Subscriber('joint_states', JointState, self.joint_cb)
            self.beta = 0.
            self.L = 1.6
        self.goal = PoseStamped()
        self.goal.header.frame_id = 'world'
        # Goal clicked in RViz; None means "follow the trajectory".
        self.manual_goal = None
        rospy.Subscriber('/move_base_simple/goal', PoseStamped, self.goal_cb)
        # Time at which the manual goal was first reached (0 = not yet).
        self.t0 = 0
        self.goal_pub = rospy.Publisher('goal', PoseStamped, queue_size=10)
        self.cmd = Twist()
        self.cmd_pub = rospy.Publisher('cmd', Twist, queue_size=10)
        self.error = Float32MultiArray()
        self.error.data = [0,0,0,0]
        self.error_pub = rospy.Publisher('error', Float32MultiArray, queue_size=10)
        # control gains
        self.gains = None
        self.srv = DRServer(GainsConfig, self.gains_cb)
    def gains_cb(self, config, level):
        """Dynamic-reconfigure callback: store the gains, clamping the
        feedback-point offset d to stay strictly positive."""
        self.gains = dictToNamespace(config)
        if self.gains.d <= 0:
            self.gains.d = 0.01
        return config
    def odom_cb(self, msg):
        """Odometry callback: update position, heading and twist.
        Heading is recovered from the planar quaternion (z, w)."""
        self.xy[0] = msg.pose.pose.position.x
        self.xy[1] = msg.pose.pose.position.y
        self.theta = 2*np.arctan2(msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)
        self.v = msg.twist.twist.linear.x
        self.w = msg.twist.twist.angular.z
    def joint_cb(self, msg):
        # First joint is assumed to be the steering angle — TODO confirm
        # against the robot's URDF/joint ordering.
        self.beta = msg.position[0]
    def goal_cb(self, msg):
        """RViz '2D Nav Goal' callback: store it as the manual goal."""
        self.manual_goal = np.matrix([[msg.pose.position.x], [msg.pose.position.y]])
        self.goal = msg
    def reach(self, goal, xyd = None):
        """Static feedback on a point offset by d ahead of the robot.

        Drives the feedback point xyp towards `goal` (optionally with a
        feed-forward velocity xyd); returns True once within 1 mm.
        """
        c = np.cos(self.theta)
        s = np.sin(self.theta)
        d = self.gains.d
        self.error.data = [goal[0,0] - self.xy[0,0],
                           goal[1,0] - self.xy[1,0],
                           0]
        if self.bike:
            # Feedback point ahead of the front wheel; K maps the point's
            # velocity to (v, steering rate) for the bicycle model.
            ctb = np.cos(self.theta+self.beta)
            stb = np.sin(self.theta + self.beta)
            xyp = self.xy + self.L*np.matrix([[c],[s]]) + d*np.matrix([[ctb],[stb]])
            K = np.matrix([[ctb-d/self.L*stb*np.sin(self.beta), -d*stb],
                           [ stb+d/self.L*ctb*np.sin(self.beta), d*ctb]])
        else:
            # Unicycle: classic point-offset (d ahead) linearisation.
            xyp = self.xy + d*np.matrix([[c],[s]])
            K = np.matrix([[c, -d*s],[s, d*c]])
        xyd_cmd = self.gains.Kp * (goal - xyp)
        if is_defined(xyd):
            xyd_cmd += xyd
        cmd = np.linalg.inv(K) * xyd_cmd
        self.cmd.linear.x = cmd[0]
        self.cmd.angular.z = cmd[1]
        return np.linalg.norm(goal - xyp) < 1e-3
    def move(self, t, traj):
        """One control step at time t: track the manual goal if set,
        otherwise follow the reference trajectory, then publish
        command, goal and error."""
        if type(self.manual_goal) != type(None):
            # follow goal
            if self.reach(self.manual_goal): # reached
                if self.t0 == 0:
                    self.t0 = t
                if t - self.t0 > 5.: # go back to traj after 5 sec
                    self.t0 = 0
                    self.manual_goal = None
        else:
            xy,xyd,xydd = traj.ref(t)
            # publish goal for visualization
            # NOTE(review): xy[0] is a 1x1 matrix, not a scalar — confirm
            # rospy accepts it for a float message field.
            self.goal.pose.position.x = xy[0]
            self.goal.pose.position.y = xy[1]
            theta_goal = np.arctan2(xyd[1], xyd[0])
            self.goal.pose.orientation.z = np.sin(theta_goal/2)
            self.goal.pose.orientation.w = np.cos(theta_goal/2)
            if static_fb:
                self.reach(xy, xyd)
            else:
                # Lyapunov tracking controller (feed-forward + gains on
                # the longitudinal, lateral and heading errors).
                c = np.cos(self.theta)
                s = np.sin(self.theta)
                Kx = self.gains.Kx
                Ky = self.gains.Ky
                Kt = self.gains.Kt
                # Reference linear/angular velocities from the trajectory.
                vref = c*xyd[0] + s*xyd[1]
                if abs(vref) > 1e-3:
                    wref = (xyd[0]*xydd[1] - xyd[1]*xydd[0])/vref**2;
                else:
                    wref = 0
                # local error
                L = self.bike and self.L or 0
                beta = self.bike and self.beta or 0
                self.error.data = [xy[0,0] - self.xy[0,0],
                                   xy[1,0] - self.xy[1,0],
                                   toPi(theta_goal - self.theta)]
                # Position error rotated into the robot frame.
                xy_err = xy - self.xy # - L*np.matrix([[c],[s]])
                xe = (np.matrix([[c,s]]) * xy_err)[0,0]
                ye = (np.matrix([[-s,c]]) * xy_err)[0,0]
                te = toPi(theta_goal - self.theta - beta)
                self.cmd.linear.x = vref*np.cos(te) + Kx*xe
                # NOTE(review): np.sinc is the *normalized* sinc
                # sin(pi*x)/(pi*x); the usual Lyapunov law uses
                # sin(te)/te — confirm the intended factor.
                self.cmd.angular.z = wref + Ky*ye*vref*np.sinc(te) + Kt*te
                if self.bike:
                    # Convert body angular rate to steering rate.
                    self.cmd.angular.z -= self.cmd.linear.x/L*np.sin(beta)
        self.cmd_pub.publish(self.cmd)
        self.goal_pub.publish(self.goal)
        # Fourth error element: overall error norm appended each step.
        self.error.data.append(np.linalg.norm(self.error.data))
        self.error_pub.publish(self.error)
if __name__ == "__main__":
    # Node entry point: first CLI argument selects the kinematic model
    # ('bike' -> bicycle, anything else -> unicycle).
    rospy.init_node('control')
    robot = Robot(sys.argv[1] == 'bike')
    traj = Traj()
    # build path (one full period of the reference curve, for RViz)
    path = Path()
    path.header.frame_id = 'world'
    # NOTE(review): 100/traj.w is a float sample count — newer numpy
    # versions require an integer `num` for linspace; verify.
    for t in np.linspace(-np.pi/traj.w, np.pi/traj.w, 100/traj.w):
        pose = PoseStamped()
        pose.pose.orientation.w = 1
        pose.pose.position.x, pose.pose.position.y = traj.ref(t)[0]
        path.poses.append(pose)
    path_pub = rospy.Publisher('path', Path, queue_size=10)
    rate = rospy.Rate(10.)
    # 10 Hz control loop: republish the path and run one control step.
    while not rospy.is_shutdown():
        path_pub.publish(path)
        robot.move(rospy.Time.now().to_sec(), traj)
        rate.sleep()
| [
"rospy.init_node",
"rospy.Rate",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.sin",
"dynamic_reconfigure.server.Server",
"numpy.linspace",
"rospy.Subscriber",
"geometry_msgs.msg.Twist",
"rospy.Time.now",
"numpy.cos",
"rospy.Publisher",
"nav_msgs.msg.Path",
"rospy.is_shutdown",
"std_msgs.... | [((6547, 6573), 'rospy.init_node', 'rospy.init_node', (['"""control"""'], {}), "('control')\n", (6562, 6573), False, 'import rospy\n'), ((6671, 6677), 'nav_msgs.msg.Path', 'Path', ([], {}), '()\n', (6675, 6677), False, 'from nav_msgs.msg import Odometry, Path\n'), ((6728, 6786), 'numpy.linspace', 'np.linspace', (['(-np.pi / traj.w)', '(np.pi / traj.w)', '(100 / traj.w)'], {}), '(-np.pi / traj.w, np.pi / traj.w, 100 / traj.w)\n', (6739, 6786), True, 'import numpy as np\n'), ((6962, 7006), 'rospy.Publisher', 'rospy.Publisher', (['"""path"""', 'Path'], {'queue_size': '(10)'}), "('path', Path, queue_size=10)\n", (6977, 7006), False, 'import rospy\n'), ((7036, 7052), 'rospy.Rate', 'rospy.Rate', (['(10.0)'], {}), '(10.0)\n', (7046, 7052), False, 'import rospy\n'), ((1308, 1333), 'numpy.matrix', 'np.matrix', (['[[0.0], [0.0]]'], {}), '([[0.0], [0.0]])\n', (1317, 1333), True, 'import numpy as np\n'), ((1434, 1482), 'rospy.Subscriber', 'rospy.Subscriber', (['"""odom"""', 'Odometry', 'self.odom_cb'], {}), "('odom', Odometry, self.odom_cb)\n", (1450, 1482), False, 'import rospy\n'), ((1666, 1679), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1677, 1679), False, 'from geometry_msgs.msg import Twist, Pose2D, PoseStamped\n'), ((1764, 1833), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/move_base_simple/goal"""', 'PoseStamped', 'self.goal_cb'], {}), "('/move_base_simple/goal', PoseStamped, self.goal_cb)\n", (1780, 1833), False, 'import rospy\n'), ((1878, 1929), 'rospy.Publisher', 'rospy.Publisher', (['"""goal"""', 'PoseStamped'], {'queue_size': '(10)'}), "('goal', PoseStamped, queue_size=10)\n", (1893, 1929), False, 'import rospy\n'), ((1958, 1965), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (1963, 1965), False, 'from geometry_msgs.msg import Twist, Pose2D, PoseStamped\n'), ((1989, 2033), 'rospy.Publisher', 'rospy.Publisher', (['"""cmd"""', 'Twist'], {'queue_size': '(10)'}), "('cmd', Twist, queue_size=10)\n", (2004, 2033), False, 
'import rospy\n'), ((2064, 2083), 'std_msgs.msg.Float32MultiArray', 'Float32MultiArray', ([], {}), '()\n', (2081, 2083), False, 'from std_msgs.msg import Float32MultiArray\n'), ((2145, 2203), 'rospy.Publisher', 'rospy.Publisher', (['"""error"""', 'Float32MultiArray'], {'queue_size': '(10)'}), "('error', Float32MultiArray, queue_size=10)\n", (2160, 2203), False, 'import rospy\n'), ((2282, 2318), 'dynamic_reconfigure.server.Server', 'DRServer', (['GainsConfig', 'self.gains_cb'], {}), '(GainsConfig, self.gains_cb)\n', (2290, 2318), True, 'from dynamic_reconfigure.server import Server as DRServer\n'), ((2957, 3014), 'numpy.matrix', 'np.matrix', (['[[msg.pose.position.x], [msg.pose.position.y]]'], {}), '([[msg.pose.position.x], [msg.pose.position.y]])\n', (2966, 3014), True, 'import numpy as np\n'), ((3099, 3117), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (3105, 3117), True, 'import numpy as np\n'), ((3130, 3148), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (3136, 3148), True, 'import numpy as np\n'), ((6797, 6810), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (6808, 6810), False, 'from geometry_msgs.msg import Twist, Pose2D, PoseStamped\n'), ((7072, 7091), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (7089, 7091), False, 'import rospy\n'), ((804, 822), 'numpy.cos', 'np.cos', (['(self.w * t)'], {}), '(self.w * t)\n', (810, 822), True, 'import numpy as np\n'), ((821, 839), 'numpy.sin', 'np.sin', (['(self.w * t)'], {}), '(self.w * t)\n', (827, 839), True, 'import numpy as np\n'), ((1521, 1580), 'rospy.Subscriber', 'rospy.Subscriber', (['"""joint_states"""', 'JointState', 'self.joint_cb'], {}), "('joint_states', JointState, self.joint_cb)\n", (1537, 1580), False, 'import rospy\n'), ((2653, 2721), 'numpy.arctan2', 'np.arctan2', (['msg.pose.pose.orientation.z', 'msg.pose.pose.orientation.w'], {}), '(msg.pose.pose.orientation.z, msg.pose.pose.orientation.w)\n', (2663, 2721), True, 'import numpy as 
np\n'), ((3355, 3385), 'numpy.cos', 'np.cos', (['(self.theta + self.beta)'], {}), '(self.theta + self.beta)\n', (3361, 3385), True, 'import numpy as np\n'), ((3402, 3432), 'numpy.sin', 'np.sin', (['(self.theta + self.beta)'], {}), '(self.theta + self.beta)\n', (3408, 3432), True, 'import numpy as np\n'), ((3747, 3783), 'numpy.matrix', 'np.matrix', (['[[c, -d * s], [s, d * c]]'], {}), '([[c, -d * s], [s, d * c]])\n', (3756, 3783), True, 'import numpy as np\n'), ((3925, 3941), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (3938, 3941), True, 'import numpy as np\n'), ((4047, 4073), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal - xyp)'], {}), '(goal - xyp)\n', (4061, 4073), True, 'import numpy as np\n'), ((4738, 4764), 'numpy.arctan2', 'np.arctan2', (['xyd[1]', 'xyd[0]'], {}), '(xyd[1], xyd[0])\n', (4748, 4764), True, 'import numpy as np\n'), ((4808, 4830), 'numpy.sin', 'np.sin', (['(theta_goal / 2)'], {}), '(theta_goal / 2)\n', (4814, 4830), True, 'import numpy as np\n'), ((4872, 4894), 'numpy.cos', 'np.cos', (['(theta_goal / 2)'], {}), '(theta_goal / 2)\n', (4878, 4894), True, 'import numpy as np\n'), ((6401, 6432), 'numpy.linalg.norm', 'np.linalg.norm', (['self.error.data'], {}), '(self.error.data)\n', (6415, 6432), True, 'import numpy as np\n'), ((5034, 5052), 'numpy.cos', 'np.cos', (['self.theta'], {}), '(self.theta)\n', (5040, 5052), True, 'import numpy as np\n'), ((5073, 5091), 'numpy.sin', 'np.sin', (['self.theta'], {}), '(self.theta)\n', (5079, 5091), True, 'import numpy as np\n'), ((1169, 1183), 'numpy.matrix', 'np.matrix', (['val'], {}), '(val)\n', (1178, 1183), True, 'import numpy as np\n'), ((3493, 3518), 'numpy.matrix', 'np.matrix', (['[[ctb], [stb]]'], {}), '([[ctb], [stb]])\n', (3502, 3518), True, 'import numpy as np\n'), ((3710, 3731), 'numpy.matrix', 'np.matrix', (['[[c], [s]]'], {}), '([[c], [s]])\n', (3719, 3731), True, 'import numpy as np\n'), ((7152, 7168), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (7166, 7168), False, 
'import rospy\n'), ((3468, 3489), 'numpy.matrix', 'np.matrix', (['[[c], [s]]'], {}), '([[c], [s]])\n', (3477, 3489), True, 'import numpy as np\n'), ((5842, 5861), 'numpy.matrix', 'np.matrix', (['[[c, s]]'], {}), '([[c, s]])\n', (5851, 5861), True, 'import numpy as np\n'), ((5898, 5918), 'numpy.matrix', 'np.matrix', (['[[-s, c]]'], {}), '([[-s, c]])\n', (5907, 5918), True, 'import numpy as np\n'), ((6049, 6059), 'numpy.cos', 'np.cos', (['te'], {}), '(te)\n', (6055, 6059), True, 'import numpy as np\n'), ((6252, 6264), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (6258, 6264), True, 'import numpy as np\n'), ((6123, 6134), 'numpy.sinc', 'np.sinc', (['te'], {}), '(te)\n', (6130, 6134), True, 'import numpy as np\n'), ((3564, 3581), 'numpy.sin', 'np.sin', (['self.beta'], {}), '(self.beta)\n', (3570, 3581), True, 'import numpy as np\n'), ((3638, 3655), 'numpy.sin', 'np.sin', (['self.beta'], {}), '(self.beta)\n', (3644, 3655), True, 'import numpy as np\n')] |
""" Contains classes and methods to obtain various regression based metrics to evaluate"""
from sklearn import metrics
import numpy as np
import pandas as pd
import math
import sys
sys.path.append("../config")
class MetricsEval:
    """MetricsEval Class

    Collection of evaluation helpers that compute regression and
    classification metrics (per KCC / per output dimension) from predicted
    and actual value arrays, returning both a dict and a pandas DataFrame.
    """
def metrics_eval_base(self,predicted_y, test_y,logs_path,run_id=0):
"""Get predicted and actual value for all KCCs and return regression metrics namely: Mean Absolute Error, Mean Squared Error, Root Mean Squared Error, R-Squared Value
:param predicted_y: predicted values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param predicted_y: actual values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param logs_path: Logs path to save the evaluation metrics
:type logs_path: str (required)
:returns: dictionary of all metrics for each KCC
:rtype: dict
:returns: dataframe of all metrics for each KCC
:rtype: pandas.dataframe
"""
kcc_dim=test_y.shape[1]
import kcc_config as kcc_config
#kcc_struct=kcc_config.get_kcc_struct()
kcc_struct=kcc_config.kcc_struct
# Calculating Regression Based Evaluation Metrics
mae_KCCs=np.zeros((kcc_dim))
mse_KCCs=np.zeros((kcc_dim))
r2_KCCs=np.zeros((kcc_dim))
#print(kcc_struct)
kcc_id=[]
for kcc in kcc_struct:
if(kcc['kcc_type']==1):
kcc_name=kcc['kcc_id']
kcc_id.append(kcc_name)
mae_KCCs=metrics.mean_absolute_error(predicted_y, test_y,multioutput='raw_values')
mse_KCCs=metrics.mean_squared_error(predicted_y, test_y,multioutput='raw_values')
r2_KCCs = metrics.r2_score(predicted_y, test_y,multioutput='raw_values')
#print(kcc_id)
rmse_KCCs=np.sqrt(mse_KCCs)
eval_metrics= {
"KCC_ID":kcc_id,
"Mean Absolute Error" : mae_KCCs,
"Mean Squared Error" : mse_KCCs,
"Root Mean Squared Error" : rmse_KCCs,
"R Squared" : r2_KCCs
}
#print(len(kcc_id),len(mae_KCCs),len(mae_KCCs),len(rmse_KCCs),len(r2_KCCs))
#print(eval_metrics)
accuracy_metrics_df=pd.DataFrame.from_dict(eval_metrics)
accuracy_metrics_df=accuracy_metrics_df.set_index('KCC_ID')
#accuracy_metrics_df.to_csv(logs_path+'/metrics.csv') #moved to function call
return eval_metrics,accuracy_metrics_df
def metrics_eval_classification(self,y_pred, y_true,logs_path,run_id=0):
"""Get predicted and actual value for all KCCs and return regression metrics namely: Mean Absolute Error, Mean Squared Error, Root Mean Squared Error, R-Squared Value
:param predicted_y: predicted values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param predicted_y: actual values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param logs_path: Logs path to save the evaluation metrics
:type logs_path: str (required)
:returns: dictionary of all metrics for each KCC
:rtype: dict
:returns: dataframe of all metrics for each KCC
:rtype: pandas.dataframe
"""
kcc_dim=y_true.shape[1]
import kcc_config as kcc_config
kcc_struct=kcc_config.get_kcc_struct()
# Calculating Regression Based Evaluation Metrics
kcc_id=[]
for kcc in kcc_struct:
if(kcc['kcc_type']==1):
kcc_name=kcc['kcc_id']
kcc_id.append(kcc_name)
acc_kccs=[]
f1_kccs=[]
pre_kccs=[]
recall_kccs=[]
roc_auc_kccs=[]
kappa_kccs=[]
from sklearn.metrics import accuracy_score,f1_score,precision_score,recall_score,roc_auc_score,cohen_kappa_score
for i in range(y_true.shape[1]):
#Binary Prediction arrray
y_pred_bin=np.where(y_pred[:,i] > 0.5, 1, 0)
acc_kccs.append(accuracy_score(y_true[:,i],y_pred_bin))
f1_kccs.append(f1_score(y_true[:,i],y_pred_bin))
pre_kccs.append(precision_score(y_true[:,i],y_pred_bin))
recall_kccs.append(recall_score(y_true[:,i],y_pred_bin))
kappa_kccs.append(cohen_kappa_score(y_true[:,i],y_pred_bin))
#Probablity based Scoring
roc_auc_kccs.append(roc_auc_score(y_true[:,i],y_pred[:,i]))
eval_metrics= {
"KCC_ID":kcc_id,
"Accuracy" : acc_kccs,
"F1" : f1_kccs,
"Precision" : pre_kccs,
"Recall" : recall_kccs,
"ROC_AUC":roc_auc_kccs,
"Kappa":kappa_kccs
}
accuracy_metrics_df=pd.DataFrame.from_dict(eval_metrics)
accuracy_metrics_df=accuracy_metrics_df.set_index('KCC_ID')
#accuracy_metrics_df.to_csv(logs_path+'/metrics.csv') #moved to function call
return eval_metrics,accuracy_metrics_df
def metrics_eval_cop(self,predicted_y, test_y,logs_path,run_id=0):
"""Get predicted and actual value for all KCCs and return regression metrics namely: Mean Absolute Error, Mean Squared Error, Root Mean Squared Error, R-Squared Value
:param predicted_y: predicted values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param predicted_y: actual values for the process parameters
:type conn_str: numpy.array [test_samples*kccs] (required)
:param logs_path: Logs path to save the evaluation metrics
:type logs_path: str (required)
:returns: dictionary of all metrics for each KCC
:rtype: dict
:returns: dataframe of all metrics for each KCC
:rtype: pandas.dataframe
"""
kcc_dim=test_y.shape[1]
mae_KCCs=np.zeros((kcc_dim))
mse_KCCs=np.zeros((kcc_dim))
r2_KCCs=np.zeros((kcc_dim))
mae_KCCs=metrics.mean_absolute_error(predicted_y, test_y,multioutput='raw_values')
mse_KCCs=metrics.mean_squared_error(predicted_y, test_y,multioutput='raw_values')
r2_KCCs = metrics.r2_score(predicted_y, test_y,multioutput='raw_values')
rmse_KCCs=np.sqrt(mse_KCCs)
r2_adjusted=np.zeros(kcc_dim)
from tqdm import tqdm
for i in tqdm(range(kcc_dim)):
y_cop_test_flat=test_y[:,i]
y_cop_pred_flat=predicted_y[:,i]
combined_array=np.stack([y_cop_test_flat,y_cop_pred_flat],axis=1)
filtered_array=combined_array[np.where(abs(combined_array[:,0]) >= 0)]
y_cop_test_vector=filtered_array[:,0:1]
y_cop_pred_vector=filtered_array[:,1:2]
#print(y_cop_pred_vector.shape)
r2_adjusted[i] = metrics.r2_score(y_cop_test_vector,y_cop_pred_vector,multioutput='raw_values')[0]
eval_metrics= {
"Mean Absolute Error" : mae_KCCs,
"Mean Squared Error" : mse_KCCs,
"Root Mean Squared Error" : rmse_KCCs,
"R Squared" : r2_KCCs,
"R Squared Adjusted" : r2_adjusted
}
accuracy_metrics_df=pd.DataFrame({'MAE':mae_KCCs,'MSE':mse_KCCs,'RMSE':rmse_KCCs,'R2':r2_KCCs,"R2_Adjusted":r2_adjusted},columns=['MAE','MSE','RMSE','R2',"R2_Adjusted"])
#accuracy_metrics_df.to_csv(logs_path+'/metrics.csv') #moved to function call
return eval_metrics,accuracy_metrics_df
def metrics_eval_aleatoric_model(self,predicted_y, test_y,logs_path):
kcc_dim=test_y.shape[1]
log_variance=y_pred[:,kcc_dim]
variance=np.exp(log_variance)
predicted_y_sub=predicted_y[:,0:(kcc_dim-1)]
standard_deviation=np.sqrt(variance)
avg_aleatoric_SD=np.mean(standard_deviation)
# Calculating Regression Based Evaluation Metrics
mae_KCCs=np.zeros((kcc_dim))
mse_KCCs=np.zeros((kcc_dim))
r2_KCCs=np.zeros((kcc_dim))
kcc_id=[]
for i in range(kcc_dim):
kcc_name="KCC_"+str(i+1)
kcc_id.append(kcc_name)
mae_KCCs=metrics.mean_absolute_error(predicted_y_sub, test_y,multioutput='raw_values')
mse_KCCs=metrics.mean_squared_error(predicted_y_sub, test_y,multioutput='raw_values')
r2_KCCs = metrics.r2_score(predicted_y_sub, test_y,multioutput='raw_values')
rmse_KCCs=sqrt(mse_KCCs)
eval_metrics= {
"Mean Absolute Error" : mae_KCCs,
"Mean Squared Error" : mse_KCCs,
"Root Mean Squared Error" : rmse_KCCs,
"R Squared" : r2_KCCs,
"Aleatoric Standard Deviation":avg_aleatoric_SD
}
accuracy_metrics_df=pd.DataFrame({'KCC_ID':kcc_id,'MAE':mae_KCCs,'MSE':mse_KCCs,'RMSE':rmse_KCCs,'R2':r2_KCCs})
accuracy_metrics_df.columns = ['KCC_ID','MAE','MSE','RMSE','R2']
accuracy_metrics_df.to_csv(logs_path+'/metrics.csv')
return eval_metrics | [
"numpy.sqrt",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.r2_score",
"sys.path.append",
"numpy.mean",
"numpy.where",
"pandas.DataFrame.from_dict",
"numpy.exp",
"numpy.stack",
"pandas.DataFrame",
"sklearn.metrics.mean_ab... | [((181, 209), 'sys.path.append', 'sys.path.append', (['"""../config"""'], {}), "('../config')\n", (196, 209), False, 'import sys\n'), ((1268, 1285), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (1276, 1285), True, 'import numpy as np\n'), ((1299, 1316), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (1307, 1316), True, 'import numpy as np\n'), ((1329, 1346), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (1337, 1346), True, 'import numpy as np\n'), ((1511, 1585), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (1538, 1585), False, 'from sklearn import metrics\n'), ((1596, 1669), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (1622, 1669), False, 'from sklearn import metrics\n'), ((1681, 1744), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (1697, 1744), False, 'from sklearn import metrics\n'), ((1774, 1791), 'numpy.sqrt', 'np.sqrt', (['mse_KCCs'], {}), '(mse_KCCs)\n', (1781, 1791), True, 'import numpy as np\n'), ((2098, 2134), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['eval_metrics'], {}), '(eval_metrics)\n', (2120, 2134), True, 'import pandas as pd\n'), ((3155, 3182), 'kcc_config.get_kcc_struct', 'kcc_config.get_kcc_struct', ([], {}), '()\n', (3180, 3182), True, 'import kcc_config as kcc_config\n'), ((4298, 4334), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['eval_metrics'], {}), '(eval_metrics)\n', (4320, 4334), True, 'import pandas as pd\n'), ((5313, 5330), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (5321, 5330), True, 'import numpy as np\n'), 
((5344, 5361), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (5352, 5361), True, 'import numpy as np\n'), ((5374, 5391), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (5382, 5391), True, 'import numpy as np\n'), ((5409, 5483), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (5436, 5483), False, 'from sklearn import metrics\n'), ((5494, 5567), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (5520, 5567), False, 'from sklearn import metrics\n'), ((5579, 5642), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['predicted_y', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y, test_y, multioutput='raw_values')\n", (5595, 5642), False, 'from sklearn import metrics\n'), ((5655, 5672), 'numpy.sqrt', 'np.sqrt', (['mse_KCCs'], {}), '(mse_KCCs)\n', (5662, 5672), True, 'import numpy as np\n'), ((5690, 5707), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (5698, 5707), True, 'import numpy as np\n'), ((6428, 6599), 'pandas.DataFrame', 'pd.DataFrame', (["{'MAE': mae_KCCs, 'MSE': mse_KCCs, 'RMSE': rmse_KCCs, 'R2': r2_KCCs,\n 'R2_Adjusted': r2_adjusted}"], {'columns': "['MAE', 'MSE', 'RMSE', 'R2', 'R2_Adjusted']"}), "({'MAE': mae_KCCs, 'MSE': mse_KCCs, 'RMSE': rmse_KCCs, 'R2':\n r2_KCCs, 'R2_Adjusted': r2_adjusted}, columns=['MAE', 'MSE', 'RMSE',\n 'R2', 'R2_Adjusted'])\n", (6440, 6599), True, 'import pandas as pd\n'), ((6848, 6868), 'numpy.exp', 'np.exp', (['log_variance'], {}), '(log_variance)\n', (6854, 6868), True, 'import numpy as np\n'), ((6943, 6960), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (6950, 6960), True, 'import numpy as np\n'), ((6981, 7008), 'numpy.mean', 'np.mean', (['standard_deviation'], {}), 
'(standard_deviation)\n', (6988, 7008), True, 'import numpy as np\n'), ((7075, 7092), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (7083, 7092), True, 'import numpy as np\n'), ((7107, 7124), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (7115, 7124), True, 'import numpy as np\n'), ((7138, 7155), 'numpy.zeros', 'np.zeros', (['kcc_dim'], {}), '(kcc_dim)\n', (7146, 7155), True, 'import numpy as np\n'), ((7275, 7353), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['predicted_y_sub', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y_sub, test_y, multioutput='raw_values')\n", (7302, 7353), False, 'from sklearn import metrics\n'), ((7365, 7442), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['predicted_y_sub', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y_sub, test_y, multioutput='raw_values')\n", (7391, 7442), False, 'from sklearn import metrics\n'), ((7455, 7522), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['predicted_y_sub', 'test_y'], {'multioutput': '"""raw_values"""'}), "(predicted_y_sub, test_y, multioutput='raw_values')\n", (7471, 7522), False, 'from sklearn import metrics\n'), ((7797, 7901), 'pandas.DataFrame', 'pd.DataFrame', (["{'KCC_ID': kcc_id, 'MAE': mae_KCCs, 'MSE': mse_KCCs, 'RMSE': rmse_KCCs,\n 'R2': r2_KCCs}"], {}), "({'KCC_ID': kcc_id, 'MAE': mae_KCCs, 'MSE': mse_KCCs, 'RMSE':\n rmse_KCCs, 'R2': r2_KCCs})\n", (7809, 7901), True, 'import pandas as pd\n'), ((3653, 3687), 'numpy.where', 'np.where', (['(y_pred[:, i] > 0.5)', '(1)', '(0)'], {}), '(y_pred[:, i] > 0.5, 1, 0)\n', (3661, 3687), True, 'import numpy as np\n'), ((5851, 5903), 'numpy.stack', 'np.stack', (['[y_cop_test_flat, y_cop_pred_flat]'], {'axis': '(1)'}), '([y_cop_test_flat, y_cop_pred_flat], axis=1)\n', (5859, 5903), True, 'import numpy as np\n'), ((3710, 3750), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true[:, i]', 'y_pred_bin'], {}), '(y_true[:, i], 
y_pred_bin)\n', (3724, 3750), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((3768, 3802), 'sklearn.metrics.f1_score', 'f1_score', (['y_true[:, i]', 'y_pred_bin'], {}), '(y_true[:, i], y_pred_bin)\n', (3776, 3802), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((3821, 3862), 'sklearn.metrics.precision_score', 'precision_score', (['y_true[:, i]', 'y_pred_bin'], {}), '(y_true[:, i], y_pred_bin)\n', (3836, 3862), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((3884, 3922), 'sklearn.metrics.recall_score', 'recall_score', (['y_true[:, i]', 'y_pred_bin'], {}), '(y_true[:, i], y_pred_bin)\n', (3896, 3922), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((3943, 3986), 'sklearn.metrics.cohen_kappa_score', 'cohen_kappa_score', (['y_true[:, i]', 'y_pred_bin'], {}), '(y_true[:, i], y_pred_bin)\n', (3960, 3986), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((4042, 4083), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true[:, i]', 'y_pred[:, i]'], {}), '(y_true[:, i], y_pred[:, i])\n', (4055, 4083), False, 'from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, roc_auc_score, cohen_kappa_score\n'), ((6117, 6202), 'sklearn.metrics.r2_score', 'metrics.r2_score', (['y_cop_test_vector', 'y_cop_pred_vector'], {'multioutput': '"""raw_values"""'}), "(y_cop_test_vector, y_cop_pred_vector, multioutput='raw_values'\n )\n", (6133, 6202), False, 'from sklearn import metrics\n')] |
import cv2
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from PIL import ImageFilter, Image
def find_circle_coords(imagefile, radmin=80, radmax=110, houghaccumulator=0.6, searchrad=190):
    ''' Pass a raw image file and return an array of the identified circles
    (one (x, y, radius) row per circle) for further processing.

    imagefile - path to the image file (tested to work with jpg)
    radmin - minimum circle radius to accept
    radmax - maximum circle radius to accept
    houghaccumulator - ``dp`` parameter of the Hough gradient circle
                       detector (inverse ratio of the accumulator
                       resolution); a higher value gives more aggressive
                       circle finding
    searchrad - minimum distance between the centres of detected circles
                (passed as ``minDist`` to the detector)
    '''
    image = cv2.imread(imagefile)
    # converting to grayscale
    output = image.copy()
    gray = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)
    # identifying circles; [0] removes the extra leading dimension
    rawcircles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, houghaccumulator, searchrad)[0]
    # applying filter function to found circles
    circles = radfilter(rawcircles, radmin, radmax)
    # returning filtered circle array
    return circles
def radfilter(rawcircles, radmin, radmax):
    '''Keep only circles whose radius lies strictly between two bounds.

    rawcircles is an array of (x, y, radius) rows; a filtered copy containing
    only rows with radmin < radius < radmax is returned.
    '''
    radii = rawcircles[:, 2]
    keep_mask = (radii > radmin) & (radii < radmax)
    return rawcircles[keep_mask]
def plot_circle_coords(imagefile, circles):
    '''Overlay the identified circles on the image they were found in and
    display the original and annotated images side by side.'''
    original = cv2.imread(imagefile)
    annotated = original.copy()
    # draw only when the detector actually returned circles
    if circles is not None:
        for x, y, r in circles.astype("int"):
            # circle outline plus a small filled square marking the centre
            cv2.circle(annotated, (x, y), r, (0, 255, 0), 4)
            cv2.rectangle(annotated, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
    # show original and annotated images next to each other
    side_by_side = np.hstack([original, annotated])
    cv2.imshow("output", side_by_side)
    cv2.waitKey(10000)
    cv2.waitKey(1)
    cv2.destroyAllWindows()
    cv2.waitKey(1)
| [
"cv2.rectangle",
"numpy.hstack",
"cv2.HoughCircles",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imread"
] | [((672, 693), 'cv2.imread', 'cv2.imread', (['imagefile'], {}), '(imagefile)\n', (682, 693), False, 'import cv2\n'), ((761, 801), 'cv2.cvtColor', 'cv2.cvtColor', (['output', 'cv2.COLOR_BGR2GRAY'], {}), '(output, cv2.COLOR_BGR2GRAY)\n', (773, 801), False, 'import cv2\n'), ((1593, 1614), 'cv2.imread', 'cv2.imread', (['imagefile'], {}), '(imagefile)\n', (1603, 1614), False, 'import cv2\n'), ((846, 917), 'cv2.HoughCircles', 'cv2.HoughCircles', (['gray', 'cv2.HOUGH_GRADIENT', 'houghaccumulator', 'searchrad'], {}), '(gray, cv2.HOUGH_GRADIENT, houghaccumulator, searchrad)\n', (862, 917), False, 'import cv2\n'), ((2279, 2297), 'cv2.waitKey', 'cv2.waitKey', (['(10000)'], {}), '(10000)\n', (2290, 2297), False, 'import cv2\n'), ((2306, 2320), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2317, 2320), False, 'import cv2\n'), ((2329, 2352), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2350, 2352), False, 'import cv2\n'), ((2361, 2375), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2372, 2375), False, 'import cv2\n'), ((2050, 2095), 'cv2.circle', 'cv2.circle', (['output', '(x, y)', 'r', '(0, 255, 0)', '(4)'], {}), '(output, (x, y), r, (0, 255, 0), 4)\n', (2060, 2095), False, 'import cv2\n'), ((2108, 2180), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 5, y - 5)', '(x + 5, y + 5)', '(0, 128, 255)', '(-1)'], {}), '(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)\n', (2121, 2180), False, 'import cv2\n'), ((2243, 2269), 'numpy.hstack', 'np.hstack', (['[image, output]'], {}), '([image, output])\n', (2252, 2269), True, 'import numpy as np\n')] |
"""
This file is test file for agent 6,7 and 8.
"""
# Necessary imports
import time
import numpy as np
import multiprocessing
from datetime import datetime
import pickle
from constants import STARTING_POSITION_OF_AGENT, INF, PROBABILITY_OF_GRID, NUM_ROWS, NUM_COLS, NUM_ITERATIONS
from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, \
length_of_path_from_source_to_goal, examine_and_propagate_probability, generate_target_position
from src.Agent6 import Agent6
# Create the shared Agent6 instance (reset before each run inside find_the_target)
agent = Agent6()
# Legend labels for the three agents evaluated by this script
legends = ['Agent6', 'Agent7', 'Agent8']
def find_the_target(num: int):
    """
    Run agents 6, 7 and 8 on one randomly generated grid/target pair.

    :param num: index of the current run (used only for logging)
    :return: list with one ``[total examinations, total movements]`` pair per
             agent, in the order agent 6, agent 7, agent 8
             (the original docstring misstated the order and contents)
    """
    def _print_timestamp():
        # Log the current wall-clock time (de-duplicates the repeated
        # datetime formatting from the original).
        now = datetime.now()
        dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
        print("date and time =", dt_string)

    print('Running for:', num)
    agents = [6, 7, 8]
    x = list()
    # Keep generating grid and target position until we get a valid pair
    # (the target must be reachable from the agent's starting position)
    while True:
        random_maze = generate_grid_with_probability_p(PROBABILITY_OF_GRID)
        target_pos = generate_target_position(random_maze)
        if length_of_path_from_source_to_goal(random_maze, STARTING_POSITION_OF_AGENT, target_pos) != INF:
            break
    # Run agents 6, 7 and 8 on the same generated grid and target position
    for agent_num in agents:
        # Log when the agent started its execution
        print('Starting agent', agent_num)
        _print_timestamp()
        # Reset the shared agent before using it
        agent.reset()
        target_found = False
        # Run the following loop until the target is found
        while not target_found:
            # First, find the current estimated target
            agent.pre_planning(agent_num)
            # Second, prepare a path to reach this target
            agent.planning(agent.current_estimated_goal)
            # If the chosen target is not reachable, zero out its probability
            # of containing the target and pick another target and path
            while agent.current_estimated_goal not in agent.parents:
                agent.maze[agent.current_estimated_goal[0]][agent.current_estimated_goal[1]].is_blocked = True
                examine_and_propagate_probability(agent.maze, agent.probability_of_containing_target,
                                                  agent.false_negative_rates, agent.current_position, target_pos,
                                                  agent.current_estimated_goal, agent.current_estimated_goal)
                agent.pre_planning(agent_num)
                agent.planning(agent.current_estimated_goal)
            # Execute on the generated path
            agent.execution(random_maze)
            # Examine the current cell
            target_found = agent.examine(target_pos)
        # Find total number of movements along all executed paths
        movements = compute_explored_cells_from_path(agent.final_paths)
        x.append([agent.num_examinations, movements])
        # Log the end of the current agent's run
        print('ending agent', agent_num)
        _print_timestamp()
    return x
if __name__ == "__main__":
    # Agents evaluated by this script; all results are accumulated per agent.
    # (The original triplicated every accumulation/dump/print statement for
    # agents 6, 7 and 8; the loops below produce byte-identical output.)
    agent_nums = [6, 7, 8]
    total_examinations = {num: list() for num in agent_nums}
    total_movements = {num: list() for num in agent_nums}
    total_cost = {num: list() for num in agent_nums}

    start_time = time.time()
    # Used multiprocessing to parallelize processes
    n_cores = int(multiprocessing.cpu_count())
    print('Number of cores', n_cores)
    p = multiprocessing.Pool(processes=n_cores)
    results = p.imap_unordered(find_the_target, range(NUM_ITERATIONS))

    # Store results: each run yields one [examinations, movements] pair per agent
    for result in results:
        for ind, num in enumerate(agent_nums):
            total_examinations[num].append(result[ind][0])
            total_movements[num].append(result[ind][1])
            total_cost[num].append(total_examinations[num][-1] + total_movements[num][-1])

    # Dump the final output of each agent in its own pickle file
    for num in agent_nums:
        with open(f'../data/agent{num}_100_grids_100x100_forest_target.pkl', 'wb') as f:
            pickle.dump({'total_actions': total_cost[num],
                         'total_examinations': total_examinations[num],
                         'total_movements': total_movements[num]}, f)

    end_time = time.time()
    # Print final outputs
    for num in agent_nums:
        print(f"Average Number of movements of agent {num} = ", np.average(total_movements[num]))
        print(f"Average Number of total examinations of agent {num} = ", np.average(total_examinations[num]))
        print(f"Total average cost of agent {num} = ",
              np.average(total_movements[num]) + np.average(total_examinations[num]))
    print(f"Runtime = {end_time - start_time}")
| [
"pickle.dump",
"helpers.helper.compute_explored_cells_from_path",
"numpy.average",
"helpers.helper.generate_grid_with_probability_p",
"multiprocessing.cpu_count",
"src.Agent6.Agent6",
"datetime.datetime.now",
"multiprocessing.Pool",
"helpers.helper.examine_and_propagate_probability",
"helpers.help... | [((543, 551), 'src.Agent6.Agent6', 'Agent6', ([], {}), '()\n', (549, 551), False, 'from src.Agent6 import Agent6\n'), ((3698, 3709), 'time.time', 'time.time', ([], {}), '()\n', (3707, 3709), False, 'import time\n'), ((3856, 3895), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'n_cores'}), '(processes=n_cores)\n', (3876, 3895), False, 'import multiprocessing\n'), ((5311, 5322), 'time.time', 'time.time', ([], {}), '()\n', (5320, 5322), False, 'import time\n'), ((991, 1044), 'helpers.helper.generate_grid_with_probability_p', 'generate_grid_with_probability_p', (['PROBABILITY_OF_GRID'], {}), '(PROBABILITY_OF_GRID)\n', (1023, 1044), False, 'from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, length_of_path_from_source_to_goal, examine_and_propagate_probability, generate_target_position\n'), ((1066, 1103), 'helpers.helper.generate_target_position', 'generate_target_position', (['random_maze'], {}), '(random_maze)\n', (1090, 1103), False, 'from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, length_of_path_from_source_to_goal, examine_and_propagate_probability, generate_target_position\n'), ((1446, 1460), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1458, 1460), False, 'from datetime import datetime\n'), ((2981, 3032), 'helpers.helper.compute_explored_cells_from_path', 'compute_explored_cells_from_path', (['agent.final_paths'], {}), '(agent.final_paths)\n', (3013, 3032), False, 'from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, length_of_path_from_source_to_goal, examine_and_propagate_probability, generate_target_position\n'), ((3190, 3204), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3202, 3204), False, 'from datetime import datetime\n'), ((3781, 3808), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (3806, 3808), False, 'import 
multiprocessing\n'), ((4691, 4824), 'pickle.dump', 'pickle.dump', (["{'total_actions': total_cost_6, 'total_examinations': total_examinations_6,\n 'total_movements': total_movements_6}", 'f'], {}), "({'total_actions': total_cost_6, 'total_examinations':\n total_examinations_6, 'total_movements': total_movements_6}, f)\n", (4702, 4824), False, 'import pickle\n'), ((4922, 5055), 'pickle.dump', 'pickle.dump', (["{'total_actions': total_cost_7, 'total_examinations': total_examinations_7,\n 'total_movements': total_movements_7}", 'f'], {}), "({'total_actions': total_cost_7, 'total_examinations':\n total_examinations_7, 'total_movements': total_movements_7}, f)\n", (4933, 5055), False, 'import pickle\n'), ((5153, 5286), 'pickle.dump', 'pickle.dump', (["{'total_actions': total_cost_8, 'total_examinations': total_examinations_8,\n 'total_movements': total_movements_8}", 'f'], {}), "({'total_actions': total_cost_8, 'total_examinations':\n total_examinations_8, 'total_movements': total_movements_8}, f)\n", (5164, 5286), False, 'import pickle\n'), ((5405, 5434), 'numpy.average', 'np.average', (['total_movements_6'], {}), '(total_movements_6)\n', (5415, 5434), True, 'import numpy as np\n'), ((5500, 5532), 'numpy.average', 'np.average', (['total_examinations_6'], {}), '(total_examinations_6)\n', (5510, 5532), True, 'import numpy as np\n'), ((5701, 5730), 'numpy.average', 'np.average', (['total_movements_7'], {}), '(total_movements_7)\n', (5711, 5730), True, 'import numpy as np\n'), ((5796, 5828), 'numpy.average', 'np.average', (['total_examinations_7'], {}), '(total_examinations_7)\n', (5806, 5828), True, 'import numpy as np\n'), ((5997, 6026), 'numpy.average', 'np.average', (['total_movements_8'], {}), '(total_movements_8)\n', (6007, 6026), True, 'import numpy as np\n'), ((6092, 6124), 'numpy.average', 'np.average', (['total_examinations_8'], {}), '(total_examinations_8)\n', (6102, 6124), True, 'import numpy as np\n'), ((1115, 1206), 
'helpers.helper.length_of_path_from_source_to_goal', 'length_of_path_from_source_to_goal', (['random_maze', 'STARTING_POSITION_OF_AGENT', 'target_pos'], {}), '(random_maze, STARTING_POSITION_OF_AGENT,\n target_pos)\n', (1149, 1206), False, 'from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, length_of_path_from_source_to_goal, examine_and_propagate_probability, generate_target_position\n'), ((5580, 5609), 'numpy.average', 'np.average', (['total_movements_6'], {}), '(total_movements_6)\n', (5590, 5609), True, 'import numpy as np\n'), ((5612, 5644), 'numpy.average', 'np.average', (['total_examinations_6'], {}), '(total_examinations_6)\n', (5622, 5644), True, 'import numpy as np\n'), ((5876, 5905), 'numpy.average', 'np.average', (['total_movements_7'], {}), '(total_movements_7)\n', (5886, 5905), True, 'import numpy as np\n'), ((5908, 5940), 'numpy.average', 'np.average', (['total_examinations_7'], {}), '(total_examinations_7)\n', (5918, 5940), True, 'import numpy as np\n'), ((6172, 6201), 'numpy.average', 'np.average', (['total_movements_8'], {}), '(total_movements_8)\n', (6182, 6201), True, 'import numpy as np\n'), ((6204, 6236), 'numpy.average', 'np.average', (['total_examinations_8'], {}), '(total_examinations_8)\n', (6214, 6236), True, 'import numpy as np\n'), ((2323, 2547), 'helpers.helper.examine_and_propagate_probability', 'examine_and_propagate_probability', (['agent.maze', 'agent.probability_of_containing_target', 'agent.false_negative_rates', 'agent.current_position', 'target_pos', 'agent.current_estimated_goal', 'agent.current_estimated_goal'], {}), '(agent.maze, agent.\n probability_of_containing_target, agent.false_negative_rates, agent.\n current_position, target_pos, agent.current_estimated_goal, agent.\n current_estimated_goal)\n', (2356, 2547), False, 'from helpers.helper import generate_grid_with_probability_p, compute_explored_cells_from_path, length_of_path_from_source_to_goal, 
examine_and_propagate_probability, generate_target_position\n')] |
from datetime import datetime
from queue import deque
from time import time
import numpy as np
from comet_ml import Experiment
class Logger:
    """Console (and optional Comet) logger for per-mode batch timing and losses.

    Keeps a rolling window of the last 25 inter-batch durations per mode
    ("train"/"val"/"test") and prints a single carriage-returned status line.
    """

    def __init__(self, opts=None, exp=None, n_train=None, n_val=None, n_test=None):
        """
        Args:
            opts: mapping of run options; only ``opts.get("epochs")`` is read.
            exp: comet_ml Experiment instance, or None to disable uploads.
            n_train/n_val/n_test: number of batches per split (for display).
        """
        self.opts = opts
        self.exp = exp  # comet_ml.Experiment or None
        self.n_train = n_train
        self.n_val = n_val
        self.n_test = n_test
        self.global_step = 0
        self.batch_id = 0
        self.epoch_id = 0
        # Bug fix: opts defaults to None, so calling .get() unconditionally
        # crashed a default-constructed Logger.
        self.n_epochs = self.opts.get("epochs") if self.opts is not None else None
        # Rolling window of recent inter-batch durations, one deque per mode.
        self.qs = {mode: deque([], maxlen=25) for mode in ("train", "val", "test")}
        # Timestamp of the last logged step per mode.
        self.ts = {mode: time() for mode in ("train", "val", "test")}

    def now(self):
        """Return the current wall-clock time as an ``HH:MM:SS`` string."""
        return str(datetime.now()).split(".")[0].split()[-1]

    def log_step(self, losses="", mode="train", upload=True):
        """Print a one-line progress summary and optionally upload metrics.

        Args:
            losses: mapping of metric name -> value, or "" for no losses.
            mode: one of "train", "val", "test".
            upload: when True and an experiment is attached, push metrics
                to the experiment at the current global step.

        Raises:
            ValueError: if ``mode`` is not a known split (previously this
                surfaced later as an UnboundLocalError).
        """
        sizes = {"train": self.n_train, "val": self.n_val, "test": self.n_test}
        if mode not in sizes:
            raise ValueError(f"unknown mode {mode!r}")
        n = sizes[mode]
        now = self.now()
        nt = time()
        diff = nt - self.ts[mode]
        self.ts[mode] = nt
        self.qs[mode].append(diff)
        batch_time = np.mean(self.qs[mode])
        t = f"{batch_time: .2f}s/b"
        losses_str = (
            " | ".join("{}: {:.5f}".format(k, float(v)) for k, v in losses.items())
            if losses
            else ""
        )
        if mode == "train":
            current_state = "{:>5} {:>3}/{} {:>3}/{}".format(
                self.global_step,
                self.batch_id + 1,
                n,
                self.epoch_id + 1,
                self.n_epochs,
            )
        else:
            current_state = f"<> {mode} <>"
        print(
            "[{} {}] {} | {}".format(now, t, current_state, losses_str),
            end="\r",
        )
        # Bug fix: previously an empty ``losses`` ("") with an attached
        # experiment crashed on ``"".items()``; only upload real mappings.
        if upload and self.exp is not None and losses:
            self.exp.log_metrics(
                {f"{mode}_{k}": v for k, v in losses.items()}, step=self.global_step
            )
            if mode == "train":
                self.exp.log_metric("batch_time", batch_time)
| [
"numpy.mean",
"time.time",
"datetime.datetime.now",
"queue.deque"
] | [((1131, 1137), 'time.time', 'time', ([], {}), '()\n', (1135, 1137), False, 'from time import time\n'), ((1255, 1277), 'numpy.mean', 'np.mean', (['self.qs[mode]'], {}), '(self.qs[mode])\n', (1262, 1277), True, 'import numpy as np\n'), ((548, 568), 'queue.deque', 'deque', (['[]'], {'maxlen': '(25)'}), '([], maxlen=25)\n', (553, 568), False, 'from queue import deque\n'), ((589, 609), 'queue.deque', 'deque', (['[]'], {'maxlen': '(25)'}), '([], maxlen=25)\n', (594, 609), False, 'from queue import deque\n'), ((631, 651), 'queue.deque', 'deque', (['[]'], {'maxlen': '(25)'}), '([], maxlen=25)\n', (636, 651), False, 'from queue import deque\n'), ((705, 711), 'time.time', 'time', ([], {}), '()\n', (709, 711), False, 'from time import time\n'), ((732, 738), 'time.time', 'time', ([], {}), '()\n', (736, 738), False, 'from time import time\n'), ((760, 766), 'time.time', 'time', ([], {}), '()\n', (764, 766), False, 'from time import time\n'), ((817, 831), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (829, 831), False, 'from datetime import datetime\n')] |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
    'shape': [(3, 2), ()],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
class TestSoftplus(unittest.TestCase):
    """Forward/backward tests for functions.softplus with a random beta."""

    def setUp(self):
        # Random input, upstream gradient, and a beta drawn from [1, 2).
        self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
        self.beta = numpy.random.uniform(1, 2, ())
        if self.dtype == numpy.float16:
            # Half precision needs looser tolerances.
            self.check_forward_options = {'atol': 5e-4, 'rtol': 5e-3}
            self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
        else:
            self.check_forward_options = {}
            self.check_backward_options = {}

    def check_forward(self, x_data):
        y = functions.softplus(chainer.Variable(x_data), beta=self.beta)
        x_cpu = cuda.to_cpu(x_data)
        # Reference implementation: softplus(x) = log(1 + exp(beta * x)) / beta
        expected = numpy.log(1 + numpy.exp(self.beta * x_cpu)) / self.beta
        self.assertEqual(y.data.dtype, self.dtype)
        testing.assert_allclose(
            expected, y.data, **self.check_forward_options)

    def check_backward(self, x_data, y_grad):
        gradient_check.check_backward(
            functions.Softplus(beta=self.beta), x_data, y_grad,
            dtype=numpy.float64, **self.check_backward_options)

    def test_forward_cpu(self):
        self.check_forward(self.x)

    @attr.gpu
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x))

    def test_backward_cpu(self):
        self.check_backward(self.x, self.gy)

    @attr.gpu
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
# Standard chainer test-file entry point: run this module's tests.
testing.run_module(__name__, __file__)
| [
"chainer.Variable",
"chainer.testing.fix_random",
"chainer.testing.run_module",
"chainer.functions.softplus",
"chainer.functions.Softplus",
"chainer.cuda.to_cpu",
"chainer.testing.product",
"numpy.exp",
"numpy.random.uniform",
"chainer.testing.assert_allclose",
"chainer.cuda.to_gpu"
] | [((332, 352), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (350, 352), False, 'from chainer import testing\n'), ((1835, 1873), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (1853, 1873), False, 'from chainer import testing\n'), ((587, 617), 'numpy.random.uniform', 'numpy.random.uniform', (['(1)', '(2)', '()'], {}), '(1, 2, ())\n', (607, 617), False, 'import numpy\n'), ((930, 954), 'chainer.Variable', 'chainer.Variable', (['x_data'], {}), '(x_data)\n', (946, 954), False, 'import chainer\n'), ((967, 1004), 'chainer.functions.softplus', 'functions.softplus', (['x'], {'beta': 'self.beta'}), '(x, beta=self.beta)\n', (985, 1004), False, 'from chainer import functions\n'), ((1023, 1042), 'chainer.cuda.to_cpu', 'cuda.to_cpu', (['x_data'], {}), '(x_data)\n', (1034, 1042), False, 'from chainer import cuda\n'), ((1176, 1244), 'chainer.testing.assert_allclose', 'testing.assert_allclose', (['y_exp', 'y.data'], {}), '(y_exp, y.data, **self.check_forward_options)\n', (1199, 1244), False, 'from chainer import testing\n'), ((222, 323), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.float32, numpy.float64]}"], {}), "({'shape': [(3, 2), ()], 'dtype': [numpy.float16, numpy.\n float32, numpy.float64]})\n", (237, 323), False, 'from chainer import testing\n'), ((1356, 1390), 'chainer.functions.Softplus', 'functions.Softplus', ([], {'beta': 'self.beta'}), '(beta=self.beta)\n', (1374, 1390), False, 'from chainer import functions\n'), ((1614, 1633), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1625, 1633), False, 'from chainer import cuda\n'), ((1790, 1809), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.x'], {}), '(self.x)\n', (1801, 1809), False, 'from chainer import cuda\n'), ((1811, 1831), 'chainer.cuda.to_gpu', 'cuda.to_gpu', (['self.gy'], {}), '(self.gy)\n', (1822, 1831), False, 'from chainer import cuda\n'), 
((431, 470), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (451, 470), False, 'import numpy\n'), ((508, 547), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (528, 547), False, 'import numpy\n'), ((1073, 1103), 'numpy.exp', 'numpy.exp', (['(self.beta * x_value)'], {}), '(self.beta * x_value)\n', (1082, 1103), False, 'import numpy\n')] |
"""
Module for testing the model_selection.search module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
import pytest
from surprise import Dataset
from surprise import Reader
from surprise import SVD
from surprise.model_selection import KFold
from surprise.model_selection import PredefinedKFold
from surprise.model_selection import GridSearchCV
from surprise.model_selection import cross_validate
def test_parameter_combinations():
    """Check the size of the param_combinations attribute.

    Dict-valued parameters (bsl_options, sim_options) need special handling
    in param_grid; both are exercised here together with a plain parameter.
    """
    param_grid = {
        'bsl_options': {'method': ['als', 'sgd'],
                        'reg': [1, 2]},
        'k': [2, 3],
        'sim_options': {'name': ['msd', 'cosine'],
                        'min_support': [1, 5],
                        'user_based': [False]},
    }
    gs = GridSearchCV(SVD, param_grid)
    # 2 methods * 2 regs * 2 ks * 2 names * 2 supports * 1 user_based = 32
    assert len(gs.param_combinations) == 32
def test_best_estimator():
    """Check that best_estimator reproduces the reported best score when
    cross-validated again on the same predefined folds."""
    here = os.path.dirname(__file__)
    train_file = os.path.join(here, './u1_ml100k_train')
    test_file = os.path.join(here, './u1_ml100k_test')
    data = Dataset.load_from_folds([(train_file, test_file)],
                                   Reader('ml-100k'))

    param_grid = {'n_epochs': [5], 'lr_all': [0.002, 0.005],
                  'reg_all': [0.4, 0.6], 'n_factors': [1], 'init_std_dev': [0]}
    gs = GridSearchCV(SVD, param_grid, measures=['mae'],
                      cv=PredefinedKFold(), joblib_verbose=100)
    gs.fit(data)
    best = gs.best_estimator['mae']

    # Re-evaluate the winning estimator: its MAE must equal the best score.
    mae = cross_validate(best, data, measures=['MAE'],
                         cv=PredefinedKFold())['test_mae']
    assert mae == gs.best_score['mae']
def test_same_splits():
    """All parameter combinations must be evaluated on the same folds.

    The grid contains duplicated combinations, so their mean RMSE scores
    must coincide exactly. Maximum parallelism is used to stress this.
    """
    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, reader=Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)

    # Two identical combinations -> identical averaged RMSE.
    param_grid = {'n_epochs': [5], 'lr_all': [.2, .2],
                  'reg_all': [.4, .4], 'n_factors': [5], 'random_state': [0]}
    gs = GridSearchCV(SVD, param_grid, measures=['RMSE'], cv=kf,
                      n_jobs=-1)
    gs.fit(data)
    rmse_scores = list(gs.cv_results['mean_test_rmse'])
    assert len(set(rmse_scores)) == 1

    # Even with random_state=None in kf the folds would be reused, because
    # product(param_comb, kf.split(...)) is iterated once. But a second
    # fit() call must also see the same folds:
    gs.fit(data)
    rmse_scores.extend(gs.cv_results['mean_test_rmse'])
    assert len(set(rmse_scores)) == 1
def test_cv_results():
    """Test the cv_results attribute: shapes, mean/std consistency with the
    per-split values, timing entries, and ranking vs best_params."""
    f = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(f, Reader('ml-100k'))
    kf = KFold(3, shuffle=True, random_state=4)
    param_grid = {'n_epochs': [5], 'lr_all': [.2, .2],
                  'reg_all': [.4, .4], 'n_factors': [5], 'random_state': [0]}
    gs = GridSearchCV(SVD, param_grid, measures=['RMSE', 'mae'], cv=kf,
                      return_train_measures=True)
    gs.fit(data)

    n_comb = 4  # 2 lr_all values x 2 reg_all values

    # split*_{test,train}_rmse, their mean and std: one entry per param
    # combination, and mean/std must match the per-split values.
    for phase in ('test', 'train'):
        split_keys = ['split{}_{}_rmse'.format(i, phase) for i in range(3)]
        for key in split_keys:
            assert gs.cv_results[key].shape == (n_comb,)
        assert gs.cv_results['mean_{}_rmse'.format(phase)].shape == (n_comb,)
        splits = [gs.cv_results[key] for key in split_keys]
        assert np.allclose(gs.cv_results['mean_{}_rmse'.format(phase)],
                           np.mean(splits, axis=0))
        assert np.allclose(gs.cv_results['std_{}_rmse'.format(phase)],
                           np.std(splits, axis=0))

    # Fit/test timing entries also have one value per param combination.
    for key in ('mean_fit_time', 'std_fit_time',
                'mean_test_time', 'std_test_time'):
        assert gs.cv_results[key].shape == (n_comb,)

    assert gs.cv_results['params'] is gs.param_combinations

    # The top-ranked combination per measure must equal best_params.
    for measure in ('rmse', 'mae'):
        best_index = np.argmin(gs.cv_results['rank_test_{}'.format(measure)])
        assert gs.cv_results['params'][best_index] == gs.best_params[measure]
assert gs.cv_results['params'][best_index] == gs.best_params['mae']
def test_refit():
    """refit=True retrains with the best estimator of the first measure;
    refit='<measure>' uses that measure; refit=False forbids test() and
    predict(); refit is incompatible with load_from_folds."""
    data_file = os.path.join(os.path.dirname(__file__), './u1_ml100k_test')
    data = Dataset.load_from_file(data_file, Reader('ml-100k'))
    param_grid = {'n_epochs': [5], 'lr_all': [0.002, 0.005],
                  'reg_all': [0.4, 0.6], 'n_factors': [2]}

    # refit=True -> best estimator for 'mae' (first listed measure) is used.
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=True)
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    mae_preds = gs.best_estimator['mae'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == mae_preds

    # refit='rmse' -> best estimator for 'rmse' is used.
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit='rmse')
    gs.fit(data)
    gs_preds = gs.test(data.construct_testset(data.raw_ratings))
    rmse_preds = gs.best_estimator['rmse'].test(
        data.construct_testset(data.raw_ratings))
    assert gs_preds == rmse_preds
    gs.predict(2, 4)  # predict() must be callable once refitted

    # refit=False -> neither test() nor predict() is allowed.
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=False)
    gs.fit(data)
    with pytest.raises(ValueError):
        gs.test(data.construct_testset(data.raw_ratings))
    with pytest.raises(ValueError):
        gs.predict('1', '2')

    # refit cannot be combined with data loaded from predefined folds.
    here = os.path.dirname(__file__)
    train_file = os.path.join(here, './u1_ml100k_train')
    test_file = os.path.join(here, './u1_ml100k_test')
    folds_data = Dataset.load_from_folds([(train_file, test_file)],
                                         Reader('ml-100k'))
    gs = GridSearchCV(SVD, param_grid, measures=['mae', 'rmse'], cv=2,
                      refit=True)
    with pytest.raises(ValueError):
        gs.fit(folds_data)
| [
"surprise.model_selection.GridSearchCV",
"numpy.mean",
"surprise.model_selection.PredefinedKFold",
"os.path.dirname",
"pytest.raises",
"numpy.std",
"numpy.argmin",
"surprise.Reader",
"surprise.model_selection.KFold"
] | [((1134, 1163), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {}), '(SVD, param_grid)\n', (1146, 1163), False, 'from surprise.model_selection import GridSearchCV\n'), ((2541, 2579), 'surprise.model_selection.KFold', 'KFold', (['(3)'], {'shuffle': '(True)', 'random_state': '(4)'}), '(3, shuffle=True, random_state=4)\n', (2546, 2579), False, 'from surprise.model_selection import KFold\n'), ((2794, 2860), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['RMSE']", 'cv': 'kf', 'n_jobs': '(-1)'}), "(SVD, param_grid, measures=['RMSE'], cv=kf, n_jobs=-1)\n", (2806, 2860), False, 'from surprise.model_selection import GridSearchCV\n'), ((3613, 3651), 'surprise.model_selection.KFold', 'KFold', (['(3)'], {'shuffle': '(True)', 'random_state': '(4)'}), '(3, shuffle=True, random_state=4)\n', (3618, 3651), False, 'from surprise.model_selection import KFold\n'), ((3794, 3888), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['RMSE', 'mae']", 'cv': 'kf', 'return_train_measures': '(True)'}), "(SVD, param_grid, measures=['RMSE', 'mae'], cv=kf,\n return_train_measures=True)\n", (3806, 3888), False, 'from surprise.model_selection import GridSearchCV\n'), ((6234, 6276), 'numpy.argmin', 'np.argmin', (["gs.cv_results['rank_test_rmse']"], {}), "(gs.cv_results['rank_test_rmse'])\n", (6243, 6276), True, 'import numpy as np\n'), ((6367, 6408), 'numpy.argmin', 'np.argmin', (["gs.cv_results['rank_test_mae']"], {}), "(gs.cv_results['rank_test_mae'])\n", (6376, 6408), True, 'import numpy as np\n'), ((6875, 6948), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['mae', 'rmse']", 'cv': '(2)', 'refit': '(True)'}), "(SVD, param_grid, measures=['mae', 'rmse'], cv=2, refit=True)\n", (6887, 6948), False, 'from surprise.model_selection import GridSearchCV\n'), ((7260, 7335), 'surprise.model_selection.GridSearchCV', 
'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['mae', 'rmse']", 'cv': '(2)', 'refit': '"""rmse"""'}), "(SVD, param_grid, measures=['mae', 'rmse'], cv=2, refit='rmse')\n", (7272, 7335), False, 'from surprise.model_selection import GridSearchCV\n'), ((7713, 7787), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['mae', 'rmse']", 'cv': '(2)', 'refit': '(False)'}), "(SVD, param_grid, measures=['mae', 'rmse'], cv=2, refit=False)\n", (7725, 7787), False, 'from surprise.model_selection import GridSearchCV\n'), ((8338, 8411), 'surprise.model_selection.GridSearchCV', 'GridSearchCV', (['SVD', 'param_grid'], {'measures': "['mae', 'rmse']", 'cv': '(2)', 'refit': '(True)'}), "(SVD, param_grid, measures=['mae', 'rmse'], cv=2, refit=True)\n", (8350, 8411), False, 'from surprise.model_selection import GridSearchCV\n'), ((1365, 1390), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1380, 1390), False, 'import os\n'), ((1442, 1467), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1457, 1467), False, 'import os\n'), ((1586, 1603), 'surprise.Reader', 'Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (1592, 1603), False, 'from surprise import Reader\n'), ((2414, 2439), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2429, 2439), False, 'import os\n'), ((3501, 3526), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3516, 3526), False, 'import os\n'), ((3585, 3602), 'surprise.Reader', 'Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (3591, 3602), False, 'from surprise import Reader\n'), ((4358, 4485), 'numpy.mean', 'np.mean', (["[gs.cv_results['split0_test_rmse'], gs.cv_results['split1_test_rmse'], gs.\n cv_results['split2_test_rmse']]"], {'axis': '(0)'}), "([gs.cv_results['split0_test_rmse'], gs.cv_results[\n 'split1_test_rmse'], gs.cv_results['split2_test_rmse']], axis=0)\n", (4365, 4485), True, 'import numpy as np\n'), 
((4624, 4750), 'numpy.std', 'np.std', (["[gs.cv_results['split0_test_rmse'], gs.cv_results['split1_test_rmse'], gs.\n cv_results['split2_test_rmse']]"], {'axis': '(0)'}), "([gs.cv_results['split0_test_rmse'], gs.cv_results['split1_test_rmse'\n ], gs.cv_results['split2_test_rmse']], axis=0)\n", (4630, 4750), True, 'import numpy as np\n'), ((5248, 5378), 'numpy.mean', 'np.mean', (["[gs.cv_results['split0_train_rmse'], gs.cv_results['split1_train_rmse'], gs\n .cv_results['split2_train_rmse']]"], {'axis': '(0)'}), "([gs.cv_results['split0_train_rmse'], gs.cv_results[\n 'split1_train_rmse'], gs.cv_results['split2_train_rmse']], axis=0)\n", (5255, 5378), True, 'import numpy as np\n'), ((5518, 5647), 'numpy.std', 'np.std', (["[gs.cv_results['split0_train_rmse'], gs.cv_results['split1_train_rmse'], gs\n .cv_results['split2_train_rmse']]"], {'axis': '(0)'}), "([gs.cv_results['split0_train_rmse'], gs.cv_results[\n 'split1_train_rmse'], gs.cv_results['split2_train_rmse']], axis=0)\n", (5524, 5647), True, 'import numpy as np\n'), ((6531, 6556), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6546, 6556), False, 'import os\n'), ((6623, 6640), 'surprise.Reader', 'Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (6629, 6640), False, 'from surprise import Reader\n'), ((7836, 7861), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7849, 7861), False, 'import pytest\n'), ((7941, 7966), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7954, 7966), False, 'import pytest\n'), ((8089, 8114), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8104, 8114), False, 'import os\n'), ((8166, 8191), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8181, 8191), False, 'import os\n'), ((8310, 8327), 'surprise.Reader', 'Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (8316, 8327), False, 'from surprise import Reader\n'), ((8443, 8468), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (8456, 8468), False, 'import pytest\n'), ((1829, 1846), 'surprise.model_selection.PredefinedKFold', 'PredefinedKFold', ([], {}), '()\n', (1844, 1846), False, 'from surprise.model_selection import PredefinedKFold\n'), ((2513, 2530), 'surprise.Reader', 'Reader', (['"""ml-100k"""'], {}), "('ml-100k')\n", (2519, 2530), False, 'from surprise import Reader\n'), ((2063, 2080), 'surprise.model_selection.PredefinedKFold', 'PredefinedKFold', ([], {}), '()\n', (2078, 2080), False, 'from surprise.model_selection import PredefinedKFold\n')] |
import os
from TB2J.myTB import MyTB, merge_tbmodels_spin
import numpy as np
from TB2J.exchange import ExchangeCL, ExchangeNCL
from TB2J.exchangeCL2 import ExchangeCL2
from TB2J.utils import read_basis, auto_assign_basis_name
from ase.io import read
from TB2J.sisl_wrapper import SislWrapper
from TB2J.gpaw_wrapper import GPAWWrapper
def gen_exchange(path,
                 colinear=True,
                 orb_order=1,
                 posfile='POSCAR',
                 prefix_up='wannier90.up',
                 prefix_dn='wannier90.dn',
                 prefix_SOC='wannier90',
                 min_hopping_norm=1e-4,
                 max_distance=None,
                 efermi=0,
                 magnetic_elements=None,
                 kmesh=None,
                 emin=-12.0,
                 emax=0.0,
                 nz=100,
                 height=0.2,
                 nz1=50,
                 nz2=200,
                 nz3=50,
                 exclude_orbs=None,
                 Rcut=None,
                 ne=None,
                 description=''):
    """Compute exchange parameters from a Wannier90 tight-binding Hamiltonian.

    Results are written by the Exchange* classes to the TB2J_results
    directory.

    :param path: directory with the Wannier90 files and the structure file.
    :param colinear: True for separate spin-up/spin-down Hamiltonians,
        False for a single spinor (SOC) Hamiltonian.
    :param orb_order: orbital ordering of the spinor model; 2 reorders it
        (non-colinear branch only).
    :param posfile: structure file name inside ``path``.
    :param prefix_up, prefix_dn, prefix_SOC: Wannier90 seedname prefixes.
    :param min_hopping_norm, max_distance: accepted for API compatibility
        but not used in this function — TODO confirm they are consumed
        elsewhere.
    :param efermi: Fermi energy (eV).
    :param magnetic_elements: magnetic element symbols (default: empty list).
    :param kmesh: k-point mesh (default: [4, 4, 4]).
    :param emin, emax, nz, height, nz1, nz2, nz3: energy-contour settings.
    :param exclude_orbs: orbital indices to exclude (default: empty list).
    :param Rcut: real-space cutoff for exchange pairs.
    :param ne: number of electrons (optional).
    :param description: free text stored with the results.
    """
    # Bug fix: mutable default arguments ([] and [4, 4, 4]) are shared
    # across calls; normalize None to fresh per-call objects instead.
    magnetic_elements = [] if magnetic_elements is None else magnetic_elements
    kmesh = [4, 4, 4] if kmesh is None else kmesh
    exclude_orbs = [] if exclude_orbs is None else exclude_orbs

    atoms = read(os.path.join(path, posfile))
    basis_fname = os.path.join(path, 'basis.txt')
    if colinear:
        print("Reading Wannier90 hamiltonian: spin up.")
        tbmodel_up = MyTB.read_from_wannier_dir(
            path=path, prefix=prefix_up, posfile=posfile, nls=False)
        print("Reading Wannier90 hamiltonian: spin down.")
        tbmodel_dn = MyTB.read_from_wannier_dir(
            path=path, prefix=prefix_dn, posfile=posfile, nls=False)
        if os.path.exists(basis_fname):
            basis = read_basis(basis_fname)
        else:
            basis, _ = auto_assign_basis_name(tbmodel_up.xred, atoms)
        print("Starting to calculate exchange.")
        exchange = ExchangeCL2(
            tbmodels=(tbmodel_up, tbmodel_dn),
            atoms=atoms,
            basis=basis,
            efermi=efermi,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            height=height,
            nz1=nz1,
            nz2=nz2,
            nz3=nz3,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        exchange.run()
        print("All calculation finsihed. The results are in TB2J_results directory.")
    elif colinear and 0:
        # NOTE(review): `and 0` makes this branch unreachable (and
        # `if colinear:` above already captures colinear=True). Kept
        # verbatim as a deliberately disabled legacy path.
        print("Reading Wannier90 hamiltonian: spin up.")
        tbmodel_up = MyTB.read_from_wannier_dir(
            path=path, prefix=prefix_up, posfile=posfile, nls=False)
        print("Reading Wannier90 hamiltonian: spin down.")
        tbmodel_dn = MyTB.read_from_wannier_dir(
            path=path, prefix=prefix_dn, posfile=posfile, nls=False)
        tbmodel = merge_tbmodels_spin(tbmodel_up, tbmodel_dn)
        if os.path.exists(basis_fname):
            basis = read_basis(basis_fname)
        else:
            basis, _ = auto_assign_basis_name(tbmodel.xred, atoms)
        print("Starting to calculate exchange.")
        exchange = ExchangeCL(
            tbmodels=tbmodel,
            atoms=atoms,
            basis=basis,
            efermi=efermi,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            height=height,
            nz1=nz1,
            nz2=nz2,
            nz3=nz3,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        exchange.run()
        print("All calculation finsihed. The results are in TB2J_results directory.")
    else:
        print("Reading Wannier90 hamiltonian: non-colinear spin.")
        tbmodel = MyTB.read_from_wannier_dir(
            path=path, prefix=prefix_SOC, posfile=posfile, nls=True)
        if orb_order == 2:
            tbmodel = tbmodel.reorder()
        if os.path.exists(basis_fname):
            print("The use of basis file is deprecated. It will be ignored.")
        # Bug fix: previously ``basis`` was only assigned when no basis file
        # existed, so a present (ignored) basis.txt caused a NameError at
        # the ``basis=basis`` argument below.
        basis, _ = auto_assign_basis_name(tbmodel.xred, atoms)
        print("Starting to calculate exchange.")
        exchange = ExchangeNCL(
            tbmodels=tbmodel,
            atoms=atoms,
            basis=basis,
            efermi=efermi,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            height=height,
            nz1=nz1,
            nz2=nz2,
            nz3=nz3,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        print("\n")
        exchange.run()
        print("All calculation finsihed. The results are in TB2J_results directory.")
def gen_exchange_siesta(
        fdf_fname,
        magnetic_elements=None,
        kmesh=None,
        emin=-12.0,
        emax=0.0,
        nz=50,
        exclude_orbs=None,
        Rcut=None,
        ne=None,
        description=''):
    """Compute exchange parameters from a Siesta Hamiltonian.

    :param fdf_fname: path to the Siesta .fdf input file.
    :param magnetic_elements: magnetic element symbols (default: empty list).
    :param kmesh: k-point mesh (default: [4, 4, 4]).
    :param emin, emax, nz: energy-contour settings.
    :param exclude_orbs: orbital indices to exclude (default: empty list).
    :param Rcut: real-space cutoff for exchange pairs.
    :param ne: number of electrons (optional).
    :param description: free text stored with the results.

    :raises ImportError: if sisl is not installed.
    """
    try:
        import sisl
    except ImportError as err:
        # Bug fix: was a bare ``except:``, which would also swallow e.g.
        # KeyboardInterrupt; catch ImportError only and chain the cause.
        raise ImportError(
            "sisl cannot be imported. Please install sisl first.") from err

    # Bug fix: mutable default arguments are shared across calls;
    # normalize None to fresh per-call objects instead.
    magnetic_elements = [] if magnetic_elements is None else magnetic_elements
    kmesh = [4, 4, 4] if kmesh is None else kmesh
    exclude_orbs = [] if exclude_orbs is None else exclude_orbs

    fdf = sisl.get_sile(fdf_fname)
    H = fdf.read_hamiltonian()
    if H.spin.is_colinear:
        print("Reading Siesta hamiltonian: colinear spin.")
        tbmodel_up = SislWrapper(H, spin=0)
        tbmodel_dn = SislWrapper(H, spin=1)
        basis = dict(zip(tbmodel_up.orbs, list(range(tbmodel_up.norb))))
        print("Starting to calculate exchange.")
        exchange = ExchangeCL2(
            tbmodels=(tbmodel_up, tbmodel_dn),
            atoms=tbmodel_up.atoms,
            basis=basis,
            efermi=0.0,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        exchange.run()
        print("\n")
        print("All calculation finsihed. The results are in TB2J_results directory.")
    elif H.spin.is_colinear:
        # NOTE(review): this condition repeats the branch above, so this
        # code is unreachable — it looks like it was meant to optionally
        # treat a colinear Hamiltonian as non-colinear. Kept verbatim to
        # preserve behavior; confirm the intended condition.
        print("Reading Siesta hamiltonian: colinear spin. Treat as non-colinear")
        tbmodel = SislWrapper(H, spin='merge')
        basis = dict(zip(tbmodel.orbs, list(range(tbmodel.nbasis))))
        print("Starting to calculate exchange.")
        exchange = ExchangeNCL(
            tbmodels=tbmodel,
            atoms=tbmodel.atoms,
            basis=basis,
            efermi=0.0,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        exchange.run()
        print("\n")
        print("All calculation finsihed. The results are in TB2J_results directory.")
    elif H.spin.is_spinorbit:
        print("Reading Siesta hamiltonian: non-colinear spin.")
        tbmodel = SislWrapper(H, spin=None)
        basis = dict(zip(tbmodel.orbs, list(range(tbmodel.nbasis))))
        print("Starting to calculate exchange.")
        exchange = ExchangeNCL(
            tbmodels=tbmodel,
            atoms=tbmodel.atoms,
            basis=basis,
            efermi=0.0,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            ne=ne,
            description=description)
        exchange.run()
        print("\n")
        print("All calculation finsihed. The results are in TB2J_results directory.")
def gen_exchange_gpaw(
        gpw_fname,
        magnetic_elements=None,
        kmesh=None,
        emin=-12.0,
        emax=0.0,
        nz=50,
        exclude_orbs=None,
        Rcut=None,
        description=''):
    """Compute exchange parameters from a GPAW calculation.

    :param gpw_fname: path to the GPAW restart (.gpw) file.
    :param magnetic_elements: magnetic element symbols (default: empty list).
    :param kmesh: k-point mesh (default: [3, 3, 3]).
    :param emin, emax, nz: energy-contour settings.
    :param exclude_orbs: orbital indices to exclude (default: empty list).
    :param Rcut: real-space cutoff for exchange pairs.
    :param description: free text stored with the results.
    """
    # Bug fix: mutable default arguments are shared across calls;
    # normalize None to fresh per-call objects instead.
    magnetic_elements = [] if magnetic_elements is None else magnetic_elements
    kmesh = [3, 3, 3] if kmesh is None else kmesh
    exclude_orbs = [] if exclude_orbs is None else exclude_orbs

    print("Reading from GPAW data and calculate electronic structure.")
    model = GPAWWrapper(gpw_fname=gpw_fname)
    efermi = model.calc.get_fermi_level()
    print(f"Fermi Energy: {efermi}")
    # Positions are duplicated — presumably one copy per spin channel;
    # TODO confirm against GPAWWrapper's basis layout.
    poses = np.vstack([model.positions, model.positions])
    basis, _ = auto_assign_basis_name(poses, model.atoms)
    if model.calc.get_spin_polarized():
        print("Starting to calculate exchange.")
        exchange = ExchangeNCL(
            tbmodels=model,
            atoms=model.atoms,
            efermi=efermi,
            basis=basis,
            magnetic_elements=magnetic_elements,
            kmesh=kmesh,
            emin=emin,
            emax=emax,
            nz=nz,
            exclude_orbs=exclude_orbs,
            Rcut=Rcut,
            description=description)
        exchange.run()
        print("\n")
        print("All calculation finsihed. The results are in TB2J_results directory.")
    else:
        # Previously a non-spin-polarized calculation returned silently;
        # tell the user why nothing was computed.
        print("The GPAW calculation is not spin polarized; "
              "no exchange parameters were computed.")
| [
"os.path.exists",
"TB2J.exchangeCL2.ExchangeCL2",
"TB2J.myTB.merge_tbmodels_spin",
"TB2J.exchange.ExchangeNCL",
"TB2J.gpaw_wrapper.GPAWWrapper",
"TB2J.sisl_wrapper.SislWrapper",
"os.path.join",
"TB2J.utils.read_basis",
"sisl.get_sile",
"numpy.vstack",
"TB2J.utils.auto_assign_basis_name",
"TB2J... | [((1130, 1161), 'os.path.join', 'os.path.join', (['path', '"""basis.txt"""'], {}), "(path, 'basis.txt')\n", (1142, 1161), False, 'import os\n'), ((5483, 5507), 'sisl.get_sile', 'sisl.get_sile', (['fdf_fname'], {}), '(fdf_fname)\n', (5496, 5507), False, 'import sisl\n'), ((8615, 8647), 'TB2J.gpaw_wrapper.GPAWWrapper', 'GPAWWrapper', ([], {'gpw_fname': 'gpw_fname'}), '(gpw_fname=gpw_fname)\n', (8626, 8647), False, 'from TB2J.gpaw_wrapper import GPAWWrapper\n'), ((8735, 8780), 'numpy.vstack', 'np.vstack', (['[model.positions, model.positions]'], {}), '([model.positions, model.positions])\n', (8744, 8780), True, 'import numpy as np\n'), ((8796, 8838), 'TB2J.utils.auto_assign_basis_name', 'auto_assign_basis_name', (['poses', 'model.atoms'], {}), '(poses, model.atoms)\n', (8818, 8838), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((1083, 1110), 'os.path.join', 'os.path.join', (['path', 'posfile'], {}), '(path, posfile)\n', (1095, 1110), False, 'import os\n'), ((1257, 1344), 'TB2J.myTB.MyTB.read_from_wannier_dir', 'MyTB.read_from_wannier_dir', ([], {'path': 'path', 'prefix': 'prefix_up', 'posfile': 'posfile', 'nls': '(False)'}), '(path=path, prefix=prefix_up, posfile=posfile,\n nls=False)\n', (1283, 1344), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((1434, 1521), 'TB2J.myTB.MyTB.read_from_wannier_dir', 'MyTB.read_from_wannier_dir', ([], {'path': 'path', 'prefix': 'prefix_dn', 'posfile': 'posfile', 'nls': '(False)'}), '(path=path, prefix=prefix_dn, posfile=posfile,\n nls=False)\n', (1460, 1521), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((1542, 1569), 'os.path.exists', 'os.path.exists', (['basis_fname'], {}), '(basis_fname)\n', (1556, 1569), False, 'import os\n'), ((1767, 2058), 'TB2J.exchangeCL2.ExchangeCL2', 'ExchangeCL2', ([], {'tbmodels': '(tbmodel_up, tbmodel_dn)', 'atoms': 'atoms', 'basis': 'basis', 'efermi': 'efermi', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 
'emax': 'emax', 'nz': 'nz', 'height': 'height', 'nz1': 'nz1', 'nz2': 'nz2', 'nz3': 'nz3', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 'description'}), '(tbmodels=(tbmodel_up, tbmodel_dn), atoms=atoms, basis=basis,\n efermi=efermi, magnetic_elements=magnetic_elements, kmesh=kmesh, emin=\n emin, emax=emax, nz=nz, height=height, nz1=nz1, nz2=nz2, nz3=nz3,\n exclude_orbs=exclude_orbs, Rcut=Rcut, ne=ne, description=description)\n', (1778, 2058), False, 'from TB2J.exchangeCL2 import ExchangeCL2\n'), ((5647, 5669), 'TB2J.sisl_wrapper.SislWrapper', 'SislWrapper', (['H'], {'spin': '(0)'}), '(H, spin=0)\n', (5658, 5669), False, 'from TB2J.sisl_wrapper import SislWrapper\n'), ((5691, 5713), 'TB2J.sisl_wrapper.SislWrapper', 'SislWrapper', (['H'], {'spin': '(1)'}), '(H, spin=1)\n', (5702, 5713), False, 'from TB2J.sisl_wrapper import SislWrapper\n'), ((5855, 6113), 'TB2J.exchangeCL2.ExchangeCL2', 'ExchangeCL2', ([], {'tbmodels': '(tbmodel_up, tbmodel_dn)', 'atoms': 'tbmodel_up.atoms', 'basis': 'basis', 'efermi': '(0.0)', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 'description'}), '(tbmodels=(tbmodel_up, tbmodel_dn), atoms=tbmodel_up.atoms,\n basis=basis, efermi=0.0, magnetic_elements=magnetic_elements, kmesh=\n kmesh, emin=emin, emax=emax, nz=nz, exclude_orbs=exclude_orbs, Rcut=\n Rcut, ne=ne, description=description)\n', (5866, 6113), False, 'from TB2J.exchangeCL2 import ExchangeCL2\n'), ((8947, 9171), 'TB2J.exchange.ExchangeNCL', 'ExchangeNCL', ([], {'tbmodels': 'model', 'atoms': 'model.atoms', 'efermi': 'efermi', 'basis': 'basis', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'description': 'description'}), '(tbmodels=model, atoms=model.atoms, efermi=efermi, basis=basis,\n 
magnetic_elements=magnetic_elements, kmesh=kmesh, emin=emin, emax=emax,\n nz=nz, exclude_orbs=exclude_orbs, Rcut=Rcut, description=description)\n', (8958, 9171), False, 'from TB2J.exchange import ExchangeCL, ExchangeNCL\n'), ((1591, 1614), 'TB2J.utils.read_basis', 'read_basis', (['basis_fname'], {}), '(basis_fname)\n', (1601, 1614), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((1652, 1698), 'TB2J.utils.auto_assign_basis_name', 'auto_assign_basis_name', (['tbmodel_up.xred', 'atoms'], {}), '(tbmodel_up.xred, atoms)\n', (1674, 1698), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((2464, 2551), 'TB2J.myTB.MyTB.read_from_wannier_dir', 'MyTB.read_from_wannier_dir', ([], {'path': 'path', 'prefix': 'prefix_up', 'posfile': 'posfile', 'nls': '(False)'}), '(path=path, prefix=prefix_up, posfile=posfile,\n nls=False)\n', (2490, 2551), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((2641, 2728), 'TB2J.myTB.MyTB.read_from_wannier_dir', 'MyTB.read_from_wannier_dir', ([], {'path': 'path', 'prefix': 'prefix_dn', 'posfile': 'posfile', 'nls': '(False)'}), '(path=path, prefix=prefix_dn, posfile=posfile,\n nls=False)\n', (2667, 2728), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((2756, 2799), 'TB2J.myTB.merge_tbmodels_spin', 'merge_tbmodels_spin', (['tbmodel_up', 'tbmodel_dn'], {}), '(tbmodel_up, tbmodel_dn)\n', (2775, 2799), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((2811, 2838), 'os.path.exists', 'os.path.exists', (['basis_fname'], {}), '(basis_fname)\n', (2825, 2838), False, 'import os\n'), ((3033, 3306), 'TB2J.exchange.ExchangeCL', 'ExchangeCL', ([], {'tbmodels': 'tbmodel', 'atoms': 'atoms', 'basis': 'basis', 'efermi': 'efermi', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 'height': 'height', 'nz1': 'nz1', 'nz2': 'nz2', 'nz3': 'nz3', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 
'description'}), '(tbmodels=tbmodel, atoms=atoms, basis=basis, efermi=efermi,\n magnetic_elements=magnetic_elements, kmesh=kmesh, emin=emin, emax=emax,\n nz=nz, height=height, nz1=nz1, nz2=nz2, nz3=nz3, exclude_orbs=\n exclude_orbs, Rcut=Rcut, ne=ne, description=description)\n', (3043, 3306), False, 'from TB2J.exchange import ExchangeCL, ExchangeNCL\n'), ((3703, 3790), 'TB2J.myTB.MyTB.read_from_wannier_dir', 'MyTB.read_from_wannier_dir', ([], {'path': 'path', 'prefix': 'prefix_SOC', 'posfile': 'posfile', 'nls': '(True)'}), '(path=path, prefix=prefix_SOC, posfile=posfile,\n nls=True)\n', (3729, 3790), False, 'from TB2J.myTB import MyTB, merge_tbmodels_spin\n'), ((3916, 3943), 'os.path.exists', 'os.path.exists', (['basis_fname'], {}), '(basis_fname)\n', (3930, 3943), False, 'import os\n'), ((4217, 4491), 'TB2J.exchange.ExchangeNCL', 'ExchangeNCL', ([], {'tbmodels': 'tbmodel', 'atoms': 'atoms', 'basis': 'basis', 'efermi': 'efermi', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 'height': 'height', 'nz1': 'nz1', 'nz2': 'nz2', 'nz3': 'nz3', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 'description'}), '(tbmodels=tbmodel, atoms=atoms, basis=basis, efermi=efermi,\n magnetic_elements=magnetic_elements, kmesh=kmesh, emin=emin, emax=emax,\n nz=nz, height=height, nz1=nz1, nz2=nz2, nz3=nz3, exclude_orbs=\n exclude_orbs, Rcut=Rcut, ne=ne, description=description)\n', (4228, 4491), False, 'from TB2J.exchange import ExchangeCL, ExchangeNCL\n'), ((6610, 6638), 'TB2J.sisl_wrapper.SislWrapper', 'SislWrapper', (['H'], {'spin': '"""merge"""'}), "(H, spin='merge')\n", (6621, 6638), False, 'from TB2J.sisl_wrapper import SislWrapper\n'), ((6776, 7013), 'TB2J.exchange.ExchangeNCL', 'ExchangeNCL', ([], {'tbmodels': 'tbmodel', 'atoms': 'tbmodel.atoms', 'basis': 'basis', 'efermi': '(0.0)', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 
'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 'description'}), '(tbmodels=tbmodel, atoms=tbmodel.atoms, basis=basis, efermi=0.0,\n magnetic_elements=magnetic_elements, kmesh=kmesh, emin=emin, emax=emax,\n nz=nz, exclude_orbs=exclude_orbs, Rcut=Rcut, ne=ne, description=description\n )\n', (6787, 7013), False, 'from TB2J.exchange import ExchangeCL, ExchangeNCL\n'), ((2860, 2883), 'TB2J.utils.read_basis', 'read_basis', (['basis_fname'], {}), '(basis_fname)\n', (2870, 2883), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((2921, 2964), 'TB2J.utils.auto_assign_basis_name', 'auto_assign_basis_name', (['tbmodel.xred', 'atoms'], {}), '(tbmodel.xred, atoms)\n', (2943, 2964), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((4105, 4148), 'TB2J.utils.auto_assign_basis_name', 'auto_assign_basis_name', (['tbmodel.xred', 'atoms'], {}), '(tbmodel.xred, atoms)\n', (4127, 4148), False, 'from TB2J.utils import read_basis, auto_assign_basis_name\n'), ((7494, 7519), 'TB2J.sisl_wrapper.SislWrapper', 'SislWrapper', (['H'], {'spin': 'None'}), '(H, spin=None)\n', (7505, 7519), False, 'from TB2J.sisl_wrapper import SislWrapper\n'), ((7657, 7894), 'TB2J.exchange.ExchangeNCL', 'ExchangeNCL', ([], {'tbmodels': 'tbmodel', 'atoms': 'tbmodel.atoms', 'basis': 'basis', 'efermi': '(0.0)', 'magnetic_elements': 'magnetic_elements', 'kmesh': 'kmesh', 'emin': 'emin', 'emax': 'emax', 'nz': 'nz', 'exclude_orbs': 'exclude_orbs', 'Rcut': 'Rcut', 'ne': 'ne', 'description': 'description'}), '(tbmodels=tbmodel, atoms=tbmodel.atoms, basis=basis, efermi=0.0,\n magnetic_elements=magnetic_elements, kmesh=kmesh, emin=emin, emax=emax,\n nz=nz, exclude_orbs=exclude_orbs, Rcut=Rcut, ne=ne, description=description\n )\n', (7668, 7894), False, 'from TB2J.exchange import ExchangeCL, ExchangeNCL\n')] |
import io
import cv2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from estimation.coordinates import get_coordinates
from estimation.connections import get_connections
from estimation.estimators import estimate
from estimation.renderers import draw
from train_config import *
# Limb connections between keypoints, as pairs of 1-based joint indices;
# each [a, b] entry is one limb drawn between joints a and b.
# (Original note: find connection in the specified sequence, center 29 is in the position 15.)
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10],
           [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17],
           [1, 16], [16, 18], [3, 17], [6, 18]]
# The middle-joints heatmap correspondence: for every limb in limbSeq, the
# pair of output-channel indices holding its part-affinity-field components.
hmapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22],
           [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52],
           [55, 56], [37, 38], [45, 46]]
# Color palette used when visualizing joints/limbs (one entry per limb;
# channel order depends on the renderer that consumes it).
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0],
          [0, 255, 0],
          [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255],
          [85, 0, 255],
          [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
def plot_to_image(figure):
    """Render a matplotlib figure to PNG and return it as a TF image tensor.

    The supplied figure is closed by this call and is inaccessible afterwards.
    The returned tensor is RGBA with a leading batch dimension of size 1.
    """
    png_buffer = io.BytesIO()
    plt.savefig(png_buffer, format='png')
    # Closing the figure prevents it from additionally being rendered
    # directly inside the notebook.
    plt.close(figure)
    png_buffer.seek(0)
    # Decode the in-memory PNG back into a tensor and add the batch axis.
    decoded = tf.image.decode_png(png_buffer.getvalue(), channels=4)
    return tf.expand_dims(decoded, 0)
def get_jet_color(v, vmin, vmax):
    """Map a scalar ``v`` onto a jet-style colormap.

    ``v`` is clamped to ``[vmin, vmax]`` first.  Returns a length-3 float
    array; the three channels ramp piecewise-linearly through the classic
    blue -> cyan -> yellow -> red jet progression.

    NOTE: the ramp formulas use ``v`` directly (not normalized by the
    range), so the breakpoints line up exactly when (vmin, vmax) = (0, 1).
    """
    color = np.zeros(3)
    v = min(max(v, vmin), vmax)  # clamp into range
    dv = vmax - vmin
    if v < vmin + 0.125 * dv:
        color[0] = 256 * (0.5 + v * 4)          # channel 0: 0.5 ~ 1
    elif v < vmin + 0.375 * dv:
        color[0] = 255
        color[1] = 256 * (v - 0.125) * 4        # channel 1: 0 ~ 1
    elif v < vmin + 0.625 * dv:
        color[0] = 256 * (-4 * v + 2.5)         # channel 0: 1 ~ 0
        color[1] = 255
        color[2] = 256 * (4 * (v - 0.375))      # channel 2: 0 ~ 1
    elif v < vmin + 0.875 * dv:
        color[1] = 256 * (-4 * v + 3.5)         # channel 1: 1 ~ 0
        color[2] = 255
    else:
        color[2] = 256 * (-4 * v + 4.5)         # channel 2: 1 ~ 0.5
    return color
def colorize(gray_img):
    """Convert a 2-D grayscale array into an (H, W, 3) jet-colormapped array.

    Each pixel is mapped independently through ``get_jet_color`` with the
    fixed range (0, 1).
    """
    out = np.zeros(gray_img.shape + (3,))
    # Per-pixel mapping; row-major order, same as nested range() loops.
    for row, col in np.ndindex(out.shape[0], out.shape[1]):
        out[row, col, :] = get_jet_color(gray_img[row, col], 0, 1)
    return out
def pad_right_down_corner(img, stride, pad_value):
    """Pad an (H, W, C) image so height and width are multiples of ``stride``.

    Only the bottom and right edges receive padding; new pixels are filled
    with ``pad_value``.

    Returns:
        (img_padded, pad) where ``pad`` is ``[up, left, down, right]`` pad
        sizes (``up`` and ``left`` are always 0).
    """
    height = img.shape[0]
    width = img.shape[1]
    pad = [
        0,                                                          # up
        0,                                                          # left
        0 if height % stride == 0 else stride - height % stride,    # down
        0 if width % stride == 0 else stride - width % stride,      # right
    ]

    # Each strip is a tile of pad_value with the requested thickness; the
    # up/left strips have zero thickness and concatenating them is a no-op.
    img_padded = img
    top_strip = np.tile(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))
    img_padded = np.concatenate((top_strip, img_padded), axis=0)
    left_strip = np.tile(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))
    img_padded = np.concatenate((left_strip, img_padded), axis=1)
    bottom_strip = np.tile(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))
    img_padded = np.concatenate((img_padded, bottom_strip), axis=0)
    right_strip = np.tile(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))
    img_padded = np.concatenate((img_padded, right_strip), axis=1)
    return img_padded, pad
def _show_gray(position, title, image_2d):
    """Draw one grayscale map into cell ``position`` of the 14x8 subplot grid."""
    plt.subplot(14, 8, position, title=title)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(image_2d, cmap='gray')


def probe_model_singlenet(model, test_img_path):
    """Run ``model`` on one image and visualize its intermediate outputs.

    Plots PAF channels 0..18 of the three PAF stages and the final heatmap
    stage on a 14x8 grid, then (best effort) overlays the decoded skeleton
    in the last used cell.

    Args:
        model: network whose ``predict`` returns ``[paf1, paf2, paf3, heatmap]``.
        test_img_path: filesystem path of the probe image.

    Returns:
        The matplotlib figure containing all subplots.
    """
    img = cv2.imread(test_img_path)  # B,G,R order
    # Reverse the channel order to R,G,B and prepend a batch axis.
    input_img = img[np.newaxis, :, :, [2, 1, 0]]
    inputs = tf.convert_to_tensor(input_img)

    output_blobs = model.predict(inputs)
    paf1 = output_blobs[0]
    paf2 = output_blobs[1]
    paf3 = output_blobs[2]
    heatmap1 = output_blobs[3]

    figure = plt.figure(figsize=(24, 28))
    stages = [('stage 1 - paf', paf1),
              ('stage 2 - paf', paf2),
              ('stage 3 - paf', paf3),
              ('stage 4 - hp', heatmap1)]

    # Rows 0..8: channel 2*i in columns 1-4 and channel 2*i+1 in columns 5-8,
    # one column per stage (replaces the original copy-pasted subplot code).
    for i in range(9):
        for col, (label, blob) in enumerate(stages):
            _show_gray(8 * i + col + 1, label + str(i * 2), blob[0, :, :, i * 2])
            _show_gray(8 * i + col + 5, label + str(i * 2 + 1), blob[0, :, :, i * 2 + 1])

    # Row 9 (cells 73-76): channel 18 of every stage.
    for col, (label, blob) in enumerate(stages):
        _show_gray(73 + col, label + str(18), blob[0, :, :, 18])

    # Best effort: decode keypoints/skeletons and overlay them on the input.
    try:
        coordinates = get_coordinates(cfg, heatmap1[0, ...])
        connections = get_connections(cfg, coordinates, paf3[0, ...])
        skeletons = estimate(cfg, connections)
        canvas = draw(cfg, img, coordinates, skeletons, resize_fac=8)

        plt.subplot(14, 8, 89, title="keypoints")
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(canvas[:, :, [2, 1, 0]])  # reverse channel order for display
    except Exception:  # was a bare `except:`; don't swallow KeyboardInterrupt/SystemExit
        # NOTE(review): the failure mode is not visible from this file —
        # presumably the decoder raises when no keypoints are found; confirm
        # before narrowing this handler further.
        print("cannot show plot whole image")
    return figure
| [
"matplotlib.pyplot.grid",
"io.BytesIO",
"estimation.coordinates.get_coordinates",
"matplotlib.pyplot.imshow",
"estimation.estimators.estimate",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"numpy.concatenate",
"tensorflow.convert_to_tensor",
"numpy.tile",
"matplotlib.pyplot.savefig",
... | [((1411, 1423), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1421, 1423), False, 'import io\n'), ((1428, 1458), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (1439, 1458), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1574), 'matplotlib.pyplot.close', 'plt.close', (['figure'], {}), '(figure)\n', (1566, 1574), True, 'import matplotlib.pyplot as plt\n'), ((1730, 1754), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1744, 1754), True, 'import tensorflow as tf\n'), ((1816, 1827), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1824, 1827), True, 'import numpy as np\n'), ((2464, 2495), 'numpy.zeros', 'np.zeros', (['(gray_img.shape + (3,))'], {}), '(gray_img.shape + (3,))\n', (2472, 2495), True, 'import numpy as np\n'), ((2985, 3047), 'numpy.tile', 'np.tile', (['(img_padded[0:1, :, :] * 0 + pad_value)', '(pad[0], 1, 1)'], {}), '(img_padded[0:1, :, :] * 0 + pad_value, (pad[0], 1, 1))\n', (2992, 3047), True, 'import numpy as np\n'), ((3065, 3109), 'numpy.concatenate', 'np.concatenate', (['(pad_up, img_padded)'], {'axis': '(0)'}), '((pad_up, img_padded), axis=0)\n', (3079, 3109), True, 'import numpy as np\n'), ((3125, 3187), 'numpy.tile', 'np.tile', (['(img_padded[:, 0:1, :] * 0 + pad_value)', '(1, pad[1], 1)'], {}), '(img_padded[:, 0:1, :] * 0 + pad_value, (1, pad[1], 1))\n', (3132, 3187), True, 'import numpy as np\n'), ((3205, 3251), 'numpy.concatenate', 'np.concatenate', (['(pad_left, img_padded)'], {'axis': '(1)'}), '((pad_left, img_padded), axis=1)\n', (3219, 3251), True, 'import numpy as np\n'), ((3267, 3331), 'numpy.tile', 'np.tile', (['(img_padded[-2:-1, :, :] * 0 + pad_value)', '(pad[2], 1, 1)'], {}), '(img_padded[-2:-1, :, :] * 0 + pad_value, (pad[2], 1, 1))\n', (3274, 3331), True, 'import numpy as np\n'), ((3349, 3395), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_down)'], {'axis': '(0)'}), '((img_padded, pad_down), axis=0)\n', (3363, 
3395), True, 'import numpy as np\n'), ((3412, 3476), 'numpy.tile', 'np.tile', (['(img_padded[:, -2:-1, :] * 0 + pad_value)', '(1, pad[3], 1)'], {}), '(img_padded[:, -2:-1, :] * 0 + pad_value, (1, pad[3], 1))\n', (3419, 3476), True, 'import numpy as np\n'), ((3494, 3541), 'numpy.concatenate', 'np.concatenate', (['(img_padded, pad_right)'], {'axis': '(1)'}), '((img_padded, pad_right), axis=1)\n', (3508, 3541), True, 'import numpy as np\n'), ((3631, 3656), 'cv2.imread', 'cv2.imread', (['test_img_path'], {}), '(test_img_path)\n', (3641, 3656), False, 'import cv2\n'), ((3734, 3765), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_img'], {}), '(input_img)\n', (3754, 3765), True, 'import tensorflow as tf\n'), ((3934, 3962), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(24, 28)'}), '(figsize=(24, 28))\n', (3944, 3962), True, 'import matplotlib.pyplot as plt\n'), ((5613, 5627), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5623, 5627), True, 'import matplotlib.pyplot as plt\n'), ((5632, 5646), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5642, 5646), True, 'import matplotlib.pyplot as plt\n'), ((5651, 5666), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5659, 5666), True, 'import matplotlib.pyplot as plt\n'), ((5671, 5713), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf1[0, :, :, 18]'], {'cmap': '"""gray"""'}), "(paf1[0, :, :, 18], cmap='gray')\n", (5681, 5713), True, 'import matplotlib.pyplot as plt\n'), ((5779, 5793), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5789, 5793), True, 'import matplotlib.pyplot as plt\n'), ((5798, 5812), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5808, 5812), True, 'import matplotlib.pyplot as plt\n'), ((5817, 5832), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5825, 5832), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5879), 'matplotlib.pyplot.imshow', 'plt.imshow', 
(['paf2[0, :, :, 18]'], {'cmap': '"""gray"""'}), "(paf2[0, :, :, 18], cmap='gray')\n", (5847, 5879), True, 'import matplotlib.pyplot as plt\n'), ((5945, 5959), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5955, 5959), True, 'import matplotlib.pyplot as plt\n'), ((5964, 5978), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5974, 5978), True, 'import matplotlib.pyplot as plt\n'), ((5983, 5998), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5991, 5998), True, 'import matplotlib.pyplot as plt\n'), ((6003, 6045), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf3[0, :, :, 18]'], {'cmap': '"""gray"""'}), "(paf3[0, :, :, 18], cmap='gray')\n", (6013, 6045), True, 'import matplotlib.pyplot as plt\n'), ((6110, 6124), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (6120, 6124), True, 'import matplotlib.pyplot as plt\n'), ((6129, 6143), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (6139, 6143), True, 'import matplotlib.pyplot as plt\n'), ((6148, 6163), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (6156, 6163), True, 'import matplotlib.pyplot as plt\n'), ((6168, 6214), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap1[0, :, :, 18]'], {'cmap': '"""gray"""'}), "(heatmap1[0, :, :, 18], cmap='gray')\n", (6178, 6214), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4077), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4073, 4077), True, 'import matplotlib.pyplot as plt\n'), ((4086, 4100), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4096, 4100), True, 'import matplotlib.pyplot as plt\n'), ((4109, 4124), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4117, 4124), True, 'import matplotlib.pyplot as plt\n'), ((4133, 4178), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf1[0, :, :, i * 2]'], {'cmap': '"""gray"""'}), "(paf1[0, :, :, i * 2], cmap='gray')\n", (4143, 4178), True, 'import 
matplotlib.pyplot as plt\n'), ((4254, 4268), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4264, 4268), True, 'import matplotlib.pyplot as plt\n'), ((4277, 4291), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4287, 4291), True, 'import matplotlib.pyplot as plt\n'), ((4300, 4315), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4308, 4315), True, 'import matplotlib.pyplot as plt\n'), ((4324, 4369), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf2[0, :, :, i * 2]'], {'cmap': '"""gray"""'}), "(paf2[0, :, :, i * 2], cmap='gray')\n", (4334, 4369), True, 'import matplotlib.pyplot as plt\n'), ((4446, 4460), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4456, 4460), True, 'import matplotlib.pyplot as plt\n'), ((4469, 4483), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4479, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4492, 4507), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4500, 4507), True, 'import matplotlib.pyplot as plt\n'), ((4516, 4561), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf3[0, :, :, i * 2]'], {'cmap': '"""gray"""'}), "(paf3[0, :, :, i * 2], cmap='gray')\n", (4526, 4561), True, 'import matplotlib.pyplot as plt\n'), ((4637, 4651), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4647, 4651), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4674), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4670, 4674), True, 'import matplotlib.pyplot as plt\n'), ((4683, 4698), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4691, 4698), True, 'import matplotlib.pyplot as plt\n'), ((4707, 4756), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap1[0, :, :, i * 2]'], {'cmap': '"""gray"""'}), "(heatmap1[0, :, :, i * 2], cmap='gray')\n", (4717, 4756), True, 'import matplotlib.pyplot as plt\n'), ((4842, 4856), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), 
'([])\n', (4852, 4856), True, 'import matplotlib.pyplot as plt\n'), ((4865, 4879), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4875, 4879), True, 'import matplotlib.pyplot as plt\n'), ((4888, 4903), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (4896, 4903), True, 'import matplotlib.pyplot as plt\n'), ((4912, 4961), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf1[0, :, :, i * 2 + 1]'], {'cmap': '"""gray"""'}), "(paf1[0, :, :, i * 2 + 1], cmap='gray')\n", (4922, 4961), True, 'import matplotlib.pyplot as plt\n'), ((5037, 5051), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5047, 5051), True, 'import matplotlib.pyplot as plt\n'), ((5060, 5074), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5070, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5083, 5098), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5091, 5098), True, 'import matplotlib.pyplot as plt\n'), ((5107, 5156), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf2[0, :, :, i * 2 + 1]'], {'cmap': '"""gray"""'}), "(paf2[0, :, :, i * 2 + 1], cmap='gray')\n", (5117, 5156), True, 'import matplotlib.pyplot as plt\n'), ((5233, 5247), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5243, 5247), True, 'import matplotlib.pyplot as plt\n'), ((5256, 5270), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5266, 5270), True, 'import matplotlib.pyplot as plt\n'), ((5279, 5294), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5287, 5294), True, 'import matplotlib.pyplot as plt\n'), ((5303, 5352), 'matplotlib.pyplot.imshow', 'plt.imshow', (['paf3[0, :, :, i * 2 + 1]'], {'cmap': '"""gray"""'}), "(paf3[0, :, :, i * 2 + 1], cmap='gray')\n", (5313, 5352), True, 'import matplotlib.pyplot as plt\n'), ((5428, 5442), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (5438, 5442), True, 'import matplotlib.pyplot as plt\n'), ((5451, 5465), 
'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (5461, 5465), True, 'import matplotlib.pyplot as plt\n'), ((5474, 5489), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (5482, 5489), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5551), 'matplotlib.pyplot.imshow', 'plt.imshow', (['heatmap1[0, :, :, i * 2 + 1]'], {'cmap': '"""gray"""'}), "(heatmap1[0, :, :, i * 2 + 1], cmap='gray')\n", (5508, 5551), True, 'import matplotlib.pyplot as plt\n'), ((6917, 6955), 'estimation.coordinates.get_coordinates', 'get_coordinates', (['cfg', 'heatmap1[0, ...]'], {}), '(cfg, heatmap1[0, ...])\n', (6932, 6955), False, 'from estimation.coordinates import get_coordinates\n'), ((6977, 7024), 'estimation.connections.get_connections', 'get_connections', (['cfg', 'coordinates', 'paf3[0, ...]'], {}), '(cfg, coordinates, paf3[0, ...])\n', (6992, 7024), False, 'from estimation.connections import get_connections\n'), ((7044, 7070), 'estimation.estimators.estimate', 'estimate', (['cfg', 'connections'], {}), '(cfg, connections)\n', (7052, 7070), False, 'from estimation.estimators import estimate\n'), ((7088, 7140), 'estimation.renderers.draw', 'draw', (['cfg', 'img', 'coordinates', 'skeletons'], {'resize_fac': '(8)'}), '(cfg, img, coordinates, skeletons, resize_fac=8)\n', (7092, 7140), False, 'from estimation.renderers import draw\n'), ((7150, 7191), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(14)', '(8)', '(89)'], {'title': '"""keypoints"""'}), "(14, 8, 89, title='keypoints')\n", (7161, 7191), True, 'import matplotlib.pyplot as plt\n'), ((7198, 7212), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (7208, 7212), True, 'import matplotlib.pyplot as plt\n'), ((7221, 7235), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (7231, 7235), True, 'import matplotlib.pyplot as plt\n'), ((7244, 7259), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (7252, 7259), True, 'import matplotlib.pyplot as 
plt\n'), ((7268, 7303), 'matplotlib.pyplot.imshow', 'plt.imshow', (['canvas[:, :, [2, 1, 0]]'], {}), '(canvas[:, :, [2, 1, 0]])\n', (7278, 7303), True, 'import matplotlib.pyplot as plt\n')] |
import json
from multiprocessing import Pool
from random import randint
from typing import List, Dict, Callable, Any
import numpy as np
import os
from tqdm import tqdm
from pietoolbelt.datasets.common import BasicDataset
from pietoolbelt.pipeline.abstract_step import AbstractStep, DatasetInPipeline, AbstractStepDirResult
class StratificationResult(AbstractStepDirResult):
    """Stratification output stored in a directory.

    Each fold is flushed to its own ``<name>.npy`` index file, and a
    ``meta.json`` in the same directory records the number of indices per
    fold.
    """

    def __init__(self, path: str):
        super().__init__(path)
        self._meta_file = os.path.join(path, 'meta.json')
        # Pick up metadata left behind by a previous run, if any.
        if os.path.exists(self._meta_file):
            with open(self._meta_file, 'r') as meta_file:
                self._meta = json.load(meta_file)
        else:
            self._meta = {}
        # Helpers mapping a fold name to its .npy file name / full path.
        self._name2file = lambda name: name if name.endswith('.npy') else name + '.npy'
        self._name2path = lambda name: os.path.join(self._path, self._name2file(name))

    def add_indices(self, indices: List[np.uint], name: str, dataset: BasicDataset):
        """Flush ``indices`` of ``dataset`` to disk and record the fold in meta.json."""
        dataset.set_indices(indices).flush_indices(self._name2path(name))
        self._meta[name] = {'indices_num': len(indices)}
        with open(self._meta_file, 'w') as meta_file:
            json.dump(self._meta, meta_file)

    def get_folds(self) -> List[str]:
        """Return the names of all folds recorded so far."""
        return [fold for fold in self._meta]

    def get_indices(self, name: str) -> List[np.ndarray]:
        """Load the saved indices of fold ``name``.

        Raises:
            RuntimeError: when the fold's index file does not exist.
        """
        fold_file = os.path.join(self._path, self._name2file(name))
        if not os.path.exists(fold_file):
            raise RuntimeError('Indices file doesnt exists [{}]'.format(fold_file))
        return np.load(fold_file)

    def get_output_paths(self) -> List[str]:
        """Return the directories produced by this step (just the result dir)."""
        return [self._path]
class DatasetStratification:
def __init__(self, dataset: BasicDataset, calc_target_label: Callable[[Any], Any], result: StratificationResult, workers_num: int = 0):
self._dataset = dataset
self._calc_label = calc_target_label
self._progress_clbk = None
self._workers_num = workers_num
self._result = result
@staticmethod
def __fill_hist(target_hist: [], indices: {}):
def pick(d):
idx = randint(0, len(indices[d]) - 1)
res = indices[d][idx]
del indices[d][idx]
return res
res = {}
for idx, d in enumerate(target_hist):
idxes = []
for _ in range(d):
idxes.append(pick(idx))
res[idx] = idxes
return res
def calc_hist(self, dataset: BasicDataset):
labels = []
if self._workers_num > 1:
with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
labels.append(label)
pbar.update()
else:
for d in tqdm(dataset.get_items(), total=len(dataset)):
labels.append(self._calc_label(d))
hist = [[] for _ in range(max(labels))]
for i, idxes in enumerate(labels):
hist[idxes - 1].append(i)
return np.array([len(v) for v in hist]), hist
def cal_multi_hist(self, dataset: BasicDataset):
labels = []
if self._workers_num > 1:
with Pool(self._workers_num) as pool, tqdm(total=len(dataset)) as pbar:
for label in pool.imap(self._calc_label, dataset.get_items(), chunksize=self._workers_num * 10):
labels.append(label)
pbar.update()
else:
for d in tqdm(dataset.get_items(), total=len(dataset)):
labels.append(self._calc_label(d))
percent = np.percentile(np.array(labels)[:, 1], np.linspace(0, 100, 10)).tolist()
out_p = []
for p in percent:
if percent.index(p) % 2 != 0:
out_p.append(p)
hist_1 = [[] for _ in range(int(max(np.array(labels)[:, 0])) + 1)]
for i, idxes in enumerate(labels):
hist_1[int(idxes[0])].append(i)
hist_2 = [[] for _ in range(len(out_p))]
for i, idxes in enumerate(labels):
for p in range(len(out_p)):
if p == 0 and idxes[1] <= out_p[p]:
hist_2[p].append(i)
elif p != 0 and out_p[p - 1] < idxes[1] <= out_p[p]:
hist_2[p].append(i)
hist = [[] for _ in range(len(hist_1) * len(hist_2))]
z = lambda x, y: [y.index(h) if x in h else -1 for h in y]
for i, idxes in enumerate(labels):
index_h1, index_h2 = self.get_hist_idx(i, hist_1), self.get_hist_idx(i, hist_2)
if index_h2 == -1 or index_h1 == -1:
raise Exception("Index error in histograms")
hist[int(index_h1 * index_h2) - 1].append(i)
return np.array([len(v) for v in hist]), hist
def stratificate_dataset(self, hist: np.ndarray, indices: list, parts: [float]) -> []:
res = []
for part in parts[:len(parts) - 1]:
target_hist = (hist.copy() * part).astype(np.uint32)
res.append([target_hist, self.__fill_hist(target_hist, indices)])
res.append([np.array([len(i) for i in indices]).astype(np.uint32), {i: v for i, v in enumerate(indices)}])
return res
@staticmethod
def get_hist_idx(x, hist):
res = -1
for h in hist:
res = hist.index(h) if x in h else res
return res
@staticmethod
def check_indices_for_intersection(indices: []):
for i in range(len(indices)):
for index in indices[i]:
for other_indices in indices[i + 1:]:
if index in other_indices:
raise Exception('Indices intersects')
def balance_classes(self, hist: np.ndarray, indices: {}) -> tuple:
target_hist = hist.copy()
target_hist[np.argmax(target_hist)] = np.sum(target_hist[target_hist != target_hist.max()])
return target_hist, self.__fill_hist(target_hist, indices)
def _flush_indices(self, indices: [], part_indices: [], path: str):
inner_indices = [part_indices[it] for bin in indices[1].values() for it in bin]
self._result.add_indices(indices=inner_indices, name=path, dataset=self._dataset)
return inner_indices
    def run(self, parts: {str: float}, multi_hist=False) -> None:
        """Stratify the dataset into the named parts and flush each split.

        :param parts: mapping of split name -> fraction of the dataset; the
            fractions must sum to at most 1 (the final split receives the
            remaining indices)
        :param multi_hist: when True use ``cal_multi_hist`` instead of
            ``calc_hist`` to histogram the dataset
        :raises RuntimeError: if the requested fractions sum to more than 1
        """
        if sum(parts.values()) > 1:
            raise RuntimeError("Sum of target parts greater than 1")
        # freeze the mapping into parallel lists of names and fractions
        parts = [[path, part] for path, part in parts.items()]
        pathes = [p[0] for p in parts]
        parts = [p[1] for p in parts]
        # identity mapping: dataset position -> original index
        part_indices = {i: i for i in range(len(self._dataset))}
        hist, indices = self.cal_multi_hist(self._dataset) if multi_hist else self.calc_hist(self._dataset)
        stratificated_indices = self.stratificate_dataset(hist, indices, parts)
        indices_to_check = []
        for i, cur_indices in enumerate(stratificated_indices):
            indices_to_check.append(self._flush_indices(cur_indices, part_indices, pathes[i]))
        self._dataset.remove_indices()
        # the produced splits must be pairwise disjoint
        self.check_indices_for_intersection(indices_to_check)
class PipelineDatasetStratification(DatasetStratification, AbstractStep):
    """Pipeline-step wrapper around :class:`DatasetStratification`.

    Both bases are initialised explicitly: the stratification logic first,
    then the pipeline wiring (this step consumes ``dataset`` and produces
    ``result``).
    """
    def __init__(self, dataset: DatasetInPipeline, calc_target_label: callable, result: StratificationResult, workers_num: int = 1):
        DatasetStratification.__init__(self, dataset, calc_target_label, result=result, workers_num=workers_num)
        AbstractStep.__init__(self, input_results=[dataset], output_res=result)
| [
"os.path.exists",
"os.path.join",
"numpy.argmax",
"numpy.array",
"numpy.linspace",
"pietoolbelt.pipeline.abstract_step.AbstractStep.__init__",
"multiprocessing.Pool",
"json.load",
"numpy.load",
"json.dump"
] | [((471, 502), 'os.path.join', 'os.path.join', (['path', '"""meta.json"""'], {}), "(path, 'meta.json')\n", (483, 502), False, 'import os\n'), ((515, 546), 'os.path.exists', 'os.path.exists', (['self._meta_file'], {}), '(self._meta_file)\n', (529, 546), False, 'import os\n'), ((1558, 1576), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (1565, 1576), True, 'import numpy as np\n'), ((7486, 7557), 'pietoolbelt.pipeline.abstract_step.AbstractStep.__init__', 'AbstractStep.__init__', (['self'], {'input_results': '[dataset]', 'output_res': 'result'}), '(self, input_results=[dataset], output_res=result)\n', (7507, 7557), False, 'from pietoolbelt.pipeline.abstract_step import AbstractStep, DatasetInPipeline, AbstractStepDirResult\n'), ((1178, 1210), 'json.dump', 'json.dump', (['self._meta', 'meta_file'], {}), '(self._meta, meta_file)\n', (1187, 1210), False, 'import json\n'), ((1431, 1456), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1445, 1456), False, 'import os\n'), ((5869, 5891), 'numpy.argmax', 'np.argmax', (['target_hist'], {}), '(target_hist)\n', (5878, 5891), True, 'import numpy as np\n'), ((635, 655), 'json.load', 'json.load', (['meta_file'], {}), '(meta_file)\n', (644, 655), False, 'import json\n'), ((2561, 2584), 'multiprocessing.Pool', 'Pool', (['self._workers_num'], {}), '(self._workers_num)\n', (2565, 2584), False, 'from multiprocessing import Pool\n'), ((3259, 3282), 'multiprocessing.Pool', 'Pool', (['self._workers_num'], {}), '(self._workers_num)\n', (3263, 3282), False, 'from multiprocessing import Pool\n'), ((3704, 3727), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(10)'], {}), '(0, 100, 10)\n', (3715, 3727), True, 'import numpy as np\n'), ((3680, 3696), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3688, 3696), True, 'import numpy as np\n'), ((3902, 3918), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3910, 3918), True, 'import numpy as np\n')] |
"""
Programmer: <NAME>
Date of Development: 28/10/2020
"""
# set the directory path
import os,sys
import os.path as path
abs_path_pkg = path.abspath(path.join(__file__ ,"../../../"))
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, abs_path_pkg)
# import other libraries
import numpy as np
from Py_FS.filter._utilities import normalize, Result
from Py_FS.filter.algorithm import Algorithm
from sklearn import datasets
class PCC(Algorithm):
    """Correlation-based filter feature-selection method.

    Each feature is scored as a weighted combination of two normalized
    terms:

    * feature-class relevance: |Pearson correlation with the target|
    * feature-feature redundancy: negated sum of |Spearman correlations|
      with all features (negative so redundant features are penalised)

    weighted by the user-supplied ``weight_class`` and ``weight_feat``.
    """

    def __init__(self,
                 data,
                 target,
                 default_mode=False,
                 verbose=True):
        super().__init__(
            data=data,
            target=target,
            default_mode=default_mode,
            verbose=verbose
        )

    def user_input(self):
        # accept the parameters as user inputs (if default_mode not set)
        if self.default_mode:
            self.set_default()
        else:
            self.algo_params["weight_feat"] = float(input(f"Weight for feature-feature correlation: {self.default_vals['weight_feat']}") or self.default_vals['weight_feat'])
            self.algo_params["weight_class"] = float(input(f"Weight for feature-class correlation: {self.default_vals['weight_class']}") or self.default_vals['weight_class'])

    def initialize(self):
        super().initialize()
        # pairwise feature-feature correlations and the two per-feature scores
        self.correlation_matrix = np.zeros((self.num_features, self.num_features))
        self.feature_feature_relation = np.zeros(self.num_features)
        self.feature_class_relation = np.zeros(self.num_features)

    def compute_SCC(self, x, y):
        """Spearman rank correlation between two variables."""
        # double argsort converts the raw values into ranks
        x_order = np.argsort(np.argsort(x))
        y_order = np.argsort(np.argsort(y))
        mean_x = np.mean(x_order)
        mean_y = np.mean(y_order)
        numerator = np.sum((x_order - mean_x) * (y_order - mean_y))
        denominator = np.sqrt(np.sum(np.square(x_order - mean_x)) * np.sum(np.square(y_order - mean_y)))
        return numerator / denominator

    def compute_PCC(self, x, y):
        """Pearson correlation between two variables.

        Added because :meth:`execute` references ``compute_PCC``, which was
        not defined in this class — unless the base class happened to
        provide it, the call raised ``AttributeError``.
        """
        mean_x = np.mean(x)
        mean_y = np.mean(y)
        numerator = np.sum((x - mean_x) * (y - mean_y))
        denominator = np.sqrt(np.sum(np.square(x - mean_x)) * np.sum(np.square(y - mean_y)))
        return numerator / denominator

    def execute(self):
        # generate the (symmetric) feature-feature correlation matrix;
        # iterating the upper triangle only halves the SCC computations
        self.feature_mean = np.mean(self.data, axis=0)
        for ind_1 in range(self.num_features):
            for ind_2 in range(ind_1, self.num_features):
                self.correlation_matrix[ind_1, ind_2] = self.correlation_matrix[ind_2, ind_1] = self.compute_SCC(self.data[:, ind_1], self.data[:, ind_2])

        for ind in range(self.num_features):
            # -ve because we want to remove the correlation (penalise redundancy)
            self.feature_feature_relation[ind] = -np.sum(abs(self.correlation_matrix[ind, :]))
            self.feature_class_relation[ind] = abs(self.compute_PCC(self.data[:, ind], self.target))

        # produce scores and ranks from the information matrix
        self.feature_feature_relation = normalize(self.feature_feature_relation)
        self.feature_class_relation = normalize(self.feature_class_relation)
        # BUG FIX: user_input() stores the weight under "weight_feat";
        # the previous key "weight_feature" raised KeyError here.
        self.scores = (self.algo_params["weight_class"] * self.feature_class_relation) + (self.algo_params["weight_feat"] * self.feature_feature_relation)
############# for testing purpose ################
if __name__ == '__main__':
    # NOTE(review): `pearsonr` is imported but never used, and
    # `scipy.stats.stats` is a deprecated import path — consider removing.
    from scipy.stats.stats import pearsonr
    # smoke-test the selector on the sklearn wine dataset
    data = datasets.load_wine()
    algo = PCC(data.data, data.target)
    res = algo.run()
    # NOTE(review): `correlation_matrix` is an attribute of `algo`; whether
    # the object returned by run() exposes it too should be verified.
    print(res.correlation_matrix)
############# for testing purpose ################
| [
"numpy.mean",
"sys.path.insert",
"Py_FS.filter._utilities.normalize",
"os.path.join",
"numpy.square",
"os.path.realpath",
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"sklearn.datasets.load_wine"
] | [((241, 273), 'sys.path.insert', 'sys.path.insert', (['(0)', 'abs_path_pkg'], {}), '(0, abs_path_pkg)\n', (256, 273), False, 'import os, sys\n'), ((152, 184), 'os.path.join', 'path.join', (['__file__', '"""../../../"""'], {}), "(__file__, '../../../')\n", (161, 184), True, 'import os.path as path\n'), ((213, 239), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (229, 239), False, 'import os, sys\n'), ((3236, 3256), 'sklearn.datasets.load_wine', 'datasets.load_wine', ([], {}), '()\n', (3254, 3256), False, 'from sklearn import datasets\n'), ((1388, 1436), 'numpy.zeros', 'np.zeros', (['(self.num_features, self.num_features)'], {}), '((self.num_features, self.num_features))\n', (1396, 1436), True, 'import numpy as np\n'), ((1477, 1504), 'numpy.zeros', 'np.zeros', (['self.num_features'], {}), '(self.num_features)\n', (1485, 1504), True, 'import numpy as np\n'), ((1543, 1570), 'numpy.zeros', 'np.zeros', (['self.num_features'], {}), '(self.num_features)\n', (1551, 1570), True, 'import numpy as np\n'), ((1772, 1788), 'numpy.mean', 'np.mean', (['x_order'], {}), '(x_order)\n', (1779, 1788), True, 'import numpy as np\n'), ((1806, 1822), 'numpy.mean', 'np.mean', (['y_order'], {}), '(y_order)\n', (1813, 1822), True, 'import numpy as np\n'), ((1843, 1890), 'numpy.sum', 'np.sum', (['((x_order - mean_x) * (y_order - mean_y))'], {}), '((x_order - mean_x) * (y_order - mean_y))\n', (1849, 1890), True, 'import numpy as np\n'), ((2154, 2180), 'numpy.mean', 'np.mean', (['self.data'], {'axis': '(0)'}), '(self.data, axis=0)\n', (2161, 2180), True, 'import numpy as np\n'), ((2827, 2867), 'Py_FS.filter._utilities.normalize', 'normalize', (['self.feature_feature_relation'], {}), '(self.feature_feature_relation)\n', (2836, 2867), False, 'from Py_FS.filter._utilities import normalize, Result\n'), ((2906, 2944), 'Py_FS.filter._utilities.normalize', 'normalize', (['self.feature_class_relation'], {}), '(self.feature_class_relation)\n', (2915, 2944), False, 'from 
Py_FS.filter._utilities import normalize, Result\n'), ((1696, 1709), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (1706, 1709), True, 'import numpy as np\n'), ((1740, 1753), 'numpy.argsort', 'np.argsort', (['y'], {}), '(y)\n', (1750, 1753), True, 'import numpy as np\n'), ((1928, 1955), 'numpy.square', 'np.square', (['(x_order - mean_x)'], {}), '(x_order - mean_x)\n', (1937, 1955), True, 'import numpy as np\n'), ((1966, 1993), 'numpy.square', 'np.square', (['(y_order - mean_y)'], {}), '(y_order - mean_y)\n', (1975, 1993), True, 'import numpy as np\n')] |
import numpy as np
from scipy import signal
from scipy.spatial.transform import Rotation as R
class Resize:
    def __init__(self, size=125):
        """
        Initiates transform with a target number of frames for resize
        :param size: int
        """
        self.size = size

    def __call__(self, x):
        """
        Output the requested frame size. If the sample has fewer frames it is
        zero-padded (centred) along the frame axis, if it has more frames it
        is resampled down to the requested size, otherwise it is returned
        unchanged.
        :param x: ndarray of shape (frames, joints, coords)
        :return: ndarray with ``self.size`` frames
        """
        diff = self.size - x.shape[0]
        if diff > 0:
            starting_point = diff // 2
            result = np.zeros((self.size, x.shape[1], x.shape[2],))
            # BUG FIX: the size comparison is on axis 0 (frames), so the
            # padding slice must be on axis 0 as well; the previous code
            # sliced the last axis, which mismatched the array shapes.
            result[starting_point:starting_point + x.shape[0], :, :] = x
        elif diff < 0:
            # too many frames: resample down along the frame axis
            result = signal.resample(x, self.size)
        else:
            result = x
        return result
class FilterJoints:
    """
    Returns specific joints (indices) from the skeleton data
    Default joints: head, left elbow, left hand, right elbow,
    right hand, left knee, left foot, right knee and right foot
    """
    def __init__(self, joints=None):
        self.joints = [0, 5, 7, 9, 11, 13, 15, 17, 19] if joints is None else joints

    def __call__(self, x):
        """
        Select the configured joint columns from every frame of x.
        :param x: ndarray
        :return: ndarray
        """
        return x[:, self.joints]
class ToSequence:
    """
    Transforms the data by reshaping to (sequence_length, input_size)
    """
    def __init__(self, sequence_length, input_size):
        self.sequence_length = sequence_length
        self.input_size = input_size

    def __call__(self, x):
        """
        Reshape the sample to (sequence_length, input_size).
        :param x: ndarray
        :return: ndarray
        """
        target_shape = (self.sequence_length, self.input_size)
        return x.reshape(target_shape)
class RandomEulerRotation:
    """
    Data augmentation transform, applies a random rotation of -5, 0 or 5 degrees (by default) in the x,y axis of
    every joint in the skeleton.
    """
    def __init__(self, start=-5, end=5, step=5):
        self.choices_list = list(range(start, end + 1, step))

    def __call__(self, x):
        # draw one angle per call and rotate every frame in place with it
        angle = np.random.choice(self.choices_list)
        rotation = R.from_euler('xy', (angle, angle), degrees=True)
        for frame in range(x.shape[0]):
            x[frame, :, :] = rotation.apply(x[frame, :, :])
        return x
| [
"numpy.random.choice",
"scipy.signal.resample",
"numpy.zeros",
"scipy.spatial.transform.Rotation.from_euler"
] | [((2319, 2354), 'numpy.random.choice', 'np.random.choice', (['self.choices_list'], {}), '(self.choices_list)\n', (2335, 2354), True, 'import numpy as np\n'), ((2374, 2430), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""xy"""', '(rotate_to, rotate_to)'], {'degrees': '(True)'}), "('xy', (rotate_to, rotate_to), degrees=True)\n", (2386, 2430), True, 'from scipy.spatial.transform import Rotation as R\n'), ((681, 726), 'numpy.zeros', 'np.zeros', (['(self.size, x.shape[1], x.shape[2])'], {}), '((self.size, x.shape[1], x.shape[2]))\n', (689, 726), True, 'import numpy as np\n'), ((845, 874), 'scipy.signal.resample', 'signal.resample', (['x', 'self.size'], {}), '(x, self.size)\n', (860, 874), False, 'from scipy import signal\n')] |
#!/usr/bin/python3
import numpy as np
from matrixll import matrixll
class MLNTopology():
    """Topology of a multi-layer network.

    Holds, per layer: the node count (``layers_shape``), the dimensionality
    of the layer's coordinate system (``layers_coord_dims``) and one
    coordinate tuple per node (``coords_map``); plus one sparse
    connectivity matrix (a ``matrixll`` list-of-lists) per consecutive
    layer pair (``matrices``).  A node "address" is simply its flat index.
    Invariants are enforced by :meth:`consistency_invariance_check`.
    """
    INDEX_INT_TYPE = int
    def __init__(self):
        # node count per layer; each entry is simply an int
        self.layers_shape = [] # : List[int]
        # nominal coord system dimensions: e.g. (x,y,ch) (theta,sigma,x,y,hue) (feature_id)
        # i.e. for layer's intrinsic (functional) topology
        # List[int]
        self.layers_coord_dims = []
        # connectivity_matrix: a sparse matrix (list of lists) per layer pair;
        # indexed [prev_node][next_node], None means "no connection"
        self.matrices = []
        # per layer: list of coordinate tuples, one per node
        self.coords_map = []
        self.consistency_invariance_check()
    def consistency_invariance_check(self):
        """Assert all structural invariants of the topology."""
        nl = len(self.layers_shape)
        nl2 = len(self.layers_coord_dims)
        nl3 = len(self.matrices)
        assert nl == nl2
        if nl == 0:
            assert nl3 == 0
        else:
            assert nl3 == nl-1
        for li in range(nl):
            neurons_count = self.layers_shape[li]
            assert isinstance(self.layers_shape[li], int)
            assert isinstance(self.layers_coord_dims[li], int)
            # one coordinate tuple per node
            assert len(self.coords_map[li]) == neurons_count
            for ni in range(neurons_count):
                address = ni # address is simply the node (neuron) index, i.e. an `int`
                coords = self.coords_map[li][address]
                assert isinstance(coords, tuple)
                # each tuple must match the layer's declared dimensionality
                assert len(coords) == self.layers_coord_dims[li]
        if nl > 0:
            assert len(self.matrices) == nl-1
            for cli in range(1, nl):
                next_layer = cli
                prev_layer = cli-1
                next_shape = self.layers_shape[next_layer]
                prev_shape = self.layers_shape[prev_layer]
                assert isinstance(next_shape, int)
                assert isinstance(prev_shape, int)
                h = next_shape
                w = prev_shape
                # matrix width == prev layer size, height == next layer size
                m = self.matrices[prev_layer]
                assert w == len(m)
                matrixll.check(m, -1, h)
    def create_reverse(self):
        """Return a new topology with layers and connections reversed."""
        rev = MLNTopology()
        nl = len(self.layers_shape)
        for li1 in reversed(range(nl)):
            lir = nl-1 - li1
            new_layer_shape = self.layers_shape[li1]
            coord_dims = self.layers_coord_dims[li1]
            coord_iterator = self.coords_map[li1]
            rev.add_layer(new_layer_shape, coord_dims, coord_iterator)
        for li in reversed(range(1,nl)):
            lfrom1 = li-1
            lto1 = li
            lir = nl-1 - li
            for (ifrom, ito, conn_obj) in self.iterate_connections(lfrom1, lto1):
                # connection direction flips: (ifrom -> ito) becomes (ito -> ifrom)
                rev.connect(lir, ito, ifrom, conn_obj, check=False)
        rev.consistency_invariance_check()
        return rev
    @staticmethod
    def encode_matrixll(mat):
        """Serialise a connectivity matrix as its repr string."""
        return repr(mat)
    @staticmethod
    def size_string(int_tuple):
        """Format a size tuple as e.g. '3x4'."""
        return 'x'.join([str(i) for i in int_tuple])
    def all_details(self):
        """Return a full multi-line text dump of the topology."""
        payload = []
        payload.append(type(self).__name__)
        payload.append(repr(self.layers_shape)) # shape
        payload.append(repr(self.layers_coord_dims)) # coord_dims
        payload.append('connections:')
        nl = len(self.layers_shape)
        payload.append('%d layers'% nl)
        for li in range(nl-1):
            m = self.matrices[li]
            payload.append(MLNTopology.size_string(matrixll.shape(m, 'derive')))
            payload.append(MLNTopology.encode_matrixll(m))
        payload.append('coords:')
        for li in range(nl):
            coords = self.coords_map[li]
            payload.append( repr(coords) )
        for i in range(len(payload)):
            assert isinstance(payload[i], str), str(i) + ':' + repr(payload[i])
        return '\n'.join(payload)
    def report(self, internals):
        """Print a human-readable summary; ``internals`` adds matrix/coords sizes."""
        nl = len(self.layers_shape)
        indent = '  '
        indent2 = '    '
        print('Report for topology (of %d layers):' % nl, self)
        print(indent, 'shape', self.layers_shape)
        print(indent, 'coords', self.layers_coord_dims)
        if internals:
            # full matrices/coords are too long to print; show sizes only
            for li in range(nl-1): # iterate connection matrices
                m = self.matrices[li]
                print(indent2, 'm:', len(m))
                print(indent2, 'm[0]:', len(m[0]))
                print(indent2,'layer: ', li, 'connections:', matrixll.shape(m, 'derive'))
            for li in range(nl): # iterate layers
                print(indent2,'self.coords_map[li]', len(self.coords_map[li]))
                print(indent2,'coords for %d entries' % len(self.coords_map[li]))
    # rename: nodes_count()
    def layer_num_elem(self, layer_no):
        """Return the node count of the given layer."""
        numel = self.layers_shape[layer_no]
        assert isinstance(numel, int)
        return numel
    def add_layer(self, new_layer_shape, coord_dims, coord_iterator):
        """Append a layer of ``new_layer_shape`` nodes, each with a coord tuple
        drawn from ``coord_iterator``; allocates an empty connectivity matrix
        towards the previous layer (if any)."""
        numnodes = new_layer_shape
        assert isinstance(numnodes, int)
        nl = len(self.layers_shape)
        self.layers_shape += [new_layer_shape]
        self.layers_coord_dims += [coord_dims]
        self.coords_map += [None]
        self.coords_map[-1] = [tpl for tpl in coord_iterator]
        assert len(self.coords_map[-1]) == self.layers_shape[-1], 'inconsistent number of coord tuples provided'
        if nl > 0:
            prev_layer_shape = self.layers_shape[-2]
            (w,h) = (np.prod(prev_layer_shape), np.prod(new_layer_shape))
            connectivity_matrix = matrixll.create_matrixll(w,h, None)
            assert matrixll.shape(connectivity_matrix, 'derive') == (w,h)
            self.matrices += [connectivity_matrix]
        self.consistency_invariance_check()
    def iterate_connections(self, prev_layer, this_layer):
        """Yield (prev_address, next_address, conn_obj) for every existing
        connection between the two consecutive layers."""
        self.consistency_invariance_check()
        assert prev_layer == this_layer - 1
        assert prev_layer >= 0
        assert this_layer < len(self.layers_shape)
        (prev_layer, this_layer) = (this_layer - 1, this_layer)
        next_shape = self.layers_shape[this_layer]
        prev_shape = self.layers_shape[prev_layer]
        w = prev_shape
        h = next_shape
        assert isinstance(w, int)
        assert isinstance(h, int)
        matrix = self.matrices[prev_layer]
        d = matrixll.shape(matrix, 'derive')
        assert d == (w,h)
        for x in range(w):
            for y in range(h):
                matrix = self.matrices[prev_layer]
                # None marks "no connection"; anything else is yielded
                conn_obj = matrix[x][y]
                if conn_obj is None:
                    continue
                (address1, address2) = (x,y)
                yield address1, address2, conn_obj
    def iterate_layers(self):
        """Yield (layer_index, node_count) for every layer."""
        self.consistency_invariance_check()
        nl = len(self.layers_shape)
        for i in range(nl):
            numel = self.layers_shape[i]
            yield i, numel
    def connect(self, prev_layer_no, address1_prev, address2_next, conn_obj, check=True):
        """Create a single connection prev -> next; the slot must be empty.

        ``check=False`` skips the full invariance check (used by bulk callers).
        """
        layer_no_next = prev_layer_no+1
        assert isinstance(address1_prev, int)
        assert isinstance(address2_next, int)
        # validate both endpoints exist (raises via the getter's asserts)
        self.get_node_metadata(layer_no_next, address2_next)
        self.get_node_metadata(prev_layer_no, address1_prev)
        matrix = self.matrices[prev_layer_no]
        assert matrix[address1_prev][address2_next] is None
        matrix[address1_prev][address2_next] = conn_obj
        assert conn_obj == 1
        if check:
            self.consistency_invariance_check()
    """
    Uses synaptic prune rule for connectivity:
    prune_rule = synaptic_prune_rule
    Arrows from lower layer index towards higher
    """
    def connect_all(self, prev_layer_no, next_layer_no, prune_rule):
        """Connect every prev/next node pair that ``prune_rule`` does NOT
        reject; ``prune_rule(coord_prev, coord_next)`` returning True prunes."""
        assert next_layer_no == prev_layer_no+1, 'only MLP-style is allowed: connections must between consecutive layers only'
        next_shape_int = self.layers_shape[next_layer_no]
        prev_shape_int = self.layers_shape[prev_layer_no]
        coords_next = self.coords_map[next_layer_no]
        coords_prev = self.coords_map[prev_layer_no]
        assert isinstance(next_shape_int, int)
        for i_next in range(next_shape_int):
            for j_prev in range(prev_shape_int):
                coord_next = coords_next[i_next]
                coord_prev = coords_prev[j_prev]
                # apply synaptic prune rule for connectivity:
                if not prune_rule(coord_prev, coord_next):
                    conn_obj = 1
                    self.connect(prev_layer_no, j_prev, i_next, conn_obj, check=False)
        self.consistency_invariance_check()
    # deprecated. all layers are flat
    """ shape dims """
    """
    def get_address_indices(layer_no):
        if layer_no == 1:
            return 2+1
        else:
            return 1
    """
    def get_node_metadata(self, layer, address):
        """Return the coordinate tuple of the node at (layer, address)."""
        layer_no = layer
        assert layer_no >= 0, "non-existing layer %d" % layer_no
        assert layer_no < len(self.layers_shape), "non-existing layer %d" % layer_no
        dims = self.layers_coord_dims[layer_no]
        assert isinstance(address, int)
        coords = self.coords_map[layer_no][address]
        assert len(coords) == dims
        return coords
    """
    # iterate over nodes
    def iter_node(self, layer_no):
        assert layer_no >= 0
        neurons_count = self.layers_shape[layer_no]
        for ni in range(neurons_count):
            yield \
                ni, \
                self.matrices[layer_no], \
                self.coords_map[layer_no]
    """
    # simple maping htat does not involve inf about conections
    def coord_map(self, layer_no, coords_map_lambda, newdims):
        """Map every node's coord tuple through ``coords_map_lambda`` and set
        the layer's dimensionality to ``newdims`` (connections untouched)."""
        assert layer_no >= 0
        assert isinstance(newdims, int)
        assert isinstance(coords_map_lambda, type(lambda:0))
        neurons_count = self.layers_shape[layer_no]
        # iterate over nodes
        for ni in range(neurons_count):
            coords = self.coords_map[layer_no][ni]
            assert isinstance(coords, tuple)
            coords_new = coords_map_lambda(coords)
            assert isinstance(coords_new, tuple)
            assert len(coords_new) == newdims
            self.coords_map[layer_no][ni] = coords_new
        self.layers_coord_dims[layer_no] = newdims
    def get_layer_coord_system(self, layer_no):
        # typically: [3,1,1,1,...] or [3,3,1,1,1,..]
        dims = self.layers_coord_dims[layer_no]
        return dims
def print0(*args):
    """Debug helper: echo the received arguments (tagged 'print0'), return 0."""
    print('print0', args)
    return 0
# utilities
def connect_based_on_distance(topo, prev_layer_no, next_layer_no, radius):
    """Connect two consecutive layers, keeping only node pairs whose planar
    (x, y) distance is within ``radius``.

    The prune rule hands ``connect_all`` True (prune) whenever the squared
    2-D distance exceeds ``radius`` squared.
    """
    (_X, _Y, _RGB) = (0, 1, 2)

    def too_far(coord1, coord2):
        dx = coord1[_X] - coord2[_X]
        dy = coord1[_Y] - coord2[_Y]
        return dx ** 2 + dy ** 2 > radius ** 2

    topo.connect_all(prev_layer_no, next_layer_no, too_far)
# expose the utility as a method on MLNTopology as well
MLNTopology.connect_based_on_distance = connect_based_on_distance
# some utilities
""" Iterated over indices of a tensor with given shape """
def tuple_iter(triple, prefix=()):
    """Yield every index tuple of a tensor with shape ``triple``.

    Indices are produced in row-major order (last axis fastest), each
    prefixed with ``prefix``.  An empty shape tuple raises.
    """
    assert isinstance(triple, tuple)
    if not triple:
        raise Exception('use tuple of len > 0')
    head, tail = triple[0], triple[1:]
    if not tail:
        for idx in range(head):
            yield tuple(prefix) + (idx,)
        return
    for idx in range(head):
        yield from tuple_iter(tail, prefix=prefix + (idx,))
| [
"matrixll.matrixll.create_matrixll",
"numpy.prod",
"matrixll.matrixll.check",
"matrixll.matrixll.shape"
] | [((7014, 7046), 'matrixll.matrixll.shape', 'matrixll.shape', (['matrix', '"""derive"""'], {}), "(matrix, 'derive')\n", (7028, 7046), False, 'from matrixll import matrixll\n'), ((2613, 2637), 'matrixll.matrixll.check', 'matrixll.check', (['m', '(-1)', 'h'], {}), '(m, -1, h)\n', (2627, 2637), False, 'from matrixll import matrixll\n'), ((6242, 6278), 'matrixll.matrixll.create_matrixll', 'matrixll.create_matrixll', (['w', 'h', 'None'], {}), '(w, h, None)\n', (6266, 6278), False, 'from matrixll import matrixll\n'), ((6155, 6180), 'numpy.prod', 'np.prod', (['prev_layer_shape'], {}), '(prev_layer_shape)\n', (6162, 6180), True, 'import numpy as np\n'), ((6182, 6206), 'numpy.prod', 'np.prod', (['new_layer_shape'], {}), '(new_layer_shape)\n', (6189, 6206), True, 'import numpy as np\n'), ((6297, 6342), 'matrixll.matrixll.shape', 'matrixll.shape', (['connectivity_matrix', '"""derive"""'], {}), "(connectivity_matrix, 'derive')\n", (6311, 6342), False, 'from matrixll import matrixll\n'), ((3985, 4012), 'matrixll.matrixll.shape', 'matrixll.shape', (['m', '"""derive"""'], {}), "(m, 'derive')\n", (3999, 4012), False, 'from matrixll import matrixll\n'), ((5119, 5146), 'matrixll.matrixll.shape', 'matrixll.shape', (['m', '"""derive"""'], {}), "(m, 'derive')\n", (5133, 5146), False, 'from matrixll import matrixll\n')] |
# -*- coding: utf-8 -*-
"""
contains main loop for training
"""
import torch
import utils
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
from torch.utils.data.sampler import SubsetRandomSampler
from model.dataset_class import AffectiveMonitorDataset
from model.net_valence import myLSTM_valence
from model.net_arousal import myLSTM_arousal
def train_valence(pickle_file="data_1_50_toTensor.pkl",learning_rate=0.03):
    """Train the valence-classification LSTM on pre-pickled FAP sequences.

    :param pickle_file: path of the pickled, tensor-ready dataset
    :param learning_rate: SGD learning rate
    Side effects: prints loss/accuracy every 100 iterations and shows a
    matplotlib loss curve when training finishes.
    """
    # Load Dataset (pre-converted with AffectiveMonitorDataset, see dataset_class)
    face_dataset = utils.load_object(pickle_file)
    # split train and test dataset (deterministic shuffle, 80/20)
    validation_split = 0.2
    random_seed = 42
    shuffle_dataset = True
    dataset_size = len(face_dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split*dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(val_indices)
    # Make Dataset Iterable
    batch_size = 100
    n_iters = 1000
    train_loader = torch.utils.data.DataLoader(face_dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    test_loader = torch.utils.data.DataLoader(face_dataset,
                                              batch_size=batch_size,
                                              sampler=test_sampler)
    # 100 time steps per sample, 19 FAP features per step, 5 valence classes
    input_dim = 19
    hidden_dim = 100
    layer_dim = 1
    output_dim = 5
    # Number of steps to unroll
    seq_dim = 100
    num_epochs = int(n_iters/ (len(train_sampler)/batch_size))
    # Instantiate Model class
    model = myLSTM_valence(input_dim,hidden_dim,layer_dim,output_dim)
    if torch.cuda.is_available():
        model = model.cuda()
    # Instantiate Loss class
    criterion = nn.CrossEntropyLoss()
    # Instantiate Optimizer Class
    optimizer = torch.optim.SGD(model.parameters(),lr = learning_rate)
    # training loop
    iteration = 0
    iter_num = []
    loss_list = []
    for epoch in range(num_epochs):
        for i, data in enumerate(train_loader):
            # each item is a dict carrying the 'FAP' sequence and its 'Valence' label
            FAPs = data['FAP']
            labels = data['Valence']
            # CrossEntropyLoss expects integer class labels
            labels = labels.long()
            # Cast input to Float (Model weight is set to Float by Default)
            FAPs = FAPs.float()
            # Load input vector as tensors
            if torch.cuda.is_available():
                FAPs = FAPs.view(-1,seq_dim,input_dim).cuda()
                labels = labels.cuda()
            else:
                FAPs = FAPs.view(-1,seq_dim,input_dim)
            # Set existing torch with gradient accumulation abilities
            FAPs.requires_grad = True
            # Clear gradients w.r.t. parameters
            optimizer.zero_grad()
            # Forward pass to get output/logits (batch_size x output_dim)
            outputs = model(FAPs)
            # Calculate Loss: softmax --> cross entropy loss
            loss = criterion(outputs,labels)
            # Getting gradients w.r.t. parameters
            loss.backward()
            # Updating parameters
            optimizer.step()
            iteration = iteration+1
            # Evaluate on the held-out split every 100 steps
            if iteration%100 == 0:
                correct = 0
                total = 0
                # Iterate through test dataset
                for i, data in enumerate(test_loader):
                    FAPs = data['FAP']
                    labels = data['Valence']
                    # Cast labels to long
                    labels = labels.long()
                    # Cast input to Float
                    FAPs = FAPs.float()
                    # Load input vector as tensors
                    if torch.cuda.is_available():
                        FAPs = FAPs.view(-1,seq_dim,input_dim).cuda()
                        labels = labels.cuda()
                    else:
                        FAPs = FAPs.view(-1,seq_dim,input_dim)
                    # Set existing torch
                    FAPs.requires_grad = True
                    # Forward pass only to get logits/output
                    outputs = model(FAPs)
                    # Get predictions from the maximum value
                    _, predicted = torch.max(outputs.data,1)
                    # Total number of labels (sum of batches)
                    total = total + labels.size(0)
                    # total accuracy prediction
                    if torch.cuda.is_available():
                        correct = correct + (predicted.cpu() == labels.cpu()).sum()
                    else:
                        correct = correct + (predicted == labels).sum()
                accuracy = 100 * (correct.item()/total)
                iter_num.append(iteration)
                loss_list.append(loss.item())
                # print Loss
                print("Iteration: {}. Loss: {}. Accuracy: {}".format(iteration,loss.item(),accuracy))
    # Plot Graph
    plt.plot(iter_num,loss_list)
    plt.xlabel("Number of Iterations")
    plt.ylabel("Loss")
    plt.show()
def train_arousal(pickle_file="data_1_4_toTensor.pkl",learning_rate=0.01):
    """Train the arousal-classification LSTM on pre-pickled pupil-diameter data.

    :param pickle_file: path of the pickled, tensor-ready dataset
    :param learning_rate: SGD learning rate
    Side effects: prints loss/accuracy every 100 iterations and shows a
    matplotlib figure with the loss and learning curves when done.
    """
    # Load Dataset (pre-converted with AffectiveMonitorDataset, see dataset_class)
    face_dataset = utils.load_object(pickle_file)
    # split train and test dataset (deterministic shuffle, 80/20)
    validation_split = 0.2
    random_seed = 42
    shuffle_dataset = True
    dataset_size = len(face_dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split*dataset_size))
    if shuffle_dataset:
        np.random.seed(random_seed)
        np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_indices)
    test_sampler = SubsetRandomSampler(val_indices)
    # Make Dataset Iterable
    batch_size = 100
    n_iters = 350*3
    train_loader = torch.utils.data.DataLoader(face_dataset,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
    test_loader = torch.utils.data.DataLoader(face_dataset,
                                              batch_size=batch_size,
                                              sampler=test_sampler)
    # 100 time steps per sample, 1 pupil-diameter feature per step, 5 classes
    input_dim = 1
    hidden_dim = 100
    layer_dim = 1
    output_dim = 5
    # Number of steps to unroll
    seq_dim = 100
    num_epochs = int(n_iters/ (len(train_sampler)/batch_size))
    # Instantiate Model class
    model = myLSTM_arousal(input_dim,hidden_dim,layer_dim,output_dim)
    # GPU configuration
    if torch.cuda.is_available():
        model.cuda()
    # Instantiate Loss class
    criterion = nn.CrossEntropyLoss()
    # Instantiate Optimizer Class
    optimizer = torch.optim.SGD(model.parameters(),lr = learning_rate)
    # training loop
    iteration = 0
    iter_num = []
    loss_list = []
    accuracy_list = []
    for epoch in range(num_epochs):
        for i, data in enumerate(train_loader):
            # each item is a dict carrying the 'PD' sequence and its 'Arousal' label
            PDs = data['PD']
            labels = data['Arousal']
            # Cast input to Float (Model weight is set to Float by Default)
            PDs = PDs.float()
            # CrossEntropyLoss expects integer class labels
            labels = labels.long()
            # Load input vector as tensors
            if torch.cuda.is_available():
                PDs = PDs.view(-1,seq_dim,input_dim).cuda()
                labels = labels.cuda()
            else:
                PDs = PDs.view(-1,seq_dim,input_dim)
            # Set existing torch with gradient accumulation abilities
            PDs.requires_grad = True
            # Clear gradients w.r.t. parameters
            optimizer.zero_grad()
            # Forward pass to get output/logits (batch_size x output_dim)
            outputs = model(PDs)
            # Calculate Loss: softmax --> cross entropy loss
            loss = criterion(outputs,labels)
            # Getting gradients w.r.t. parameters
            loss.backward()
            # Updating parameters
            optimizer.step()
            iteration = iteration+1
            # Evaluate on the held-out split every 100 steps
            if iteration%100 == 0:
                correct = 0
                total = 0
                # Iterate through test dataset
                for i, data in enumerate(test_loader):
                    PDs = data['PD']
                    labels = data['Arousal']
                    # Cast input to Float
                    PDs = PDs.float()
                    # Cast labels to long
                    labels = labels.long()
                    # Load input vector as tensors
                    if torch.cuda.is_available():
                        PDs = PDs.view(-1,seq_dim,input_dim).cuda()
                        labels = labels.cuda()
                    else:
                        PDs = PDs.view(-1,seq_dim,input_dim)
                    # Set existing torch
                    PDs.requires_grad = True
                    # Forward pass only to get logits/output
                    outputs = model(PDs)
                    # Get predictions from the maximum value
                    _, predicted = torch.max(outputs.data,1)
                    # Total number of labels (sum of batches)
                    total = total + labels.size(0)
                    # total accuracy prediction
                    if torch.cuda.is_available():
                        correct = correct + (predicted.cpu() == labels.cpu()).sum()
                    else:
                        correct = correct + (predicted == labels).sum()
                accuracy = 100 * (correct.item()/total)
                iter_num.append(iteration)
                loss_list.append(loss.item())
                accuracy_list.append(accuracy)
                # print Loss
                print("Iteration: {}. Loss: {}. Accuracy: {}".format(iteration,loss.item(),accuracy))
    # Plot Graph: loss on top, learning curve (accuracy) below, shared x axis
    fig, (ax_loss, ax_lc) = plt.subplots(nrows=2,ncols=1,sharex=True)
    ax_loss.plot(iter_num,loss_list)
    ax_lc.plot(iter_num,accuracy_list)
    ax_loss.grid(True)
    ax_lc.grid(True)
    ax_lc.set_xlabel("Number of Iterations")
    ax_loss.set_ylabel("Loss")
    ax_lc.set_ylabel("Learning curve")
    fig.suptitle("learning rate: "+str(learning_rate))
    plt.show()
if __name__ == "__main__":
    # train the arousal model by default; the valence variant is available too:
    # train_valence(pickle_file="data_1_50_toTensor.pkl",learning_rate=0.01)
    train_arousal(pickle_file="data_1_50_toTensor.pkl",learning_rate=0.03)
| [
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.random.shuffle",
"torch.nn.CrossEntropyLoss",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.floor",
"torch.max",
"model.net_valence.myLSTM_valence",
"torch.cuda.is_available",
"model.net_arousal.myL... | [((854, 884), 'utils.load_object', 'utils.load_object', (['pickle_file'], {}), '(pickle_file)\n', (871, 884), False, 'import utils\n'), ((1325, 1359), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (1344, 1359), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((1379, 1411), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (1398, 1411), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((1504, 1596), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['face_dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(face_dataset, batch_size=batch_size, sampler=\n train_sampler)\n', (1531, 1596), False, 'import torch\n'), ((1706, 1797), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['face_dataset'], {'batch_size': 'batch_size', 'sampler': 'test_sampler'}), '(face_dataset, batch_size=batch_size, sampler=\n test_sampler)\n', (1733, 1797), False, 'import torch\n'), ((2327, 2387), 'model.net_valence.myLSTM_valence', 'myLSTM_valence', (['input_dim', 'hidden_dim', 'layer_dim', 'output_dim'], {}), '(input_dim, hidden_dim, layer_dim, output_dim)\n', (2341, 2387), False, 'from model.net_valence import myLSTM_valence\n'), ((2392, 2417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2415, 2417), False, 'import torch\n'), ((2498, 2519), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2517, 2519), True, 'import torch.nn as nn\n'), ((6108, 6137), 'matplotlib.pyplot.plot', 'plt.plot', (['iter_num', 'loss_list'], {}), '(iter_num, loss_list)\n', (6116, 6137), True, 'import matplotlib.pyplot as plt\n'), ((6141, 6175), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iterations"""'], {}), "('Number of Iterations')\n", (6151, 6175), True, 'import matplotlib.pyplot as plt\n'), ((6180, 
6198), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (6190, 6198), True, 'import matplotlib.pyplot as plt\n'), ((6203, 6213), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6211, 6213), True, 'import matplotlib.pyplot as plt\n'), ((6702, 6732), 'utils.load_object', 'utils.load_object', (['pickle_file'], {}), '(pickle_file)\n', (6719, 6732), False, 'import utils\n'), ((7173, 7207), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['train_indices'], {}), '(train_indices)\n', (7192, 7207), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((7227, 7259), 'torch.utils.data.sampler.SubsetRandomSampler', 'SubsetRandomSampler', (['val_indices'], {}), '(val_indices)\n', (7246, 7259), False, 'from torch.utils.data.sampler import SubsetRandomSampler\n'), ((7353, 7445), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['face_dataset'], {'batch_size': 'batch_size', 'sampler': 'train_sampler'}), '(face_dataset, batch_size=batch_size, sampler=\n train_sampler)\n', (7380, 7445), False, 'import torch\n'), ((7555, 7646), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['face_dataset'], {'batch_size': 'batch_size', 'sampler': 'test_sampler'}), '(face_dataset, batch_size=batch_size, sampler=\n test_sampler)\n', (7582, 7646), False, 'import torch\n'), ((8175, 8235), 'model.net_arousal.myLSTM_arousal', 'myLSTM_arousal', (['input_dim', 'hidden_dim', 'layer_dim', 'output_dim'], {}), '(input_dim, hidden_dim, layer_dim, output_dim)\n', (8189, 8235), False, 'from model.net_arousal import myLSTM_arousal\n'), ((8269, 8294), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8292, 8294), False, 'import torch\n'), ((8367, 8388), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (8386, 8388), True, 'import torch.nn as nn\n'), ((12117, 12160), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(1)', 'sharex': 
'(True)'}), '(nrows=2, ncols=1, sharex=True)\n', (12129, 12160), True, 'import matplotlib.pyplot as plt\n'), ((12453, 12463), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12461, 12463), True, 'import matplotlib.pyplot as plt\n'), ((1098, 1139), 'numpy.floor', 'np.floor', (['(validation_split * dataset_size)'], {}), '(validation_split * dataset_size)\n', (1106, 1139), True, 'import numpy as np\n'), ((1176, 1203), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1190, 1203), True, 'import numpy as np\n'), ((1212, 1238), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (1229, 1238), True, 'import numpy as np\n'), ((6946, 6987), 'numpy.floor', 'np.floor', (['(validation_split * dataset_size)'], {}), '(validation_split * dataset_size)\n', (6954, 6987), True, 'import numpy as np\n'), ((7024, 7051), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (7038, 7051), True, 'import numpy as np\n'), ((7060, 7086), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (7077, 7086), True, 'import numpy as np\n'), ((3152, 3177), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3175, 3177), False, 'import torch\n'), ((9083, 9108), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9106, 9108), False, 'import torch\n'), ((4682, 4707), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4705, 4707), False, 'import torch\n'), ((5282, 5308), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5291, 5308), False, 'import torch\n'), ((5534, 5559), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5557, 5559), False, 'import torch\n'), ((10624, 10649), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10647, 10649), False, 'import torch\n'), ((11220, 11246), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), 
'(outputs.data, 1)\n', (11229, 11246), False, 'import torch\n'), ((11472, 11497), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (11495, 11497), False, 'import torch\n')] |
import time
import numpy as np
import random
import sys
import os
import argparse
import cv2
import zipfile
import itertools
import pybullet
import json
import time
import numpy as np
import imageio
import pybullet as p
from collect_pose_data import PoseDataCollector
sys.path.insert(1, '../utils/')
from coord_helper import *
from data_helper import *
import obj_file
def prepare_one_pose(output_folder_dir, result_file_name, filtered_idx, filtered_obj_pose_seq_arr, object_pc):
	"""Write start/end point clouds and the pose sequence for each filtered pose.

	For every (index, sequence) pair three .npy files are saved under
	output_folder_dir: '<name>-<idx>-startpc.npy', '-endpc.npy' and
	'-seqpose.npy'. Returns the list of base file names written (no suffix).
	"""
	written_names = []
	for idx, seqpose in zip(filtered_idx, filtered_obj_pose_seq_arr):
		base_name = result_file_name + '-{}'.format(idx)
		written_names.append(base_name)
		base_path = os.path.join(output_folder_dir, base_name)
		# assert seqpose.shape[0] > 1
		# transform the object point cloud into the first and last pose of the sequence
		start_cloud = apply_transform_to_pc_with_n(object_pc, seqpose[0])
		end_cloud = apply_transform_to_pc_with_n(object_pc, seqpose[-1])
		np.save(base_path + '-startpc.npy', start_cloud)
		np.save(base_path + '-endpc.npy', end_cloud)
		np.save(base_path + '-seqpose.npy', seqpose)
	return written_names
# def filter_one_pose(result_data, result_folder_dir, result_file_name, hook_bullet_id, object_bullet_id, hook_world_pos, hook_scaling, collector):
def process_and_filter_one_pose(result_data, result_folder_dir, result_file_name):
	"""Load the recorded pose sequences for one result, truncate and filter them.

	Each sequence '<result_file_name>-<i>-pose.npy' (one per entry in
	result_data['succ_force']) is cut off where the object either moves
	farther than 0.6 from the origin or its z coordinate exceeds 0.4.
	Sequences with <= 3 remaining timesteps are discarded; the rest are kept
	when their mean quaternion error is below 0.3.  If none qualifies, the
	single sequence with the smallest error is kept as a fallback.

	Returns (filtered_idx, filtered_obj_pose_seq_arr) where the sequences are
	time-reversed, or ([], []) when no sequence survived the length filter.
	"""
	obj_pos_quat_seq_arr = []
	idx_cutoff_arr = []
	np_dir_arr = []
	# collect the per-attempt pose files; every one must exist on disk
	for i in range(len(result_data['succ_force'])):
		half_output_dir = os.path.join(result_folder_dir, result_file_name + '-{}'.format(str(i)))
		np_dir = half_output_dir + '-pose.npy'
		assert os.path.isfile(np_dir)
		np_dir_arr.append(np_dir)
	quat_error_arr = []
	filtered_idx = []
	for i in range(len(np_dir_arr)):
		obj_pos_quat_seq = np.load(np_dir_arr[i])
		# NOTE(review): np.searchsorted assumes the scanned values are ascending
		# (distance from origin / height both growing over time) -- TODO confirm
		idx_cutoff_1 = np.searchsorted(np.linalg.norm(obj_pos_quat_seq[:, :3], axis=-1), 0.6)
		idx_cutoff_2 = np.searchsorted(obj_pos_quat_seq[:, 2], 0.4)
		idx_cutoff = min(idx_cutoff_1, idx_cutoff_2)
		# print(i, 'idx cutoff', idx_cutoff)
		idx_cutoff_arr.append(idx_cutoff)
		obj_pos_quat_seq_arr.append(obj_pos_quat_seq[:idx_cutoff])
		# auto disqualify sequences that have <=3 timesteps
		if obj_pos_quat_seq_arr[i].shape[0] <= 3:
			continue
		quat_error = mean_quat_error(obj_pos_quat_seq_arr[i])
		if quat_error < 0.3:
			filtered_idx.append(i)
		quat_error_arr.append([quat_error, i])
	# nothing survived the length filter at all
	if len(quat_error_arr) == 0:
		return [], []
	# fallback: keep the single sequence with the smallest quaternion error
	if len(filtered_idx) == 0:
		quat_error_arr = np.array(quat_error_arr)
		filtered_idx = [int(quat_error_arr[np.argsort(quat_error_arr[:, 0]), 1][0])]
	filtered_idx = np.array(filtered_idx)
	# print(obj_pos_quat_seq_arr[filtered_idx[0]].shape)
	# print(obj_pos_quat_seq_arr[filtered_idx[0]][0])
	# reverse footage
	filtered_obj_pose_seq_arr = [np.flip(obj_pos_quat_seq_arr[i], 0) for i in filtered_idx]
	# print(filtered_obj_pose_seq_arr[0][-1])
	return filtered_idx, filtered_obj_pose_seq_arr
# print('sort by time', np.argsort(t_arr), np.sort(t_arr))
# print('sort by quat', np.argsort(quat_error_arr), np.sort(quat_error_arr))
# print()
if __name__ == '__main__':
	# --- command-line arguments ---
	parser = argparse.ArgumentParser()
	parser.add_argument("--home_dir_data", default="../data")
	parser.add_argument("--use_labeled_data", action='store_true')
	parser.add_argument("--sherlock", action='store_true')
	parser.add_argument("--hook_name", default='')
	parser.add_argument("--obj_cat_split_id", type=int, default=-1)
	args = parser.parse_args()
	obj_cat_split_id = int(args.obj_cat_split_id)
	if args.sherlock:
		# on the Sherlock cluster the data lives on scratch storage and both
		# a hook name and a valid split id must be supplied
		args.home_dir_data = '/scratch/groups/bohg/hang'
		assert args.hook_name != ''
		assert obj_cat_split_id >= 0
	# --- directory layout under the data home ---
	data_dir = os.path.join(args.home_dir_data, 'geo_data')
	exclude_dir = os.path.join(args.home_dir_data, 'exclude')
	output_dir = os.path.join(args.home_dir_data, 'collection_result')
	collection_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result')
	visualize_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_visualize')
	chunk_folder_dir = os.path.join(args.home_dir_data, 'geo_data/misc_chunks')
	labeled_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_labeled')
	seq_result_folder_dir = os.path.join(args.home_dir_data, 'collection_result_seq')
	dataset_folder_dir = os.path.join(args.home_dir_data, 'dataset_seq')
	dataset_labels_folder_dir = os.path.join(args.home_dir_data, 'dataset_seq', 'labels')
	all_hook_name, all_hook_urdf, all_object_name, all_object_urdf = load_all_hooks_object_w_split_id(obj_cat_split_id, data_dir, exclude_dir, None, True, True)
	# p_id = bc.BulletClient(connection_mode=pybullet.GUI)
	# collector = PoseDataCollector(p_id)
	if not os.path.isdir(dataset_folder_dir):
		os.mkdir(dataset_folder_dir)
	if not os.path.isdir(dataset_labels_folder_dir):
		os.mkdir(dataset_labels_folder_dir)
	ct = 0
	if args.hook_name != '':
		assert args.hook_name in all_hook_name
	# --- iterate over every (hook, object) pair ---
	for i, hook_name in enumerate(all_hook_name):
		# when a specific hook was requested, skip all others
		if args.hook_name != '' and args.hook_name != hook_name:
			continue
		# for visualize_labeled_folder_name in os.listdir(labeled_result_folder_dir):
		# hook_name = visualize_labeled_folder_name.replace('visualize_chunk_', '')
		# i = all_hook_name.index(hook_name)
		# if not hook_name == 'hook_wall_124':
		# if not hook_name == 'hook_wall_185':
		# if not hook_name == 'hook_wall_75':
		# continue
		# hook_urdf_dir = all_hook_urdf[i]
		# hook_pc_dir = get_numpy_dir_from_urdf(hook_urdf_dir)
		# hook_pc = np.load(hook_pc_dir)
		# hook_bullet_id, hook_scaling = collector.init_hook(hook_urdf_dir)
		# hook_world_pos_offset = get_hook_wall_offset(hook_urdf_dir)
		# hook_world_pos = collector.get_hook_world_pos(hook_bullet_id, hook_world_pos_offset)
		# print('hook world pos offest', hook_world_pos_offset)
		output_file_name_arr = []
		for j, object_name in enumerate(all_object_name):
			# if not 'mug' in object_name:
			# continue
			# if int(object_name.split('_')[-1]) < 23:
			# continue
			# print(object_name)
			object_urdf_dir = all_object_urdf[j]
			object_pc_dir = get_numpy_dir_from_urdf(object_urdf_dir)
			object_pc = np.load(object_pc_dir)
			result_file_name = hook_name + '_' + object_name
			result_file_dir = os.path.join(collection_result_folder_dir, result_file_name + '.txt')
			# skip pairs for which no collection result exists
			if not os.path.isfile(result_file_dir):
				continue
			result_np = load_result_file(result_file_dir)
			excluded_rows = []
			if args.use_labeled_data:
				# only keep rows for which a labeled visualization image exists
				for k in range(result_np.shape[0]):
					image_dir = os.path.join(labeled_result_folder_dir, 'visualize_chunk_{}'.format(hook_name), '{}_{}.jpg'.format(result_file_name, str(k)))
					if not os.path.isfile(image_dir):
						excluded_rows.append(k)
				if len(excluded_rows) == result_np.shape[0]:
					continue
			# print(hook_pc_dir, object_pc_dir)
			# print(hook_urdf_dir, object_urdf_dir)
			ct += 1
			# object_bullet_id = collector.p.loadURDF(object_urdf_dir, basePosition=[0, 0, 2], baseOrientation=[0, 0, 0, 1], globalScaling=1, useFixedBase=False)
			# object_scaling = collector.p.getCollisionShapeData(object_bullet_id, -1)[0][3][0]
			info_output_file_dir = os.path.join(seq_result_folder_dir, '{}.json'.format(result_file_name))
			info_output_arr = []
			print(result_file_name)
			# process every (non-excluded) collected pose of this pair
			for k in range(result_np.shape[0]):
				if k in excluded_rows:
					continue
				object_pos_local = result_np[k, -7:-4]
				object_quat_local = result_np[k, -4:]
				k_result_file_name = '{}-{}'.format(result_file_name, str(k))
				result_json_dir = os.path.join(seq_result_folder_dir, k_result_file_name + '.json')
				with open(result_json_dir) as f:
					result_data = json.load(f)
				if len(result_data['succ_force']) == 0:
					continue
				# process_and_filter_one_pose(result_data, seq_result_folder_dir, k_result_file_name, hook_bullet_id, object_bullet_id, hook_world_pos, hook_scaling, collector)
				# filter_one_pose(result_data, seq_result_folder_dir, k_result_file_name)
				filtered_idx, filtered_obj_pose_seq_arr = process_and_filter_one_pose(result_data, seq_result_folder_dir, k_result_file_name)
				if len(filtered_idx) == 0:
					continue
				tmp_output_file_name_arr = prepare_one_pose(dataset_folder_dir, k_result_file_name, filtered_idx, filtered_obj_pose_seq_arr, object_pc)
				output_file_name_arr += tmp_output_file_name_arr
		# write the list of generated dataset entries for this hook
		labels_out_dir = os.path.join(dataset_labels_folder_dir, '{}.txt'.format(hook_name))
		with open(labels_out_dir, 'w+') as f:
			for line in output_file_name_arr:
				f.write(line + '\n')
| [
"numpy.flip",
"sys.path.insert",
"argparse.ArgumentParser",
"numpy.searchsorted",
"os.path.join",
"os.path.isfile",
"numpy.array",
"numpy.argsort",
"os.path.isdir",
"os.mkdir",
"numpy.linalg.norm",
"json.load",
"numpy.load",
"numpy.save"
] | [((269, 300), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../utils/"""'], {}), "(1, '../utils/')\n", (284, 300), False, 'import sys\n'), ((2681, 2703), 'numpy.array', 'np.array', (['filtered_idx'], {}), '(filtered_idx)\n', (2689, 2703), True, 'import numpy as np\n'), ((3202, 3227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3225, 3227), False, 'import argparse\n'), ((3740, 3784), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""geo_data"""'], {}), "(args.home_dir_data, 'geo_data')\n", (3752, 3784), False, 'import os\n'), ((3800, 3843), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""exclude"""'], {}), "(args.home_dir_data, 'exclude')\n", (3812, 3843), False, 'import os\n'), ((3858, 3911), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""collection_result"""'], {}), "(args.home_dir_data, 'collection_result')\n", (3870, 3911), False, 'import os\n'), ((3944, 3997), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""collection_result"""'], {}), "(args.home_dir_data, 'collection_result')\n", (3956, 3997), False, 'import os\n'), ((4029, 4092), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""collection_result_visualize"""'], {}), "(args.home_dir_data, 'collection_result_visualize')\n", (4041, 4092), False, 'import os\n'), ((4113, 4169), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""geo_data/misc_chunks"""'], {}), "(args.home_dir_data, 'geo_data/misc_chunks')\n", (4125, 4169), False, 'import os\n'), ((4199, 4260), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""collection_result_labeled"""'], {}), "(args.home_dir_data, 'collection_result_labeled')\n", (4211, 4260), False, 'import os\n'), ((4286, 4343), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""collection_result_seq"""'], {}), "(args.home_dir_data, 'collection_result_seq')\n", (4298, 4343), False, 'import os\n'), ((4366, 4413), 'os.path.join', 'os.path.join', (['args.home_dir_data', 
'"""dataset_seq"""'], {}), "(args.home_dir_data, 'dataset_seq')\n", (4378, 4413), False, 'import os\n'), ((4443, 4500), 'os.path.join', 'os.path.join', (['args.home_dir_data', '"""dataset_seq"""', '"""labels"""'], {}), "(args.home_dir_data, 'dataset_seq', 'labels')\n", (4455, 4500), False, 'import os\n'), ((704, 753), 'os.path.join', 'os.path.join', (['output_folder_dir', 'output_file_name'], {}), '(output_folder_dir, output_file_name)\n', (716, 753), False, 'import os\n'), ((1079, 1112), 'numpy.save', 'np.save', (['startpc_out_dir', 'startpc'], {}), '(startpc_out_dir, startpc)\n', (1086, 1112), True, 'import numpy as np\n'), ((1115, 1144), 'numpy.save', 'np.save', (['endpc_out_dir', 'endpc'], {}), '(endpc_out_dir, endpc)\n', (1122, 1144), True, 'import numpy as np\n'), ((1147, 1180), 'numpy.save', 'np.save', (['seqpose_out_dir', 'seqpose'], {}), '(seqpose_out_dir, seqpose)\n', (1154, 1180), True, 'import numpy as np\n'), ((1702, 1724), 'os.path.isfile', 'os.path.isfile', (['np_dir'], {}), '(np_dir)\n', (1716, 1724), False, 'import os\n'), ((1849, 1871), 'numpy.load', 'np.load', (['np_dir_arr[i]'], {}), '(np_dir_arr[i])\n', (1856, 1871), True, 'import numpy as np\n'), ((1978, 2022), 'numpy.searchsorted', 'np.searchsorted', (['obj_pos_quat_seq[:, 2]', '(0.4)'], {}), '(obj_pos_quat_seq[:, 2], 0.4)\n', (1993, 2022), True, 'import numpy as np\n'), ((2560, 2584), 'numpy.array', 'np.array', (['quat_error_arr'], {}), '(quat_error_arr)\n', (2568, 2584), True, 'import numpy as np\n'), ((2859, 2894), 'numpy.flip', 'np.flip', (['obj_pos_quat_seq_arr[i]', '(0)'], {}), '(obj_pos_quat_seq_arr[i], 0)\n', (2866, 2894), True, 'import numpy as np\n'), ((4768, 4801), 'os.path.isdir', 'os.path.isdir', (['dataset_folder_dir'], {}), '(dataset_folder_dir)\n', (4781, 4801), False, 'import os\n'), ((4805, 4833), 'os.mkdir', 'os.mkdir', (['dataset_folder_dir'], {}), '(dataset_folder_dir)\n', (4813, 4833), False, 'import os\n'), ((4842, 4882), 'os.path.isdir', 'os.path.isdir', 
(['dataset_labels_folder_dir'], {}), '(dataset_labels_folder_dir)\n', (4855, 4882), False, 'import os\n'), ((4886, 4921), 'os.mkdir', 'os.mkdir', (['dataset_labels_folder_dir'], {}), '(dataset_labels_folder_dir)\n', (4894, 4921), False, 'import os\n'), ((1905, 1953), 'numpy.linalg.norm', 'np.linalg.norm', (['obj_pos_quat_seq[:, :3]'], {'axis': '(-1)'}), '(obj_pos_quat_seq[:, :3], axis=-1)\n', (1919, 1953), True, 'import numpy as np\n'), ((6192, 6214), 'numpy.load', 'np.load', (['object_pc_dir'], {}), '(object_pc_dir)\n', (6199, 6214), True, 'import numpy as np\n'), ((6288, 6357), 'os.path.join', 'os.path.join', (['collection_result_folder_dir', "(result_file_name + '.txt')"], {}), "(collection_result_folder_dir, result_file_name + '.txt')\n", (6300, 6357), False, 'import os\n'), ((6368, 6399), 'os.path.isfile', 'os.path.isfile', (['result_file_dir'], {}), '(result_file_dir)\n', (6382, 6399), False, 'import os\n'), ((7567, 7632), 'os.path.join', 'os.path.join', (['seq_result_folder_dir', "(k_result_file_name + '.json')"], {}), "(seq_result_folder_dir, k_result_file_name + '.json')\n", (7579, 7632), False, 'import os\n'), ((7690, 7702), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7699, 7702), False, 'import json\n'), ((6709, 6734), 'os.path.isfile', 'os.path.isfile', (['image_dir'], {}), '(image_dir)\n', (6723, 6734), False, 'import os\n'), ((2622, 2654), 'numpy.argsort', 'np.argsort', (['quat_error_arr[:, 0]'], {}), '(quat_error_arr[:, 0])\n', (2632, 2654), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["simplexy"]
import numpy as np
from ._simplexy import simplexy as run_simplexy
# Structured record dtype for one extracted source: fields x, y, flux, bkg,
# each stored as float32.
_dtype = np.dtype([("x", np.float32), ("y", np.float32),
                   ("flux", np.float32), ("bkg", np.float32)])
def simplexy(img, **kwargs):
    """Run the compiled simplexy source extractor on *img*.

    Returns a structured numpy array with fields x, y, flux and bkg
    (all float32), one record per detected source.
    """
    # the C routine expects a contiguous float32 array in transposed order
    prepared = np.ascontiguousarray(img.T, dtype=np.float32)
    raw = run_simplexy(prepared, **kwargs).T
    # repack the transposed result rows into the structured record dtype
    return np.array(list(zip(*raw)), dtype=_dtype)
| [
"numpy.dtype",
"numpy.ascontiguousarray"
] | [((177, 273), 'numpy.dtype', 'np.dtype', (["[('x', np.float32), ('y', np.float32), ('flux', np.float32), ('bkg', np.\n float32)]"], {}), "([('x', np.float32), ('y', np.float32), ('flux', np.float32), (\n 'bkg', np.float32)])\n", (185, 273), True, 'import numpy as np\n'), ((340, 385), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img.T'], {'dtype': 'np.float32'}), '(img.T, dtype=np.float32)\n', (360, 385), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from mindspore import ops, Tensor, context
from mindspore.common.parameter import Parameter
from mindspore.nn import Cell
class AssignNet(Cell):
    """Minimal network wrapping ops.Assign: writes the network input into a stored ref."""
    def __init__(self, input_variable):
        super(AssignNet, self).__init__()
        self.op = ops.Assign()
        # the ref (e.g. a Parameter) that Assign writes into
        self.input_data = input_variable
    def construct(self, input_x):
        # assign input_x into self.input_data and return the op's output
        return self.op(self.input_data, input_x)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_assign_as_output():
    """
    Feature: PyNative MindRT
    Description: Test PyNative MindRT RefNode.
    Expectation: No exception.
    """
    # fix the seed so the generated input data is reproducible
    np.random.seed(0)
    input_np = np.random.randn(5, 5).astype(dtype=np.int32)
    context.set_context(mode=context.PYNATIVE_MODE)
    input_variable = Parameter(Tensor(np.random.randn(5, 5).astype(dtype=np.float32)))
    input_x = Tensor(input_np)
    net = AssignNet(input_variable)
    out = net(input_x)
    # Fix: the original `input_np.all() == out...all()` compared two aggregated
    # booleans (true even for unequal arrays); compare element-wise instead.
    assert (input_np == out.asnumpy().astype(dtype=np.int32)).all()
| [
"mindspore.context.set_context",
"numpy.random.seed",
"mindspore.Tensor",
"mindspore.ops.Assign",
"numpy.random.randn"
] | [((774, 791), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (788, 791), True, 'import numpy as np\n'), ((856, 903), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE'}), '(mode=context.PYNATIVE_MODE)\n', (875, 903), False, 'from mindspore import ops, Tensor, context\n'), ((1005, 1021), 'mindspore.Tensor', 'Tensor', (['input_np'], {}), '(input_np)\n', (1011, 1021), False, 'from mindspore import ops, Tensor, context\n'), ((280, 292), 'mindspore.ops.Assign', 'ops.Assign', ([], {}), '()\n', (290, 292), False, 'from mindspore import ops, Tensor, context\n'), ((807, 828), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (822, 828), True, 'import numpy as np\n'), ((942, 963), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (957, 963), True, 'import numpy as np\n')] |
"""
Template matching
Template matching is a technique for finding areas of an image that are similar to
a patch (template).
A patch is a small image with certain features. The goal of template matching is
to find the patch/template in an image.
To find it, the user has to give two input images : Source Image (s) The image to
find the template in and Template Image (T) - The image that is to be found in
the source image.
It is basically a method for searching and finding the location of a template
image in a large image.
The idea here is to find identical regions of an image that match a template we
provide, giving a threshold
The threshold depends on the accuracy with which we want to detect the
template in the source image.
For instance, if we are applying face recognition and we want to detect the
eyes of a person, we can provide a random image of an eye as the template
and search the source (the face of a person)
In this case, since "eyes" show a large amount of variations from person to
person, even if we set the threshold as 50%(0.5), the eye will be detected
In cases where almost identical templates are to be searched, the
threshold should be set high. (t>=0.8)
How Template Matching Works?
The template image simple slides over the input image (as in 2d convolution)
The template and patch of input image under the template image are compared.
The result obtained is compared with the threshold.
In the function
cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED) the first
parameter is the main image, the second parameter is the template to be
matched and the third parameter is the method used for matching.
"""
# Python program to illustrate
# template matching
import cv2
import numpy as np

# Read the main image
img_rgb = cv2.imread('../images/1.jpeg')
# Convert it to grayscale
img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
# Read the template as grayscale
template = cv2.imread('template', 0)
# Store width and height of template in w and h
w, h = template.shape[::-1]
# Perform match operations.
res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
# Specify a threshold
threshold = 0.8
# Store the coordinates of matched areas in a numpy array
loc = np.where(res >= threshold)
# Draw a rectangle around each matched region.
for pt in zip(*loc[::-1]):
    cv2.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 255, 255), 2)
# Show the final image with the matched areas.
cv2.imshow('Detected', img_rgb)
# Fix: without waitKey the HighGUI window is destroyed immediately and the
# result is never visible; block until a key is pressed, then clean up.
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"numpy.where",
"cv2.imshow",
"cv2.cvtColor",
"cv2.matchTemplate",
"cv2.imread"
] | [((1881, 1911), 'cv2.imread', 'cv2.imread', (['"""../images/1.jpeg"""'], {}), "('../images/1.jpeg')\n", (1891, 1911), False, 'import cv2\n'), ((1950, 1991), 'cv2.cvtColor', 'cv2.cvtColor', (['img_rgb', 'cv2.COLOR_BGR2GRAY'], {}), '(img_rgb, cv2.COLOR_BGR2GRAY)\n', (1962, 1991), False, 'import cv2\n'), ((2024, 2049), 'cv2.imread', 'cv2.imread', (['"""template"""', '(0)'], {}), "('template', 0)\n", (2034, 2049), False, 'import cv2\n'), ((2162, 2221), 'cv2.matchTemplate', 'cv2.matchTemplate', (['img_gray', 'template', 'cv2.TM_CCOEFF_NORMED'], {}), '(img_gray, template, cv2.TM_CCOEFF_NORMED)\n', (2179, 2221), False, 'import cv2\n'), ((2323, 2349), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (2331, 2349), True, 'import numpy as np\n'), ((2540, 2571), 'cv2.imshow', 'cv2.imshow', (['"""Detected"""', 'img_rgb'], {}), "('Detected', img_rgb)\n", (2550, 2571), False, 'import cv2\n'), ((2426, 2494), 'cv2.rectangle', 'cv2.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 255, 255)', '(2)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 255, 255), 2)\n', (2439, 2494), False, 'import cv2\n')] |
#!/usr/bin/env ipython
# -*- coding: utf-8 -*-
import random as ran
import math
import numpy as np
"""Define auxiliary functions for Corona Testing Simulation."""
def _make_test(testlist, current_success_rate, false_posivite_rate, prob_sick,
               tests_repetitions=1, test_result_decision_strategy='max'):
    """
    Perform one (possibly repeated) pooled test on a group.

    Input:
    testlist - list of probabilities (of being sick) of individuals
    current_success_rate - current probability of a test being successful
    false_positive rate - probability of a false positive
    prob_sick - probability that an individual is sick

    optional:
    tests_repetitions - perform given number of multiple tests
    test_result_decision_strategy - when using multiple tests decide either for 'max' or 'majority'
    """
    if not testlist:
        print('Testing empty group. This should not happen!')
    # the group contains a sick person iff any individual value is <= prob_sick
    # (invariant across repetitions, so it is computed once up front)
    group_has_sick = 1 if any(p <= prob_sick for p in testlist) else 0
    outcomes = []
    for _ in range(tests_repetitions):
        draw = ran.random()
        if group_has_sick == 1 and draw <= current_success_rate:
            # true positive
            outcomes.append(1)
        elif group_has_sick == 0 and draw <= false_posivite_rate:
            # false positive
            outcomes.append(1)
        else:
            # negative result (including false negatives)
            outcomes.append(0)
    if test_result_decision_strategy == 'max':
        return np.max(outcomes)
    elif test_result_decision_strategy == 'majority':
        return 0 if outcomes.count(0) > outcomes.count(1) else 1
def _split_groups(active_groups):
    """Split the first active group into two halves (binary tree search step).

    active_groups is a pair [ids, values]; both entries are cut at the
    midpoint of the value list and returned as two new [ids, values] pairs.
    """
    half = len(active_groups[1]) // 2
    ids, values = active_groups[0], active_groups[1]
    lower_half = [ids[:half], values[:half]]
    upper_half = [ids[half:], values[half:]]
    return [lower_half, upper_half]
def generate_data(sample_size, prob_sick):
    """
    Generate data of consecutively numbered individuals which are infected.
    The number of infected people is always ceil(sample_size*prob_sick); their
    positions are placed uniformly at random via np.random.shuffle.

    Returns:
    rawdata - list of int, 0 marks an infected individual, 1 a healthy one
    sick_list - elementwise complement of rawdata: infected (1), healthy (0)
    number_sick_people - int, equal to ceil(sample_size * prob_sick)
    """
    # Fix: removed dead code (unused sick_list_indices, overwritten list
    # initializations) and the stale commented-out debug block.
    number_sick_people = int(np.ceil(sample_size * prob_sick))
    # start with everybody healthy (1), infect the first slots, then shuffle
    arr = np.ones(sample_size)
    arr[:number_sick_people] = 0
    np.random.shuffle(arr)
    rawdata = list(arr.astype(int))
    # sick_list is the opposite of rawdata. infected (1), healthy (0)
    sick_list = [1-x for x in rawdata]
    if number_sick_people == 0:
        print("this test population contains no infected")
    return rawdata, sick_list, number_sick_people
def generate_data_old(sample_size, prob_sick):
    """
    Generate data of consecutively numbered individuals, each infected
    independently with probability prob_sick.

    THIS IS THE OLD ROUTINE, WHICH DISTRIBUTES SICKNESS WITH THE GIVEN
    PROBABILITY AND THUS HAS FLUCTUATIONS IN THE ACTUAL NUMBER OF INFECTED
    INDIVIDUALS.
    """
    # draw one uniform sample per individual (same RNG call order as before)
    rawdata = [np.random.rand() for _ in range(sample_size)]
    # an individual counts as infected iff its draw falls at or below prob_sick
    sick_list = [1 if value <= prob_sick else 0 for value in rawdata]
    sick_list_indices = [idx for idx, flag in enumerate(sick_list) if flag]
    number_sick_people = sum(sick_list)
    if number_sick_people == 0:
        print(
            'There would have been zero infected (probably sample_size is quite small). For Debugging purposes one infection has been added')
        infected_individual_index = 0
        rawdata[infected_individual_index] = 0
        sick_list[infected_individual_index] = 1
        sick_list_indices.append(infected_individual_index)
        number_sick_people = 1
    return rawdata, sick_list, number_sick_people
| [
"numpy.ceil",
"numpy.ones",
"numpy.random.rand",
"numpy.max",
"random.random",
"numpy.random.shuffle"
] | [((3063, 3083), 'numpy.ones', 'np.ones', (['sample_size'], {}), '(sample_size)\n', (3070, 3083), True, 'import numpy as np\n'), ((3121, 3143), 'numpy.random.shuffle', 'np.random.shuffle', (['arr'], {}), '(arr)\n', (3138, 3143), True, 'import numpy as np\n'), ((1127, 1139), 'random.random', 'ran.random', ([], {}), '()\n', (1137, 1139), True, 'import random as ran\n'), ((1861, 1877), 'numpy.max', 'np.max', (['outcomes'], {}), '(outcomes)\n', (1867, 1877), True, 'import numpy as np\n'), ((2803, 2835), 'numpy.ceil', 'np.ceil', (['(sample_size * prob_sick)'], {}), '(sample_size * prob_sick)\n', (2810, 2835), True, 'import numpy as np\n'), ((4581, 4597), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4595, 4597), True, 'import numpy as np\n')] |
"""
Compared with the original plot.py, the following features were added:
1. Can be run directly from PyCharm/VS Code as well as via command-line arguments;
2. Sorts by exp_name instead of by time;
3. Fixes a dedicated color for each exp_name;
4. Adjustable curve line width for easier inspection;
5. Saves the figure to disk, convenient for plotting over a remote ssh session;
6. Automatically shows the window in full screen;
7. The figure layout is self-adaptive;
8. For color-insensitive viewers, each legend entry can be annotated with its
   performance value and performance rank;
9. Sorts the legend entries by performance from high to low for easier comparison;
10. Provides a clip_xaxis value to uniformly truncate the training curves,
    making the figure tidier.
seaborn version 0.8.1
"""
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
import os
import os.path as osp
import numpy as np
DIV_LINE_WIDTH = 50  # width of the '=' divider printed around the logdir listing
# Global vars for tracking and labeling data at load time.
exp_idx = 0  # monotonically increasing run counter, used to build the Condition2 labels
units = dict()  # condition name -> number of seeds seen so far (becomes the "Unit" column)
def plot_data(data, xaxis='Epoch', value="TestEpRet",
              condition="Condition1", smooth=1,
              linewidth=4,
              rank=True,
              performance=True,
              **kwargs):
    """Smooth, rank and plot a list of experiment DataFrames with seaborn.

    Args:
        data (list[pd.DataFrame]): one frame per run; each must contain the
            `xaxis`, `value`, "Unit", "Condition1" and "Condition2" columns.
        xaxis (str): column used for the x axis.
        value (str): column plotted on the y axis.
        condition (str): column used to group/label curves in the legend.
        smooth (int): moving-average window width; note the performance
            ranking is only accumulated when smooth > 1 (see NOTE below).
        linewidth (int): curve line width.
        rank (bool): prefix each legend entry with its performance rank.
        performance (bool): prefix each legend entry with its mean tail
            performance value.
        **kwargs: forwarded to sns.tsplot.
    """
    performance_rank_dict = {}
    condition2_list = []
    if smooth > 1:
        """
        smooth data with moving window average.
        that is,
            smoothed_y[t] = average(y[t-k], y[t-k+1], ..., y[t+k-1], y[t+k])
        where the "smooth" param is width of that window (2k+1)
        """
        y = np.ones(smooth)
        for datum in data:
            condition2_list.append(datum["Condition2"].values[0])
            x = np.asarray(datum[value])
            z = np.ones(len(x))
            # ratio of two convolutions keeps the moving average correct at the edges
            smoothed_x = np.convolve(x, y, 'same') / np.convolve(z, y, 'same')
            datum[value] = smoothed_x
            # add mean performance (over the last 10% of points) to performance_rank{dict}
            print("rank-add:", datum[condition].values[0])
            if datum[condition].values[0] not in performance_rank_dict.keys():
                performance_rank_dict[datum[condition].values[0]] = np.mean(smoothed_x[-len(smoothed_x)//10:])
            else:
                performance_rank_dict[datum[condition].values[0]] += np.mean(smoothed_x[-len(smoothed_x)//10:])
    # NOTE(review): when smooth <= 1, performance_rank_dict stays empty, so the
    # legend re-ordering below will fail on .index() — confirm callers always smooth.
    # average the accumulated score over the multiple seeds of each condition:
    for key in performance_rank_dict.keys():
        seed_num = sum([1 for cond in condition2_list if key in cond])
        performance_rank_dict[key] /= seed_num
    # collect the per-condition performance values so they can be rank-ordered
    performance_list = []
    performance_rank_keys = []
    for key, val in performance_rank_dict.items():
        print(key, val)
        performance_list.append(val)
        performance_rank_keys.append(key)
    # double argsort turns scores into ranks (0 = best since scores are negated)
    performance_rank_list = np.argsort(np.argsort(-np.array(performance_list)))
    performance_rank_sort_dict = {performance_rank_keys[index]: performance_rank_list[index]
                                  for index in range(len(performance_rank_list))}
    print("performance_rank_list:", performance_rank_list)
    # rewrite data[condition] labels with "P-<score>-" / "Rank-<n>-" prefixes
    for index, datum in enumerate(data):
        origin_key = datum[condition].values[0]
        if performance:
            p = performance_rank_dict[origin_key]
            datum[condition] = 'P-' + str(np.round(p, 3)) + "-" + datum[condition]
        if rank:
            rank_value = performance_rank_sort_dict[origin_key]
            datum[condition] = 'Rank-' + str(rank_value) + "-" + datum[condition]
    if isinstance(data, list):
        data = pd.concat(data, ignore_index=True)
    sns.set(style="darkgrid", font_scale=1.75, )
    # # sort data by legend label;
    # NOTE(review): sort_values is not in-place and the result is discarded —
    # confirm whether `data = data.sort_values(...)` was intended.
    data.sort_values(by='Condition1', axis=0)
    sns.tsplot(data=data,
               time=xaxis,
               value=value,
               unit="Unit",
               condition=condition,
               ci='sd',
               linewidth=linewidth,
               color=sns.color_palette("Paired", len(data)),
               # palette=sns.color_palette("hls", 8),
               **kwargs)
    """
    If you upgrade to any version of Seaborn greater than 0.8.1, switch from
    tsplot to lineplot replacing L29 with:
        sns.lineplot(data=data, x=xaxis, y=value, hue=condition, ci='sd', **kwargs)
    Changes the colorscheme and the default legend style, though.
    plt.legend() parameter notes:
        loc: legend position, one of ('best', 'upper right', 'upper left',
            'lower left', 'lower right', 'right', 'center left', 'center right',
            'lower center', 'upper center', 'center');
            ignored when bbox_to_anchor is given
        fontsize: int/float or {'xx-small', 'x-small', 'small', 'medium',
            'large', 'x-large', 'xx-large'}, font size;
        frameon: whether to draw the legend frame,
        ncol: number of legend columns, default 1,
        title: legend title
        shadow: whether to draw a shadow behind the legend frame,
        markerfirst: True puts the label to the right of the handle,
        markerscale: marker size relative to the original plot markers,
        numpoints: number of marker points on each legend handle, usually 1,
        fancybox: whether to round the corners of the legend frame
        framealpha: legend frame transparency
        borderpad: padding inside the legend frame
        labelspacing: vertical spacing between legend entries
        handlelength: length of the legend handles
        bbox_to_anchor: custom legend anchor (x grows rightward, y downward);
            use it to place the legend outside the axes, usually together with
            ax.get_position() and
            set_position([box.x0, box.y0, box.width*0.8, box.height])
    """
    # also sort the legend entries so they read top-down by performance rank
    handles, labels = plt.gca().get_legend_handles_labels()
    sorted_handles = []
    sorted_labels = []
    for index in range(len(handles)):
        order_index = list(performance_rank_list).index(index)
        sorted_handles.append(handles[order_index])
        sorted_labels.append(labels[order_index])
    plt.legend(sorted_handles, sorted_labels, loc='upper center', labelspacing=0.25,
               ncol=1,
               handlelength=6,
               mode="expand",
               borderaxespad=0.,
               )
    # plt.legend(loc='upper center',
    #            ncol=1,
    #            handlelength=6,
    #            mode="expand",
    #            borderaxespad=0.,
    #            )
    """
    For the version of the legend used in the Spinning Up benchmarking page,
    swap L38 with:
    plt.legend(loc='upper center', ncol=6, handlelength=1,
               mode="expand", borderaxespad=0., prop={'size': 13})
    """
    xscale = np.max(np.asarray(data[xaxis])) > 5e3
    if xscale:
        # Just some formatting niceness: x-axis scale in scientific notation if max x is large
        plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
    plt.tight_layout(pad=0.5)
def get_datasets(logdir, condition=None):
    """
    Recursively look through logdir for output files produced by
    spinup.logx.Logger.
    Assumes that any file "progress.txt" is a valid hit.

    Args:
        logdir (str): root directory to walk.
        condition (str): optional label overriding the exp_name from config.json.

    Returns:
        list[pd.DataFrame]: one frame per run, each augmented with the
        'Unit', 'Condition1', 'Condition2' and 'Performance' columns.
    """
    global exp_idx
    global units
    datasets = []
    roots = []
    exp_names = []
    for root, _, files in os.walk(logdir):
        if 'progress.txt' in files:
            exp_name = None
            try:
                config_path = open(os.path.join(root, 'config.json'))
                config = json.load(config_path)
                if 'exp_name' in config:
                    exp_name = config['exp_name']
                    exp_names.append(exp_name)
                    roots.append(root)
            except Exception as e:
                print("e:", e)
                print('No file named config.json')
    # just leave one seed:
    # roots_names_dict = {exp_names[index]: roots[index] for index in range(len(exp_names))}
    # exp_name(str) --> roots(list) with diff seeds
    # NOTE(review): every exp_name is mapped to the FULL roots list, so with
    # several distinct exp_names each run directory is loaded once per name —
    # confirm this duplication is intended (the commented line above pairs 1:1).
    roots_names_dict = {exp_names[index]: roots for index in range(len(exp_names))}
    for key, value in roots_names_dict.items():
        print(key, value)
    # sort by experiment name
    roots_names_list = sorted(roots_names_dict.items(), key=lambda x: x[0])
    print("roots_names_list:", roots_names_list)
    roots_names_dict = {tup[0]: tup[1] for tup in roots_names_list}
    print("roots_names_dict:", roots_names_dict)
    for exp_name, roots in roots_names_dict.items():
        for root in roots:
            condition1 = condition or exp_name or 'exp'
            condition2 = condition1 + '-' + str(exp_idx)
            exp_idx += 1
            if condition1 not in units:
                units[condition1] = 0
            unit = units[condition1]
            units[condition1] += 1
            # x-axis clip value, default None. When set to a concrete value, all
            # runs are truncated uniformly so the figure looks tidier. Choose it
            # manually based on the current x axis (e.g. steps ~1e6, epochs ~500).
            # e.g. to truncate at epoch=300, just set clip_xaxis=300.
            clip_xaxis = None
            try:
                exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
                if clip_xaxis is not None:
                    exp_data = exp_data[:clip_xaxis]
                line_num = len(exp_data)
                print('line num:{}, read from {}'.format(line_num,
                                                         os.path.join(root, 'progress.txt')))
            except:
                print('Could not read from %s' % os.path.join(root, 'progress.txt'))
                continue
            performance = 'TestEpRet' if 'TestEpRet' in exp_data else 'AverageTestEpRet'
            exp_data.insert(len(exp_data.columns), 'Unit', unit)
            exp_data.insert(len(exp_data.columns), 'Condition1', condition1)
            exp_data.insert(len(exp_data.columns), 'Condition2', condition2)
            exp_data.insert(len(exp_data.columns), 'Performance', exp_data[performance])
            datasets.append(exp_data)
    # # Default (disabled): gather folder data ordered by time instead of by name
    # print("-"*10, 'sorted by time', '-'*10)
    # for root, _, files in os.walk(logdir):
    #     if 'progress.txt' in files:
    #         exp_name = None
    #         try:
    #             config_path = open(os.path.join(root, 'config.json'))
    #             config = json.load(config_path)
    #             if 'exp_name' in config:
    #                 exp_name = config['exp_name']
    #         except:
    #             print('No file named config.json')
    #         condition1 = condition or exp_name or 'exp'
    #         condition2 = condition1 + '-' + str(exp_idx)
    #         exp_idx += 1
    #         if condition1 not in units:
    #             units[condition1] = 0
    #         unit = units[condition1]
    #         units[condition1] += 1
    #
    #         try:
    #             exp_data = pd.read_table(os.path.join(root, 'progress.txt'))
    #             line_num = len(exp_data)
    #             print('line num:{}, read from {}'.format(line_num,
    #                                                      os.path.join(root, 'progress.txt')))
    #         except:
    #             print('Could not read from %s' % os.path.join(root, 'progress.txt'))
    #             continue
    #         # performance = 'AverageTestEpRet' if 'AverageTestEpRet' in exp_data else 'TestEpRet'
    #         # performance = 'AverageEpRet' if 'AverageTestEpRet' in exp_data else 'AverageEpRet'
    #         performance = 'TestSuccess' if 'TestSuccess' in exp_data else 'AverageEpRet'
    #         exp_data.insert(len(exp_data.columns),'Unit',unit)
    #         exp_data.insert(len(exp_data.columns),'Condition1',condition1)
    #         exp_data.insert(len(exp_data.columns),'Condition2',condition2)
    #         exp_data.insert(len(exp_data.columns),'Performance',exp_data[performance])
    #         datasets.append(exp_data)
    return datasets
def get_all_datasets(all_logdirs, legend=None, select=None, exclude=None):
    """Resolve a list of log-directory entries and load all matching runs.

    For every entry in all_logdirs:
      1) if the entry is a real directory (ending in the OS separator),
         pull data from it directly;
      2) otherwise treat the last path component as a prefix and pull data
         from every sibling directory whose name contains it.

    Entries are then filtered by the `select` / `exclude` substring rules
    and handed to get_datasets, one legend title per directory if given.
    """
    logdirs = []
    for entry in all_logdirs:
        if osp.isdir(entry) and entry[-1] == os.sep:
            logdirs.append(entry)
        else:
            basedir = osp.dirname(entry)
            prefix = entry.split(os.sep)[-1]
            print("basedir:", basedir)
            # expand the prefix against the parent directory's contents
            matches = [name for name in os.listdir(basedir) if prefix in name]
            logdirs.extend(sorted(osp.join(basedir, name) for name in matches))
    # Enforce selection rules, which check logdirs for certain substrings.
    # Makes it easier to look at graphs from particular ablations when many
    # similarly-named jobs were launched at once.
    if select is not None:
        logdirs = [d for d in logdirs if all(token in d for token in select)]
    if exclude is not None:
        logdirs = [d for d in logdirs if not any(token in d for token in exclude)]
    # Verify logdirs
    print('Plotting from...\n' + '=' * DIV_LINE_WIDTH + '\n')
    for d in logdirs:
        print(d)
    print('\n' + '=' * DIV_LINE_WIDTH)
    # Make sure the legend is compatible with the logdirs
    assert not (legend) or (len(legend) == len(logdirs)), \
        "Must give a legend title for each set of experiments."
    # Load data from logdirs
    data = []
    if legend:
        for d, leg in zip(logdirs, legend):
            data += get_datasets(d, leg)
    else:
        for d in logdirs:
            data += get_datasets(d)
    return data
def make_plots(all_logdirs, legend=None,
               xaxis=None, values=None,
               count=False,
               font_scale=1.5, smooth=1,
               linewidth=4,
               select=None, exclude=None,
               estimator='mean',
               rank=True,
               performance=True,
               ):
    """Load all runs, draw one figure per value column, and save a PNG.

    Args:
        all_logdirs (list[str]): log directories or directory prefixes.
        legend (list[str]): optional legend titles, one per logdir.
        xaxis (str): column used for the x axis.
        values (str | list[str]): y-axis column(s); one figure each.
        count (bool): plot each random seed separately instead of averaging.
        font_scale (float): unused here (kept for interface compatibility).
        smooth (int): moving-average window passed to plot_data.
        linewidth (float): curve line width.
        select/exclude (list[str]): substring filters on the logdir paths.
        estimator (str): numpy reduction name shown as the main curve.
        rank/performance (bool): legend annotation toggles (see plot_data).
    """
    data = get_all_datasets(all_logdirs, legend, select, exclude)
    values = values if isinstance(values, list) else [values]
    condition = 'Condition2' if count else 'Condition1'
    estimator = getattr(np, estimator)      # choose what to show on main curve: mean? max? min?
    for value in values:
        plt.figure()
        plot_data(data, xaxis=xaxis, value=value,
                  condition=condition, smooth=smooth, estimator=estimator,
                  linewidth=linewidth, rank=rank, performance=performance)
    # maximize the figure window by default
    manager = plt.get_current_fig_manager()
    try:
        # matplotlib3.3.4 work
        manager.resize(*manager.window.maxsize())
    except:
        # matplotlib3.2.1//2.2.3 work
        manager.window.showMaximized()
    fig = plt.gcf()
    fig.set_size_inches((16, 9), forward=False)
    # build a filename suffix out of the select/exclude tokens
    select_str = ''
    exclude_str = ''
    print("select:", select)
    print("select_str:", select_str)
    if select is not None and type(select) is list:
        for s_str in select:
            select_str += s_str
    if exclude is not None and type(exclude) is list:
        for s_str in exclude:
            exclude_str += s_str
    print("select_str:", select_str)
    try:
        # show the figure when a display is available (fails over remote ssh)
        plt.show()
    except:
        pass
    # always save alongside the first logdir so remote runs still get the image
    fig.savefig(all_logdirs[0] + 'ep_reward_'+select_str+exclude_str+'.png',
                bbox_inches='tight',
                dpi=300)
    # plt.savefig(all_logdirs[0] + 'ep_reward.png',
    #             bbox_inches='tight',
    #             dpi=300,
    #             )
def main():
    """Parse arguments (from the command line or IDE defaults) and plot."""
    import argparse
    parser = argparse.ArgumentParser()
    import sys
    # When launched from a terminal, the branch below is used; a data path is required!
    if len(sys.argv) > 1:
        print("run in command: \n argv:", sys.argv, '\n', '-' * 30)
        parser.add_argument('logdir', nargs='*')
        # other nargs
        parser.add_argument('--select', nargs='*',
                            help='在当前路径下,选择特定关键词,不能是下一个文件夹,'
                                 '在idle中不能是字符串,在终端,不用加双引号,多个关键词可以用空格隔开')
        parser.add_argument('--exclude', nargs='*',
                            help='同select')
    else:
        # When launched from an IDE (for debugging), add your data paths to the default list below!
        print("run in pycharm\n", '-' * 30)
        parser.add_argument('--logdir', '-r', type=list,
                            default=[
                                # Windows path example: the "2020" keeps the sub-folder name prefix,
                                # e.g. sub-folders named "2020-reach-*"; it must not be just "plot_demo_files\"
                                r"plot_demo_files\2020",
                                # Ubuntu path example:
                                # "plot_demo_files/2020",
                            ])
        # other nargs
        parser.add_argument('--select', default=[], )
        parser.add_argument('--exclude', default=[], )
    parser.add_argument('--legend', '-l', nargs='*')
    parser.add_argument('--xaxis', '-x', default='TotalEnvInteracts',
                        help='选择什么为横坐标,默认为TotalEnvInteracts')
    parser.add_argument('--value', '-y', default='Performance', nargs='*',
                        help='选择特定变量为性能指标,默认为AverageTestEpRet')
    parser.add_argument('--count', action='store_true',
                        help='是否显示每个随机种子,加--count为显示')
    # parser.add_argument('--count', default="False")
    parser.add_argument('--smooth', '-s', type=int, default=20,
                        help='滑动平均,20看起来会更平滑些')
    parser.add_argument('--linewidth', '-lw', type=float, default=4,
                        help='实验线宽,粗点容易分清')
    parser.add_argument('--rank', type=bool, default=True,
                        help='是否在legend上显示性能排序')
    parser.add_argument('--performance', type=bool, default=True,
                        help='是否在legend上显示性能值')
    parser.add_argument('--est', default='mean')
    args = parser.parse_args()
    print("args:", args)
    make_plots(args.logdir, args.legend, args.xaxis, args.value, args.count,
               smooth=args.smooth, select=args.select, exclude=args.exclude,
               estimator=args.est,
               linewidth=args.linewidth,
               rank=args.rank,
               performance=args.performance)
if __name__ == "__main__":
    # Entry point: parse arguments and draw/save the figures.
    main()
| [
"numpy.convolve",
"numpy.array",
"os.walk",
"seaborn.set",
"os.listdir",
"argparse.ArgumentParser",
"numpy.asarray",
"os.path.isdir",
"numpy.round",
"numpy.ones",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"os.path.dirname",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
... | [((3104, 3146), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""', 'font_scale': '(1.75)'}), "(style='darkgrid', font_scale=1.75)\n", (3111, 3146), True, 'import seaborn as sns\n'), ((5142, 5285), 'matplotlib.pyplot.legend', 'plt.legend', (['sorted_handles', 'sorted_labels'], {'loc': '"""upper center"""', 'labelspacing': '(0.25)', 'ncol': '(1)', 'handlelength': '(6)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(sorted_handles, sorted_labels, loc='upper center', labelspacing=\n 0.25, ncol=1, handlelength=6, mode='expand', borderaxespad=0.0)\n", (5152, 5285), True, 'import matplotlib.pyplot as plt\n'), ((6015, 6040), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)'}), '(pad=0.5)\n', (6031, 6040), True, 'import matplotlib.pyplot as plt\n'), ((6361, 6376), 'os.walk', 'os.walk', (['logdir'], {}), '(logdir)\n', (6368, 6376), False, 'import os\n'), ((13535, 13564), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (13562, 13564), True, 'import matplotlib.pyplot as plt\n'), ((13754, 13763), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13761, 13763), True, 'import matplotlib.pyplot as plt\n'), ((14584, 14609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14607, 14609), False, 'import argparse\n'), ((1065, 1080), 'numpy.ones', 'np.ones', (['smooth'], {}), '(smooth)\n', (1072, 1080), True, 'import numpy as np\n'), ((3065, 3099), 'pandas.concat', 'pd.concat', (['data'], {'ignore_index': '(True)'}), '(data, ignore_index=True)\n', (3074, 3099), True, 'import pandas as pd\n'), ((5948, 6009), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'style': '"""sci"""', 'axis': '"""x"""', 'scilimits': '(0, 0)'}), "(style='sci', axis='x', scilimits=(0, 0))\n", (5968, 6009), True, 'import matplotlib.pyplot as plt\n'), ((13293, 13305), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13303, 13305), True, 'import matplotlib.pyplot 
as plt\n'), ((14226, 14236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14234, 14236), True, 'import matplotlib.pyplot as plt\n'), ((1190, 1214), 'numpy.asarray', 'np.asarray', (['datum[value]'], {}), '(datum[value])\n', (1200, 1214), True, 'import numpy as np\n'), ((4850, 4859), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4857, 4859), True, 'import matplotlib.pyplot as plt\n'), ((5799, 5822), 'numpy.asarray', 'np.asarray', (['data[xaxis]'], {}), '(data[xaxis])\n', (5809, 5822), True, 'import numpy as np\n'), ((11243, 11260), 'os.path.isdir', 'osp.isdir', (['logdir'], {}), '(logdir)\n', (11252, 11260), True, 'import os.path as osp\n'), ((11355, 11374), 'os.path.dirname', 'osp.dirname', (['logdir'], {}), '(logdir)\n', (11366, 11374), True, 'import os.path as osp\n'), ((11535, 11554), 'os.listdir', 'os.listdir', (['basedir'], {}), '(basedir)\n', (11545, 11554), False, 'import os\n'), ((1272, 1297), 'numpy.convolve', 'np.convolve', (['x', 'y', '"""same"""'], {}), "(x, y, 'same')\n", (1283, 1297), True, 'import numpy as np\n'), ((1300, 1325), 'numpy.convolve', 'np.convolve', (['z', 'y', '"""same"""'], {}), "(z, y, 'same')\n", (1311, 1325), True, 'import numpy as np\n'), ((2318, 2344), 'numpy.array', 'np.array', (['performance_list'], {}), '(performance_list)\n', (2326, 2344), True, 'import numpy as np\n'), ((6554, 6576), 'json.load', 'json.load', (['config_path'], {}), '(config_path)\n', (6563, 6576), False, 'import json\n'), ((11407, 11427), 'os.path.join', 'osp.join', (['basedir', 'x'], {}), '(basedir, x)\n', (11415, 11427), True, 'import os.path as osp\n'), ((6494, 6527), 'os.path.join', 'os.path.join', (['root', '"""config.json"""'], {}), "(root, 'config.json')\n", (6506, 6527), False, 'import os\n'), ((8045, 8079), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (8057, 8079), False, 'import os\n'), ((8342, 8376), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 
'progress.txt')\n", (8354, 8376), False, 'import os\n'), ((2814, 2828), 'numpy.round', 'np.round', (['p', '(3)'], {}), '(p, 3)\n', (2822, 2828), True, 'import numpy as np\n'), ((8448, 8482), 'os.path.join', 'os.path.join', (['root', '"""progress.txt"""'], {}), "(root, 'progress.txt')\n", (8460, 8482), False, 'import os\n')] |
import argparse
import os
import numpy as np
from rsgd.common.dat import load_dat
from rsgd.common.logistic import logistic_grad
from rsgd.common.logistic import logistic_loss
from rsgd.common.logistic import logistic_test
from rsgd.common.utils import get_batch_index
from sklearn.utils import shuffle
def sgd_restart(X, y, grad, batch_size, n_epoch, L,
init_step=2.0, R=1.0, reg_coeff=0.001, reset_intvl=20,
verbose=False, loss_func=None, test_func=None):
N, dim = X.shape
batch_idx = get_batch_index(N, batch_size)
m = len(batch_idx) - 1
mu = reg_coeff
niter = n_epoch*m + 1
it = 0
# initialization
w = np.zeros((niter, dim))
sens = np.zeros((niter, m))
step_size = init_step
last_avg_idx = 1
epoch_cnt = 0
for t in range(n_epoch):
if m > 1:
step_size = init_step/(epoch_cnt + 1)
# recurrence coefficient
contr_coeff = max(np.abs(1. - step_size*mu),
np.abs(1. - step_size*L))
b = (2.0*R*step_size) / batch_size
for j in range(m):
mini_X = X[batch_idx[j]:batch_idx[j+1], :]
mini_y = y[batch_idx[j]:batch_idx[j+1]]
# gradient desecent update
gt = grad(w[it], mini_X, mini_y) / batch_size
gt += reg_coeff * w[it]
gt /= np.linalg.norm(gt)
w[it+1, :] = w[it] - step_size*gt
sens[it+1, :] = contr_coeff*sens[it, :]
sens[it+1, j] += b
# increase the total number of iteration counts
it += 1
# averaging and reset the step size
if (t % reset_intvl) == 0:
w[it, :] = np.mean(w[last_avg_idx:it+1, :], axis=0)
sens[it, :] = np.mean(sens[last_avg_idx:it+1, :], axis=0)
last_avg_idx = it + 1
epoch_cnt = 0
else:
epoch_cnt += 1
if verbose:
objval = loss_func(w[it], X, y)
acc = test_func(w[it], X, y)*100
print("[{0}] loss={1:.5f} acc={2:7.3f}".format(t, objval, acc))
return w[-1], sens[-1]
if __name__ == "__main__":
    # Command-line driver: load a .dat dataset and run sgd_restart on it.
    parser = argparse.ArgumentParser(description='recursive mechanism')
    parser.add_argument('dname', help='dataset name')
    parser.add_argument('T', type=int, help='epoch')
    parser.add_argument('rst', type=int, help='reset interval')
    parser.add_argument('--data_dir', type=str, default=None)
    parser.add_argument('--batch_size', type=int, default=4000)
    parser.add_argument('--init_step', type=float, default=0.5)
    parser.add_argument('--mu', type=float, default=0.001)
    parser.add_argument('--L', type=float, default=1.81)
    parser.add_argument('--norm', type=float, default=1.0)
    parser.add_argument('--delta', type=float, default=1e-12)
    args = parser.parse_args()
    # NOTE(review): --init_step and --delta are parsed but never used below;
    # sgd_restart is called with init_step=0.5 hard-coded — confirm intended.
    # load the dataset
    fpath = os.path.join(args.data_dir, f"{args.dname}.dat")
    X, y = load_dat(fpath, normalize=args.norm)
    # y[y < 0.5] = -1.0
    batch_size = args.batch_size
    n_epoch = args.T
    w, sens = sgd_restart(X, y, logistic_grad, batch_size, n_epoch, args.L,
                          reg_coeff=args.mu, reset_intvl=args.rst,
                          R=args.norm, init_step=0.5, verbose=True,
                          loss_func=logistic_loss, test_func=logistic_test)
    acc = logistic_test(w, X, y)*100
    print("accuracy={}".format(acc))
    print("sensitivity={}".format(sens))
| [
"numpy.abs",
"numpy.mean",
"argparse.ArgumentParser",
"rsgd.common.dat.load_dat",
"rsgd.common.utils.get_batch_index",
"os.path.join",
"numpy.zeros",
"rsgd.common.logistic.logistic_test",
"numpy.linalg.norm"
] | [((530, 560), 'rsgd.common.utils.get_batch_index', 'get_batch_index', (['N', 'batch_size'], {}), '(N, batch_size)\n', (545, 560), False, 'from rsgd.common.utils import get_batch_index\n'), ((675, 697), 'numpy.zeros', 'np.zeros', (['(niter, dim)'], {}), '((niter, dim))\n', (683, 697), True, 'import numpy as np\n'), ((709, 729), 'numpy.zeros', 'np.zeros', (['(niter, m)'], {}), '((niter, m))\n', (717, 729), True, 'import numpy as np\n'), ((2164, 2222), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""recursive mechanism"""'}), "(description='recursive mechanism')\n", (2187, 2222), False, 'import argparse\n'), ((2893, 2941), 'os.path.join', 'os.path.join', (['args.data_dir', 'f"""{args.dname}.dat"""'], {}), "(args.data_dir, f'{args.dname}.dat')\n", (2905, 2941), False, 'import os\n'), ((2953, 2989), 'rsgd.common.dat.load_dat', 'load_dat', (['fpath'], {'normalize': 'args.norm'}), '(fpath, normalize=args.norm)\n', (2961, 2989), False, 'from rsgd.common.dat import load_dat\n'), ((3366, 3388), 'rsgd.common.logistic.logistic_test', 'logistic_test', (['w', 'X', 'y'], {}), '(w, X, y)\n', (3379, 3388), False, 'from rsgd.common.logistic import logistic_test\n'), ((953, 981), 'numpy.abs', 'np.abs', (['(1.0 - step_size * mu)'], {}), '(1.0 - step_size * mu)\n', (959, 981), True, 'import numpy as np\n'), ((1006, 1033), 'numpy.abs', 'np.abs', (['(1.0 - step_size * L)'], {}), '(1.0 - step_size * L)\n', (1012, 1033), True, 'import numpy as np\n'), ((1362, 1380), 'numpy.linalg.norm', 'np.linalg.norm', (['gt'], {}), '(gt)\n', (1376, 1380), True, 'import numpy as np\n'), ((1696, 1738), 'numpy.mean', 'np.mean', (['w[last_avg_idx:it + 1, :]'], {'axis': '(0)'}), '(w[last_avg_idx:it + 1, :], axis=0)\n', (1703, 1738), True, 'import numpy as np\n'), ((1763, 1808), 'numpy.mean', 'np.mean', (['sens[last_avg_idx:it + 1, :]'], {'axis': '(0)'}), '(sens[last_avg_idx:it + 1, :], axis=0)\n', (1770, 1808), True, 'import numpy as np\n')] |
import numpy as np
from urllib import request
import gzip
import os
import boto3
import json
# Resolve config.json relative to this file so the script works from any CWD.
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, "config.json"), "r") as f:
    CONFIG = json.load(f)  # must contain at least a "public_bucket" key (used below)
def mnist_to_numpy(data_dir='/tmp/data', train=True):
    """Download MNIST dataset and convert it to numpy array

    Args:
        data_dir (str): directory to save the data
        train (bool): download training set

    Returns:
        tuple of images and labels as numpy arrays
    """
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    # pick the training or test split of the standard IDX files
    if train:
        images_file = "train-images-idx3-ubyte.gz"
        labels_file = "train-labels-idx1-ubyte.gz"
    else:
        images_file = "t10k-images-idx3-ubyte.gz"
        labels_file = "t10k-labels-idx1-ubyte.gz"

    # download objects from the public S3 bucket, skipping files already cached
    s3 = boto3.client('s3')
    bucket = CONFIG["public_bucket"]
    for obj in [images_file, labels_file]:
        key = os.path.join("datasets/image/MNIST", obj)
        dest = os.path.join(data_dir, obj)
        if not os.path.exists(dest):
            s3.download_file(bucket, key, dest)
    return _convert_to_numpy(data_dir, images_file, labels_file)
def _convert_to_numpy(data_dir, images_file, labels_file):
    """Decode gzipped MNIST IDX byte strings into numpy arrays.

    Returns:
        tuple: (images, labels) where images is uint8 of shape (N, 28, 28)
        (16-byte IDX header skipped) and labels is uint8 of shape (N,)
        (8-byte IDX header skipped).
    """
    images_path = os.path.join(data_dir, images_file)
    labels_path = os.path.join(data_dir, labels_file)
    with gzip.open(images_path, 'rb') as fh:
        raw = fh.read()
    images = np.frombuffer(raw, np.uint8, offset=16).reshape(-1, 28, 28)
    with gzip.open(labels_path, 'rb') as fh:
        labels = np.frombuffer(fh.read(), np.uint8, offset=8)
    return (images, labels)
def normalize(x, axis):
    """Standardize ``x`` to zero mean and unit variance along ``axis``.

    A machine epsilon is added to the standard deviation so that constant
    slices do not trigger a division by zero.
    """
    eps = np.finfo(float).eps
    centered = x - np.mean(x, axis=axis, keepdims=True)
    return centered / (np.std(x, axis=axis, keepdims=True) + eps)
def adjust_to_framework(x, framework='pytorch'):
    """Adjust a ``numpy.ndarray`` to be used as input for specified framework

    Args:
        x (numpy.ndarray): Batch of images to be adjusted
            to follow the convention in pytorch / tensorflow / mxnet
        framework (str): Framework to use. Takes value in
            ``pytorch``, ``tensorflow`` or ``mxnet``

    Return:
        numpy.ndarray following the convention of tensors in the given
        framework

    Raises:
        ValueError: if ``framework`` is not one of the supported names.
    """
    if x.ndim == 3:
        # input is gray-scale: insert an explicit channel axis -> (N, 1, H, W)
        x = np.expand_dims(x, 1)

    if framework in ['pytorch', 'mxnet']:
        # channels-first (depth-major) layout
        return x
    elif framework == 'tensorflow':
        # BUG FIX: condition was 'tensorlfow' (typo), so passing 'tensorflow'
        # always fell through to the ValueError branch.
        # channels-last (depth-minor) layout
        return np.transpose(x, (0, 2, 3, 1))
    else:
        raise ValueError('framework must be one of ' + \
            '[pytorch, tensorflow, mxnet], got {}'.format(framework))
if __name__ == '__main__':
    # Smoke test: download the training split and coerce to numeric arrays.
    X, Y = mnist_to_numpy()
    X, Y = X.astype(np.float32), Y.astype(np.int8)
| [
"numpy.mean",
"os.path.exists",
"boto3.client",
"os.makedirs",
"numpy.std",
"os.path.join",
"json.load",
"numpy.expand_dims",
"numpy.finfo",
"os.path.abspath",
"numpy.transpose"
] | [((121, 146), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (136, 146), False, 'import os\n'), ((221, 233), 'json.load', 'json.load', (['f'], {}), '(f)\n', (230, 233), False, 'import json\n'), ((877, 895), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (889, 895), False, 'import boto3\n'), ((1704, 1740), 'numpy.mean', 'np.mean', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (1711, 1740), True, 'import numpy as np\n'), ((159, 195), 'os.path.join', 'os.path.join', (['dirname', '"""config.json"""'], {}), "(dirname, 'config.json')\n", (171, 195), False, 'import os\n'), ((552, 576), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (566, 576), False, 'import os\n'), ((586, 607), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (597, 607), False, 'import os\n'), ((990, 1031), 'os.path.join', 'os.path.join', (['"""datasets/image/MNIST"""', 'obj'], {}), "('datasets/image/MNIST', obj)\n", (1002, 1031), False, 'import os\n'), ((1047, 1074), 'os.path.join', 'os.path.join', (['data_dir', 'obj'], {}), '(data_dir, obj)\n', (1059, 1074), False, 'import os\n'), ((1672, 1687), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (1680, 1687), True, 'import numpy as np\n'), ((1780, 1815), 'numpy.std', 'np.std', (['x'], {'axis': 'axis', 'keepdims': '(True)'}), '(x, axis=axis, keepdims=True)\n', (1786, 1815), True, 'import numpy as np\n'), ((2421, 2441), 'numpy.expand_dims', 'np.expand_dims', (['x', '(1)'], {}), '(x, 1)\n', (2435, 2441), True, 'import numpy as np\n'), ((1090, 1110), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (1104, 1110), False, 'import os\n'), ((1343, 1378), 'os.path.join', 'os.path.join', (['data_dir', 'images_file'], {}), '(data_dir, images_file)\n', (1355, 1378), False, 'import os\n'), ((1498, 1533), 'os.path.join', 'os.path.join', (['data_dir', 'labels_file'], {}), '(data_dir, labels_file)\n', (1510, 1533), False, 
'import os\n'), ((2601, 2630), 'numpy.transpose', 'np.transpose', (['x', '(0, 2, 3, 1)'], {}), '(x, (0, 2, 3, 1))\n', (2613, 2630), True, 'import numpy as np\n')] |
# Search function
# Prior to running this, a model must first be loaded and vectors must first be built for documents
import pandas as pd
import re
import spacy
from rank_bm25 import BM25Okapi
from tqdm import tqdm
import pickle
import numpy as np
from gensim.models.fasttext import FastText
import os
import nmslib
import time
from gensim import corpora
from gensim.summarization import bm25
# Search function
def besceaSearch(query_text, return_results_count, fasttext_weight):
    """Hybrid document search combining FastText ANN and whoosh keyword hits.

    Requires module-level globals to be initialized beforehand:
    ``ft_model`` (FastText word vectors), ``index_nms`` (nmslib index over
    document vectors), ``df_docs`` (DataFrame with 'id' and 'text' columns),
    and an ``index_search`` helper (not defined in this file — presumably the
    whoosh query function; verify before use).

    Args:
        query_text (str): free-text query.
        return_results_count (int): number of candidates per backend; also
            used as the penalty rank for documents missing from one backend.
        fasttext_weight (float): blend factor in [0, 1].
            NOTE(review): as written, ``fasttext_weight`` scales the WHOOSH
            rank while ``1 - fasttext_weight`` scales the FastText rank —
            this looks inverted relative to the name; confirm intent.

    Returns:
        pd.DataFrame: candidates sorted by blended 'total_rank' (lower = better).
    """
    # FastText search: average the query's word vectors into one query vector
    output_list = []
    input = query_text.lower().split()
    query = [ft_model[vec] for vec in input]
    query = np.mean(query,axis=0)
    t0 = time.time()
    ids, distances = index_nms.knnQuery(query, k=return_results_count)
    t1 = time.time()
    print(f'Searched {df_docs.shape[0]} records in {round(t1-t0,4) } seconds \n')
    for i,j in zip(ids,distances):
        output_id = df_docs.id[i]
        output_text = df_docs.text.values[i]
        output_score = round(j,4)
        output_list.append([output_id, output_text, output_score])
    output_fasttext = pd.DataFrame(output_list, columns = ['id', 'text', 'score'])
    output_fasttext = output_fasttext[['id', 'score']]
    # rank 0 = best FastText match (knnQuery returns results in order)
    output_fasttext['fasttext_rank'] = np.arange(len(output_fasttext))
    #return(output_fasttext)
    t2 = time.time()
    print(f'Completed FastText in {round(t2-t0,4) } seconds \n')
    # whoosh search
    output_whoosh = index_search("models", ['content'], query_text, return_results_count) #u"long story short"
    output_whoosh = output_whoosh.rename(columns = {"path": "id"})
    output_whoosh['whoosh_rank'] = np.arange(len(output_whoosh))
    t3 = time.time()
    print(f'Completed whoosh search in {round(t3-t2,4) } seconds \n')
    # Join FastText and whoosh data frames; documents missing from one backend
    # get the worst possible rank (return_results_count) for that backend.
    fasttext_multiplier = (1 - fasttext_weight) * 2
    whoosh_multiplier = fasttext_weight * 2
    output_df = pd.merge(output_fasttext, output_whoosh, how = "outer", on='id')
    output_df['whoosh_rank'] = output_df['whoosh_rank'].fillna(return_results_count)
    output_df['fasttext_rank'] = output_df['fasttext_rank'].fillna(return_results_count)
    output_df['total_rank'] = (output_df['fasttext_rank'] * fasttext_multiplier) + (output_df['whoosh_rank'] * whoosh_multiplier)
    output_df = output_df.sort_values(by=['total_rank'])
    output_df = pd.merge(output_df, df_docs[['id', 'text']], how = 'left', on = 'id')
    t4 = time.time()
    print(f'Joined data in {round(t4-t3,4) } seconds \n')
    # # bm25 search
    # texts = [doc.split() for doc in df_docs.text]  # no preprocessing
    # dictionary = corpora.Dictionary(texts)
    # corpus = [dictionary.doc2bow(text) for text in texts]
    # bm25_obj = bm25.BM25(corpus)
    # query_doc = dictionary.doc2bow(query_text.split())
    # scores = bm25_obj.get_scores(query_doc)
    # df_docs['bmf_scores'] = scores
    # output_bm25 = df_docs.sort_values(by=['bmf_scores'], ascending = False)
    # output_bm25['bm25_rank'] = np.arange(len(output_bm25))
    # output_bm25 = output_bm25[output_bm25['bm25_rank'] <= return_results_count]
    # output_bm25 = output_bm25[['bmf_scores', 'bm25_rank', 'id']]
    #
    # t3 = time.time()
    # print(f'Completed bm25 in {round(t3-t2,4) } seconds \n')
    #
    # # Join FastText and bm25 data frames
    # fasttext_multiplier = (1 - fasttext_weight) * 2
    # bm25_multiplier = fasttext_weight * 2
    # output_df = pd.merge(output_fasttext, output_bm25, how = "outer", on='id')
    # output_df['bm25_rank'] = output_df['bm25_rank'].fillna(return_results_count)
    # output_df['fasttext_rank'] = output_df['fasttext_rank'].fillna(return_results_count)
    # output_df['total_rank'] = (output_df['fasttext_rank'] * fasttext_multiplier) + (output_df['bm25_rank'] * bm25_multiplier)
    # output_df = output_df.sort_values(by=['total_rank'])
    # output_df = pd.merge(output_df, df_docs[['id', 'text']], how = 'left', on = 'id')
    #
    # t4 = time.time()
    # print(f'Joined data in {round(t4-t3,4) } seconds \n')
    return(output_df)
| [
"pandas.DataFrame",
"numpy.mean",
"pandas.merge",
"time.time"
] | [((615, 637), 'numpy.mean', 'np.mean', (['query'], {'axis': '(0)'}), '(query, axis=0)\n', (622, 637), True, 'import numpy as np\n'), ((644, 655), 'time.time', 'time.time', ([], {}), '()\n', (653, 655), False, 'import time\n'), ((732, 743), 'time.time', 'time.time', ([], {}), '()\n', (741, 743), False, 'import time\n'), ((1042, 1100), 'pandas.DataFrame', 'pd.DataFrame', (['output_list'], {'columns': "['id', 'text', 'score']"}), "(output_list, columns=['id', 'text', 'score'])\n", (1054, 1100), True, 'import pandas as pd\n'), ((1261, 1272), 'time.time', 'time.time', ([], {}), '()\n', (1270, 1272), False, 'import time\n'), ((1604, 1615), 'time.time', 'time.time', ([], {}), '()\n', (1613, 1615), False, 'import time\n'), ((1833, 1895), 'pandas.merge', 'pd.merge', (['output_fasttext', 'output_whoosh'], {'how': '"""outer"""', 'on': '"""id"""'}), "(output_fasttext, output_whoosh, how='outer', on='id')\n", (1841, 1895), True, 'import pandas as pd\n'), ((2265, 2330), 'pandas.merge', 'pd.merge', (['output_df', "df_docs[['id', 'text']]"], {'how': '"""left"""', 'on': '"""id"""'}), "(output_df, df_docs[['id', 'text']], how='left', on='id')\n", (2273, 2330), True, 'import pandas as pd\n'), ((2343, 2354), 'time.time', 'time.time', ([], {}), '()\n', (2352, 2354), False, 'import time\n')] |
"""
Specify times for synchronic image download.
Query available images and download best matches.
"""
import os
import numpy as np
import pandas as pd
from astropy.time import Time
import astropy.units as u
from chmap.settings.app import App
import chmap.database.db_classes as DBClass
from chmap.database.db_funs import init_db_conn_old
from chmap.data.download.image_download import synchronic_euv_download
# Specify a vector of synchronic times
period_start = Time('2021-01-02T00:00:00.000', scale='utc')
period_end = Time('2021-01-03T00:00:00.000', scale='utc')
# define image search interval cadence and width
interval_cadence = 2*u.hour
del_interval = 30*u.minute
# define target times over download period using interval_cadence (image times in astropy Time() format)
target_times = Time(np.arange(period_start, period_end, interval_cadence))
# generate DataFrame that defines synchronic target times as well as min/max limits
synch_times = pd.DataFrame({'target_time': target_times, 'min_time': target_times - del_interval,
'max_time': target_times + del_interval})
# specify path and filename for download_results file
download_results_filename = "download_results_" + period_start.__str__()
pickle_file = os.path.join(App.APP_HOME, "test_data", download_results_filename)
# data-file dirs
raw_data_dir = App.RAW_DATA_HOME
hdf_data_dir = App.PROCESSED_DATA_HOME
# database location
database_dir = App.DATABASE_HOME
# give the sqlite file a unique name
sqlite_filename = App.DATABASE_FNAME
# designate which database to connect to
use_db = "mysql-Q" # 'sqlite' Use local sqlite file-based db
# 'mysql-Q' Use the remote MySQL database on Q
user = "turtle" # only needed for remote databases.
password = "" # See example109 for setting-up an encrypted password. In this case leave password="", and
# init_db_conn_old() will automatically find and use your saved password. Otherwise, enter your MySQL password here.
# Establish connection to database
if use_db == 'sqlite':
# setup database connection to local sqlite file
sqlite_path = os.path.join(database_dir, sqlite_filename)
db_session = init_db_conn_old(db_name=use_db, chd_base=DBClass.Base, sqlite_path=sqlite_path)
elif use_db in ['mysql-Q', 'mysql-Q_test']:
# setup database connection to MySQL database on Q
db_session = init_db_conn_old(db_name=use_db, chd_base=DBClass.Base, user=user, password=password)
# query for images, download, and log to database
download_result = synchronic_euv_download(synch_times, App.RAW_DATA_HOME, db_session, download=True, overwrite=False,
verbose=True)
download_result.to_pickle(pickle_file)
# print a summary of results
print("Summary of download resutls:")
print(download_result.result_desc.value_counts())
db_session.close()
| [
"chmap.database.db_funs.init_db_conn_old",
"os.path.join",
"astropy.time.Time",
"pandas.DataFrame",
"chmap.data.download.image_download.synchronic_euv_download",
"numpy.arange"
] | [((467, 511), 'astropy.time.Time', 'Time', (['"""2021-01-02T00:00:00.000"""'], {'scale': '"""utc"""'}), "('2021-01-02T00:00:00.000', scale='utc')\n", (471, 511), False, 'from astropy.time import Time\n'), ((525, 569), 'astropy.time.Time', 'Time', (['"""2021-01-03T00:00:00.000"""'], {'scale': '"""utc"""'}), "('2021-01-03T00:00:00.000', scale='utc')\n", (529, 569), False, 'from astropy.time import Time\n'), ((952, 1081), 'pandas.DataFrame', 'pd.DataFrame', (["{'target_time': target_times, 'min_time': target_times - del_interval,\n 'max_time': target_times + del_interval}"], {}), "({'target_time': target_times, 'min_time': target_times -\n del_interval, 'max_time': target_times + del_interval})\n", (964, 1081), True, 'import pandas as pd\n'), ((1248, 1314), 'os.path.join', 'os.path.join', (['App.APP_HOME', '"""test_data"""', 'download_results_filename'], {}), "(App.APP_HOME, 'test_data', download_results_filename)\n", (1260, 1314), False, 'import os\n'), ((2550, 2667), 'chmap.data.download.image_download.synchronic_euv_download', 'synchronic_euv_download', (['synch_times', 'App.RAW_DATA_HOME', 'db_session'], {'download': '(True)', 'overwrite': '(False)', 'verbose': '(True)'}), '(synch_times, App.RAW_DATA_HOME, db_session,\n download=True, overwrite=False, verbose=True)\n', (2573, 2667), False, 'from chmap.data.download.image_download import synchronic_euv_download\n'), ((799, 852), 'numpy.arange', 'np.arange', (['period_start', 'period_end', 'interval_cadence'], {}), '(period_start, period_end, interval_cadence)\n', (808, 852), True, 'import numpy as np\n'), ((2136, 2179), 'os.path.join', 'os.path.join', (['database_dir', 'sqlite_filename'], {}), '(database_dir, sqlite_filename)\n', (2148, 2179), False, 'import os\n'), ((2198, 2283), 'chmap.database.db_funs.init_db_conn_old', 'init_db_conn_old', ([], {'db_name': 'use_db', 'chd_base': 'DBClass.Base', 'sqlite_path': 'sqlite_path'}), '(db_name=use_db, chd_base=DBClass.Base, sqlite_path=sqlite_path\n )\n', (2214, 
2283), False, 'from chmap.database.db_funs import init_db_conn_old\n'), ((2395, 2485), 'chmap.database.db_funs.init_db_conn_old', 'init_db_conn_old', ([], {'db_name': 'use_db', 'chd_base': 'DBClass.Base', 'user': 'user', 'password': 'password'}), '(db_name=use_db, chd_base=DBClass.Base, user=user, password\n =password)\n', (2411, 2485), False, 'from chmap.database.db_funs import init_db_conn_old\n')] |
#!/usr/bin/env python
"""
Calculates fractional amplitude of low-frequency fluctuations (fALFF)
Usage:
falff_nifti.py <func.nii.gz> <output.nii.gz> [options]
Arguments:
<func.nii.gz> The functional 4D nifti files
<mask.nii.gz> A brainmask for the functional file
<output.nii.gz> Output filename
Options:
--min-low-freq 0.01 Min low frequency range value Hz [default: 0.01]
--max-low-freq 0.08 Max low frequency range value Hz [default: 0.08]
--min-total-freq 0.00 Min total frequency range value Hz [default: 0.00]
--max-total-freq 0.25 Max total frequency range value Hz [default: 0.25]
--mask-file <mask.nii.gz> Input brain mask
--debug Debug logging
-h,--help Print help
"""
import numpy as np
import nibabel as nib
from scipy.fftpack import fft
import matplotlib.pyplot as plt
from docopt import docopt
arguments = docopt(__doc__)
funcfile = arguments['<func.nii.gz>']
outputname = arguments['<output.nii.gz>']
min_low_freq = arguments['--min-low-freq']
max_low_freq = arguments['--max-low-freq']
min_total_freq = arguments['--min-total-freq']
max_total_freq = arguments['--max-total-freq']
maskfile = arguments['--mask-file']
DEBUG = arguments['--debug']
if DEBUG: print(arguments)
def calculate_falff(timeseries, min_low_freq, max_low_freq, min_total_freq, max_total_freq):
''' this will calculated falff from a timeseries'''
#FALFF CALCULATION
n = len(timeseries)
time = (np.arange(n))*2
#takes fast fourier transform of timeseries
fft_timeseries = fft(timeseries)
#calculates frequency scale
freq_scale = np.fft.fftfreq(n, 1/1)
#calculates power of fft
mag = (abs(fft_timeseries))**0.5
#finds low frequency range (0.01-0.08) and total frequency range (0.0-0.25)
low_ind = np.where((float(min_low_freq) <= freq_scale) & (freq_scale <= float(max_low_freq)))
total_ind = np.where((float(min_total_freq) <= freq_scale) & (freq_scale <= float(max_total_freq)))
#indexes power to low frequency index, total frequency range
low_power = mag[low_ind]
total_power = mag[total_ind]
#calculates sum of lower power and total power
low_pow_sum = np.sum(low_power)
total_pow_sum = np.sum(total_power)
#calculates falff as the sum of power in low frequnecy range divided by sum of power in the total frequency range
falff = np.divide(low_pow_sum, total_pow_sum)
return falff
#load in func and mask data
func_img = nib.load(funcfile)
func_data = func_img.get_data()
#if given input of mask, load in mask file
#OR if not given input of mask, create mask using std
try:
#1. given input of mask file
mask = (nib.load(maskfile)).get_data()
except:
#2. manually create mask
mask = np.where(func_data > (np.std(func_data, axis=(0, 1, 2))), func_data, 0)
#define affine array
affine = func_img.affine
#define x,y,z,t coordinates
x,y,z,t = func_data.shape
#find indices where mask does not = 0
indx,indy,indz,indt = np.where(mask != 0)
#create empy array to save values
falff_vol = np.zeros((x,y,z))
#loop through x,y,z indices, send to calculate_falff func
for x,y,z, t in zip(indx,indy,indz,indt):
falff_vol[x,y,z] = calculate_falff(func_data[x,y,z,:], min_low_freq, max_low_freq, min_total_freq, max_total_freq)
#save falff values to nifti file
output_3D = nib.Nifti1Image(falff_vol, affine)
output_3D.to_filename(outputname)
| [
"nibabel.load",
"numpy.where",
"numpy.arange",
"numpy.fft.fftfreq",
"numpy.std",
"numpy.sum",
"numpy.zeros",
"scipy.fftpack.fft",
"nibabel.Nifti1Image",
"docopt.docopt",
"numpy.divide"
] | [((868, 883), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (874, 883), False, 'from docopt import docopt\n'), ((2456, 2474), 'nibabel.load', 'nib.load', (['funcfile'], {}), '(funcfile)\n', (2464, 2474), True, 'import nibabel as nib\n'), ((2977, 2996), 'numpy.where', 'np.where', (['(mask != 0)'], {}), '(mask != 0)\n', (2985, 2996), True, 'import numpy as np\n'), ((3044, 3063), 'numpy.zeros', 'np.zeros', (['(x, y, z)'], {}), '((x, y, z))\n', (3052, 3063), True, 'import numpy as np\n'), ((3333, 3367), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['falff_vol', 'affine'], {}), '(falff_vol, affine)\n', (3348, 3367), True, 'import nibabel as nib\n'), ((1535, 1550), 'scipy.fftpack.fft', 'fft', (['timeseries'], {}), '(timeseries)\n', (1538, 1550), False, 'from scipy.fftpack import fft\n'), ((1600, 1624), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['n', '(1 / 1)'], {}), '(n, 1 / 1)\n', (1614, 1624), True, 'import numpy as np\n'), ((2171, 2188), 'numpy.sum', 'np.sum', (['low_power'], {}), '(low_power)\n', (2177, 2188), True, 'import numpy as np\n'), ((2209, 2228), 'numpy.sum', 'np.sum', (['total_power'], {}), '(total_power)\n', (2215, 2228), True, 'import numpy as np\n'), ((2360, 2397), 'numpy.divide', 'np.divide', (['low_pow_sum', 'total_pow_sum'], {}), '(low_pow_sum, total_pow_sum)\n', (2369, 2397), True, 'import numpy as np\n'), ((1449, 1461), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1458, 1461), True, 'import numpy as np\n'), ((2657, 2675), 'nibabel.load', 'nib.load', (['maskfile'], {}), '(maskfile)\n', (2665, 2675), True, 'import nibabel as nib\n'), ((2759, 2792), 'numpy.std', 'np.std', (['func_data'], {'axis': '(0, 1, 2)'}), '(func_data, axis=(0, 1, 2))\n', (2765, 2792), True, 'import numpy as np\n')] |
import math
import numpy as np
from random import randint, seed
from copy import deepcopy
from typing import List
from EvaluationUtils.vision_metrics import CVMetrics
from Animator.consolidation_api import CharacterBoundingBox
from Animator.utils import serialize_pickle, deserialize_pickle
seed(1234567)
class Triplet:
def __init__(self, shot_id, pos, anc, neg):
self.shot_id = shot_id
self.positive = pos
self.anchor = anc
self.negative = neg
def __repr__(self):
return f'Triplet({self.shot_id}, Pos: {self.positive}, Anchor: {self.anchor}, Neg: {self.negative})'
class FrameTrack:
"""
mapping the frame-level information
"""
def __init__(self, video_level_index: int, frame_detections: List[CharacterBoundingBox]):
self.video_level_index = video_level_index
self.frame_detections = frame_detections
class ShotTrack:
"""
mapping the shot-level information
"""
def __init__(self, shot_name: str, frame_tracks: List[FrameTrack]):
self.shot_name = shot_name
self.frame_tracks = frame_tracks
class VideoTrack:
"""
mapping the video-level information
"""
def __init__(self, video_path: str, fps: float, frame_width: int, frame_height: int, shot_tracks: List[ShotTrack]):
self.video_path = video_path
self.fps = fps
self.frame_width = frame_width
self.frame_height = frame_height
self.shot_tracks = shot_tracks
def serialize(self, output_path: str) -> None:
serialize_pickle(self, output_path)
def filter_for_sampling(self, max_gap_per_sec: float, fade_frames: int):
"""filter untracked detections, possibly swapping tracks, and shots of a single track"""
shot_to_tracks = dict()
shot_to_multi_track_frames = dict()
for shot_track in self.shot_tracks:
shot_track_ids = set()
track_to_max_jump = dict()
track_to_detections = dict()
multi_track_frames = dict()
# fade-in/fade-out stats
start_frame = shot_track.frame_tracks[0].video_level_index
end_frame = shot_track.frame_tracks[-1].video_level_index
# ignore too short shots
if end_frame - start_frame < self.fps:
continue
# remove track-less detections
for original_frame_track in shot_track.frame_tracks:
# avoid fade-in/out frames
if start_frame + fade_frames > original_frame_track.video_level_index > end_frame - fade_frames:
continue
filtered_frame_track = []
for box in original_frame_track.frame_detections:
if not hasattr(box, 'TrackId') or box.TrackId < 0:
continue
rect_scale = math.sqrt(box.Rect.area())
current_gap = track_to_max_jump.get(box.TrackId,
{'frame_id': original_frame_track.video_level_index,
'gap': 0, 'rectangle': box.Rect, 'scale': rect_scale,
'gap_ratio': 0})
if current_gap['frame_id'] != original_frame_track.video_level_index:
gap = box.Rect.center_distance(current_gap['rectangle'])
frame_skip = original_frame_track.video_level_index - current_gap['frame_id']
gap_ratio = gap/rect_scale/frame_skip
if gap_ratio > current_gap['gap_ratio']:
current_gap['gap'] = gap
current_gap['gap_ratio'] = gap_ratio
current_gap['rectangle'] = deepcopy(box.Rect)
current_gap['frame_id'] = original_frame_track.video_level_index
current_gap['scale'] = rect_scale
track_to_max_jump[box.TrackId] = current_gap
filtered_frame_track.append(box)
shot_track_ids.add(box.TrackId)
original_frame_track.frame_detections = filtered_frame_track
# remove noisy tracks (with possible id swaps)
tracks_to_discard = set()
for track_id, gap_stats in track_to_max_jump.items():
if gap_stats['gap_ratio']*self.fps > max_gap_per_sec:
tracks_to_discard.add(track_id)
for original_frame_track in shot_track.frame_tracks:
filtered_frame_track = []
for box in sorted(original_frame_track.frame_detections, key=lambda b: b.Rect.area(), reverse=True):
# keep up to 50% IoU boxes
if box.TrackId in tracks_to_discard or \
len([b for b in filtered_frame_track
if CVMetrics.bb_intersection_over_union(b.Rect, box.Rect) > .4]) > 0:
continue
filtered_frame_track.append(box)
track_to_detections[box.TrackId] = track_to_detections.get(box.TrackId, []) + [box]
if len(filtered_frame_track) >= 2:
multi_track_frames[original_frame_track.video_level_index] = \
[{'TrackId': b.TrackId, 'ThumbnailId': b.ThumbnailId,
'KeyframeThumbnailId': b.KeyframeThumbnailId}
for b in filtered_frame_track]
original_frame_track.frame_detections = filtered_frame_track
shot_to_multi_track_frames[shot_track.shot_name] = multi_track_frames
# remove tracks with less than 2 tracks (for negative)
if len(shot_track_ids - {-1}) < 2:
shot_track.frame_tracks = []
track_to_detections = None
# index from shot to TrackId to detections for sampling
shot_to_tracks[shot_track.shot_name] = track_to_detections
return shot_to_tracks, shot_to_multi_track_frames
def sample_triplets(self, n_triplets: int, max_gap_per_sec: float, fade_frames: int) -> List[Triplet]:
"""
Sample n shot-level triplets
:param fade_frames: number of frames to not sample from on intro and fade out of each shot
:param max_gap_per_sec: The maximal normalized center distance to be considered as a non-swapped track.
The normalization is per the scale of the bounding box and the FPS.
:param n_triplets: The total triplets to sample
:return: shot to triplets
"""
shots_to_tracks_to_detections, multi_track_shots = self.filter_for_sampling(max_gap_per_sec, fade_frames)
print('Start sampling triplets...')
triplets = []
indexed_shots_to_tracks_to_detections = [s for s in shots_to_tracks_to_detections
if shots_to_tracks_to_detections[s] is not None and
len(shots_to_tracks_to_detections) > 1 and
len(multi_track_shots[s]) > 0]
m = len(indexed_shots_to_tracks_to_detections)
for i in range(n_triplets):
# sample a shot
shot_name = indexed_shots_to_tracks_to_detections[i % m]
shot_tracks = shots_to_tracks_to_detections[shot_name]
# sample anchor and a negative examples from the same frame
anc_neg_frame = multi_track_shots[shot_name]
multi_track_frame_index = randint(0, len(anc_neg_frame)-1)
candidates_detections = list(anc_neg_frame.values())[multi_track_frame_index]
anchor, negative = np.random.choice(candidates_detections, 2, replace=False)
# pick the positive example
anc_track = shot_tracks[anchor['TrackId']]
positive = anc_track[randint(0, len(anc_track)-1)]
# assign
triplets.append(Triplet(shot_name, positive.ThumbnailId, anchor['ThumbnailId'], negative['ThumbnailId']))
return triplets
@staticmethod
def deserialize(pkl_path: str):
return deserialize_pickle(pkl_path)
def __repr__(self):
return f'VideoTrack({self.video_path})'
| [
"EvaluationUtils.vision_metrics.CVMetrics.bb_intersection_over_union",
"numpy.random.choice",
"random.seed",
"copy.deepcopy",
"Animator.utils.serialize_pickle",
"Animator.utils.deserialize_pickle"
] | [((293, 306), 'random.seed', 'seed', (['(1234567)'], {}), '(1234567)\n', (297, 306), False, 'from random import randint, seed\n'), ((1546, 1581), 'Animator.utils.serialize_pickle', 'serialize_pickle', (['self', 'output_path'], {}), '(self, output_path)\n', (1562, 1581), False, 'from Animator.utils import serialize_pickle, deserialize_pickle\n'), ((8244, 8272), 'Animator.utils.deserialize_pickle', 'deserialize_pickle', (['pkl_path'], {}), '(pkl_path)\n', (8262, 8272), False, 'from Animator.utils import serialize_pickle, deserialize_pickle\n'), ((7792, 7849), 'numpy.random.choice', 'np.random.choice', (['candidates_detections', '(2)'], {'replace': '(False)'}), '(candidates_detections, 2, replace=False)\n', (7808, 7849), True, 'import numpy as np\n'), ((3827, 3845), 'copy.deepcopy', 'deepcopy', (['box.Rect'], {}), '(box.Rect)\n', (3835, 3845), False, 'from copy import deepcopy\n'), ((4968, 5022), 'EvaluationUtils.vision_metrics.CVMetrics.bb_intersection_over_union', 'CVMetrics.bb_intersection_over_union', (['b.Rect', 'box.Rect'], {}), '(b.Rect, box.Rect)\n', (5004, 5022), False, 'from EvaluationUtils.vision_metrics import CVMetrics\n')] |
from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization
import numpy as np
from matplotlib import pyplot
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import preprocess_input
import logging.config
import tensorflow as tf
from keras.layers import Input
from keras_vggface.vggface import VGGFace
import util.logger_init
import keras
# Wrapper Class around Keras Model
from recognition.feature_extraction_model import FeatureExtractionModel
class VggFaceModel(FeatureExtractionModel):
def __init__(self):
self.log = logging.getLogger(__name__)
self.log.info("init VggFaceModel")
self.sequential_model = self.define_model_vggface16_backend()
self.model = self.sequential_model
def define_model_resnet_backend(self):
return VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')
def define_model_senet_backend(self):
return VGGFace(model='senet50', include_top=False, input_shape=(224, 224, 3), pooling='avg')
def define_model_vggface16_backend(self):
# Define VGG_FACE_MODEL architecture
# https://medium.com/analytics-vidhya/face-recognition-with-vgg-face-in-keras-96e6bc1951d5
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
return model
def remove_last_layer(self):
# Remove Last Softmax layer and get model upto last flatten layer with outputs 2622 units
self.model = Model(inputs=self.sequential_model.layers[0].input,
outputs=self.sequential_model.layers[-2].output)
def load_weights(self):
self.sequential_model.load_weights('../model/vgg_face_weights.h5')
def addAugmentationLayer(self):
data_augmentation = keras.Sequential([
keras.layers.experimental.preprocessing.RandomRotation(factor=0.4, fill_mode="wrap"),
keras.layers.experimental.preprocessing.RandomTranslation(height_factor=0.2, width_factor=0.2, fill_mode="wrap"),
keras.layers.experimental.preprocessing.RandomFlip("horizontal"),
keras.layers.experimental.preprocessing.RandomContrast(factor=0.2),
keras.layers.experimental.preprocessing.RandomHeight(factor=0.2),
keras.layers.experimental.preprocessing.RandomWidth(factor=0.2)
])
def vgg_face(self, img):
return self.model(img)
def debug_model(self, img_name):
# crop_img = load_img(os.getcwd() + '/' + img_name, target_size=(224, 224))
crop_img = load_img(img_name, target_size=(224, 224))
crop_img = img_to_array(crop_img)
crop_img = np.expand_dims(crop_img, axis=0)
crop_img = preprocess_input(crop_img)
# -------------- Debugging CNN ------------------------------
# https://machinelearningmastery.com/how-to-visualize-filters-and-feature-maps-in-convolutional-neural-networks/
img_debug = load_img(img_name, target_size=(224, 224))
# convert the image to an array
img_debug = img_to_array(img_debug)
# expand dimensions so that it represents a single 'sample'
img_debug = np.expand_dims(crop_img, axis=0)
# prepare the image (e.g. scale pixel values for the vgg)
img_debug = preprocess_input(crop_img)
# get feature map for first hidden layer
# redefine model to output right after the first hidden layer
model_with_feature_map = Model(inputs=self.sequential_model.inputs,
outputs=self.sequential_model.layers[1].output)
# get feature map for first hidden layer
feature_maps = model_with_feature_map.predict(crop_img)
# plot all 64 maps in an 8x8 squares
square = 8
ix = 1
for _ in range(square):
for _ in range(square):
# specify subplot and turn of axis
ax = pyplot.subplot(square, square, ix)
ax.set_xticks([])
ax.set_yticks([])
# plot filter channel in grayscale
pyplot.imshow(feature_maps[0, :, :, ix - 1], cmap='gray')
ix += 1
# show the figure
pyplot.savefig('../output/debug.jpg')
pyplot.show()
# -------------- Debugging CNN ------------------------------
def preprocessing_input(self, image):
"""Returns the data format of the VggFaceModel """
self.log.info('Starting preprocessing VggFaceModel net model...')
return tf.keras.applications.vgg16.preprocess_input(image)
def get_feature_vector_name(self):
return 'data-vgg-face.json'
| [
"tensorflow.keras.layers.Convolution2D",
"keras.layers.experimental.preprocessing.RandomFlip",
"matplotlib.pyplot.imshow",
"tensorflow.keras.models.Model",
"tensorflow.keras.preprocessing.image.img_to_array",
"keras.layers.experimental.preprocessing.RandomRotation",
"tensorflow.keras.models.Sequential",... | [((1029, 1119), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'model': '"""resnet50"""', 'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(model='resnet50', include_top=False, input_shape=(224, 224, 3),\n pooling='avg')\n", (1036, 1119), False, 'from keras_vggface.vggface import VGGFace\n'), ((1175, 1264), 'keras_vggface.vggface.VGGFace', 'VGGFace', ([], {'model': '"""senet50"""', 'include_top': '(False)', 'input_shape': '(224, 224, 3)', 'pooling': '"""avg"""'}), "(model='senet50', include_top=False, input_shape=(224, 224, 3),\n pooling='avg')\n", (1182, 1264), False, 'from keras_vggface.vggface import VGGFace\n'), ((1469, 1481), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1479, 1481), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((3653, 3758), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.sequential_model.layers[0].input', 'outputs': 'self.sequential_model.layers[-2].output'}), '(inputs=self.sequential_model.layers[0].input, outputs=self.\n sequential_model.layers[-2].output)\n', (3658, 3758), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((4721, 4763), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_name'], {'target_size': '(224, 224)'}), '(img_name, target_size=(224, 224))\n', (4729, 4763), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((4783, 4805), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['crop_img'], {}), '(crop_img)\n', (4795, 4805), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((4825, 4857), 'numpy.expand_dims', 'np.expand_dims', (['crop_img'], {'axis': '(0)'}), '(crop_img, axis=0)\n', (4839, 4857), True, 'import numpy as np\n'), ((4877, 4903), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['crop_img'], {}), '(crop_img)\n', 
(4893, 4903), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input\n'), ((5118, 5160), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (['img_name'], {'target_size': '(224, 224)'}), '(img_name, target_size=(224, 224))\n', (5126, 5160), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((5221, 5244), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img_debug'], {}), '(img_debug)\n', (5233, 5244), False, 'from tensorflow.keras.preprocessing.image import load_img, img_to_array\n'), ((5333, 5365), 'numpy.expand_dims', 'np.expand_dims', (['crop_img'], {'axis': '(0)'}), '(crop_img, axis=0)\n', (5347, 5365), True, 'import numpy as np\n'), ((5452, 5478), 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['crop_img'], {}), '(crop_img)\n', (5468, 5478), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input\n'), ((5631, 5726), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.sequential_model.inputs', 'outputs': 'self.sequential_model.layers[1].output'}), '(inputs=self.sequential_model.inputs, outputs=self.sequential_model.\n layers[1].output)\n', (5636, 5726), False, 'from tensorflow.keras.models import Sequential, Model\n'), ((6379, 6416), 'matplotlib.pyplot.savefig', 'pyplot.savefig', (['"""../output/debug.jpg"""'], {}), "('../output/debug.jpg')\n", (6393, 6416), False, 'from matplotlib import pyplot\n'), ((6425, 6438), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (6436, 6438), False, 'from matplotlib import pyplot\n'), ((6702, 6753), 'tensorflow.keras.applications.vgg16.preprocess_input', 'tf.keras.applications.vgg16.preprocess_input', (['image'], {}), '(image)\n', (6746, 6753), True, 'import tensorflow as tf\n'), ((1500, 1548), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': '(224, 224, 3)'}), '((1, 1), input_shape=(224, 224, 3))\n', (1513, 
1548), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1568, 1612), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1581, 1612), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1632, 1653), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1645, 1653), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1673, 1717), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (1686, 1717), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1737, 1773), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (1749, 1773), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1793, 1814), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1806, 1814), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1834, 1879), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (1847, 1879), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1899, 1920), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (1912, 1920), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((1940, 1985), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""'}), "(128, (3, 3), activation='relu')\n", (1953, 1985), False, 'from tensorflow.keras.layers 
import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2005, 2041), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (2017, 2041), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2061, 2082), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2074, 2082), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2102, 2147), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (2115, 2147), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2167, 2188), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2180, 2188), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2208, 2253), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (2221, 2253), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2273, 2294), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2286, 2294), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2314, 2359), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""'}), "(256, (3, 3), activation='relu')\n", (2327, 2359), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2379, 2415), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (2391, 2415), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2435, 
2456), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2448, 2456), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2476, 2521), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (2489, 2521), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2541, 2562), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2554, 2562), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2582, 2627), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (2595, 2627), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2647, 2668), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2660, 2668), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2688, 2733), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (2701, 2733), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2753, 2789), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (2765, 2789), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2809, 2830), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2822, 2830), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2850, 2895), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': 
'"""relu"""'}), "(512, (3, 3), activation='relu')\n", (2863, 2895), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2915, 2936), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (2928, 2936), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((2956, 3001), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (2969, 3001), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3021, 3042), 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (3034, 3042), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3062, 3107), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""'}), "(512, (3, 3), activation='relu')\n", (3075, 3107), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3127, 3163), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (3139, 3163), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3183, 3229), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(4096)', '(7, 7)'], {'activation': '"""relu"""'}), "(4096, (7, 7), activation='relu')\n", (3196, 3229), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3249, 3261), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3256, 3261), False, 'from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization\n'), ((3281, 3327), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(4096)', '(1, 1)'], {'activation': '"""relu"""'}), "(4096, (1, 1), 
activation='relu')\n", (3294, 3327), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3347, 3359), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3354, 3359), False, 'from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization\n'), ((3379, 3406), 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['(2622)', '(1, 1)'], {}), '(2622, (1, 1))\n', (3392, 3406), False, 'from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D\n'), ((3426, 3435), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3433, 3435), False, 'from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization\n'), ((3455, 3476), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3465, 3476), False, 'from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization\n'), ((3982, 4070), 'keras.layers.experimental.preprocessing.RandomRotation', 'keras.layers.experimental.preprocessing.RandomRotation', ([], {'factor': '(0.4)', 'fill_mode': '"""wrap"""'}), "(factor=0.4,\n fill_mode='wrap')\n", (4036, 4070), False, 'import keras\n'), ((4080, 4196), 'keras.layers.experimental.preprocessing.RandomTranslation', 'keras.layers.experimental.preprocessing.RandomTranslation', ([], {'height_factor': '(0.2)', 'width_factor': '(0.2)', 'fill_mode': '"""wrap"""'}), "(height_factor=0.2,\n width_factor=0.2, fill_mode='wrap')\n", (4137, 4196), False, 'import keras\n'), ((4206, 4270), 'keras.layers.experimental.preprocessing.RandomFlip', 'keras.layers.experimental.preprocessing.RandomFlip', (['"""horizontal"""'], {}), "('horizontal')\n", (4256, 4270), False, 'import keras\n'), ((4284, 4350), 'keras.layers.experimental.preprocessing.RandomContrast', 'keras.layers.experimental.preprocessing.RandomContrast', ([], {'factor': '(0.2)'}), '(factor=0.2)\n', (4338, 
4350), False, 'import keras\n'), ((4364, 4428), 'keras.layers.experimental.preprocessing.RandomHeight', 'keras.layers.experimental.preprocessing.RandomHeight', ([], {'factor': '(0.2)'}), '(factor=0.2)\n', (4416, 4428), False, 'import keras\n'), ((4442, 4505), 'keras.layers.experimental.preprocessing.RandomWidth', 'keras.layers.experimental.preprocessing.RandomWidth', ([], {'factor': '(0.2)'}), '(factor=0.2)\n', (4493, 4505), False, 'import keras\n'), ((6093, 6127), 'matplotlib.pyplot.subplot', 'pyplot.subplot', (['square', 'square', 'ix'], {}), '(square, square, ix)\n', (6107, 6127), False, 'from matplotlib import pyplot\n'), ((6263, 6320), 'matplotlib.pyplot.imshow', 'pyplot.imshow', (['feature_maps[0, :, :, ix - 1]'], {'cmap': '"""gray"""'}), "(feature_maps[0, :, :, ix - 1], cmap='gray')\n", (6276, 6320), False, 'from matplotlib import pyplot\n')] |
from create_allele_counts import get_primer_intervals
def pair_counts(sam_fname, paired=False, qual_min=30, max_reads=-1,
                max_isize = 700, VERBOSE = 0,
                fwd_primer_regions = None, rev_primer_regions = None):
    '''Merge read pairs from a BAM/SAM file and accumulate allele (co-)counts.

    Parameters
    ----------
    sam_fname : str
        Path of the BAM/SAM file to pile up.
    paired : bool
        Unused; kept for backward compatibility with existing callers.
    qual_min : int
        Minimum phred quality for a base to be counted.
    max_reads : int
        Stop after this many read pairs (<=0 means no limit).
    max_isize : int
        Pairs with insert size above this are skipped.
    VERBOSE : int
        Verbosity level.
    fwd_primer_regions, rev_primer_regions : dict or None
        {ref_name: [(begin, end), ...]} intervals to mask as primer-derived.

    Returns
    -------
    (ac, acc) : ac is a list of (ref_name, counts) with counts a
        (4, ref_length) int array of per-position nucleotide counts;
        acc is a list of (ref_name, cocounts) with cocounts a dict mapping
        (pos1, pos2) to {two-nucleotide key: count}.
    '''
    import numpy as np
    import pysam
    from itertools import combinations
    # merge statistics: 'L'/'R' one read contained in the other,
    # 'N' normal merge, 'E' error during overlap reconciliation
    c = {'R':0, 'L':0, 'N':0, 'E':0}
    nuc_alpha = np.array(['A', 'C', 'G', 'T'], dtype='S1')
    # Open BAM or SAM file
    with pysam.Samfile(sam_fname) as samfile:
        ac = []
        acc = []
        refs = {}
        read_count = 0
        for nref in range(samfile.nreferences):
            if VERBOSE:
                print(("allocating for:", samfile.getrname(nref), "length:", samfile.lengths[nref]))
            refs[nref] = samfile.getrname(nref)
            ac.append((samfile.getrname(nref),
                       np.zeros((len(nuc_alpha), samfile.lengths[nref]), dtype=int)))
            acc.append((samfile.getrname(nref), {}))
        while True:
            # find read pairs and skip secondary or supplementary alignments
            try:
                read1 = next(samfile)
                while read1.is_secondary or read1.is_supplementary:
                    read1 = next(samfile)
                read2 = next(samfile)
                while read2.is_secondary or read2.is_supplementary:
                    read2 = next(samfile)
            except StopIteration:  # was a bare except; only end-of-file is expected here
                break
            if read1.is_unmapped or read2.is_unmapped or np.abs(read1.isize) > max_isize:
                continue
            if read1.is_reverse == read2.is_reverse:
                continue
            if read1.qname != read2.qname:
                continue
            read_count += 1
            if read_count % 1000 == 0:
                print(read_count)
            if max_reads > 0 and read_count > max_reads:
                break
            ref_name = refs[read1.rname]
            # determine which read maps to the 5p and which one the 3p end
            # pull out only positions that map, indels will be ignored in cocounts
            if read2.is_reverse:
                fwd_read, rev_read = read1, read2
            else:
                fwd_read, rev_read = read2, read1
            aln1 = np.array(fwd_read.get_aligned_pairs(matches_only=True))
            aln2 = np.array(rev_read.get_aligned_pairs(matches_only=True))
            # np.frombuffer replaces the deprecated np.fromstring; -33 converts
            # phred+33 ASCII to numeric quality
            seq1 = np.frombuffer(fwd_read.seq.encode('ascii'), 'S1')[aln1[:, 0]]
            qual1 = np.frombuffer(fwd_read.qual.encode('ascii'), np.int8)[aln1[:, 0]] - 33
            seq2 = np.frombuffer(rev_read.seq.encode('ascii'), 'S1')[aln2[:, 0]]
            qual2 = np.frombuffer(rev_read.qual.encode('ascii'), np.int8)[aln2[:, 0]] - 33
            isize = np.abs(read1.isize)
            L1 = aln1.shape[0]
            L2 = aln2.shape[0]
            ## merge reads: allocate vectors spanning the whole insert
            merged_qual = np.zeros(isize, dtype=int)
            merged_seq = np.zeros(isize, dtype='S1')
            merged_pos = np.zeros((isize, 2), dtype=int)
            # handle edge cases where one read is contained in the other,
            # i.e. the 5p read extends for longer than the 3p end of the 3p read.
            # This can result for example from quality trimming.
            leftoverhang = aln1[0, 1] - aln2[0, 1]
            rightoverhang = aln1[-1, 1] - aln2[-1, 1]
            if leftoverhang > 0:  # take only the better read2
                merged_pos = aln2
                merged_qual = qual2
                merged_seq = seq2  # BUGFIX: previously assigned qual2, so no base ever matched
                c['L'] += 1
            elif rightoverhang > 0:  # take only the better read1
                merged_pos = aln1
                merged_qual = qual1
                merged_seq = seq1  # BUGFIX: previously assigned qual1
                c['R'] += 1
            else:  # proper merging happens here
                # difference between end of aln1 and beginning of aln2 is overlap on reference
                overlap = max(0, aln1[-1, 1] - aln2[0, 1] + 1)
                c['N'] += 1
                # note that the exact coordinates might be off bc of indels
                # but what we are doing is conservative and only mapped positions
                # will be reported
                seg1 = L1 - overlap        # end of non-overlap segment
                seg3 = isize - L2 + overlap  # beginning of non-overlap segment
                if seg1 > 0:
                    merged_pos[:seg1] = aln1[:seg1]
                    merged_qual[:seg1] = qual1[:seg1]
                    merged_seq[:seg1] = seq1[:seg1]
                else:
                    seg1 = 0
                merged_pos[seg3:] = aln2[overlap:]
                merged_qual[seg3:] = qual2[overlap:]
                merged_seq[seg3:] = seq2[overlap:]
                if overlap:
                    try:
                        # in the overlap, prefer whichever mate agrees and has higher quality
                        seq_agree = (seq1[seg1:] == seq2[:overlap]) & (aln1[seg1:, 1] == aln2[:overlap, 1])
                        better = qual1[seg1:] < qual2[:overlap]
                        from1 = np.where(seq_agree & better)[0]
                        from2 = np.where(seq_agree & (~better))[0]
                        merged_pos[seg1 + from1] = aln1[seg1 + from1]
                        merged_qual[seg1 + from1] = qual1[seg1 + from1]
                        merged_seq[seg1 + from1] = seq1[seg1 + from1]
                        merged_pos[seg1 + from2] = aln2[from2]
                        merged_qual[seg1 + from2] = qual2[from2]
                        merged_seq[seg1 + from2] = seq2[from2]
                    except Exception:  # keep best-effort behavior, but don't swallow KeyboardInterrupt
                        c['E'] += 1
                        continue
            # mask regions in the merged read that likely derive from primer sequence
            not_primer = np.ones_like(merged_seq, 'bool')
            if rev_primer_regions:
                read_end = merged_pos[-1, 1]
                for b, e in rev_primer_regions[ref_name]:
                    p_length = e - b
                    if read_end - b > 0 and read_end - b < p_length:
                        not_primer[-(read_end - b):] = False
                        break
            if fwd_primer_regions:
                read_start = merged_pos[0, 1]
                for b, e in fwd_primer_regions[ref_name]:
                    p_length = e - b
                    if read_start - b > 0 and read_start - b < p_length:
                        not_primer[:e - read_start] = False
                        break
            counts = ac[read1.rname][1]
            cocounts = acc[read1.rname][1]
            good_ind = (merged_qual > qual_min) & not_primer
            for ni, nuc in enumerate(nuc_alpha):
                correct_state = merged_seq == nuc
                counts[ni, merged_pos[correct_state & good_ind, 1]] += 1
            # pairwise co-occurrence counts over all well-supported positions
            combo = list(zip(merged_pos[good_ind], merged_seq[good_ind]))
            for (p1, n1), (p2, n2) in combinations(combo, 2):
                posp = (p1[1], p2[1])
                p = n1 + n2
                if posp not in cocounts:
                    cocounts[posp] = {p: 1}
                elif p not in cocounts[posp]:
                    cocounts[posp][p] = 1
                else:
                    cocounts[posp][p] += 1
    return ac, acc
if __name__ == '__main__':
    import argparse, gzip
    import pickle
    parser = argparse.ArgumentParser(description='create pair counts',
             formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--bam_file',
                        help='bam file to pile up')
    parser.add_argument('--out_dir',
                        help='directory to save results')
    parser.add_argument('--max_reads', type=int, default=-1,
                        help='maximum number of reads to process')
    parser.add_argument('--primers', type=str, help='file with primers to mask in pile up')
    args = parser.parse_args()

    fwd_primer_intervals, rev_primer_intervals = get_primer_intervals(args.primers)
    print((fwd_primer_intervals, rev_primer_intervals))
    ac, acc = pair_counts(args.bam_file, qual_min=30, VERBOSE=3, max_isize=600,
                          paired=True, max_reads=args.max_reads,
                          fwd_primer_regions=fwd_primer_intervals,
                          rev_primer_regions=rev_primer_intervals)
    # '/' in reference names would otherwise create spurious sub-directories
    # when the names are later used to build file paths
    acc = [(refname.replace('/', '_'), counts) for refname, counts in acc]
    ac = [(refname.replace('/', '_'), counts) for refname, counts in ac]
    with gzip.open(args.out_dir + '/pair_counts.pkl.gz', 'w') as fh:
        pickle.dump((ac, acc), fh)
| [
"numpy.abs",
"numpy.ones_like",
"pickle.dump",
"argparse.ArgumentParser",
"gzip.open",
"create_allele_counts.get_primer_intervals",
"numpy.where",
"itertools.combinations",
"numpy.array",
"numpy.zeros",
"pysam.Samfile",
"numpy.fromstring"
] | [((505, 547), 'numpy.array', 'np.array', (["['A', 'C', 'G', 'T']"], {'dtype': '"""S1"""'}), "(['A', 'C', 'G', 'T'], dtype='S1')\n", (513, 547), True, 'import numpy as np\n'), ((7551, 7669), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""create pair counts"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='create pair counts', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (7574, 7669), False, 'import argparse, gzip\n'), ((8188, 8222), 'create_allele_counts.get_primer_intervals', 'get_primer_intervals', (['args.primers'], {}), '(args.primers)\n', (8208, 8222), False, 'from create_allele_counts import get_primer_intervals\n'), ((584, 608), 'pysam.Samfile', 'pysam.Samfile', (['sam_fname'], {}), '(sam_fname)\n', (597, 608), False, 'import pysam\n'), ((8797, 8849), 'gzip.open', 'gzip.open', (["(args.out_dir + '/pair_counts.pkl.gz')", '"""w"""'], {}), "(args.out_dir + '/pair_counts.pkl.gz', 'w')\n", (8806, 8849), False, 'import argparse, gzip\n'), ((8863, 8889), 'pickle.dump', 'pickle.dump', (['(ac, acc)', 'fh'], {}), '((ac, acc), fh)\n', (8874, 8889), True, 'import pickle as pickle\n'), ((3091, 3110), 'numpy.abs', 'np.abs', (['read1.isize'], {}), '(read1.isize)\n', (3097, 3110), True, 'import numpy as np\n'), ((3258, 3284), 'numpy.zeros', 'np.zeros', (['isize'], {'dtype': 'int'}), '(isize, dtype=int)\n', (3266, 3284), True, 'import numpy as np\n'), ((3310, 3337), 'numpy.zeros', 'np.zeros', (['isize'], {'dtype': '"""S1"""'}), "(isize, dtype='S1')\n", (3318, 3337), True, 'import numpy as np\n'), ((3363, 3394), 'numpy.zeros', 'np.zeros', (['(isize, 2)'], {'dtype': 'int'}), '((isize, 2), dtype=int)\n', (3371, 3394), True, 'import numpy as np\n'), ((5995, 6027), 'numpy.ones_like', 'np.ones_like', (['merged_seq', '"""bool"""'], {}), "(merged_seq, 'bool')\n", (6007, 6027), True, 'import numpy as np\n'), ((7089, 7111), 'itertools.combinations', 'combinations', (['combo', '(2)'], {}), '(combo, 
2)\n', (7101, 7111), False, 'from itertools import combinations\n'), ((1574, 1593), 'numpy.abs', 'np.abs', (['read1.isize'], {}), '(read1.isize)\n', (1580, 1593), True, 'import numpy as np\n'), ((2363, 2393), 'numpy.fromstring', 'np.fromstring', (['read1.seq', '"""S1"""'], {}), "(read1.seq, 'S1')\n", (2376, 2393), True, 'import numpy as np\n'), ((2503, 2533), 'numpy.fromstring', 'np.fromstring', (['read2.seq', '"""S1"""'], {}), "(read2.seq, 'S1')\n", (2516, 2533), True, 'import numpy as np\n'), ((2813, 2843), 'numpy.fromstring', 'np.fromstring', (['read2.seq', '"""S1"""'], {}), "(read2.seq, 'S1')\n", (2826, 2843), True, 'import numpy as np\n'), ((2953, 2983), 'numpy.fromstring', 'np.fromstring', (['read1.seq', '"""S1"""'], {}), "(read1.seq, 'S1')\n", (2966, 2983), True, 'import numpy as np\n'), ((2429, 2463), 'numpy.fromstring', 'np.fromstring', (['read1.qual', 'np.int8'], {}), '(read1.qual, np.int8)\n', (2442, 2463), True, 'import numpy as np\n'), ((2569, 2603), 'numpy.fromstring', 'np.fromstring', (['read2.qual', 'np.int8'], {}), '(read2.qual, np.int8)\n', (2582, 2603), True, 'import numpy as np\n'), ((2879, 2913), 'numpy.fromstring', 'np.fromstring', (['read2.qual', 'np.int8'], {}), '(read2.qual, np.int8)\n', (2892, 2913), True, 'import numpy as np\n'), ((3019, 3053), 'numpy.fromstring', 'np.fromstring', (['read1.qual', 'np.int8'], {}), '(read1.qual, np.int8)\n', (3032, 3053), True, 'import numpy as np\n'), ((5294, 5322), 'numpy.where', 'np.where', (['(seq_agree & better)'], {}), '(seq_agree & better)\n', (5302, 5322), True, 'import numpy as np\n'), ((5356, 5385), 'numpy.where', 'np.where', (['(seq_agree & ~better)'], {}), '(seq_agree & ~better)\n', (5364, 5385), True, 'import numpy as np\n')] |
# Copyright 2021 The Distla Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for pops.py."""
import jax
import jax.numpy as jnp
import numpy as np
import pytest
from distla_core.utils import initializers as init
from distla_core.linalg.utils import testutils
from distla_core.utils import pops
from distla_core.utils import config
# Aliases for distla_core configuration values (see distla_core.utils.config).
DTYPE = jnp.float32
AXIS_NAME = pops.AXIS_NAME
NROW = config.NROWS
NCOL = config.NCOLS
NPROCS = config.NPROCS
GRID = config.GRID  # 2-element grid; global shapes are divided by GRID[0]/GRID[1]
# Parameter sets for the parametrized tests below.
matrix_shapes = [(16, 16), (32, 16), (16, 32)]
Ns = [8, 16, 32]
dtypes = [jnp.float32]
def _local_shape(matrix_shape):
  """Return the per-process block shape of a globally distributed matrix."""
  rows, cols = matrix_shape
  return rows // GRID[0], cols // GRID[1]
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
def test_zeros(matrix_shape):
  """init.zeros yields an all-zero matrix that gathers to the global shape."""
  distributed = init.zeros(matrix_shape, np.float32)
  np.testing.assert_allclose(distributed, 0.0)
  gathered = pops.undistribute_global(distributed)
  assert gathered.shape == matrix_shape
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
def test_ones(matrix_shape):
  """init.ones yields an all-one matrix that gathers to the global shape."""
  distributed = init.ones(matrix_shape, np.float32)
  np.testing.assert_allclose(distributed, 1.0)
  gathered = pops.undistribute_global(distributed)
  assert gathered.shape == matrix_shape
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
@pytest.mark.parametrize("mu", [-1.0, 0.0, 1.0])
@pytest.mark.parametrize("sig", [0.5, 1.0, 1.5])
def test_normal(matrix_shape, mu, sig):
  """init.normal matches per-device jax.random.normal draws scaled by mu/sigma."""
  dtype = np.float32
  seed = 0
  mu, sig = dtype(mu), dtype(sig)
  actual = init.normal(matrix_shape, dtype=dtype, mu=mu, sigma=sig, seed=seed)
  local_shape = _local_shape(matrix_shape)
  keys = jax.random.split(jax.random.PRNGKey(seed), jax.local_device_count())
  tol = testutils.eps(jax.lax.Precision.HIGHEST, dtype)  # same for all devices
  for device, key in enumerate(keys):
    expected = jax.random.normal(key, local_shape, dtype=dtype) * sig + mu
    np.testing.assert_allclose(
        actual[device], expected, atol=10 * tol, rtol=10 * tol)
  assert pops.undistribute_global(actual).shape == matrix_shape
@pytest.mark.parametrize("matrix_shape", matrix_shapes)
@pytest.mark.parametrize("minval, maxval", [(0.0, 1.0), (-1, 1), (1, 2)])
def test_uniform(matrix_shape, minval, maxval):
  """init.uniform matches per-device jax.random.uniform draws in [minval, maxval)."""
  dtype = np.float32
  seed = 0
  actual = init.uniform(
      matrix_shape, dtype=dtype, minval=minval, maxval=maxval, seed=seed)
  local_shape = _local_shape(matrix_shape)
  keys = jax.random.split(jax.random.PRNGKey(seed), jax.local_device_count())
  tol = testutils.eps(jax.lax.Precision.HIGHEST, dtype)  # same for all devices
  for device, key in enumerate(keys):
    expected = jax.random.uniform(
        key, local_shape, dtype=dtype, minval=minval, maxval=maxval)
    np.testing.assert_allclose(
        actual[device], expected, atol=10 * tol, rtol=10 * tol)
  assert pops.undistribute_global(actual).shape == matrix_shape
| [
"distla_core.linalg.utils.testutils.eps",
"distla_core.utils.initializers.ones",
"jax.random.PRNGKey",
"jax.random.uniform",
"jax.local_device_count",
"numpy.testing.assert_allclose",
"jax.random.normal",
"pytest.mark.parametrize",
"distla_core.utils.initializers.normal",
"distla_core.utils.pops.u... | [((1283, 1337), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix_shape"""', 'matrix_shapes'], {}), "('matrix_shape', matrix_shapes)\n", (1306, 1337), False, 'import pytest\n'), ((1563, 1617), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix_shape"""', 'matrix_shapes'], {}), "('matrix_shape', matrix_shapes)\n", (1586, 1617), False, 'import pytest\n'), ((1841, 1895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix_shape"""', 'matrix_shapes'], {}), "('matrix_shape', matrix_shapes)\n", (1864, 1895), False, 'import pytest\n'), ((1897, 1944), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mu"""', '[-1.0, 0.0, 1.0]'], {}), "('mu', [-1.0, 0.0, 1.0])\n", (1920, 1944), False, 'import pytest\n'), ((1946, 1993), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sig"""', '[0.5, 1.0, 1.5]'], {}), "('sig', [0.5, 1.0, 1.5])\n", (1969, 1993), False, 'import pytest\n'), ((2664, 2718), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""matrix_shape"""', 'matrix_shapes'], {}), "('matrix_shape', matrix_shapes)\n", (2687, 2718), False, 'import pytest\n'), ((2720, 2792), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""minval, maxval"""', '[(0.0, 1.0), (-1, 1), (1, 2)]'], {}), "('minval, maxval', [(0.0, 1.0), (-1, 1), (1, 2)])\n", (2743, 2792), False, 'import pytest\n'), ((1400, 1431), 'distla_core.utils.initializers.zeros', 'init.zeros', (['matrix_shape', 'dtype'], {}), '(matrix_shape, dtype)\n', (1410, 1431), True, 'from distla_core.utils import initializers as init\n'), ((1434, 1473), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', '(0.0)'], {}), '(actual, 0.0)\n', (1460, 1473), True, 'import numpy as np\n'), ((1487, 1519), 'distla_core.utils.pops.undistribute_global', 'pops.undistribute_global', (['actual'], {}), '(actual)\n', (1511, 1519), False, 'from distla_core.utils import pops\n'), ((1679, 1709), 
'distla_core.utils.initializers.ones', 'init.ones', (['matrix_shape', 'dtype'], {}), '(matrix_shape, dtype)\n', (1688, 1709), True, 'from distla_core.utils import initializers as init\n'), ((1712, 1751), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual', '(1.0)'], {}), '(actual, 1.0)\n', (1738, 1751), True, 'import numpy as np\n'), ((1765, 1797), 'distla_core.utils.pops.undistribute_global', 'pops.undistribute_global', (['actual'], {}), '(actual)\n', (1789, 1797), False, 'from distla_core.utils import pops\n'), ((2113, 2180), 'distla_core.utils.initializers.normal', 'init.normal', (['matrix_shape'], {'dtype': 'dtype', 'mu': 'mu', 'sigma': 'sig', 'seed': 'seed'}), '(matrix_shape, dtype=dtype, mu=mu, sigma=sig, seed=seed)\n', (2124, 2180), True, 'from distla_core.utils import initializers as init\n'), ((2588, 2620), 'distla_core.utils.pops.undistribute_global', 'pops.undistribute_global', (['actual'], {}), '(actual)\n', (2612, 2620), False, 'from distla_core.utils import pops\n'), ((2884, 2969), 'distla_core.utils.initializers.uniform', 'init.uniform', (['matrix_shape'], {'dtype': 'dtype', 'minval': 'minval', 'maxval': 'maxval', 'seed': 'seed'}), '(matrix_shape, dtype=dtype, minval=minval, maxval=maxval, seed=seed\n )\n', (2896, 2969), True, 'from distla_core.utils import initializers as init\n'), ((3408, 3440), 'distla_core.utils.pops.undistribute_global', 'pops.undistribute_global', (['actual'], {}), '(actual)\n', (3432, 3440), False, 'from distla_core.utils import pops\n'), ((2250, 2274), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (2268, 2274), False, 'import jax\n'), ((2276, 2300), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (2298, 2300), False, 'import jax\n'), ((2319, 2343), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (2341, 2343), False, 'import jax\n'), ((2435, 2482), 'distla_core.linalg.utils.testutils.eps', 'testutils.eps', (['jax.lax.Precision.HIGHEST', 
'dtype'], {}), '(jax.lax.Precision.HIGHEST, dtype)\n', (2448, 2482), False, 'from distla_core.linalg.utils import testutils\n'), ((2487, 2564), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual[n]', 'expected'], {'atol': '(10 * tol)', 'rtol': '(10 * tol)'}), '(actual[n], expected, atol=10 * tol, rtol=10 * tol)\n', (2513, 2564), True, 'import numpy as np\n'), ((3041, 3065), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), '(seed)\n', (3059, 3065), False, 'import jax\n'), ((3067, 3091), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (3089, 3091), False, 'import jax\n'), ((3110, 3134), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (3132, 3134), False, 'import jax\n'), ((3152, 3240), 'jax.random.uniform', 'jax.random.uniform', (['keys[n]', 'local_shape'], {'dtype': 'dtype', 'minval': 'minval', 'maxval': 'maxval'}), '(keys[n], local_shape, dtype=dtype, minval=minval, maxval\n =maxval)\n', (3170, 3240), False, 'import jax\n'), ((3255, 3302), 'distla_core.linalg.utils.testutils.eps', 'testutils.eps', (['jax.lax.Precision.HIGHEST', 'dtype'], {}), '(jax.lax.Precision.HIGHEST, dtype)\n', (3268, 3302), False, 'from distla_core.linalg.utils import testutils\n'), ((3307, 3384), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['actual[n]', 'expected'], {'atol': '(10 * tol)', 'rtol': '(10 * tol)'}), '(actual[n], expected, atol=10 * tol, rtol=10 * tol)\n', (3333, 3384), True, 'import numpy as np\n'), ((2361, 2413), 'jax.random.normal', 'jax.random.normal', (['keys[n]', 'local_shape'], {'dtype': 'dtype'}), '(keys[n], local_shape, dtype=dtype)\n', (2378, 2413), False, 'import jax\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 16:23:37 2016
@author: <NAME> (<EMAIL>)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import multi_task_model
import subprocess
import stat
# ---- Command-line flags (hyper-parameters, data/model paths) ----
# Learning-rate flags are currently disabled; kept for reference:
#tf.app.flags.DEFINE_float("learning_rate", 0.1, "Learning rate.")
#tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.9,
#                          "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
                          "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 16,
                            "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 128, "Size of each model layer.")
tf.app.flags.DEFINE_integer("word_embedding_size", 128, "Size of the word embedding")
tf.app.flags.DEFINE_integer("num_layers", 1, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("in_vocab_size", 10000, "max vocab Size.")
tf.app.flags.DEFINE_integer("out_vocab_size", 10000, "max tag vocab Size.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Training directory.")
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 300,
                            "How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_integer("max_training_steps", 1000, # 10000
                            "Max training steps.")
tf.app.flags.DEFINE_integer("max_test_data_size", 0,
                            "Max size of test set.")
tf.app.flags.DEFINE_boolean("use_attention", True,
                            "Use attention based RNN")
tf.app.flags.DEFINE_integer("max_sequence_length", 0,
                            "Max sequence length.")
tf.app.flags.DEFINE_float("dropout_keep_prob", 0.5,
                          "dropout keep cell input and output prob.")
tf.app.flags.DEFINE_boolean("bidirectional_rnn", True,
                            "Use birectional RNN")
tf.app.flags.DEFINE_string("task", None, "Options: joint; intent; tagging")
FLAGS = tf.app.flags.FLAGS
# ---- Flag validation: both flags below are mandatory; exit early otherwise ----
if FLAGS.max_sequence_length == 0:
  print ('Please indicate max sequence length. Exit')
  exit()
if FLAGS.task is None:
  print ('Please indicate task to run. Available options: intent; tagging; joint')
  exit()
# Translate the --task string into per-objective on/off switches;
# 'joint' enables both intent classification and slot tagging.
task = dict({'intent':0, 'tagging':0, 'joint':0})
if FLAGS.task == 'intent':
  task['intent'] = 1
elif FLAGS.task == 'tagging':
  task['tagging'] = 1
elif FLAGS.task == 'joint':
  task['intent'] = 1
  task['tagging'] = 1
  task['joint'] = 1
# Single bucket: pad both input and tag sequences to max_sequence_length.
_buckets = [(FLAGS.max_sequence_length, FLAGS.max_sequence_length)]
#_buckets = [(3, 10), (10, 25)]
# metrics function using conlleval.pl
def conlleval(p, g, w, filename):
    '''
    Write predictions in CoNLL format and score them with conlleval.pl.

    INPUT:
    p :: predictions (list of tag sequences)
    g :: groundtruth (list of tag sequences)
    w :: corresponding words (list of word sequences)
    OUTPUT:
    filename :: name of the file where the predictions
    are written. it will be the input of conlleval.pl script
    for computing the performance in terms of precision
    recall and f1 score

    Returns the {'p': ..., 'r': ..., 'f1': ...} dict produced by get_perf.
    '''
    # Collect chunks and join once: the original repeated `out += ...`
    # which is quadratic in the output size.
    chunks = []
    for sent_labels, sent_preds, sent_words in zip(g, p, w):
        chunks.append('BOS O O\n')
        # inner variable renamed from `w` (it shadowed the parameter)
        for label, pred, word in zip(sent_labels, sent_preds, sent_words):
            chunks.append(word + ' ' + label + ' ' + pred + '\n')
        chunks.append('EOS O O\n\n')
    out = ''.join(chunks)
    with open(filename, 'w') as f:
        f.write(out[:-1])  # remove the ending \n on last line
    return get_perf(filename)
def get_perf(filename):
    '''Run the conlleval.pl perl script on `filename` and parse its output.

    Returns a dict with precision ('p'), recall ('r') and F1 score ('f1').
    Raises RuntimeError if conlleval.pl reported no accuracy line (the
    original code fell through to a confusing NameError in that case).
    '''
    _conlleval = os.path.dirname(os.path.realpath(__file__)) + '/conlleval.pl'
    os.chmod(_conlleval, stat.S_IRWXU)  # give the execute permissions
    proc = subprocess.Popen(["perl",
                            _conlleval],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    # read the CoNLL-formatted predictions and feed them to the script's stdin
    with open(filename) as fh:
        _str = fh.read()
    stdout, _ = proc.communicate(_str.encode('utf-8'))
    out = None
    for line in stdout.split(b'\n'):
        if b'accuracy' in line:
            out = line.split()
            break
    if out is None:
        raise RuntimeError('conlleval.pl did not report an accuracy line')
    # e.g. "accuracy: 97.0%; precision: 92.0%; recall: 91.0%; FB1: 91.5"
    precision = float(out[6][:-2])
    recall = float(out[8][:-2])
    f1score = float(out[10])
    return {'p': precision, 'r': recall, 'f1': f1score}
def read_data(source_path, target_path, label_path, max_size=None):
  """Read data from source and target files and put into buckets.
  Args:
    source_path: path to the files with token-ids for the source input - word sequence.
    target_path: path to the file with token-ids for the target output - tag sequence;
      it must be aligned with the source file: n-th line contains the desired
      output for n-th line from the source_path.
    label_path: path to the file with token-ids for the sequence classification label
    max_size: maximum number of lines to read, all other will be ignored;
      if 0 or None, data files will be read completely (no limit).
  Returns:
    data_set: a list of length len(_buckets); data_set[n] contains a list of
      (source, target, label) tuple read from the provided data files that fit
      into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
      len(target) < _buckets[n][1]; source, target, and label are lists of token-ids.
  """
  data_set = [[] for _ in _buckets]
  # Open the three aligned files together instead of nesting `with` blocks.
  with tf.gfile.GFile(source_path, mode="r") as source_file, \
       tf.gfile.GFile(target_path, mode="r") as target_file, \
       tf.gfile.GFile(label_path, mode="r") as label_file:
    num_lines = 0
    source = source_file.readline()
    target = target_file.readline()
    label = label_file.readline()
    # Stop at EOF on any stream, or once max_size examples were consumed.
    while source and target and label and (not max_size or num_lines < max_size):
      num_lines += 1
      if num_lines % 100000 == 0:
        print("  reading data line %d" % num_lines)
        sys.stdout.flush()
      parsed = [[int(tok) for tok in line.split()]
                for line in (source, target, label)]
      source_ids, target_ids, label_ids = parsed
      # target_ids.append(data_utils.EOS_ID)
      # Place the example in the first bucket it fits into.
      for bucket_id, (source_size, target_size) in enumerate(_buckets):
        if len(source_ids) < source_size and len(target_ids) < target_size:
          data_set[bucket_id].append([source_ids, target_ids, label_ids])
          break
      source = source_file.readline()
      target = target_file.readline()
      label = label_file.readline()
  return data_set # 3 outputs in each unit: source_ids, target_ids, label_ids
def create_model(session, source_vocab_size, target_vocab_size, label_vocab_size):
  """Create model and initialize or load parameters in session.

  Builds two views over the same variables: a training graph
  (forward_only=False) and a test graph (forward_only=True, reused
  variables).  Restores from the latest checkpoint in FLAGS.train_dir
  when one exists, otherwise initializes fresh parameters.
  """
  def _build(reuse, forward_only):
    # The two graphs differed only in `reuse` and `forward_only`; building
    # them through one helper removes the duplicated argument list.
    with tf.variable_scope("model", reuse=reuse):
      return multi_task_model.MultiTaskModel(
          source_vocab_size, target_vocab_size, label_vocab_size, _buckets,
          FLAGS.word_embedding_size, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
          dropout_keep_prob=FLAGS.dropout_keep_prob, use_lstm=True,
          forward_only=forward_only,
          use_attention=FLAGS.use_attention,
          bidirectional_rnn=FLAGS.bidirectional_rnn,
          task=task)
  model_train = _build(None, False)
  model_test = _build(True, True)
  ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
  if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
    print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
    # Restoring through model_train also fills model_test (shared variables).
    model_train.saver.restore(session, ckpt.model_checkpoint_path)
  else:
    print("Created model with fresh parameters.")
    session.run(tf.initialize_all_variables())
  return model_train, model_test
def train():
  """Train the joint intent-classification / slot-tagging model.

  Prepares data and vocabularies under FLAGS.data_dir, builds train/test
  graphs, then runs the training loop.  Every FLAGS.steps_per_checkpoint
  steps it saves a checkpoint and evaluates on the dev and test sets,
  keeping the hypothesis file of the best tagging F1 so far.
  """
  print ('Applying Parameters:')
  for k,v in FLAGS.__dict__['__flags'].items():
    print ('%s: %s' % (k, str(v)))
  print("Preparing data in %s" % FLAGS.data_dir)
  vocab_path = ''
  tag_vocab_path = ''
  label_vocab_path = ''
  # Tokenize the raw corpus and build word/tag/label vocabularies on disk.
  in_seq_train, out_seq_train, label_train, in_seq_dev, out_seq_dev, label_dev, in_seq_test, out_seq_test, label_test, vocab_path, tag_vocab_path, label_vocab_path = data_utils.prepare_multi_task_data(
     FLAGS.data_dir, FLAGS.in_vocab_size, FLAGS.out_vocab_size)
  result_dir = FLAGS.train_dir + '/test_results'
  if not os.path.isdir(result_dir):
      os.makedirs(result_dir)
  # Hypothesis files consumed by conlleval.pl during evaluation.
  current_taging_valid_out_file = result_dir + '/tagging.valid.hyp.txt'
  current_taging_test_out_file = result_dir + '/tagging.test.hyp.txt'
  vocab, rev_vocab = data_utils.initialize_vocabulary(vocab_path)
  tag_vocab, rev_tag_vocab = data_utils.initialize_vocabulary(tag_vocab_path)
  label_vocab, rev_label_vocab = data_utils.initialize_vocabulary(label_vocab_path)
  with tf.Session() as sess:
    # Create model.
    print("Max sequence length: %d." % _buckets[0][0])
    print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
    model, model_test = create_model(sess, len(vocab), len(tag_vocab), len(label_vocab))
    print ("Creating model with source_vocab_size=%d, target_vocab_size=%d, and label_vocab_size=%d." % (len(vocab), len(tag_vocab), len(label_vocab)))
    # Read data into buckets and compute their sizes.
    print ("Reading train/valid/test data (training set limit: %d)."
           % FLAGS.max_train_data_size)
    dev_set = read_data(in_seq_dev, out_seq_dev, label_dev)
    test_set = read_data(in_seq_test, out_seq_test, label_test)
    train_set = read_data(in_seq_train, out_seq_train, label_train)
    train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
    train_total_size = float(sum(train_bucket_sizes))
    # Cumulative size fractions, used below to sample a bucket per step.
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                           for i in xrange(len(train_bucket_sizes))]
    # This is the training loop.
    step_time, loss = 0.0, 0.0
    current_step = 0
    best_valid_score = 0
    best_test_score = 0
    while model.global_step.eval() < FLAGS.max_training_steps:
      # Pick a bucket at random, weighted by the number of examples in it.
      random_number_01 = np.random.random_sample()
      bucket_id = min([i for i in xrange(len(train_buckets_scale))
                       if train_buckets_scale[i] > random_number_01])
      # Get a batch and make a step.
      start_time = time.time()
      encoder_inputs, tags, tag_weights, batch_sequence_length, labels = model.get_batch(train_set, bucket_id)
      # Dispatch on the configured task; exactly one branch runs per step.
      if task['joint'] == 1:
        _, step_loss, tagging_logits, classification_logits = model.joint_step(sess, encoder_inputs, tags, tag_weights, labels,
                                                   batch_sequence_length, bucket_id, False)
      elif task['tagging'] == 1:
        _, step_loss, tagging_logits = model.tagging_step(sess, encoder_inputs, tags, tag_weights,
                                        batch_sequence_length, bucket_id, False)
      elif task['intent'] == 1:
        _, step_loss, classification_logits = model.classification_step(sess, encoder_inputs, labels,
                                              batch_sequence_length, bucket_id, False)
      # step_time/loss accumulate per-checkpoint averages.
      step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
      loss += step_loss / FLAGS.steps_per_checkpoint
      current_step += 1
      # Once in a while, we save checkpoint, print statistics, and run evals.
      if current_step % FLAGS.steps_per_checkpoint == 0:
        perplexity = math.exp(loss) if loss < 300 else float('inf')
        print ("global step %d step-time %.2f. Training perplexity %.2f"
               % (model.global_step.eval(), step_time, perplexity))
        sys.stdout.flush()
        # Save checkpoint and zero timer and loss.
        checkpoint_path = os.path.join(FLAGS.train_dir, "model.ckpt")
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
        step_time, loss = 0.0, 0.0
        def run_valid_test(data_set, mode): # mode: Eval, Test
          # Run evals on development/test set and print the accuracy.
          # Closure over sess, model_test, rev_* vocabs and the output paths.
          word_list = list()
          ref_tag_list = list()
          hyp_tag_list = list()
          ref_label_list = list()
          hyp_label_list = list()
          correct_count = 0
          accuracy = 0.0
          tagging_eval_result = dict()
          for bucket_id in xrange(len(_buckets)):
            eval_loss = 0.0
            count = 0
            # Evaluate one example at a time with the forward-only graph.
            for i in xrange(len(data_set[bucket_id])):
              count += 1
              encoder_inputs, tags, tag_weights, sequence_length, labels = model_test.get_one(
                data_set, bucket_id, i)
              tagging_logits = []
              classification_logits = []
              if task['joint'] == 1:
                _, step_loss, tagging_logits, classification_logits = model_test.joint_step(sess, encoder_inputs, tags, tag_weights, labels,
                                                           sequence_length, bucket_id, True)
              elif task['tagging'] == 1:
                _, step_loss, tagging_logits = model_test.tagging_step(sess, encoder_inputs, tags, tag_weights,
                                                sequence_length, bucket_id, True)
              elif task['intent'] == 1:
                _, step_loss, classification_logits = model_test.classification_step(sess, encoder_inputs, labels,
                                                      sequence_length, bucket_id, True)
              eval_loss += step_loss / len(data_set[bucket_id])
              hyp_label = None
              if task['intent'] == 1:
                # Intent accuracy: compare argmax prediction to the reference label.
                ref_label_list.append(rev_label_vocab[labels[0][0]])
                hyp_label = np.argmax(classification_logits[0],0)
                hyp_label_list.append(rev_label_vocab[hyp_label])
                if labels[0] == hyp_label:
                  correct_count += 1
              if task['tagging'] == 1:
                # Collect words / reference tags / hypothesis tags for conlleval.
                word_list.append([rev_vocab[x[0]] for x in encoder_inputs[:sequence_length[0]]])
                ref_tag_list.append([rev_tag_vocab[x[0]] for x in tags[:sequence_length[0]]])
                hyp_tag_list.append([rev_tag_vocab[np.argmax(x)] for x in tagging_logits[:sequence_length[0]]])
          accuracy = float(correct_count)*100/count
          if task['intent'] == 1:
            print("  %s accuracy: %.2f %d/%d" % (mode, accuracy, correct_count, count))
            sys.stdout.flush()
          if task['tagging'] == 1:
            if mode == 'Eval':
                taging_out_file = current_taging_valid_out_file
            elif mode == 'Test':
                taging_out_file = current_taging_test_out_file
            tagging_eval_result = conlleval(hyp_tag_list, ref_tag_list, word_list, taging_out_file)
            print("  %s f1-score: %.2f" % (mode, tagging_eval_result['f1']))
            sys.stdout.flush()
          return accuracy, tagging_eval_result
        # valid
        valid_accuracy, valid_tagging_result = run_valid_test(dev_set, 'Eval')
        if task['tagging'] == 1 and valid_tagging_result['f1'] > best_valid_score:
          best_valid_score = valid_tagging_result['f1']
          # save the best output file
          subprocess.call(['mv', current_taging_valid_out_file, current_taging_valid_out_file + '.best_f1_%.2f' % best_valid_score])
        # test, run test after each validation for development purpose.
        test_accuracy, test_tagging_result = run_valid_test(test_set, 'Test')
        if task['tagging'] == 1 and test_tagging_result['f1'] > best_test_score:
          best_test_score = test_tagging_result['f1']
          # save the best output file
          subprocess.call(['mv', current_taging_test_out_file, current_taging_test_out_file + '.best_f1_%.2f' % best_test_score])
def main(_):
  # Entry point for tf.app.run(); the unused argument receives parsed argv.
  train()
# tf.app.run() parses the flags defined above, then calls main().
if __name__ == "__main__":
  tf.app.run()
| [
"data_utils.initialize_vocabulary",
"tensorflow.gfile.GFile",
"math.exp",
"tensorflow.app.run",
"tensorflow.gfile.Exists",
"subprocess.Popen",
"tensorflow.Session",
"os.chmod",
"os.path.isdir",
"subprocess.call",
"tensorflow.app.flags.DEFINE_boolean",
"sys.stdout.flush",
"tensorflow.initiali... | [((631, 718), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""max_gradient_norm"""', '(5.0)', '"""Clip gradients to this norm."""'], {}), "('max_gradient_norm', 5.0,\n 'Clip gradients to this norm.')\n", (656, 718), True, 'import tensorflow as tf\n'), ((741, 828), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(16)', '"""Batch size to use during training."""'], {}), "('batch_size', 16,\n 'Batch size to use during training.')\n", (768, 828), True, 'import tensorflow as tf\n'), ((853, 922), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""size"""', '(128)', '"""Size of each model layer."""'], {}), "('size', 128, 'Size of each model layer.')\n", (880, 922), True, 'import tensorflow as tf\n'), ((923, 1012), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""word_embedding_size"""', '(128)', '"""Size of the word embedding"""'], {}), "('word_embedding_size', 128,\n 'Size of the word embedding')\n", (950, 1012), True, 'import tensorflow as tf\n'), ((1009, 1087), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_layers"""', '(1)', '"""Number of layers in the model."""'], {}), "('num_layers', 1, 'Number of layers in the model.')\n", (1036, 1087), True, 'import tensorflow as tf\n'), ((1088, 1158), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""in_vocab_size"""', '(10000)', '"""max vocab Size."""'], {}), "('in_vocab_size', 10000, 'max vocab Size.')\n", (1115, 1158), True, 'import tensorflow as tf\n'), ((1159, 1234), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""out_vocab_size"""', '(10000)', '"""max tag vocab Size."""'], {}), "('out_vocab_size', 10000, 'max tag vocab Size.')\n", (1186, 1234), True, 'import tensorflow as tf\n'), ((1235, 1299), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""/tmp"""', 
'"""Data directory"""'], {}), "('data_dir', '/tmp', 'Data directory')\n", (1261, 1299), True, 'import tensorflow as tf\n'), ((1300, 1370), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""/tmp"""', '"""Training directory."""'], {}), "('train_dir', '/tmp', 'Training directory.')\n", (1326, 1370), True, 'import tensorflow as tf\n'), ((1371, 1481), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_train_data_size"""', '(0)', '"""Limit on the size of training data (0: no limit)."""'], {}), "('max_train_data_size', 0,\n 'Limit on the size of training data (0: no limit).')\n", (1398, 1481), True, 'import tensorflow as tf\n'), ((1506, 1615), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""steps_per_checkpoint"""', '(300)', '"""How many training steps to do per checkpoint."""'], {}), "('steps_per_checkpoint', 300,\n 'How many training steps to do per checkpoint.')\n", (1533, 1615), True, 'import tensorflow as tf\n'), ((1640, 1718), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_training_steps"""', '(1000)', '"""Max training steps."""'], {}), "('max_training_steps', 1000, 'Max training steps.')\n", (1667, 1718), True, 'import tensorflow as tf\n'), ((1756, 1833), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_test_data_size"""', '(0)', '"""Max size of test set."""'], {}), "('max_test_data_size', 0, 'Max size of test set.')\n", (1783, 1833), True, 'import tensorflow as tf\n'), ((1862, 1939), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""use_attention"""', '(True)', '"""Use attention based RNN"""'], {}), "('use_attention', True, 'Use attention based RNN')\n", (1889, 1939), True, 'import tensorflow as tf\n'), ((1968, 2045), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_sequence_length"""', '(0)', '"""Max sequence length."""'], {}), 
"('max_sequence_length', 0, 'Max sequence length.')\n", (1995, 2045), True, 'import tensorflow as tf\n'), ((2074, 2173), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""dropout_keep_prob"""', '(0.5)', '"""dropout keep cell input and output prob."""'], {}), "('dropout_keep_prob', 0.5,\n 'dropout keep cell input and output prob.')\n", (2099, 2173), True, 'import tensorflow as tf\n'), ((2196, 2273), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""bidirectional_rnn"""', '(True)', '"""Use birectional RNN"""'], {}), "('bidirectional_rnn', True, 'Use birectional RNN')\n", (2223, 2273), True, 'import tensorflow as tf\n'), ((2302, 2377), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""task"""', 'None', '"""Options: joint; intent; tagging"""'], {}), "('task', None, 'Options: joint; intent; tagging')\n", (2328, 2377), True, 'import tensorflow as tf\n'), ((3888, 3922), 'os.chmod', 'os.chmod', (['_conlleval', 'stat.S_IRWXU'], {}), '(_conlleval, stat.S_IRWXU)\n', (3896, 3922), False, 'import os\n'), ((3967, 4057), 'subprocess.Popen', 'subprocess.Popen', (["['perl', _conlleval]"], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), "(['perl', _conlleval], stdin=subprocess.PIPE, stdout=\n subprocess.PIPE)\n", (3983, 4057), False, 'import subprocess\n'), ((7979, 8025), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (8008, 8025), True, 'import tensorflow as tf\n'), ((8774, 8871), 'data_utils.prepare_multi_task_data', 'data_utils.prepare_multi_task_data', (['FLAGS.data_dir', 'FLAGS.in_vocab_size', 'FLAGS.out_vocab_size'], {}), '(FLAGS.data_dir, FLAGS.in_vocab_size,\n FLAGS.out_vocab_size)\n', (8808, 8871), False, 'import data_utils\n'), ((9154, 9198), 'data_utils.initialize_vocabulary', 'data_utils.initialize_vocabulary', (['vocab_path'], {}), '(vocab_path)\n', (9186, 9198), False, 'import data_utils\n'), ((9228, 9276), 
'data_utils.initialize_vocabulary', 'data_utils.initialize_vocabulary', (['tag_vocab_path'], {}), '(tag_vocab_path)\n', (9260, 9276), False, 'import data_utils\n'), ((9310, 9360), 'data_utils.initialize_vocabulary', 'data_utils.initialize_vocabulary', (['label_vocab_path'], {}), '(label_vocab_path)\n', (9342, 9360), False, 'import data_utils\n'), ((16347, 16359), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (16357, 16359), True, 'import tensorflow as tf\n'), ((5638, 5675), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['source_path'], {'mode': '"""r"""'}), "(source_path, mode='r')\n", (5652, 5675), True, 'import tensorflow as tf\n'), ((6976, 7014), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': 'None'}), "('model', reuse=None)\n", (6993, 7014), True, 'import tensorflow as tf\n'), ((7034, 7420), 'multi_task_model.MultiTaskModel', 'multi_task_model.MultiTaskModel', (['source_vocab_size', 'target_vocab_size', 'label_vocab_size', '_buckets', 'FLAGS.word_embedding_size', 'FLAGS.size', 'FLAGS.num_layers', 'FLAGS.max_gradient_norm', 'FLAGS.batch_size'], {'dropout_keep_prob': 'FLAGS.dropout_keep_prob', 'use_lstm': '(True)', 'forward_only': '(False)', 'use_attention': 'FLAGS.use_attention', 'bidirectional_rnn': 'FLAGS.bidirectional_rnn', 'task': 'task'}), '(source_vocab_size, target_vocab_size,\n label_vocab_size, _buckets, FLAGS.word_embedding_size, FLAGS.size,\n FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n dropout_keep_prob=FLAGS.dropout_keep_prob, use_lstm=True, forward_only=\n False, use_attention=FLAGS.use_attention, bidirectional_rnn=FLAGS.\n bidirectional_rnn, task=task)\n', (7065, 7420), False, 'import multi_task_model\n'), ((7477, 7515), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""model"""'], {'reuse': '(True)'}), "('model', reuse=True)\n", (7494, 7515), True, 'import tensorflow as tf\n'), ((7534, 7919), 'multi_task_model.MultiTaskModel', 'multi_task_model.MultiTaskModel', (['source_vocab_size', 
'target_vocab_size', 'label_vocab_size', '_buckets', 'FLAGS.word_embedding_size', 'FLAGS.size', 'FLAGS.num_layers', 'FLAGS.max_gradient_norm', 'FLAGS.batch_size'], {'dropout_keep_prob': 'FLAGS.dropout_keep_prob', 'use_lstm': '(True)', 'forward_only': '(True)', 'use_attention': 'FLAGS.use_attention', 'bidirectional_rnn': 'FLAGS.bidirectional_rnn', 'task': 'task'}), '(source_vocab_size, target_vocab_size,\n label_vocab_size, _buckets, FLAGS.word_embedding_size, FLAGS.size,\n FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n dropout_keep_prob=FLAGS.dropout_keep_prob, use_lstm=True, forward_only=\n True, use_attention=FLAGS.use_attention, bidirectional_rnn=FLAGS.\n bidirectional_rnn, task=task)\n', (7565, 7919), False, 'import multi_task_model\n'), ((8040, 8083), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (8055, 8083), True, 'import tensorflow as tf\n'), ((8932, 8957), 'os.path.isdir', 'os.path.isdir', (['result_dir'], {}), '(result_dir)\n', (8945, 8957), False, 'import os\n'), ((8965, 8988), 'os.makedirs', 'os.makedirs', (['result_dir'], {}), '(result_dir)\n', (8976, 8988), False, 'import os\n'), ((9369, 9381), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9379, 9381), True, 'import tensorflow as tf\n'), ((3838, 3864), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3854, 3864), False, 'import os\n'), ((5701, 5738), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['target_path'], {'mode': '"""r"""'}), "(target_path, mode='r')\n", (5715, 5738), True, 'import tensorflow as tf\n'), ((8301, 8330), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (8328, 8330), True, 'import tensorflow as tf\n'), ((10644, 10669), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (10667, 10669), True, 'import numpy as np\n'), ((10864, 10875), 'time.time', 'time.time', ([], {}), '()\n', (10873, 10875), False, 
'import time\n'), ((5766, 5802), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['label_path'], {'mode': '"""r"""'}), "(label_path, mode='r')\n", (5780, 5802), True, 'import tensorflow as tf\n'), ((12141, 12159), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (12157, 12159), False, 'import sys\n'), ((12237, 12280), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""model.ckpt"""'], {}), "(FLAGS.train_dir, 'model.ckpt')\n", (12249, 12280), False, 'import os\n'), ((11659, 11670), 'time.time', 'time.time', ([], {}), '()\n', (11668, 11670), False, 'import time\n'), ((11948, 11962), 'math.exp', 'math.exp', (['loss'], {}), '(loss)\n', (11956, 11962), False, 'import math\n'), ((15715, 15842), 'subprocess.call', 'subprocess.call', (["['mv', current_taging_valid_out_file, current_taging_valid_out_file + \n '.best_f1_%.2f' % best_valid_score]"], {}), "(['mv', current_taging_valid_out_file, \n current_taging_valid_out_file + '.best_f1_%.2f' % best_valid_score])\n", (15730, 15842), False, 'import subprocess\n'), ((16171, 16295), 'subprocess.call', 'subprocess.call', (["['mv', current_taging_test_out_file, current_taging_test_out_file + \n '.best_f1_%.2f' % best_test_score]"], {}), "(['mv', current_taging_test_out_file, \n current_taging_test_out_file + '.best_f1_%.2f' % best_test_score])\n", (16186, 16295), False, 'import subprocess\n'), ((6149, 6167), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6165, 6167), False, 'import sys\n'), ((14914, 14932), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (14930, 14932), False, 'import sys\n'), ((15364, 15382), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15380, 15382), False, 'import sys\n'), ((14179, 14217), 'numpy.argmax', 'np.argmax', (['classification_logits[0]', '(0)'], {}), '(classification_logits[0], 0)\n', (14188, 14217), True, 'import numpy as np\n'), ((14658, 14670), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (14667, 14670), True, 'import numpy as np\n')] |
"""
Plots fig S2:
Specifically, zonal-mean root-mean-square of stationary wave meridional wind
at 850 hPa for both reanalysis and aquaplanet simulation data for ANNUAL and NDJFM.
NOTE: since the reviewer asked for a measure of interannual variability in
stationary wave amplitude, here we calculate stationary waves as the annual or
seasonal mean for each year and then calculate the rms over all years.
"""
import numpy as np
import xarray as xr
from matplotlib import pyplot as plt
from ds21grl import dim_aqua,dim_erai
from ds21grl.config import data_name,dir_interim,dir_fig
# INPUT -----------------------------------------------------------
ilat = 70          # NOTE(review): unused below; the 70N band is hard-coded in the filenames
write2file = 1     # 1: save figure as PDF to dir_fig; 0: only display
# -----------------------------------------------------------------
# read rms data from experiments L+, 2L+, Lk1 and erai
filename1 = dir_interim + data_name[0] + '/zm_rms_V850_stw_pl_70N_' + dim_erai.timestamp + '.nc'
filename2 = dir_interim + data_name[2] + '/zm_rms_V850_stw_ml_70N_' + dim_aqua.timestamp + '.nc'
filename3 = dir_interim + data_name[6] + '/zm_rms_V850_stw_ml_70N_' + dim_aqua.timestamp + '.nc'
filename4 = dir_interim + data_name[7] + '/zm_rms_V850_stw_ml_70N_' + dim_aqua.timestamp + '.nc'
ds1 = xr.open_dataset(filename1)
ds2 = xr.open_dataset(filename2)
ds3 = xr.open_dataset(filename3)
ds4 = xr.open_dataset(filename4)
# rms_* arrays: index 0 = ANN, index 1 = NDJFM (matches the legend labels below)
rms_erai = ds1['rms_stw'].values
rms_L = ds2['rms_stw'].values
rms_2L = ds3['rms_stw'].values
rms_Lk1 = ds4['rms_stw'].values
# plotting
positions = np.array([1,2,3,4])   # one x-position per experiment (reanalysis, L+, 2L+, Lk1)
fontsize = 12
figsize = np.array([10,5])
plt.figure(figsize=(figsize[0],figsize[1]))
plt.subplots_adjust(hspace=0.3,wspace=0.2,left=0.075,right=0.95,top=0.95,bottom=0.1)
# annual-mean values (black markers)
plt.plot(positions[0],rms_erai[0],'k',marker='o',markersize=10)
plt.plot(positions[1],rms_L[0],'k',marker='o',markersize=10)
plt.plot(positions[2],rms_2L[0],'k',marker='o',markersize=10)
plt.plot(positions[3],rms_Lk1[0],'k',marker='o',markersize=10)
# NDJFM values (blue markers)
plt.plot(positions[0],rms_erai[1],'tab:blue',marker='o',markersize=10)
plt.plot(positions[1],rms_L[1],'tab:blue',marker='o',markersize=10)
plt.plot(positions[2],rms_2L[1],'tab:blue',marker='o',markersize=10)
plt.plot(positions[3],rms_Lk1[1],'tab:blue',marker='o',markersize=10)
# empty artists so the legend shows exactly one entry per season
plt.plot([],[],'k',marker='o',markersize=10,label='ANN')
plt.plot([],[],'tab:blue',marker='o',markersize=10,label='NDJFM')
plt.legend(loc='lower right',frameon=False,fontsize=fontsize,labelcolor='linecolor',markerscale=0)
plt.xticks(positions,['reanalysis','L+','2L+',r'L$_{k1}$'],fontsize=fontsize)
plt.yticks(np.arange(0,3,0.5),np.arange(0,3,0.5),fontsize=fontsize)
plt.xlabel('experiment',fontsize=fontsize)
plt.ylabel(r'$[\overline{v}^*_{rms}]_{850hPa}$',fontsize=fontsize)
plt.ylim([0,2.5])
if write2file == 1:
    plt.savefig(dir_fig + 'fig_S2.pdf')
plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"xarray.open_dataset",
"matplotlib.pyplot.... | [((1283, 1309), 'xarray.open_dataset', 'xr.open_dataset', (['filename1'], {}), '(filename1)\n', (1298, 1309), True, 'import xarray as xr\n'), ((1327, 1353), 'xarray.open_dataset', 'xr.open_dataset', (['filename2'], {}), '(filename2)\n', (1342, 1353), True, 'import xarray as xr\n'), ((1371, 1397), 'xarray.open_dataset', 'xr.open_dataset', (['filename3'], {}), '(filename3)\n', (1386, 1397), True, 'import xarray as xr\n'), ((1415, 1441), 'xarray.open_dataset', 'xr.open_dataset', (['filename4'], {}), '(filename4)\n', (1430, 1441), True, 'import xarray as xr\n'), ((1625, 1647), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (1633, 1647), True, 'import numpy as np\n'), ((1672, 1689), 'numpy.array', 'np.array', (['[10, 5]'], {}), '([10, 5])\n', (1680, 1689), True, 'import numpy as np\n'), ((1690, 1734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(figsize[0], figsize[1])'}), '(figsize=(figsize[0], figsize[1]))\n', (1700, 1734), True, 'from matplotlib import pyplot as plt\n'), ((1734, 1828), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.3)', 'wspace': '(0.2)', 'left': '(0.075)', 'right': '(0.95)', 'top': '(0.95)', 'bottom': '(0.1)'}), '(hspace=0.3, wspace=0.2, left=0.075, right=0.95, top=\n 0.95, bottom=0.1)\n', (1753, 1828), True, 'from matplotlib import pyplot as plt\n'), ((1820, 1887), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[0]', 'rms_erai[0]', '"""k"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[0], rms_erai[0], 'k', marker='o', markersize=10)\n", (1828, 1887), True, 'from matplotlib import pyplot as plt\n'), ((1884, 1948), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[1]', 'rms_L[0]', '"""k"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[1], rms_L[0], 'k', marker='o', markersize=10)\n", (1892, 1948), True, 'from matplotlib import pyplot as plt\n'), ((1945, 2010), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[2]', 
'rms_2L[0]', '"""k"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[2], rms_2L[0], 'k', marker='o', markersize=10)\n", (1953, 2010), True, 'from matplotlib import pyplot as plt\n'), ((2007, 2073), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[3]', 'rms_Lk1[0]', '"""k"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[3], rms_Lk1[0], 'k', marker='o', markersize=10)\n", (2015, 2073), True, 'from matplotlib import pyplot as plt\n'), ((2071, 2145), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[0]', 'rms_erai[1]', '"""tab:blue"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[0], rms_erai[1], 'tab:blue', marker='o', markersize=10)\n", (2079, 2145), True, 'from matplotlib import pyplot as plt\n'), ((2142, 2213), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[1]', 'rms_L[1]', '"""tab:blue"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[1], rms_L[1], 'tab:blue', marker='o', markersize=10)\n", (2150, 2213), True, 'from matplotlib import pyplot as plt\n'), ((2210, 2282), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[2]', 'rms_2L[1]', '"""tab:blue"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[2], rms_2L[1], 'tab:blue', marker='o', markersize=10)\n", (2218, 2282), True, 'from matplotlib import pyplot as plt\n'), ((2279, 2352), 'matplotlib.pyplot.plot', 'plt.plot', (['positions[3]', 'rms_Lk1[1]', '"""tab:blue"""'], {'marker': '"""o"""', 'markersize': '(10)'}), "(positions[3], rms_Lk1[1], 'tab:blue', marker='o', markersize=10)\n", (2287, 2352), True, 'from matplotlib import pyplot as plt\n'), ((2355, 2416), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""k"""'], {'marker': '"""o"""', 'markersize': '(10)', 'label': '"""ANN"""'}), "([], [], 'k', marker='o', markersize=10, label='ANN')\n", (2363, 2416), True, 'from matplotlib import pyplot as plt\n'), ((2412, 2482), 'matplotlib.pyplot.plot', 'plt.plot', (['[]', '[]', '"""tab:blue"""'], {'marker': '"""o"""', 'markersize': '(10)', 
'label': '"""NDJFM"""'}), "([], [], 'tab:blue', marker='o', markersize=10, label='NDJFM')\n", (2420, 2482), True, 'from matplotlib import pyplot as plt\n'), ((2478, 2585), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'frameon': '(False)', 'fontsize': 'fontsize', 'labelcolor': '"""linecolor"""', 'markerscale': '(0)'}), "(loc='lower right', frameon=False, fontsize=fontsize, labelcolor=\n 'linecolor', markerscale=0)\n", (2488, 2585), True, 'from matplotlib import pyplot as plt\n'), ((2578, 2664), 'matplotlib.pyplot.xticks', 'plt.xticks', (['positions', "['reanalysis', 'L+', '2L+', 'L$_{k1}$']"], {'fontsize': 'fontsize'}), "(positions, ['reanalysis', 'L+', '2L+', 'L$_{k1}$'], fontsize=\n fontsize)\n", (2588, 2664), True, 'from matplotlib import pyplot as plt\n'), ((2724, 2767), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""experiment"""'], {'fontsize': 'fontsize'}), "('experiment', fontsize=fontsize)\n", (2734, 2767), True, 'from matplotlib import pyplot as plt\n'), ((2767, 2834), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$[\\\\overline{v}^*_{rms}]_{850hPa}$"""'], {'fontsize': 'fontsize'}), "('$[\\\\overline{v}^*_{rms}]_{850hPa}$', fontsize=fontsize)\n", (2777, 2834), True, 'from matplotlib import pyplot as plt\n'), ((2835, 2853), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 2.5]'], {}), '([0, 2.5])\n', (2843, 2853), True, 'from matplotlib import pyplot as plt\n'), ((2915, 2925), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2923, 2925), True, 'from matplotlib import pyplot as plt\n'), ((2667, 2687), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(0.5)'], {}), '(0, 3, 0.5)\n', (2676, 2687), True, 'import numpy as np\n'), ((2686, 2706), 'numpy.arange', 'np.arange', (['(0)', '(3)', '(0.5)'], {}), '(0, 3, 0.5)\n', (2695, 2706), True, 'import numpy as np\n'), ((2878, 2913), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir_fig + 'fig_S2.pdf')"], {}), "(dir_fig + 'fig_S2.pdf')\n", (2889, 2913), True, 'from matplotlib import 
pyplot as plt\n')] |
import copy
from joblib import Parallel
import numpy as np
import time
import numbers
from itertools import product
from collections import defaultdict
from sklearn import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import check_cv, GridSearchCV, RandomizedSearchCV
from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator
from sklearn.utils.validation import indexable, check_random_state, _check_fit_params
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.base import is_classifier
from sklearn.utils.fixes import delayed
def init_eval_set(src_eval_set_selection, src_fit_params, X, y):
    """
    Prepare `eval_set`-related fit parameters when `eval_metric` is present
    but `eval_set` is missing from the fit parameters.

    Parameters
    ----------
    src_eval_set_selection : {'all', 'test', 'train', 'original', 'original_transformed'}, optional
        How to choose the data passed to `eval_set`
        ('all': X, 'test': X[test], 'train': X[train], 'original': raw input,
        'original_transformed': raw input, transformed by every pipeline step
        except the final estimator when the estimator is a pipeline).
    src_fit_params : dict
        Fit parameters before this preprocessing.

    Returns
    -------
    fit_params : dict
        Deep copy of `src_fit_params`, with `eval_set` filled in if needed.
    eval_set_selection : str or None
        The effective selection mode after applying the defaults.
    """
    fit_params = copy.deepcopy(src_fit_params)
    eval_set_selection = src_eval_set_selection
    # Nothing to prepare unless an eval_metric is actually requested.
    if 'eval_metric' not in src_fit_params or src_fit_params['eval_metric'] is None:
        return fit_params, eval_set_selection
    if 'eval_set' in src_fit_params:
        # eval_set supplied by the caller: default to transforming it in place.
        if src_eval_set_selection is None:
            eval_set_selection = 'original_transformed'
        return fit_params, eval_set_selection
    # eval_set missing: fall back to the full input data.
    print('There is no "eval_set" in fit_params, so "eval_set" is set to (self.X, self.y)')
    fit_params['eval_set'] = [(X, y)]
    if src_eval_set_selection is None:
        # Without an explicit choice, score the test fold by default.
        eval_set_selection = 'test'
    if eval_set_selection not in ('all', 'train', 'test'):
        raise ValueError('The `eval_set_selection` argument should be "all", "train", or "test" when `eval_set` is not in `fit_params`')
    return fit_params, eval_set_selection
def _transform_except_last_estimator(transformer, X_src, X_train):
"""パイプラインのとき、最終学習器以外のtransformを適用"""
if transformer is not None:
transformer.fit(X_train)
X_dst = transformer.transform(X_src)
return X_dst
else:
return X_src
def _eval_set_selection(eval_set_selection, transformer,
                        fit_params, train, test):
    """Rewrite `eval_set` in the fit parameters to hold only the requested
    subset (train fold, test fold, all data, or the untouched original)."""
    params = copy.deepcopy(fit_params)
    # Locate the eval_set key (pipelines rename it, e.g. "estimator__eval_set").
    eval_set_keys = [k for k in fit_params if 'eval_set' in k]
    # Without an eval_set (or with eval_set=None) there is nothing to rewrite.
    if not eval_set_keys or fit_params[eval_set_keys[0]] is None:
        return params
    key = eval_set_keys[0]
    # Pull X and y out of the original eval_set.
    X_fit = fit_params[key][0][0]
    y_fit = fit_params[key][0][1]
    # Pick the evaluation data and the data the transformer is fitted on.
    if eval_set_selection == 'train':
        X_sel, X_ref, y_sel = X_fit[train], X_fit[train], y_fit[train]
    elif eval_set_selection == 'test':
        X_sel, X_ref, y_sel = X_fit[test], X_fit[train], y_fit[test]
    elif eval_set_selection == 'all':
        X_sel, X_ref, y_sel = X_fit, X_fit[train], y_fit
    else:
        # 'original' / 'original_transformed': keep the data as provided.
        X_sel, X_ref, y_sel = X_fit, X_fit, y_fit
    params[key] = [(_transform_except_last_estimator(transformer, X_sel, X_ref),
                    y_sel)]
    return params
def _fit_and_score_eval_set(eval_set_selection, transformer,
                            estimator, X, y, scorer, train, test, verbose,
                            parameters, fit_params, return_train_score=False,
                            return_parameters=False, return_n_test_samples=False,
                            return_times=False, return_estimator=False,
                            split_progress=None, candidate_progress=None,
                            error_score=np.nan,
                            print_message=None):
    """Fit estimator and compute scores for a given dataset split.

    Thin wrapper around scikit-learn's private ``_fit_and_score`` that first
    rewrites ``eval_set`` in ``fit_params`` according to ``eval_set_selection``
    (see ``_eval_set_selection``) and optionally prints a progress message
    before fitting. All other arguments are passed through unchanged.
    """
    # Rewrite eval_set so it holds only the requested subset
    # ('all'/'train'/'test'/original) for this particular CV split.
    fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
                                              fit_params, train, test)
    if print_message is not None:
        print(print_message)
    # Delegate the actual fitting and scoring to scikit-learn.
    result = _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
                            fit_params_modified,
                            return_train_score=return_train_score,
                            return_parameters=return_parameters, return_n_test_samples=return_n_test_samples,
                            return_times=return_times, return_estimator=return_estimator,
                            split_progress=split_progress, candidate_progress=candidate_progress,
                            error_score=error_score)
    return result
def _make_transformer(eval_set_selection, estimator):
    """If the estimator is a pipeline (and raw `eval_set` was not requested),
    build a sub-pipeline of every step except the final estimator; otherwise
    return None."""
    if not isinstance(estimator, Pipeline) or eval_set_selection == 'original':
        return None
    # Drop the last step (the estimator itself), keeping only the transformers.
    return Pipeline(estimator.steps[:-1])
def cross_validate_eval_set(eval_set_selection,
                            estimator, X, y=None, groups=None, scoring=None, cv=None,
                            n_jobs=None, verbose=0, fit_params=None,
                            pre_dispatch='2*n_jobs', return_train_score=False,
                            return_estimator=False, error_score=np.nan):
    """
    Evaluate scores by cross-validation with `eval_set` argument in `fit_params`
    This method is suitable for calculating cross validation scores with `early_stopping_round` in XGBoost or LightGBM.
    Parameters
    ----------
    eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
        Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
        If "all", use all data in `X` and `y`.
        If "train", select train data from `X` and `y` using cv.split().
        If "test", select test data from `X` and `y` using cv.split().
        If "original", use raw `eval_set`.
        If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape (n_samples, n_features)
        The data to fit. Can be for example a list, or an array.
    y : array-like of shape (n_samples,) or (n_samples, n_outputs), \
            default=None
        The target variable to try to predict in the case of
        supervised learning.
    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).
    scoring : str, callable, list, tuple, or dict, default=None
        Strategy to evaluate the performance of the cross-validated model on
        the test set.
        If `scoring` represents a single score, one can use:
        - a single string (see :ref:`scoring_parameter`);
        - a callable (see :ref:`scoring`) that returns a single value.
        If `scoring` represents multiple scores, one can use:
        - a list or tuple of unique strings;
        - a callable returning a dictionary where the keys are the metric
          names and the values are the metric scores;
        - a dictionary with metric names as keys and callables a values.
        See :ref:`multimetric_grid_search` for an example.
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the cross-validation splits.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    verbose : int, default=0
        The verbosity level.
    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
        - None, in which case all the jobs are immediately
          created and spawned. Use this for lightweight and
          fast-running jobs, to avoid delays due to on-demand
          spawning of the jobs
        - An int, giving the exact number of total jobs that are
          spawned
        - A str, giving an expression as a function of n_jobs,
          as in '2*n_jobs'
    return_train_score : bool, default=False
        Whether to include train scores.
        Computing training scores is used to get insights on how different
        parameter settings impact the overfitting/underfitting trade-off.
        However computing the scores on the training set can be computationally
        expensive and is not strictly required to select the parameters that
        yield the best generalization performance.
        .. versionadded:: 0.19
        .. versionchanged:: 0.21
            Default value was changed from ``True`` to ``False``
    return_estimator : bool, default=False
        Whether to return the estimators fitted on each split.
        .. versionadded:: 0.20
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.
        .. versionadded:: 0.20
    Returns
    -------
    scores : dict of float arrays of shape (n_splits,)
        Array of scores of the estimator for each run of the cross validation.
    """
    X, y, groups = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Normalize `scoring` into a scorer (single metric) or dict of scorers.
    if callable(scoring):
        scorers = scoring
    elif scoring is None or isinstance(scoring, str):
        scorers = check_scoring(estimator, scoring)
    else:
        scorers = _check_multimetric_scoring(estimator, scoring)
    # Build the preprocessing sub-pipeline (all steps except the final estimator).
    transformer = _make_transformer(eval_set_selection, estimator)
    # We clone the estimator to make sure that all the folds are
    # independent, and that it is pickle-able.
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_fit_and_score_eval_set)(
            eval_set_selection, transformer,
            clone(estimator), X, y, scorers, train, test, verbose, None,
            fit_params, return_train_score=return_train_score,
            return_times=True, return_estimator=return_estimator,
            error_score=error_score)
        for train, test in cv.split(X, y, groups))
    # For callable scoring, the return type is only known after calling. If the
    # return type is a dictionary, the error scores can now be inserted with
    # the correct key.
    if callable(scoring):
        _insert_error_scores(results, error_score)
    # Turn the per-split list of dicts into a dict of per-split arrays.
    results = _aggregate_score_dicts(results)
    ret = {}
    ret['fit_time'] = results["fit_time"]
    ret['score_time'] = results["score_time"]
    if return_estimator:
        ret['estimator'] = results["estimator"]
    test_scores_dict = _normalize_score_results(results["test_scores"])
    if return_train_score:
        train_scores_dict = _normalize_score_results(results["train_scores"])
    # Expose each metric under 'test_<name>' (and 'train_<name>' if requested).
    for name in test_scores_dict:
        ret['test_%s' % name] = test_scores_dict[name]
        if return_train_score:
            key = 'train_%s' % name
            ret[key] = train_scores_dict[name]
    return ret
def cross_val_score_eval_set(eval_set_selection,
                             estimator, X, y=None, groups=None, scoring=None,
                             cv=None, n_jobs=None, verbose=0, fit_params=None,
                             pre_dispatch='2*n_jobs', error_score=np.nan):
    """
    Evaluate a single score by cross-validation with an `eval_set` argument
    in `fit_params`.

    This method is suitable for calculating a cross validation score with
    `early_stopping_round` in XGBoost or LightGBM.

    Parameters
    ----------
    eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
        Select data passed to `eval_set` in `fit_params`. Available only if
        "estimator" is LightGBM or XGBoost.
        If "all", use all data in `X` and `y`.
        If "train", select train data from `X` and `y` using cv.split().
        If "test", select test data from `X` and `y` using cv.split().
        If "original", use raw `eval_set`.
        If "original_transformed", use `eval_set` transformed by
        fit_transform() of the pipeline if `estimator` is a pipeline.
    estimator : estimator object implementing 'fit'
        The object to use to fit the data.
    X : array-like of shape (n_samples, n_features)
        The data to fit.
    y : array-like of shape (n_samples,) or (n_samples, n_outputs), default=None
        The target variable for supervised learning.
    groups : array-like of shape (n_samples,), default=None
        Group labels, used only with a "Group" :term:`cv` instance.
    scoring : str or callable, default=None
        A single-metric scorer name or callable with signature
        ``scorer(estimator, X, y)``. Multimetric scoring is not supported here.
    cv : int, cross-validation generator or an iterable, default=None
        Cross-validation splitting strategy (same semantics as
        :func:`sklearn.model_selection.cross_val_score`).
    n_jobs : int, default=None
        Number of jobs to run in parallel over the CV splits.
    verbose : int, default=0
        The verbosity level.
    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.
    pre_dispatch : int or str, default='2*n_jobs'
        Controls the number of jobs dispatched during parallel execution.
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.

    Returns
    -------
    scores : ndarray of float of shape=(len(list(cv)),)
        Array of scores of the estimator for each run of the cross validation.
    """
    # Coerce `scoring` to a single scorer; wrapping it in a dict keyed 'score'
    # ensures multimetric format is not supported and the result key is fixed.
    single_scorer = check_scoring(estimator, scoring=scoring)
    cv_results = cross_validate_eval_set(
        eval_set_selection=eval_set_selection,
        estimator=estimator, X=X, y=y, groups=groups,
        scoring={'score': single_scorer}, cv=cv,
        n_jobs=n_jobs, verbose=verbose,
        fit_params=fit_params,
        pre_dispatch=pre_dispatch,
        error_score=error_score)
    return cv_results['test_score']
def validation_curve_eval_set(eval_set_selection,
                              estimator, X, y, param_name, param_range, groups=None,
                              cv=None, scoring=None, n_jobs=None, pre_dispatch="all",
                              verbose=0, error_score=np.nan, fit_params=None):
    """Validation curve.
    Determine training and test scores for varying parameter values with `eval_set` argument in `fit_params`
    Parameters
    ----------
    eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
        Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
        If "all", use all data in `X` and `y`.
        If "train", select train data from `X` and `y` using cv.split().
        If "test", select test data from `X` and `y` using cv.split().
        If "original", use raw `eval_set`.
        If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like of shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
        Target relative to X for classification or regression;
        None for unsupervised learning.
    param_name : str
        Name of the parameter that will be varied.
    param_range : array-like of shape (n_values,)
        The values of the parameter that will be evaluated.
    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    scoring : str or callable, default=None
        A str (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the combinations of each parameter
        value and each cross-validation split.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    pre_dispatch : int or str, default='all'
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The str can
        be an expression like '2*n_jobs'.
    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.
    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.
        .. versionadded:: 0.24
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.
        .. versionadded:: 0.20
    Returns
    -------
    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.
    """
    X, y, groups = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    scorer = check_scoring(estimator, scoring=scoring)
    # Build the preprocessing sub-pipeline (all steps except the final estimator).
    transformer = _make_transformer(eval_set_selection, estimator)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    results = parallel(delayed(_fit_and_score_eval_set)(
        eval_set_selection, transformer,
        clone(estimator), X, y, scorer, train, test, verbose,
        parameters={param_name: v}, fit_params=fit_params,
        return_train_score=True, error_score=error_score,
        print_message=f'Caluculating score. {param_name}={v}')
        # NOTE do not change order of iteration to allow one time cv splitters
        for train, test in cv.split(X, y, groups) for v in param_range)
    n_params = len(param_range)
    results = _aggregate_score_dicts(results)
    # Results arrive split-major (inner loop over param_range), so reshaping to
    # (-1, n_params) and transposing yields shape (n_params, n_splits).
    train_scores = results["train_scores"].reshape(-1, n_params).T
    test_scores = results["test_scores"].reshape(-1, n_params).T
    return train_scores, test_scores
def learning_curve_eval_set(eval_set_selection,
                            estimator, X, y, groups=None,
                            train_sizes=np.linspace(0.1, 1.0, 5), cv=None,
                            scoring=None, exploit_incremental_learning=False,
                            n_jobs=None, pre_dispatch="all", verbose=0, shuffle=False,
                            random_state=None, error_score=np.nan, return_times=False,
                            fit_params=None):
    """Learning curve.
    Determines cross-validated training and test scores for different training set sizes with `eval_set` argument in `fit_params`
    Parameters
    ----------
    eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
        Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
        If "all", use all data in `X` and `y`.
        If "train", select train data from `X` and `y` using cv.split().
        If "test", select test data from `X` and `y` using cv.split().
        If "original", use raw `eval_set`.
        If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like of shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Target relative to X for classification or regression;
        None for unsupervised learning.
    groups : array-like of shape (n_samples,), default=None
        Group labels for the samples used while splitting the dataset into
        train/test set. Only used in conjunction with a "Group" :term:`cv`
        instance (e.g., :class:`GroupKFold`).
    train_sizes : array-like of shape (n_ticks,), \
            default=np.linspace(0.1, 1.0, 5)
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
    cv : int, cross-validation generator or an iterable, default=None
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the default 5-fold cross validation,
        - int, to specify the number of folds in a `(Stratified)KFold`,
        - :term:`CV splitter`,
        - An iterable yielding (train, test) splits as arrays of indices.
        For int/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used. These splitters are instantiated
        with `shuffle=False` so the splits will be the same across calls.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
        .. versionchanged:: 0.22
            ``cv`` default value if None changed from 3-fold to 5-fold.
    scoring : str or callable, default=None
        A str (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    exploit_incremental_learning : bool, default=False
        If the estimator supports incremental learning, this will be
        used to speed up fitting for different training set sizes.
    n_jobs : int, default=None
        Number of jobs to run in parallel. Training the estimator and computing
        the score are parallelized over the different training and test sets.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    pre_dispatch : int or str, default='all'
        Number of predispatched jobs for parallel execution (default is
        all). The option can reduce the allocated memory. The str can
        be an expression like '2*n_jobs'.
    verbose : int, default=0
        Controls the verbosity: the higher, the more messages.
    shuffle : bool, default=False
        Whether to shuffle training data before taking prefixes of it
        based on``train_sizes``.
    random_state : int, RandomState instance or None, default=None
        Used when ``shuffle`` is True. Pass an int for reproducible
        output across multiple function calls.
        See :term:`Glossary <random_state>`.
    error_score : 'raise' or numeric, default=np.nan
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised.
        If a numeric value is given, FitFailedWarning is raised.
        .. versionadded:: 0.20
    return_times : bool, default=False
        Whether to return the fit and score times.
    fit_params : dict, default=None
        Parameters to pass to the fit method of the estimator.
        .. versionadded:: 0.24
    Returns
    -------
    train_sizes_abs : array of shape (n_unique_ticks,)
        Numbers of training examples that has been used to generate the
        learning curve. Note that the number of ticks might be less
        than n_ticks because duplicate entries will be removed.
    train_scores : array of shape (n_ticks, n_cv_folds)
        Scores on training sets.
    test_scores : array of shape (n_ticks, n_cv_folds)
        Scores on test set.
    fit_times : array of shape (n_ticks, n_cv_folds)
        Times spent for fitting in seconds. Only present if ``return_times``
        is True.
    score_times : array of shape (n_ticks, n_cv_folds)
        Times spent for scoring in seconds. Only present if ``return_times``
        is True.
    """
    if exploit_incremental_learning and not hasattr(estimator, "partial_fit"):
        raise ValueError("An estimator must support the partial_fit interface "
                         "to exploit incremental learning")
    X, y, groups = indexable(X, y, groups)
    cv = check_cv(cv, y, classifier=is_classifier(estimator))
    # Store it as list as we will be iterating over the list multiple times
    cv_iter = list(cv.split(X, y, groups))
    scorer = check_scoring(estimator, scoring=scoring)
    n_max_training_samples = len(cv_iter[0][0])
    # Because the lengths of folds can be significantly different, it is
    # not guaranteed that we use all of the available training data when we
    # use the first 'n_max_training_samples' samples.
    train_sizes_abs = _translate_train_sizes(train_sizes,
                                             n_max_training_samples)
    n_unique_ticks = train_sizes_abs.shape[0]
    if verbose > 0:
        print("[learning_curve] Training set sizes: " + str(train_sizes_abs))
    # Build the preprocessing sub-pipeline (all steps except the final estimator).
    transformer = _make_transformer(eval_set_selection, estimator)
    parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch,
                        verbose=verbose)
    if shuffle:
        rng = check_random_state(random_state)
        cv_iter = ((rng.permutation(train), test) for train, test in cv_iter)
    if exploit_incremental_learning:
        # Incremental path: delegate to sklearn's partial_fit-based estimator
        # fitting (eval_set selection is not applied on this path).
        classes = np.unique(y) if is_classifier(estimator) else None
        out = parallel(delayed(_incremental_fit_estimator)(
            clone(estimator), X, y, classes, train, test, train_sizes_abs,
            scorer, verbose, return_times, error_score=error_score,
            fit_params=fit_params)
            for train, test in cv_iter
        )
        out = np.asarray(out).transpose((2, 1, 0))
    else:
        # One fit per (CV split, training-set size) combination, using
        # growing prefixes of each training fold.
        train_test_proportions = []
        for train, test in cv_iter:
            for n_train_samples in train_sizes_abs:
                train_test_proportions.append((train[:n_train_samples], test))
        results = parallel(delayed(_fit_and_score_eval_set)(
            eval_set_selection, transformer,
            clone(estimator), X, y, scorer, train, test, verbose,
            parameters=None, fit_params=fit_params, return_train_score=True,
            error_score=error_score, return_times=return_times)
            for train, test in train_test_proportions
        )
        results = _aggregate_score_dicts(results)
        # Results arrive split-major (inner loop over sizes), so reshaping to
        # (-1, n_unique_ticks) and transposing yields (n_ticks, n_cv_folds).
        train_scores = results["train_scores"].reshape(-1, n_unique_ticks).T
        test_scores = results["test_scores"].reshape(-1, n_unique_ticks).T
        out = [train_scores, test_scores]
        if return_times:
            fit_times = results["fit_time"].reshape(-1, n_unique_ticks).T
            score_times = results["score_time"].reshape(-1, n_unique_ticks).T
            out.extend([fit_times, score_times])
    ret = train_sizes_abs, out[0], out[1]
    if return_times:
        ret = ret + (out[2], out[3])
    return ret
class GridSearchCVEvalSet(GridSearchCV):
"""
Exhaustive search over specified parameter values for an estimator with `eval_set` argument in `fit_params`.
"""
def fit(self, eval_set_selection,
X, y=None, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
X, y, groups = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None,
more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score_eval_set)(
eval_set_selection, transformer,
clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
print_message=f'cand={cand_idx}/{n_candidates}, cv={split_idx}: {parameters}',
**fit_and_score_kwargs)
for (cand_idx, parameters),
(split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, groups))))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
# For callable self.scoring, the return type is only know after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out,
all_more_results)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]['test_scores']
self.multimetric_ = isinstance(first_test_score, dict)
# check refit_metric now for a callabe scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
class RandomizedSearchCVEvalSet(RandomizedSearchCV):
    """
    Randomized search on hyper parameters with `eval_set` argument in `fit_params`.
    """
    def fit(self, eval_set_selection,
            X, y=None, groups=None, **fit_params):
        """Run fit with all sets of parameters.

        This mirrors scikit-learn's ``BaseSearchCV.fit``, with each
        fit/score job routed through ``_fit_and_score_eval_set`` so the
        ``eval_set`` in ``fit_params`` can be rebuilt per CV fold.

        Parameters
        ----------
        eval_set_selection : {'all', 'train', 'test', 'original', 'original_transformed'}
            Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
            If "all", use all data in `X` and `y`.
            If "train", select train data from `X` and `y` using cv.split().
            If "test", select test data from `X` and `y` using cv.split().
            If "original", use raw `eval_set`.
            If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
        X : array-like of shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like of shape (n_samples, n_output) \
            or (n_samples,), default=None
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like of shape (n_samples,), default=None
            Group labels for the samples used while splitting the dataset into
            train/test set. Only used in conjunction with a "Group" :term:`cv`
            instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
        **fit_params : dict of str -> object
            Parameters passed to the ``fit`` method of the estimator

        Returns
        -------
        self : RandomizedSearchCVEvalSet
            The fitted search object.
        """
        estimator = self.estimator
        refit_metric = "score"

        # Resolve the scorer(s): a callable is used as-is; a string or None
        # goes through check_scoring; anything else is multimetric scoring.
        if callable(self.scoring):
            scorers = self.scoring
        elif self.scoring is None or isinstance(self.scoring, str):
            scorers = check_scoring(self.estimator, self.scoring)
        else:
            scorers = _check_multimetric_scoring(self.estimator, self.scoring)
            self._check_refit_for_multimetric(scorers)
            refit_metric = self.refit

        X, y, groups = indexable(X, y, groups)
        fit_params = _check_fit_params(X, fit_params)

        cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
        n_splits = cv_orig.get_n_splits(X, y, groups)

        base_estimator = clone(self.estimator)

        # Build the preprocessing transformer: every pipeline step except the
        # final estimator, used to transform eval_set data per fold.
        transformer = _make_transformer(eval_set_selection, estimator)

        parallel = Parallel(n_jobs=self.n_jobs,
                            pre_dispatch=self.pre_dispatch)

        fit_and_score_kwargs = dict(scorer=scorers,
                                    fit_params=fit_params,
                                    return_train_score=self.return_train_score,
                                    return_n_test_samples=True,
                                    return_times=True,
                                    return_parameters=False,
                                    error_score=self.error_score,
                                    verbose=self.verbose)
        results = {}
        with parallel:
            all_candidate_params = []
            all_out = []
            all_more_results = defaultdict(list)

            def evaluate_candidates(candidate_params, cv=None,
                                    more_results=None):
                # Fit and score every (candidate, split) pair in parallel.
                # Called (possibly several times) by self._run_search.
                cv = cv or cv_orig
                candidate_params = list(candidate_params)
                n_candidates = len(candidate_params)

                if self.verbose > 0:
                    print("Fitting {0} folds for each of {1} candidates,"
                          " totalling {2} fits".format(
                              n_splits, n_candidates, n_candidates * n_splits))

                out = parallel(delayed(_fit_and_score_eval_set)(
                    eval_set_selection, transformer,
                    clone(base_estimator),
                    X, y,
                    train=train, test=test,
                    parameters=parameters,
                    split_progress=(
                        split_idx,
                        n_splits),
                    candidate_progress=(
                        cand_idx,
                        n_candidates),
                    print_message=f'cand={cand_idx}/{n_candidates}, cv={split_idx}: {parameters}',
                    **fit_and_score_kwargs)
                    for (cand_idx, parameters),
                    (split_idx, (train, test)) in product(
                        enumerate(candidate_params),
                        enumerate(cv.split(X, y, groups))))

                if len(out) < 1:
                    raise ValueError('No fits were performed. '
                                     'Was the CV iterator empty? '
                                     'Were there no candidates?')
                elif len(out) != n_candidates * n_splits:
                    raise ValueError('cv.split and cv.get_n_splits returned '
                                     'inconsistent results. Expected {} '
                                     'splits, got {}'
                                     .format(n_splits,
                                             len(out) // n_candidates))

                # For callable self.scoring, the return type is only know after
                # calling. If the return type is a dictionary, the error scores
                # can now be inserted with the correct key. The type checking
                # of out will be done in `_insert_error_scores`.
                if callable(self.scoring):
                    _insert_error_scores(out, self.error_score)
                all_candidate_params.extend(candidate_params)
                all_out.extend(out)

                if more_results is not None:
                    for key, value in more_results.items():
                        all_more_results[key].extend(value)

                # rebind the enclosing `results` so fit() sees the latest table
                nonlocal results
                results = self._format_results(
                    all_candidate_params, n_splits, all_out,
                    all_more_results)

                return results

            self._run_search(evaluate_candidates)

            # multimetric is determined here because in the case of a callable
            # self.scoring the return type is only known after calling
            first_test_score = all_out[0]['test_scores']
            self.multimetric_ = isinstance(first_test_score, dict)

            # check refit_metric now for a callabe scorer that is multimetric
            if callable(self.scoring) and self.multimetric_:
                self._check_refit_for_multimetric(first_test_score)
                refit_metric = self.refit

        # For multi-metric evaluation, store the best_index_, best_params_ and
        # best_score_ iff refit is one of the scorer names
        # In single metric evaluation, refit_metric is "score"
        if self.refit or not self.multimetric_:
            # If callable, refit is expected to return the index of the best
            # parameter set.
            if callable(self.refit):
                self.best_index_ = self.refit(results)
                if not isinstance(self.best_index_, numbers.Integral):
                    raise TypeError('best_index_ returned is not an integer')
                if (self.best_index_ < 0 or
                        self.best_index_ >= len(results["params"])):
                    raise IndexError('best_index_ index out of range')
            else:
                self.best_index_ = results["rank_test_%s"
                                           % refit_metric].argmin()
                self.best_score_ = results["mean_test_%s" % refit_metric][
                    self.best_index_]
            self.best_params_ = results["params"][self.best_index_]

        if self.refit:
            # we clone again after setting params in case some
            # of the params are estimators as well.
            self.best_estimator_ = clone(clone(base_estimator).set_params(
                **self.best_params_))
            refit_start_time = time.time()
            if y is not None:
                self.best_estimator_.fit(X, y, **fit_params)
            else:
                self.best_estimator_.fit(X, **fit_params)
            refit_end_time = time.time()
            self.refit_time_ = refit_end_time - refit_start_time

        # Store the only scorer not as a dict for single metric evaluation
        self.scorer_ = scorers

        self.cv_results_ = results
        self.n_splits_ = n_splits

        return self
"sklearn.model_selection._validation._translate_train_sizes",
"sklearn.utils.validation._check_fit_params",
"sklearn.model_selection._validation._insert_error_scores",
"sklearn.model_selection._validation._aggregate_score_dicts",
"sklearn.clone",
"copy.deepcopy",
"sklearn.base.is_classifier",
"sklearn... | [((1285, 1314), 'copy.deepcopy', 'copy.deepcopy', (['src_fit_params'], {}), '(src_fit_params)\n', (1298, 1314), False, 'import copy\n'), ((2942, 2967), 'copy.deepcopy', 'copy.deepcopy', (['fit_params'], {}), '(fit_params)\n', (2955, 2967), False, 'import copy\n'), ((5142, 5537), 'sklearn.model_selection._validation._fit_and_score', '_fit_and_score', (['estimator', 'X', 'y', 'scorer', 'train', 'test', 'verbose', 'parameters', 'fit_params_modified'], {'return_train_score': 'return_train_score', 'return_parameters': 'return_parameters', 'return_n_test_samples': 'return_n_test_samples', 'return_times': 'return_times', 'return_estimator': 'return_estimator', 'split_progress': 'split_progress', 'candidate_progress': 'candidate_progress', 'error_score': 'error_score'}), '(estimator, X, y, scorer, train, test, verbose, parameters,\n fit_params_modified, return_train_score=return_train_score,\n return_parameters=return_parameters, return_n_test_samples=\n return_n_test_samples, return_times=return_times, return_estimator=\n return_estimator, split_progress=split_progress, candidate_progress=\n candidate_progress, error_score=error_score)\n', (5156, 5537), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((11977, 12000), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (11986, 12000), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((12517, 12584), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': 'verbose', 'pre_dispatch': 'pre_dispatch'}), '(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)\n', (12525, 12584), False, 'from joblib import Parallel\n'), ((13281, 13312), 'sklearn.model_selection._validation._aggregate_score_dicts', '_aggregate_score_dicts', (['results'], {}), 
'(results)\n', (13303, 13312), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((13513, 13561), 'sklearn.model_selection._validation._normalize_score_results', '_normalize_score_results', (["results['test_scores']"], {}), "(results['test_scores'])\n", (13537, 13561), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((18750, 18791), 'sklearn.metrics.check_scoring', 'check_scoring', (['estimator'], {'scoring': 'scoring'}), '(estimator, scoring=scoring)\n', (18763, 18791), False, 'from sklearn.metrics import check_scoring\n'), ((23823, 23846), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (23832, 23846), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((23923, 23964), 'sklearn.metrics.check_scoring', 'check_scoring', (['estimator'], {'scoring': 'scoring'}), '(estimator, scoring=scoring)\n', (23936, 23964), False, 'from sklearn.metrics import check_scoring\n'), ((24072, 24139), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'pre_dispatch': 'pre_dispatch', 'verbose': 'verbose'}), '(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)\n', (24080, 24139), False, 'from joblib import Parallel\n'), ((24703, 24734), 'sklearn.model_selection._validation._aggregate_score_dicts', '_aggregate_score_dicts', (['results'], {}), '(results)\n', (24725, 24734), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((25052, 25076), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1.0)', '(5)'], {}), '(0.1, 1.0, 5)\n', (25063, 
25076), True, 'import numpy as np\n'), ((31407, 31430), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (31416, 31430), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((31627, 31668), 'sklearn.metrics.check_scoring', 'check_scoring', (['estimator'], {'scoring': 'scoring'}), '(estimator, scoring=scoring)\n', (31640, 31668), False, 'from sklearn.metrics import check_scoring\n'), ((31943, 32002), 'sklearn.model_selection._validation._translate_train_sizes', '_translate_train_sizes', (['train_sizes', 'n_max_training_samples'], {}), '(train_sizes, n_max_training_samples)\n', (31965, 32002), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((32299, 32366), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'pre_dispatch': 'pre_dispatch', 'verbose': 'verbose'}), '(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose)\n', (32307, 32366), False, 'from joblib import Parallel\n'), ((13223, 13265), 'sklearn.model_selection._validation._insert_error_scores', '_insert_error_scores', (['results', 'error_score'], {}), '(results, error_score)\n', (13243, 13265), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((13617, 13666), 'sklearn.model_selection._validation._normalize_score_results', '_normalize_score_results', (["results['train_scores']"], {}), "(results['train_scores'])\n", (13641, 13666), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((32422, 32454), 'sklearn.utils.validation.check_random_state', 
'check_random_state', (['random_state'], {}), '(random_state)\n', (32440, 32454), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((33587, 33618), 'sklearn.model_selection._validation._aggregate_score_dicts', '_aggregate_score_dicts', (['results'], {}), '(results)\n', (33609, 33618), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((36404, 36427), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (36413, 36427), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((36449, 36481), 'sklearn.utils.validation._check_fit_params', '_check_fit_params', (['X', 'fit_params'], {}), '(X, fit_params)\n', (36466, 36481), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((36639, 36660), 'sklearn.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (36644, 36660), False, 'from sklearn import clone\n'), ((36780, 36840), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'pre_dispatch': 'self.pre_dispatch'}), '(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)\n', (36788, 36840), False, 'from joblib import Parallel\n'), ((45402, 45425), 'sklearn.utils.validation.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (45411, 45425), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((45447, 45479), 'sklearn.utils.validation._check_fit_params', '_check_fit_params', (['X', 'fit_params'], {}), '(X, fit_params)\n', (45464, 45479), False, 'from sklearn.utils.validation import indexable, check_random_state, _check_fit_params\n'), ((45637, 45658), 'sklearn.clone', 'clone', (['self.estimator'], {}), '(self.estimator)\n', (45642, 45658), False, 'from sklearn 
import clone\n'), ((45778, 45838), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'pre_dispatch': 'self.pre_dispatch'}), '(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)\n', (45786, 45838), False, 'from joblib import Parallel\n'), ((12038, 12062), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (12051, 12062), False, 'from sklearn.base import is_classifier\n'), ((12189, 12222), 'sklearn.metrics.check_scoring', 'check_scoring', (['estimator', 'scoring'], {}), '(estimator, scoring)\n', (12202, 12222), False, 'from sklearn.metrics import check_scoring\n'), ((12251, 12297), 'sklearn.metrics._scorer._check_multimetric_scoring', '_check_multimetric_scoring', (['estimator', 'scoring'], {}), '(estimator, scoring)\n', (12277, 12297), False, 'from sklearn.metrics._scorer import _check_multimetric_scoring\n'), ((23884, 23908), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (23897, 23908), False, 'from sklearn.base import is_classifier\n'), ((31468, 31492), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (31481, 31492), False, 'from sklearn.base import is_classifier\n'), ((32605, 32629), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (32618, 32629), False, 'from sklearn.base import is_classifier\n'), ((32589, 32601), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (32598, 32601), True, 'import numpy as np\n'), ((37503, 37520), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (37514, 37520), False, 'from collections import defaultdict\n'), ((42694, 42705), 'time.time', 'time.time', ([], {}), '()\n', (42703, 42705), False, 'import time\n'), ((42902, 42913), 'time.time', 'time.time', ([], {}), '()\n', (42911, 42913), False, 'import time\n'), ((46501, 46518), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (46512, 46518), False, 'from collections import 
defaultdict\n'), ((51692, 51703), 'time.time', 'time.time', ([], {}), '()\n', (51701, 51703), False, 'import time\n'), ((51900, 51911), 'time.time', 'time.time', ([], {}), '()\n', (51909, 51911), False, 'import time\n'), ((12641, 12673), 'sklearn.utils.fixes.delayed', 'delayed', (['_fit_and_score_eval_set'], {}), '(_fit_and_score_eval_set)\n', (12648, 12673), False, 'from sklearn.utils.fixes import delayed\n'), ((12732, 12748), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (12737, 12748), False, 'from sklearn import clone\n'), ((24187, 24219), 'sklearn.utils.fixes.delayed', 'delayed', (['_fit_and_score_eval_set'], {}), '(_fit_and_score_eval_set)\n', (24194, 24219), False, 'from sklearn.utils.fixes import delayed\n'), ((24270, 24286), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (24275, 24286), False, 'from sklearn import clone\n'), ((32941, 32956), 'numpy.asarray', 'np.asarray', (['out'], {}), '(out)\n', (32951, 32956), True, 'import numpy as np\n'), ((36150, 36193), 'sklearn.metrics.check_scoring', 'check_scoring', (['self.estimator', 'self.scoring'], {}), '(self.estimator, self.scoring)\n', (36163, 36193), False, 'from sklearn.metrics import check_scoring\n'), ((36230, 36286), 'sklearn.metrics._scorer._check_multimetric_scoring', '_check_multimetric_scoring', (['self.estimator', 'self.scoring'], {}), '(self.estimator, self.scoring)\n', (36256, 36286), False, 'from sklearn.metrics._scorer import _check_multimetric_scoring\n'), ((36533, 36557), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (36546, 36557), False, 'from sklearn.base import is_classifier\n'), ((45148, 45191), 'sklearn.metrics.check_scoring', 'check_scoring', (['self.estimator', 'self.scoring'], {}), '(self.estimator, self.scoring)\n', (45161, 45191), False, 'from sklearn.metrics import check_scoring\n'), ((45228, 45284), 'sklearn.metrics._scorer._check_multimetric_scoring', '_check_multimetric_scoring', (['self.estimator', 
'self.scoring'], {}), '(self.estimator, self.scoring)\n', (45254, 45284), False, 'from sklearn.metrics._scorer import _check_multimetric_scoring\n'), ((45531, 45555), 'sklearn.base.is_classifier', 'is_classifier', (['estimator'], {}), '(estimator)\n', (45544, 45555), False, 'from sklearn.base import is_classifier\n'), ((32663, 32698), 'sklearn.utils.fixes.delayed', 'delayed', (['_incremental_fit_estimator'], {}), '(_incremental_fit_estimator)\n', (32670, 32698), False, 'from sklearn.utils.fixes import delayed\n'), ((32712, 32728), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (32717, 32728), False, 'from sklearn import clone\n'), ((33219, 33251), 'sklearn.utils.fixes.delayed', 'delayed', (['_fit_and_score_eval_set'], {}), '(_fit_and_score_eval_set)\n', (33226, 33251), False, 'from sklearn.utils.fixes import delayed\n'), ((33310, 33326), 'sklearn.clone', 'clone', (['estimator'], {}), '(estimator)\n', (33315, 33326), False, 'from sklearn import clone\n'), ((40191, 40234), 'sklearn.model_selection._validation._insert_error_scores', '_insert_error_scores', (['out', 'self.error_score'], {}), '(out, self.error_score)\n', (40211, 40234), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((49189, 49232), 'sklearn.model_selection._validation._insert_error_scores', '_insert_error_scores', (['out', 'self.error_score'], {}), '(out, self.error_score)\n', (49209, 49232), False, 'from sklearn.model_selection._validation import _fit_and_score, _insert_error_scores, _aggregate_score_dicts, _normalize_score_results, _translate_train_sizes, _incremental_fit_estimator\n'), ((42591, 42612), 'sklearn.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (42596, 42612), False, 'from sklearn import clone\n'), ((51589, 51610), 'sklearn.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (51594, 
51610), False, 'from sklearn import clone\n'), ((38067, 38099), 'sklearn.utils.fixes.delayed', 'delayed', (['_fit_and_score_eval_set'], {}), '(_fit_and_score_eval_set)\n', (38074, 38099), False, 'from sklearn.utils.fixes import delayed\n'), ((38214, 38235), 'sklearn.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (38219, 38235), False, 'from sklearn import clone\n'), ((47065, 47097), 'sklearn.utils.fixes.delayed', 'delayed', (['_fit_and_score_eval_set'], {}), '(_fit_and_score_eval_set)\n', (47072, 47097), False, 'from sklearn.utils.fixes import delayed\n'), ((47212, 47233), 'sklearn.clone', 'clone', (['base_estimator'], {}), '(base_estimator)\n', (47217, 47233), False, 'from sklearn import clone\n')] |
import numpy as np
from pysc2.lib import actions
import tensorflow as tf
def compute_trajectory_loss(y_true, y_pred):
    """Keras-compatible loss returning the mean of ``y_true``.

    The zero-weighted ``y_pred`` term contributes nothing numerically but
    keeps the model outputs wired into the graph so gradients can flow.
    """
    meanLoss = tf.reduce_mean(y_true)
    graphAnchor = 0 * tf.reduce_mean(y_pred[-1])
    return meanLoss - graphAnchor
class Agent():
def __init__(self, envParams ):
self.welcomeStr = 'PLACEHOLDER-AGENT'
self.learningStrategyStr = 'none'
self.architectureStr = 'none'
self.envParams = envParams
self.bringup()
def bringup ( self ):
self.hello_world()
self.model = self.build_model()
self.initialize_placeholders()
return
def initialize_placeholders ( self ):
nEnvs = self.envParams['simultaneousEnvironments']
nSteps = self.envParams['nTrajectorySteps']
nChannels = self.envParams['screenChannelsRetained'] \
* self.envParams['nStackedFrames']
nNonSpatialInputs = self.envParams['nonSpatialInputDimensions'] \
* self.envParams['nStackedFrames']
xRes = self.envParams['screenResX']
yRes = self.envParams['screenResX']
self.rewards = np.zeros( (nEnvs, nSteps+1), dtype=np.float32)
self.valuePredictions = np.zeros( (nEnvs, nSteps+1), dtype=np.float32)
self.nStepReturns = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.advantages = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.logProbs = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.entropy = np.zeros((nEnvs, nSteps), dtype=np.float32)
self.loss = np.zeros((nEnvs, nSteps), dtype=np.float32)
# policy mask needs to keep track of which action arguments are active
self.policyMask = np.zeros( ( nEnvs, self.policySize ), dtype = np.float32)
# Initialize placeholders for spatial and non-spatial [ stacked] trajectory observations
self.nEnvTrajectoryBatch = np.zeros( ( nEnvs, nSteps, nChannels, xRes, yRes ), dtype=np.float32 )
self.nEnvOneStepBatch = np.zeros( ( nEnvs, 1, nChannels, xRes, yRes ), dtype=np.float32 )
# reward, cumulative score, player supply, enemy supply, action chosen, actionArgs
self.nEnvTrajectoryBatchNonSpatial = np.zeros( ( nEnvs, nSteps, nNonSpatialInputs, ), dtype=np.float32 )
self.nEnvOneStepBatchNonSpatial = np.zeros( ( nEnvs, 1, nNonSpatialInputs, ), dtype=np.float32 )
# say hello & share high level architecture & learning strategy
def hello_world( self ):
print('hi I\'m the %s\n | architecture: %s\n | learning strategy: %s'
% (self.welcomeStr, self.architectureStr, self.learningStrategyStr))
# define model architecture
def build_model( self ):
return None
def model_summary( self ):
if self.model is not None:
return self.model.summary()
else:
return 'i have no model, i go where the randomness takes me'
def choose_action ( self, actionProb, eGreedy = .9 ):
if np.random.random() > eGreedy:
if self.envParams['debugFlag']:
print('!venturing out in action selection')
actionID = np.random.choice( np.array( self.envParams['allowedActionIDs'], dtype=np.int ),
size=1, p=np.array(actionProb) )
actionID = actionID[0]
else:
if self.envParams['debugFlag']:
print('staying greedy in action selection')
actionID = self.envParams['allowedActionIDs'][ np.argmax( self.envParams['allowedActionIDs'] ) ]
return actionID
def normalize_array( self, arrayInput ):
return (arrayInput - arrayInput.min()) / (arrayInput - arrayInput.min()).sum()
def mask_unusable_actions ( self, availableActions, actionProbabilityDistribution ) :
for iAction in range( len(actionProbabilityDistribution) ):
if self.envParams['allowedActionIDs'][iAction] not in availableActions:
actionProbabilityDistribution[iAction] = 0
if not np.isclose( actionProbabilityDistribution.sum(), 1.00000 ):
actionProbabilityDistribution = self.normalize_array( actionProbabilityDistribution )
return actionProbabilityDistribution
def choose_coordinate ( self, coordinateArray, eGreedy = .9 ):
if np.random.random() > eGreedy:
if self.envParams['debugFlag']:
print('!venturing out in coordinate selection')
availableCoordinates = list( range( self.envParams['screenResX'] * self.envParams['screenResY'] ))
chosenIndex = np.random.choice( np.array( availableCoordinates, dtype=np.int ),
size=1, p = np.array(coordinateArray) )[0]
else:
if self.envParams['debugFlag']:
print('staying greedy in coordinate selection')
chosenIndex = np.argmax( coordinateArray )
maxCoord = np.unravel_index( chosenIndex, (self.envParams['screenResX'], self.envParams['screenResY']))
return maxCoord[0], maxCoord[1]
def sample_and_mask (self, obs, batchedOutputs ):
batchSelectedActionFunctionCalls = []
batchSelectedActionIDs = []
batchSelectedActionIndexes = []
batchSelectedActionArguments = []
batchSelectedActionModifiers = []
batchPredictedValues = []
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
policyIStart = self.policyInds['actionDistStart']
policyIEnd = self.policyInds['actionDistEnd']
point1IStart = self.policyInds['actionCoord1Start']
point1IEnd = self.policyInds['actionCoord1End']
point2IStart = self.policyInds['actionCoord2Start']
point2IEnd = self.policyInds['actionCoord2End']
# reset policy mask
self.policyMask[ iEnv, : ] = 0
actionProbabilityDistribution = self.mask_unusable_actions ( \
obs[iEnv].observation['available_actions'], \
batchedOutputs[iEnv][ policyIStart:policyIEnd ] )
actionId = self.choose_action ( actionProbabilityDistribution )
batchSelectedActionIDs += [ actionId ] # actionID
actionIndex = self.envParams['allowedActionIDs'].index( actionId )
self.policyMask[ iEnv, policyIStart:policyIEnd ] = 1
actionArguments = []
batchActionArguments = []
probabilisticPointMap1 = batchedOutputs[iEnv][point1IStart:point1IEnd]
probabilisticPointMap2 = batchedOutputs[iEnv][point2IStart:point2IEnd]
x1, y1 = self.choose_coordinate ( probabilisticPointMap1 )
x2, y2 = self.choose_coordinate ( probabilisticPointMap2 )
if self.envParams['allowedActionIDRequiresLocation'][actionIndex] == 1:
actionArguments = [ [ x1, y1 ]]
self.policyMask [ iEnv, point1IStart:point1IEnd ] = 1
elif self.envParams['allowedActionIDRequiresLocation'][actionIndex] == 2:
actionArguments = [[ x1, y1 ], [ x2, y2 ]]
self.policyMask [ iEnv, point1IStart:point1IEnd ] = 1
self.policyMask [ iEnv, point2IStart:point2IEnd ] = 1
# queued
if self.envParams['allowedActionIDRequiresModifier'][actionIndex] == 1:
queuedActionModifier = int( round( batchedOutputs[iEnv][ self.policyInds['actionModifierQueue']] ) ) # int
self.policyMask[ iEnv, self.policyInds['actionModifierQueue'] ] = 1
actionArguments.insert( 0, [ queuedActionModifier ] )
# select add
if self.envParams['allowedActionIDRequiresModifier'][actionIndex] == 2:
selectActionModifier = int( round( batchedOutputs[iEnv][ self.policyInds['actionModifierSelect']] ) ) # int
self.policyMask[ iEnv, self.policyInds['actionModifierSelect'] ] = 1
actionArguments.insert( 0, [ selectActionModifier ] )
batchSelectedActionFunctionCalls += [ actions.FunctionCall( actionId, actionArguments ) ]
batchActionArguments += [ actionArguments ]
if self.envParams['debugFlag']:
print('choosing action ' + str(actionId) + ', ' + str(actionArguments) )
return batchSelectedActionFunctionCalls, batchSelectedActionIDs, batchActionArguments
def batch_predict ( self, nEnvOneStepBatch, nEnvOneStepBatchNonSpatial ):
return self.model.predict( x = [ nEnvOneStepBatch, nEnvOneStepBatchNonSpatial ],
batch_size = self.envParams['simultaneousEnvironments'] )
def step_in_envs ( self, obs, localPipeEnds, batchSelectedActionFunctionCalls, batchSelectedActionIDs ):
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
selectedActionFunctionCall = batchSelectedActionFunctionCalls[iEnv]
selectedActionID = batchSelectedActionIDs[iEnv]
# ensure the agent action is possible
''' issue call '''
if selectedActionID in obs[iEnv].observation['available_actions']:
localPipeEnds[iEnv].send ( ( 'step', selectedActionFunctionCall ) )
obs[iEnv] = localPipeEnds[iEnv].recv()
# take no-op action and advance to game state where we can act again
else:
localPipeEnds[iEnv].send ( ('step', actions.FunctionCall( 0, [])) )
obs[iEnv] = localPipeEnds[iEnv].recv()
return obs, 0
def parse_rewards(self, obs):
return [ obs[iEnv].reward for iEnv in list(obs.keys()) ]
def inplace_update_trajectory_observations ( self, iStep, obs ): #, actionID, actionArguments ):
for iEnv in range( self.envParams['simultaneousEnvironments'] ):
newObs = obs[iEnv]
# spatial data
self.nEnvOneStepBatch[iEnv, 0, self.envParams['screenChannelsRetained']:, :, :] = \
self.nEnvOneStepBatch[iEnv, 0, 0:-self.envParams['screenChannelsRetained'], :, :]
self.nEnvOneStepBatch[iEnv, 0, 0:self.envParams['screenChannelsRetained'], :, :] = \
newObs.observation['screen'][self.envParams['screenChannelsToKeep'],:,:]
self.nEnvTrajectoryBatch[iEnv, iStep, :, :, : ] = self.nEnvOneStepBatch[iEnv, 0, :, :, :]
# non-spatial data
self.nEnvOneStepBatchNonSpatial[iEnv, 0, self.envParams['nonSpatialInputDimensions']:,] = \
self.nEnvOneStepBatchNonSpatial[iEnv, 0, 0:-self.envParams['nonSpatialInputDimensions'],]
self.nEnvOneStepBatchNonSpatial[iEnv, 0, 0:self.envParams['nonSpatialInputDimensions'],] = \
[ newObs.observation['game_loop'][0], # game time
newObs.observation['score_cumulative'][0], # cumulative score
newObs.reward, # prev reward
newObs.observation['player'][3], # used supply
np.sum(newObs.observation['multi_select'][:,2]), # total multi selected unit health
np.sum(newObs.observation['single_select'][:,2]), # total single selected unit health
0, # action
0, # action modifier
0, # action coordinate x1
0, # action coordinate y1
0, # action coordinate x2
0 ] # action coordinate y2
self.nEnvTrajectoryBatchNonSpatial[ iEnv, iStep, :,] = self.nEnvOneStepBatchNonSpatial[ iEnv, 0, :,]
def compute_returns_advantages ( self ):
nextRewards = self.rewards[:, 1:]
nextValues = self.valuePredictions[:, 1:]
for iEnv in range ( self.envParams['simultaneousEnvironments']):
# compute n-Step returns
for iStep in reversed( range ( self.envParams['nTrajectorySteps'] ) ) :
if iStep == ( self.envParams['nTrajectorySteps'] - 1 ) :
self.nStepReturns[ iEnv, iStep ] = nextValues[ iEnv, -1 ] # final return bootstrap
else:
self.nStepReturns[ iEnv, iStep ] = nextRewards[ iEnv, iStep ] + \
self.envParams['futureDiscountRate'] \
* self.nStepReturns[ iEnv, iStep + 1 ]
# prepare for training loop
self.advantages[iEnv, :] = self.nStepReturns[iEnv, :] - self.valuePredictions[iEnv, 0:-1]
def inplace_update_logProbs_and_entropy ( self, iStep, concatModelOutputNESS ) :
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
activePolicy = concatModelOutputNESS[iEnv] * self.policyMask[iEnv]
self.logProbs[iEnv, iStep] = np.sum( -1 * np.ma.log( activePolicy ).filled(0) )
self.entropy[iEnv, iStep] = -1 * np.sum( np.ma.log( activePolicy ).filled(0) * activePolicy )
def compute_loss (self):
self.compute_returns_advantages ( )
for iEnv in range ( self.envParams['simultaneousEnvironments'] ):
for iStep in range ( self.envParams['nTrajectorySteps'] ):
policyLoss = self.advantages[iEnv, iStep] * self.logProbs[iEnv, iStep]
valueLoss = np.square( self.nStepReturns[iEnv, iStep] - self.valuePredictions[iEnv, iStep] )/2.0
self.loss[ iEnv, iStep] = \
self.envParams['policyWeight'] * policyLoss \
+ self.envParams['valueWeight'] * valueLoss \
+ self.envParams['entropyWeight'] * self.entropy[iEnv, iStep]
if self.envParams['debugFlag']:
print( 'iEnv: ' + str(iEnv) + ' ; iStep: ' + str(iStep) )
print( '\t policyLossTerm: ' + str( policyLoss ))
print( '\t valueLossTerm: ' + str( valueLoss ))
print( '\t entropyLossTerm: ' + str( self.entropy[iEnv, iStep] ))
print( '\t totalLoss: ' + str(self.loss[ iEnv, iStep]))
if not np.isfinite( self.loss[ iEnv, iStep] ):
print( 'policyLossTerm: ' + str( policyLoss ))
print( 'valueLossTerm: ' + str( valueLoss ))
print( 'entropyLossTerm: ' + str( self.entropy[iEnv, iStep] ))
raise ValueError('non-finite loss encountered')
def flatten_first_dimensions ( self, inputData ):
inputDataShape = inputData.shape
outputShape = tuple( [inputDataShape[0]*inputDataShape[1] ] + [ i for i in inputDataShape[2:] ] )
output = np.reshape( inputData, outputShape )
return output
def train ( self ):
spatialInputs = self.flatten_first_dimensions( self.nEnvTrajectoryBatch )
nonSpatialInputs = self.flatten_first_dimensions( self.nEnvTrajectoryBatchNonSpatial )
loss = self.flatten_first_dimensions( self.loss )
verbosityLevel = 0
if self.envParams['debugFlag']:
verbosityLevel = 1
self.model.fit( x = [ spatialInputs, nonSpatialInputs ], y = loss, verbose = verbosityLevel)
def model_checkpoint( self ):
# serialize model to JSON
model_json = self.model.to_json()
filePath = self.envParams['experimentDirectory'] + self.welcomeStr
with open(filePath + '_model.json', 'w') as json_file:
json_file.write(model_json)
# serialize weights to HDF5
self.model.save_weights(filePath + '_model.h5')
print(' saved model to disk ')
| [
"numpy.reshape",
"pysc2.lib.actions.FunctionCall",
"numpy.random.random",
"numpy.argmax",
"numpy.square",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.isfinite",
"numpy.unravel_index",
"numpy.ma.log",
"tensorflow.reduce_mean"
] | [((142, 164), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_true'], {}), '(y_true)\n', (156, 164), True, 'import tensorflow as tf\n'), ((1189, 1236), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps + 1)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps + 1), dtype=np.float32)\n', (1197, 1236), True, 'import numpy as np\n'), ((1268, 1315), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps + 1)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps + 1), dtype=np.float32)\n', (1276, 1315), True, 'import numpy as np\n'), ((1344, 1387), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps), dtype=np.float32)\n', (1352, 1387), True, 'import numpy as np\n'), ((1414, 1457), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps), dtype=np.float32)\n', (1422, 1457), True, 'import numpy as np\n'), ((1482, 1525), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps), dtype=np.float32)\n', (1490, 1525), True, 'import numpy as np\n'), ((1549, 1592), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps), dtype=np.float32)\n', (1557, 1592), True, 'import numpy as np\n'), ((1622, 1665), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps), dtype=np.float32)\n', (1630, 1665), True, 'import numpy as np\n'), ((1789, 1841), 'numpy.zeros', 'np.zeros', (['(nEnvs, self.policySize)'], {'dtype': 'np.float32'}), '((nEnvs, self.policySize), dtype=np.float32)\n', (1797, 1841), True, 'import numpy as np\n'), ((1980, 2046), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps, nChannels, xRes, yRes)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps, nChannels, xRes, yRes), dtype=np.float32)\n', (1988, 2046), True, 'import numpy as np\n'), ((2083, 2144), 'numpy.zeros', 'np.zeros', (['(nEnvs, 1, nChannels, xRes, yRes)'], {'dtype': 'np.float32'}), '((nEnvs, 1, nChannels, xRes, yRes), dtype=np.float32)\n', (2091, 2144), True, 
'import numpy as np\n'), ((2286, 2348), 'numpy.zeros', 'np.zeros', (['(nEnvs, nSteps, nNonSpatialInputs)'], {'dtype': 'np.float32'}), '((nEnvs, nSteps, nNonSpatialInputs), dtype=np.float32)\n', (2294, 2348), True, 'import numpy as np\n'), ((2396, 2453), 'numpy.zeros', 'np.zeros', (['(nEnvs, 1, nNonSpatialInputs)'], {'dtype': 'np.float32'}), '((nEnvs, 1, nNonSpatialInputs), dtype=np.float32)\n', (2404, 2453), True, 'import numpy as np\n'), ((5111, 5207), 'numpy.unravel_index', 'np.unravel_index', (['chosenIndex', "(self.envParams['screenResX'], self.envParams['screenResY'])"], {}), "(chosenIndex, (self.envParams['screenResX'], self.envParams\n ['screenResY']))\n", (5127, 5207), True, 'import numpy as np\n'), ((15319, 15353), 'numpy.reshape', 'np.reshape', (['inputData', 'outputShape'], {}), '(inputData, outputShape)\n', (15329, 15353), True, 'import numpy as np\n'), ((171, 197), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['y_pred[-1]'], {}), '(y_pred[-1])\n', (185, 197), True, 'import tensorflow as tf\n'), ((3092, 3110), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3108, 3110), True, 'import numpy as np\n'), ((4461, 4479), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4477, 4479), True, 'import numpy as np\n'), ((5050, 5076), 'numpy.argmax', 'np.argmax', (['coordinateArray'], {}), '(coordinateArray)\n', (5059, 5076), True, 'import numpy as np\n'), ((3267, 3325), 'numpy.array', 'np.array', (["self.envParams['allowedActionIDs']"], {'dtype': 'np.int'}), "(self.envParams['allowedActionIDs'], dtype=np.int)\n", (3275, 3325), True, 'import numpy as np\n'), ((3614, 3659), 'numpy.argmax', 'np.argmax', (["self.envParams['allowedActionIDs']"], {}), "(self.envParams['allowedActionIDs'])\n", (3623, 3659), True, 'import numpy as np\n'), ((8530, 8577), 'pysc2.lib.actions.FunctionCall', 'actions.FunctionCall', (['actionId', 'actionArguments'], {}), '(actionId, actionArguments)\n', (8550, 8577), False, 'from pysc2.lib import actions\n'), ((11601, 
11649), 'numpy.sum', 'np.sum', (["newObs.observation['multi_select'][:, 2]"], {}), "(newObs.observation['multi_select'][:, 2])\n", (11607, 11649), True, 'import numpy as np\n'), ((11703, 11752), 'numpy.sum', 'np.sum', (["newObs.observation['single_select'][:, 2]"], {}), "(newObs.observation['single_select'][:, 2])\n", (11709, 11752), True, 'import numpy as np\n'), ((3379, 3399), 'numpy.array', 'np.array', (['actionProb'], {}), '(actionProb)\n', (3387, 3399), True, 'import numpy as np\n'), ((4767, 4811), 'numpy.array', 'np.array', (['availableCoordinates'], {'dtype': 'np.int'}), '(availableCoordinates, dtype=np.int)\n', (4775, 4811), True, 'import numpy as np\n'), ((13927, 14005), 'numpy.square', 'np.square', (['(self.nStepReturns[iEnv, iStep] - self.valuePredictions[iEnv, iStep])'], {}), '(self.nStepReturns[iEnv, iStep] - self.valuePredictions[iEnv, iStep])\n', (13936, 14005), True, 'import numpy as np\n'), ((14740, 14775), 'numpy.isfinite', 'np.isfinite', (['self.loss[iEnv, iStep]'], {}), '(self.loss[iEnv, iStep])\n', (14751, 14775), True, 'import numpy as np\n'), ((4871, 4896), 'numpy.array', 'np.array', (['coordinateArray'], {}), '(coordinateArray)\n', (4879, 4896), True, 'import numpy as np\n'), ((10021, 10048), 'pysc2.lib.actions.FunctionCall', 'actions.FunctionCall', (['(0)', '[]'], {}), '(0, [])\n', (10041, 10048), False, 'from pysc2.lib import actions\n'), ((13440, 13463), 'numpy.ma.log', 'np.ma.log', (['activePolicy'], {}), '(activePolicy)\n', (13449, 13463), True, 'import numpy as np\n'), ((13531, 13554), 'numpy.ma.log', 'np.ma.log', (['activePolicy'], {}), '(activePolicy)\n', (13540, 13554), True, 'import numpy as np\n')] |
from .prs import PRS, SubStream_Container
import random
import torch
import numpy as np
from collections import deque
class DelayBuffer(PRS):
"""
Delayed Buffer for new data samples that need to be learned in chunks.
and used to made the decision later whether to enter the buffer or not.
"""
def reset(self):
"""reset the buffer.
"""
self.rsvr = dict()
self.rsvr_available_idx = deque(range(self.rsvr_total_size))
self.substreams = SubStream_Container(self.rsvr_total_size)
self.n = 0
np.random.seed(self.config['random_seed'])
random.seed(self.config['random_seed'])
torch.manual_seed(self.config['random_seed'])
return
| [
"torch.manual_seed",
"numpy.random.seed",
"random.seed"
] | [((564, 606), 'numpy.random.seed', 'np.random.seed', (["self.config['random_seed']"], {}), "(self.config['random_seed'])\n", (578, 606), True, 'import numpy as np\n'), ((615, 654), 'random.seed', 'random.seed', (["self.config['random_seed']"], {}), "(self.config['random_seed'])\n", (626, 654), False, 'import random\n'), ((663, 708), 'torch.manual_seed', 'torch.manual_seed', (["self.config['random_seed']"], {}), "(self.config['random_seed'])\n", (680, 708), False, 'import torch\n')] |
#!/usr/bin/env python
"""
Created on March 1, 2016
@author: <NAME>, <EMAIL>, <NAME>, University of Chicago
Use ./CalcP.py -h to see usage
Credit for the arbfit code goes to Nablaquabla
"""
import numpy as np
import matplotlib.pylab as plt
from mpfit import mpfit
VERSION="0.9"
from scipy.stats import kendalltau
from operator import itemgetter
import numpy as np
import scipy.stats as ss
import sys
import argparse
import itertools as it
import time
import arbfit
import pickle
arbFit = arbfit.arbFit
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
from scipy.stats import norm
import os.path
import scipy.stats as ss
import pandas as pd
import statsmodels.stats.multitest as ssm
def main(args):
#def calcP(fn_jtk,pkl):
#fn_ser = sys.argv[1]
fn_jtk = args.filename
fn_pkl = args.null
fit = args.fit
#ser = pd.read_table(fn_ser,index_col=0)
#NUM = ser.shape[1]
jtk = pd.read_table(fn_jtk,index_col='ID')
fn_jtk_core = fn_jtk.split('/')[-1] if '/' in fn_jtk else fn_jtk
fn_pkl_core = fn_pkl.split('/')[-1] if '/' in fn_pkl else fn_pkl
if '.pkl' in fn_pkl:
params,taus = pickle.load(open(fn_pkl,'rb'))
else:
if 'boot' not in fn_pkl_core:
taus = pd.read_table(fn_pkl)['Tau']
keys,intvalues,yerr,p0,limit = prepare(taus)
else:
taus = pd.read_table(fn_pkl)['TauMean']
keys,intvalues,yerr,p0,limit = prepare(taus)
if fit:
for _ in range(2):
params = GammaFit(keys,intvalues,yerr,p0,limit)
p0 = params[0]
params = p0
gd = ss.gamma(params[0],params[1],params[2])
if 'boot' not in fn_jtk_core:
keys = jtk['Tau']
else:
keys = jtk['TauMean']
#print p0
keys = list(keys)
#if 'TauMean' in keys:
#print keys.index('TauMean')
#print
keys = np.array(list(keys),dtype=float)
empPs = empP(keys,taus)
jtk.loc[:,'empP']=empPs
if 'BF' in jtk.columns:
empPs = jtk[['BF','empP']].apply(min,axis=1)
jtk.loc[:,'empP']=empPs
ps = gd.sf(keys)
jtk['GammaP'] = ps
ps = jtk[['empP','GammaP']].apply(min,axis=1)
jtk.loc[:,'GammaP']=ps
jtk['GammaBH'] = list(ssm.multipletests(ps,method='fdr_bh')[1])
fn_out = fn_jtk.replace('.txt','_GammaP.txt')
jtk.to_csv(fn_out,sep='\t',na_rep=np.nan)
def empP(taus,emps):
taus = np.array(taus)
emps = np.array(emps)
ps = [(np.sum(emps>=t)+1)/float(len(emps)+1) for t in taus]
return np.array(ps)
def prepare(taus):
i = 0
#print NUM
#TOT = NUM*(NUM-1)/2
d_hugh = {}
for tau in taus:
if tau not in d_hugh:
d_hugh[tau] = 0
d_hugh[tau]+=1
keys = sorted(d_hugh.keys())
values = [d_hugh[key] for key in keys]
#intkeys = [int(np.round((o+1.)/2.*NUM*(NUM-1)/2,0)) for o in keys]
intvalues = [v/float(np.sum(values)) for v in values]
gd = lambda x,p: ss.gamma(p[0],p[1],p[2]).cdf(x)
yerr = [1e-5/(1*i+1)]*(len(intvalues)-sum(np.cumsum(intvalues)>0.9))+[1e-6/(1*i+1)]*sum(np.cumsum(intvalues)>0.9)
a,b,c = ss.gamma.fit(taus)
#a = np.mean(taus)**2/np.var(taus)
#b = 1e-8
#c = np.var(taus)/(np.mean(taus))
p0 = [a,b,c]
ind = list(np.cumsum(intvalues)>0.9).index(1)
limit = keys[ind]
return keys,intvalues,yerr,p0,limit
def GammaFit(x,ydata,yerr,p0,limit):
gd = lambda x,p : ss.gamma(p[0],p[1],p[2]).cdf(x)
#=========================================================================#
# --- 'Magic' happens here ---
#=========================================================================#
# Create a vectorized function that checks whether a model value
# is larger than a limit set by you. So if the model is smaller than the limit provided
# the fit function will return the simple chi^2
def checkLimit(x,lim,y,model):
if x > lim and y<model:
return 1e5
else:
return 1.0
checkLimitVectorized = np.vectorize(checkLimit)
# Add a limit=None argument to the fitfunc. The model is your function that you
# want to fit to the data. In the return statement it now checks for each data
def fitfunc(p, fjac=None, x=None, y=None, err=None, limit=None):
model = gd(x,p)
status = 0
return [status, checkLimitVectorized(x,limit,y,model)*(y-model)/err]
#=========================================================================#
# Initialize fit info dictionary and try to fit function to data
#=========================================================================#
# Create an info dictionary for each parameter in p0
# If you want to add some bounds to the parameters you can use the limited and
# limits keyword to tell mpfit that a parameter is actually bound and what
# bounds it uses so parinfo[2]['limited'] = [1,0] would mean that p0[2] has a
# lower bound. parinfo[2]['limits'] = [-10,0] would mean that this lower bound
# is -10.
#print p0
parbase1 = {'value':0,'fixed':0,'limited':[1,1],'limits':[0.,1000.]}
parbase2 = {'value':0,'fixed':0,'limited':[1,1],'limits':[0.,1000.]}
parbase3 = {'value':0,'fixed':0,'limited':[1,1],'limits':[0.,1000.]}
parinfo = [parbase1,parbase2,parbase3]
for i in range(len(p0)):
parinfo[i]['value'] = p0[i]
parinfo[i]['limits'] = [0.8*p0[i],1.2*p0[i]]
# Define the function arguments that you want to pass to your fit. Here you have
# to adjust the limit keyword properly.
#print limit
fa = {'x': x, 'y': ydata, 'err': yerr, 'limit': limit}
# Perform the fit using mpfit
#print 'p0 is',p0
#print 'fitfunc is',fitfunc
m = mpfit(fitfunc, p0, parinfo=parinfo, functkw=fa,quiet=1)
#print 'm is', m
# Get degrees of freedom. This assumes that you don't use any bounds on your fit
# Otherwise you have to substract them from your dof. The -1 takes care of the
# additional overall limit that you impose on your fit
dof = len(x) - len(m.params) - 1
# Calculate the fit errors
#print 'm.perror is', m.perror
#print 'm.fnorm is', m.fnorm
#print 'dof is', dof
if m.perror==None:
m.perror=0
pcerror = m.perror * np.sqrt(m.fnorm / dof)
# Convert the parameter output to the pars output format from easyfit
par = [m.params,m.perror,pcerror]
if(m.status <=0):
print('status = ', m.status)
return par
def __create_parser__():
p = argparse.ArgumentParser(
description="Python script for calculating the p-values generated by empirical JTK_CYCLE with asymmetry search, which is described in Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by <NAME>, <EMAIL>, <NAME>, University of Chicago.",
epilog="Please contact the correpsonding author if you have any questions.",
version=VERSION
)
analysis = p.add_argument_group(title="CalcP analysis options")
analysis.add_argument("-f", "--filename",
dest="filename",
action='store',
metavar="filename string",
type=str,
help='An output file from eJTK.py containing the time series analyzed')
analysis.add_argument("-n", "--null",
dest="null",
action='store',
metavar="null filename string",
type=str,
help='An output file from eJTK.py which is generated from Gaussian noise with the same header (time points) as the time series analyzed and input with the -f flag')
analysis.add_argument("-t","--fit",
dest="fit",
action='store_true',
default=False,
help="A flag without arguments indicating that the p-value calculation should use the fitting method gauranteed to produce conservative results. THIS FITTING HAS BEEN SHOWN TO BE UNSTABLE IN CERTAIN SITUATIONS AND WILL BE FIXED IN AN UPCOMING VERSION..")
return p
if __name__=="__main__":
parser = __create_parser__()
args = parser.parse_args()
main(args)
| [
"numpy.sqrt",
"argparse.ArgumentParser",
"statsmodels.stats.multitest.multipletests",
"scipy.stats.gamma.fit",
"scipy.stats.gamma",
"numpy.array",
"numpy.sum",
"pandas.read_table",
"numpy.cumsum",
"numpy.vectorize",
"mpfit.mpfit"
] | [((922, 959), 'pandas.read_table', 'pd.read_table', (['fn_jtk'], {'index_col': '"""ID"""'}), "(fn_jtk, index_col='ID')\n", (935, 959), True, 'import pandas as pd\n'), ((1632, 1673), 'scipy.stats.gamma', 'ss.gamma', (['params[0]', 'params[1]', 'params[2]'], {}), '(params[0], params[1], params[2])\n', (1640, 1673), True, 'import scipy.stats as ss\n'), ((2450, 2464), 'numpy.array', 'np.array', (['taus'], {}), '(taus)\n', (2458, 2464), True, 'import numpy as np\n'), ((2476, 2490), 'numpy.array', 'np.array', (['emps'], {}), '(emps)\n', (2484, 2490), True, 'import numpy as np\n'), ((2566, 2578), 'numpy.array', 'np.array', (['ps'], {}), '(ps)\n', (2574, 2578), True, 'import numpy as np\n'), ((3159, 3177), 'scipy.stats.gamma.fit', 'ss.gamma.fit', (['taus'], {}), '(taus)\n', (3171, 3177), True, 'import scipy.stats as ss\n'), ((4090, 4114), 'numpy.vectorize', 'np.vectorize', (['checkLimit'], {}), '(checkLimit)\n', (4102, 4114), True, 'import numpy as np\n'), ((5801, 5857), 'mpfit.mpfit', 'mpfit', (['fitfunc', 'p0'], {'parinfo': 'parinfo', 'functkw': 'fa', 'quiet': '(1)'}), '(fitfunc, p0, parinfo=parinfo, functkw=fa, quiet=1)\n', (5806, 5857), False, 'from mpfit import mpfit\n'), ((6579, 7121), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Python script for calculating the p-values generated by empirical JTK_CYCLE with asymmetry search, which is described in Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by <NAME>, <EMAIL>, <NAME>, University of Chicago."""', 'epilog': '"""Please contact the correpsonding author if you have any questions."""', 'version': 'VERSION'}), "(description=\n 'Python script for calculating the p-values generated by empirical JTK_CYCLE with asymmetry search, which is described in Hutchison, Maienschein-Cline, and Chiang et al. 
Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by <NAME>, <EMAIL>, <NAME>, University of Chicago.'\n , epilog=\n 'Please contact the correpsonding author if you have any questions.',\n version=VERSION)\n", (6602, 7121), False, 'import argparse\n'), ((6334, 6356), 'numpy.sqrt', 'np.sqrt', (['(m.fnorm / dof)'], {}), '(m.fnorm / dof)\n', (6341, 6356), True, 'import numpy as np\n'), ((2269, 2307), 'statsmodels.stats.multitest.multipletests', 'ssm.multipletests', (['ps'], {'method': '"""fdr_bh"""'}), "(ps, method='fdr_bh')\n", (2286, 2307), True, 'import statsmodels.stats.multitest as ssm\n'), ((1243, 1264), 'pandas.read_table', 'pd.read_table', (['fn_pkl'], {}), '(fn_pkl)\n', (1256, 1264), True, 'import pandas as pd\n'), ((1375, 1396), 'pandas.read_table', 'pd.read_table', (['fn_pkl'], {}), '(fn_pkl)\n', (1388, 1396), True, 'import pandas as pd\n'), ((2502, 2519), 'numpy.sum', 'np.sum', (['(emps >= t)'], {}), '(emps >= t)\n', (2508, 2519), True, 'import numpy as np\n'), ((2942, 2956), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (2948, 2956), True, 'import numpy as np\n'), ((2996, 3022), 'scipy.stats.gamma', 'ss.gamma', (['p[0]', 'p[1]', 'p[2]'], {}), '(p[0], p[1], p[2])\n', (3004, 3022), True, 'import scipy.stats as ss\n'), ((3490, 3516), 'scipy.stats.gamma', 'ss.gamma', (['p[0]', 'p[1]', 'p[2]'], {}), '(p[0], p[1], p[2])\n', (3498, 3516), True, 'import scipy.stats as ss\n'), ((3120, 3140), 'numpy.cumsum', 'np.cumsum', (['intvalues'], {}), '(intvalues)\n', (3129, 3140), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.cumsum', 'np.cumsum', (['intvalues'], {}), '(intvalues)\n', (3310, 3321), True, 'import numpy as np\n'), ((3074, 3094), 'numpy.cumsum', 'np.cumsum', (['intvalues'], {}), '(intvalues)\n', (3083, 3094), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import cv2
import numpy as np
import tensorflow as tf
from src import utils
box_size = 368
hm_factor = 8
joints_num = 21
scales = [1.0, 0.7]
limb_parents = [1, 15, 1, 2, 3, 1, 5, 6, 14, 8, 9, 14, 11, 12, 14, 14, 1, 4, 7, 10, 13]
with tf.Session() as sess:
saver = tf.train.import_meta_graph('./models/tf_model/vnect_tf.meta')
saver.restore(sess, tf.train.latest_checkpoint('./models/tf_model/'))
# saver = tf.train.import_meta_graph('./models/trained/vnect_tf-1.meta')
# saver.restore(sess, tf.train.latest_checkpoint('./models/trained/'))
graph = tf.get_default_graph()
input_batch = graph.get_tensor_by_name('Placeholder:0')
heatmap = graph.get_tensor_by_name('split_2:0')
x_heatmap = graph.get_tensor_by_name('split_2:1')
y_heatmap = graph.get_tensor_by_name('split_2:2')
z_heatmap = graph.get_tensor_by_name('split_2:3')
# from src.vnect_model import VNect
# model = VNect()
# input_batch = model.input_holder
# heatmap = model.heatmap
# x_heatmap, y_heatmap, z_heatmap = model.x_heatmap, model.y_heatmap, model.z_heatmap
# sess.run(tf.global_variables_initializer())
img = cv2.imread('./pic/test_pic.jpg')
img_square = utils.img_scale_squarify(img, box_size)
img_square = img_square[np.newaxis, ...]
hm, xm, ym, zm = sess.run([heatmap, x_heatmap, y_heatmap, z_heatmap], {input_batch: img_square/255-0.4})
joints_2d = utils.extract_2d_joints_from_heatmaps(hm[0, ...], box_size, hm_factor)
for i in range(21):
if i == 0:
himg = hm[0, :, :, i]
ximg = xm[0, :, :, i]
yimg = ym[0, :, :, i]
zimg = zm[0, :, :, i]
else:
tmp = hm[0, :, :, i]
himg = np.hstack([himg, tmp])
tmp = xm[0, :, :, i]
ximg = np.hstack([ximg, tmp])
tmp = ym[0, :, :, i]
yimg = np.hstack([yimg, tmp])
tmp = zm[0, :, :, i]
zimg = np.hstack([zimg, tmp])
all_hm = np.vstack([himg, ximg, yimg, zimg])
cv2.imshow('all heatmaps', all_hm*128)
img_res2d = utils.draw_limbs_2d(img_square[0, ...], joints_2d, limb_parents)
cv2.imshow('2D results', img_res2d)
cv2.waitKey()
cv2.destroyAllWindows()
print(hm[0, :, :, 0])
# np.savetxt('original', hm[0, :, :, 0]) | [
"numpy.hstack",
"tensorflow.Session",
"src.utils.extract_2d_joints_from_heatmaps",
"cv2.imshow",
"cv2.waitKey",
"tensorflow.train.import_meta_graph",
"src.utils.draw_limbs_2d",
"src.utils.img_scale_squarify",
"numpy.vstack",
"cv2.destroyAllWindows",
"tensorflow.train.latest_checkpoint",
"cv2.i... | [((286, 298), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (296, 298), True, 'import tensorflow as tf\n'), ((320, 381), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['"""./models/tf_model/vnect_tf.meta"""'], {}), "('./models/tf_model/vnect_tf.meta')\n", (346, 381), True, 'import tensorflow as tf\n'), ((621, 643), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (641, 643), True, 'import tensorflow as tf\n'), ((1202, 1234), 'cv2.imread', 'cv2.imread', (['"""./pic/test_pic.jpg"""'], {}), "('./pic/test_pic.jpg')\n", (1212, 1234), False, 'import cv2\n'), ((1252, 1291), 'src.utils.img_scale_squarify', 'utils.img_scale_squarify', (['img', 'box_size'], {}), '(img, box_size)\n', (1276, 1291), False, 'from src import utils\n'), ((1464, 1534), 'src.utils.extract_2d_joints_from_heatmaps', 'utils.extract_2d_joints_from_heatmaps', (['hm[0, ...]', 'box_size', 'hm_factor'], {}), '(hm[0, ...], box_size, hm_factor)\n', (1501, 1534), False, 'from src import utils\n'), ((2043, 2078), 'numpy.vstack', 'np.vstack', (['[himg, ximg, yimg, zimg]'], {}), '([himg, ximg, yimg, zimg])\n', (2052, 2078), True, 'import numpy as np\n'), ((2083, 2123), 'cv2.imshow', 'cv2.imshow', (['"""all heatmaps"""', '(all_hm * 128)'], {}), "('all heatmaps', all_hm * 128)\n", (2093, 2123), False, 'import cv2\n'), ((2139, 2203), 'src.utils.draw_limbs_2d', 'utils.draw_limbs_2d', (['img_square[0, ...]', 'joints_2d', 'limb_parents'], {}), '(img_square[0, ...], joints_2d, limb_parents)\n', (2158, 2203), False, 'from src import utils\n'), ((2208, 2243), 'cv2.imshow', 'cv2.imshow', (['"""2D results"""', 'img_res2d'], {}), "('2D results', img_res2d)\n", (2218, 2243), False, 'import cv2\n'), ((2249, 2262), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (2260, 2262), False, 'import cv2\n'), ((2267, 2290), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2288, 2290), False, 'import cv2\n'), ((406, 454), 
'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./models/tf_model/"""'], {}), "('./models/tf_model/')\n", (432, 454), True, 'import tensorflow as tf\n'), ((1781, 1803), 'numpy.hstack', 'np.hstack', (['[himg, tmp]'], {}), '([himg, tmp])\n', (1790, 1803), True, 'import numpy as np\n'), ((1856, 1878), 'numpy.hstack', 'np.hstack', (['[ximg, tmp]'], {}), '([ximg, tmp])\n', (1865, 1878), True, 'import numpy as np\n'), ((1931, 1953), 'numpy.hstack', 'np.hstack', (['[yimg, tmp]'], {}), '([yimg, tmp])\n', (1940, 1953), True, 'import numpy as np\n'), ((2006, 2028), 'numpy.hstack', 'np.hstack', (['[zimg, tmp]'], {}), '([zimg, tmp])\n', (2015, 2028), True, 'import numpy as np\n')] |
"""
Example of defining a custom (image) transform using FFCV.
For tutorial, see https://docs.ffcv.io/ffcv_examples/custom_transforms.html.
"""
import time
import numpy as np
import torchvision
from ffcv.fields import IntField, RGBImageField
from ffcv.fields.decoders import SimpleRGBImageDecoder
from ffcv.loader import Loader, OrderOption
from ffcv.pipeline.compiler import Compiler
from ffcv.pipeline.operation import Operation, AllocationQuery
from ffcv.transforms import ToTensor
from ffcv.writer import DatasetWriter
from dataclasses import replace
class PickACorner(Operation):
def generate_code(self):
parallel_range = Compiler.get_iterator()
def pick_a_corner(images, dst):
which_corner = np.random.rand(images.shape[0])
for i in parallel_range(images.shape[0]):
if which_corner[i] == 0:
dst[i] = images[i,:images.shape[1]//2, :images.shape[2]//2]
else:
dst[i] = images[i,-images.shape[1]//2:, -images.shape[2]//2:]
return dst
pick_a_corner.is_parallel = True
return pick_a_corner
def declare_state_and_memory(self, previous_state):
h, w, c = previous_state.shape
new_shape = (h // 2, w // 2, c)
new_state = replace(previous_state, shape=new_shape)
mem_allocation = AllocationQuery(new_shape, previous_state.dtype)
return (new_state, mem_allocation)
# Step 1: Create an FFCV-compatible CIFAR-10 dataset
ds = torchvision.datasets.CIFAR10('/tmp', train=True, download=True)
writer = DatasetWriter('/tmp/cifar.beton', {
'image': RGBImageField(),
'label': IntField()
})
writer.from_indexed_dataset(ds)
# Step 2: Create data loaders
BATCH_SIZE = 512
# Create loaders
image_pipelines = {
'with': [SimpleRGBImageDecoder(), PickACorner(), ToTensor()],
'without': [SimpleRGBImageDecoder(), ToTensor()]
}
for name, pipeline in image_pipelines.items():
loader = Loader(f'/tmp/cifar.beton', batch_size=BATCH_SIZE,
num_workers=16, order=OrderOption.RANDOM,
drop_last=True, pipelines={'image': pipeline})
# First epoch includes compilation time
for ims, labs in loader: pass
start_time = time.time()
for _ in range(100):
for ims, labs in loader: pass
print(f'Method: {name} | Shape: {ims.shape} | Time per epoch: {(time.time() - start_time) / 100:.5f}s') | [
"ffcv.fields.RGBImageField",
"ffcv.transforms.ToTensor",
"numpy.random.rand",
"ffcv.fields.IntField",
"ffcv.pipeline.compiler.Compiler.get_iterator",
"ffcv.loader.Loader",
"dataclasses.replace",
"torchvision.datasets.CIFAR10",
"ffcv.fields.decoders.SimpleRGBImageDecoder",
"time.time",
"ffcv.pipe... | [((1513, 1576), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', (['"""/tmp"""'], {'train': '(True)', 'download': '(True)'}), "('/tmp', train=True, download=True)\n", (1541, 1576), False, 'import torchvision\n'), ((1978, 2122), 'ffcv.loader.Loader', 'Loader', (['f"""/tmp/cifar.beton"""'], {'batch_size': 'BATCH_SIZE', 'num_workers': '(16)', 'order': 'OrderOption.RANDOM', 'drop_last': '(True)', 'pipelines': "{'image': pipeline}"}), "(f'/tmp/cifar.beton', batch_size=BATCH_SIZE, num_workers=16, order=\n OrderOption.RANDOM, drop_last=True, pipelines={'image': pipeline})\n", (1984, 2122), False, 'from ffcv.loader import Loader, OrderOption\n'), ((2254, 2265), 'time.time', 'time.time', ([], {}), '()\n', (2263, 2265), False, 'import time\n'), ((642, 665), 'ffcv.pipeline.compiler.Compiler.get_iterator', 'Compiler.get_iterator', ([], {}), '()\n', (663, 665), False, 'from ffcv.pipeline.compiler import Compiler\n'), ((1296, 1336), 'dataclasses.replace', 'replace', (['previous_state'], {'shape': 'new_shape'}), '(previous_state, shape=new_shape)\n', (1303, 1336), False, 'from dataclasses import replace\n'), ((1362, 1410), 'ffcv.pipeline.operation.AllocationQuery', 'AllocationQuery', (['new_shape', 'previous_state.dtype'], {}), '(new_shape, previous_state.dtype)\n', (1377, 1410), False, 'from ffcv.pipeline.operation import Operation, AllocationQuery\n'), ((1635, 1650), 'ffcv.fields.RGBImageField', 'RGBImageField', ([], {}), '()\n', (1648, 1650), False, 'from ffcv.fields import IntField, RGBImageField\n'), ((1665, 1675), 'ffcv.fields.IntField', 'IntField', ([], {}), '()\n', (1673, 1675), False, 'from ffcv.fields import IntField, RGBImageField\n'), ((1809, 1832), 'ffcv.fields.decoders.SimpleRGBImageDecoder', 'SimpleRGBImageDecoder', ([], {}), '()\n', (1830, 1832), False, 'from ffcv.fields.decoders import SimpleRGBImageDecoder\n'), ((1849, 1859), 'ffcv.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1857, 1859), False, 'from ffcv.transforms import 
ToTensor\n'), ((1878, 1901), 'ffcv.fields.decoders.SimpleRGBImageDecoder', 'SimpleRGBImageDecoder', ([], {}), '()\n', (1899, 1901), False, 'from ffcv.fields.decoders import SimpleRGBImageDecoder\n'), ((1903, 1913), 'ffcv.transforms.ToTensor', 'ToTensor', ([], {}), '()\n', (1911, 1913), False, 'from ffcv.transforms import ToTensor\n'), ((733, 764), 'numpy.random.rand', 'np.random.rand', (['images.shape[0]'], {}), '(images.shape[0])\n', (747, 764), True, 'import numpy as np\n'), ((2397, 2408), 'time.time', 'time.time', ([], {}), '()\n', (2406, 2408), False, 'import time\n')] |
#%% Import modules
import numpy as np
import torch
def train(A, ss, epoch, single_model, single_optim, loss_MSE):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
FR_ORDER_TRAIN = A["FR_ORDER_TRAIN"]
POS_NOR_ORDER_TRAIN = A["POS_NOR_ORDER_TRAIN"]
VEL_NOR_ORDER_TRAIN = A["VEL_NOR_ORDER_TRAIN"]
ACC_NOR_ORDER_TRAIN = A["ACC_NOR_ORDER_TRAIN"]
train_unsort_data = torch.from_numpy(FR_ORDER_TRAIN[ss]).type(torch.FloatTensor)
train_label = np.concatenate((POS_NOR_ORDER_TRAIN[ss], VEL_NOR_ORDER_TRAIN[ss], ACC_NOR_ORDER_TRAIN[ss]), axis=2)
train_label = torch.from_numpy(train_label).type(torch.FloatTensor)
single_unsort_dataset = torch.utils.data.TensorDataset(train_unsort_data, train_label)
single_unsort_dataloader = torch.utils.data.DataLoader(dataset = single_unsort_dataset, batch_size=32, shuffle=True)
single_model.to(device)
loss_MSE.to(device)
# Training
for ep in range(epoch):
for n, (Data, Label) in enumerate(single_unsort_dataloader):
single_optim.zero_grad()
fr_data = Data
valid_pos = Label[:, -1, :2]
valid_vel = Label[:, -1, 2:4]
valid_acc = Label[:, -1, 4:6]
valid_pos = valid_pos.to(device)
valid_vel = valid_vel.to(device)
valid_acc = valid_acc.to(device)
fr_data = fr_data.to(device)
pred_pos, pred_vel, pred_acc = single_model(fr_data)
loss_vel = loss_MSE(pred_vel, valid_vel)
loss_acc = loss_MSE(pred_acc, valid_acc)
loss_pos = loss_MSE(pred_pos, valid_pos)
loss = loss_vel+ loss_acc + loss_pos
loss.backward()
single_optim.step()
with torch.no_grad():
print('epoch[{}], loss:{:.4f} >> pos loss:{:.4f}, vel loss:{:.4f}, acc loss:{:.4f}'
.format(ep+1, loss.item(), loss_pos.item(), loss_vel.item(), loss_acc.item()))
torch.save(single_model, 'model.pth') | [
"torch.utils.data.DataLoader",
"torch.utils.data.TensorDataset",
"torch.from_numpy",
"torch.cuda.is_available",
"torch.save",
"numpy.concatenate",
"torch.no_grad"
] | [((501, 604), 'numpy.concatenate', 'np.concatenate', (['(POS_NOR_ORDER_TRAIN[ss], VEL_NOR_ORDER_TRAIN[ss], ACC_NOR_ORDER_TRAIN[ss])'], {'axis': '(2)'}), '((POS_NOR_ORDER_TRAIN[ss], VEL_NOR_ORDER_TRAIN[ss],\n ACC_NOR_ORDER_TRAIN[ss]), axis=2)\n', (515, 604), True, 'import numpy as np\n'), ((706, 768), 'torch.utils.data.TensorDataset', 'torch.utils.data.TensorDataset', (['train_unsort_data', 'train_label'], {}), '(train_unsort_data, train_label)\n', (736, 768), False, 'import torch\n'), ((800, 891), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'single_unsort_dataset', 'batch_size': '(32)', 'shuffle': '(True)'}), '(dataset=single_unsort_dataset, batch_size=32,\n shuffle=True)\n', (827, 891), False, 'import torch\n'), ((2056, 2093), 'torch.save', 'torch.save', (['single_model', '"""model.pth"""'], {}), "(single_model, 'model.pth')\n", (2066, 2093), False, 'import torch\n'), ((151, 176), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (174, 176), False, 'import torch\n'), ((422, 458), 'torch.from_numpy', 'torch.from_numpy', (['FR_ORDER_TRAIN[ss]'], {}), '(FR_ORDER_TRAIN[ss])\n', (438, 458), False, 'import torch\n'), ((619, 648), 'torch.from_numpy', 'torch.from_numpy', (['train_label'], {}), '(train_label)\n', (635, 648), False, 'import torch\n'), ((1837, 1852), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1850, 1852), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
from __future__ import division
import csv
import numpy as np
import random
import pickle
import datetime
from NonSpatialFns import *
# <codecell>
#Script
#Number of state transitions to observe
M = int(2e7)
# time vector
time = np.zeros(M)
#Define parameters
init=10 #10 #initial number of infected hepatocytes
v_init = 0#initial viral load
ALT_init = 100 #initial ALT level
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor -
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(1e11) #Number of cells in liver
alpha = 1 #1/latent period (days)
alpha_x = 1.3e-2 #1/long-term latent period
nu_T = 1.4e-2 #death rate of healthy cells
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = .5e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
eta = .01 #proportion of infected cells that go long-term latent
kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells
changes = 12;
delta = .33 #ALT degradation rate
N=N_liver/1e7 #initial number of hepatocytes
eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production
#Construct matrix of state transition vectors
trans_vecs = np.zeros([6, changes])
#state 1: infection of healthy cell by cell-> latent
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: infection of healthy cell by virus -> latent
trans_vecs[0,1] = -1;
trans_vecs[1,1] = 1;
#state 3: infection of healthy cell by cell -> long-term latent
trans_vecs[0,2] = -1;
trans_vecs[2,2] = 1;
#state 4: infection of healthy cell by virus -> long-term latent
trans_vecs[0,3] = -1;
trans_vecs[2,3] = 1;
#state 5: death of healthy cell
trans_vecs[0,4] = -1;
trans_vecs[4,4] = 1;
#state 6: movement of latent cell into infected
trans_vecs[1,5] = -1;
trans_vecs[3,5] = 1;
#state 7: death of latent cell
trans_vecs[1,6] = -1;
trans_vecs[4,6] = 1;
#state 8: movement of long-term latent cell into infected
trans_vecs[2,7] = -1;
trans_vecs[3,7] = 1;
#state 9: death of long-term latent cell
trans_vecs[2,8] = -1;
trans_vecs[4,8] = 1;
#state 10: death of infected cell
trans_vecs[3,9] = -1;
trans_vecs[5,9] = 1;
#state 11: regeneration of dead healthy cell
trans_vecs[4,10] = -1;
trans_vecs[0,10] = 1;
#state 12: regeneration of dead infected cell into healthy cell
trans_vecs[5,11] = -1;
trans_vecs[0,11] = 1;
#state 13: regeneration of dead infected cell into infected cell
#trans_vecs[5,12] = -1;
#trans_vecs[3,12] = 1;
#Intialize random uniform numbers for distributions
time_vec_array = -np.log(np.random.random([M,changes]))
#Initialize state variable vectors
T = np.zeros(M)
E = np.zeros(M)
Ex = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
ALT = np.zeros(M)
#Initialize lists
InfectedList = range(init)
LatentList = []
LatentXList= []
DeadList = []
InfectionChain = []
Infecteds = []
lastCellID = init-1 #get last cellID
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
j = 0
minKey = 0
finalFile = False
statesVec = [T[0], E[0], Ex[0], I[0], Dt[0], Di[0]]
#define names for output files
now = datetime.datetime.now()
OutPrevFileName = 'Infecteds_'+ '1e' + str(int(np.log10(M))) + '_'+ now.strftime("%I%M%S%B%d") + 'MedLat.txt'
OutChainFileName = 'InfectionChain_'+ '1e' + str(int(np.log10(M))) + '_' +now.strftime("%I%M%S%B%d") + 'MedLat.txt'
WorkspaceFilename = 'WorkSpace_' + '1e' + str(int(np.log10(M))) + '_' +now.strftime("%I%M%S%B%d") + 'MedLat.txt'
############ Run Model ###################
while I[j] >= 0 and j<M-1:
#Generate Transition Probabilities
mult_vec = [T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j]]
Qij = GenTransitionProbs(changes, eta, beta_L, beta_V, nu_T, alpha, alpha_x, nu_I, phi_T, phi_I, I[j], VL[j], mult_vec)
#Calculate what the next state transition should be
time[j+1], state_idx = CalcNextState(Qij, time_vec_array[j], time[j])
#Update the state vector
statesVec = UpdateStateVector(statesVec, trans_vecs, state_idx)
[T[j+1], E[j+1], Ex[j+1], I[j+1], Dt[j+1], Di[j+1]] = statesVec
#Update Cell lists given appropriate state transition
if state_idx in [0,1,2,3]:
InfectedList, LatentList, LatentXList, Infector, lastCellID = UpdateInfectionLists(state_idx, InfectedList, LatentList, LatentXList, lastCellID)
InfectionChain = UpdateInfectionChain(InfectionChain, Infector, lastCellID, time[j], minKey)
elif state_idx in [5,7]:
InfectedList, LatentList, LatentXList = UpdateLatent2Infectious(state_idx, InfectedList, LatentList, LatentXList)
elif state_idx in [6,8,9]:
InfectedList, LatentList, LatentXList = UpdateKillCell(state_idx, InfectedList, LatentList, LatentXList)
#Update list of Infecteds
Infecteds = UpdateInfecteds(Infecteds,InfectedList, LatentList, LatentXList, time[j], minKey)
#update viral load and ALT
VL[j+1] = UpdateVL(rho, N_liver, N, R, gamma, c, I[j+1])
ALT[j+1] = UpdateALT(eps, nu_T, nu_I, delta, ALT[j], T[j], E[j], Ex[j], I[j], time[j+1]-time[j])
j+=1
#write output to file every timestep
if minKey < int(time[j]) or j == M-1:
if j == M-1:
finalFile = True
Infecteds, InfectionChain, minKey = OutputTempFiles(Infecteds, InfectionChain, minKey, OutPrevFileName, OutChainFileName, finalFile)
#######################################
## Output files and save workspace ####
kwargs = {'T' : T, 'E' : E, 'Ex': Ex, 'I': I, 'Dt':Dt, 'Di' : Di, 'time' :time, 'VL':VL, 'ALT' : ALT, 'Infecteds' :Infecteds, 'InfectionChain' : InfectionChain}
saveWorkspace(kwargs, WorkspaceFilename)
| [
"numpy.random.random",
"datetime.datetime.now",
"numpy.zeros",
"numpy.log10"
] | [((296, 307), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (304, 307), True, 'import numpy as np\n'), ((1373, 1395), 'numpy.zeros', 'np.zeros', (['[6, changes]'], {}), '([6, changes])\n', (1381, 1395), True, 'import numpy as np\n'), ((2794, 2805), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2802, 2805), True, 'import numpy as np\n'), ((2810, 2821), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2818, 2821), True, 'import numpy as np\n'), ((2827, 2838), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2835, 2838), True, 'import numpy as np\n'), ((2843, 2854), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2851, 2854), True, 'import numpy as np\n'), ((2860, 2871), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2868, 2871), True, 'import numpy as np\n'), ((2877, 2888), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2885, 2888), True, 'import numpy as np\n'), ((2894, 2905), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2902, 2905), True, 'import numpy as np\n'), ((2912, 2923), 'numpy.zeros', 'np.zeros', (['M'], {}), '(M)\n', (2920, 2923), True, 'import numpy as np\n'), ((3290, 3313), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3311, 3313), False, 'import datetime\n'), ((2698, 2728), 'numpy.random.random', 'np.random.random', (['[M, changes]'], {}), '([M, changes])\n', (2714, 2728), True, 'import numpy as np\n'), ((3361, 3372), 'numpy.log10', 'np.log10', (['M'], {}), '(M)\n', (3369, 3372), True, 'import numpy as np\n'), ((3477, 3488), 'numpy.log10', 'np.log10', (['M'], {}), '(M)\n', (3485, 3488), True, 'import numpy as np\n'), ((3590, 3601), 'numpy.log10', 'np.log10', (['M'], {}), '(M)\n', (3598, 3601), True, 'import numpy as np\n')] |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.appearance.hunt` module.
"""
import numpy as np
from itertools import permutations
from colour.appearance import (VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt,
XYZ_to_Hunt)
from colour.appearance.tests.common import AbstractColourAppearanceModelTest
from colour.utilities import (as_float_array, domain_range_scale,
ignore_numpy_errors, tstack)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['TestHuntColourAppearanceModel']
class TestHuntColourAppearanceModel(AbstractColourAppearanceModelTest):
"""
Defines :mod:`colour.appearance.hunt` module unit tests methods for
*Hunt* colour appearance model.
"""
FIXTURE_BASENAME = 'hunt.csv'
OUTPUT_ATTRIBUTES = {
'J': 'J',
'C_94': 'C',
'h_S': 'h',
's': 's',
'Q': 'Q',
'M94': 'M'
}
def output_specification_from_data(self, data):
"""
Returns the *Hunt* colour appearance model output specification from
given data.
Parameters
----------
data : list
Fixture data.
Returns
-------
CAM_Specification_Hunt
Hunt colour appearance model specification.
"""
XYZ = tstack([data['X'], data['Y'], data['Z']])
XYZ_w = tstack([data['X_w'], data['Y_w'], data['Z_w']])
XYZ_b = tstack([data['X_w'], 0.2 * data['Y_w'], data['Z_w']])
specification = XYZ_to_Hunt(
XYZ,
XYZ_w,
XYZ_b,
data['L_A'],
InductionFactors_Hunt(data['N_c'], data['N_b']),
CCT_w=data['T'])
return specification
def test_domain_range_scale_XYZ_to_Hunt(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition domain
and range scale support.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
specification = XYZ_to_Hunt(
XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)
d_r = (
('reference', 1, 1),
(1, 0.01, np.array([1, 1, 1 / 360, 1, 1, 1, np.nan, np.nan])),
(100, 1, np.array([1, 1, 100 / 360, 1, 1, 1, np.nan, np.nan])),
)
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_Hunt(
XYZ * factor_a,
XYZ_w * factor_a,
XYZ_b * factor_a,
L_A,
surround,
CCT_w=CCT_w),
as_float_array(specification) * factor_b,
decimal=7)
@ignore_numpy_errors
def test_raise_exception_CIECAM02_to_XYZ(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition raised
exception.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
S = S_w = 0.5
try:
XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround)
except ValueError:
pass
try:
XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S=S)
except ValueError:
pass
try:
XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S_w=S_w)
except ValueError:
pass
@ignore_numpy_errors
def test_XYZ_p_CIECAM02_to_XYZ(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition *XYZ_p*
argument handling.
"""
XYZ = np.array([19.01, 20.00, 21.78])
XYZ_w = np.array([95.05, 100.00, 108.88])
XYZ_b = XYZ_p = np.array([95.05, 100.00, 108.88])
L_A = 318.31
surround = VIEWING_CONDITIONS_HUNT['Normal Scenes']
CCT_w = 6504.0
np.testing.assert_almost_equal(
XYZ_to_Hunt(
XYZ,
XYZ_w,
XYZ_b,
L_A,
surround,
XYZ_p=XYZ_p,
CCT_w=CCT_w,
),
np.array([
30.046267861960700, 0.121050839936350, 269.273759446144600,
0.019909320692942, 22.209765491265024, 0.123896438259997,
np.nan, np.nan
]),
decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_Hunt(self):
"""
Tests :func:`colour.appearance.hunt.XYZ_to_Hunt` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_w = np.array(case)
XYZ_b = np.array(case)
L_A = case[0]
surround = InductionFactors_Hunt(case[0], case[0])
CCT_w = case[0]
XYZ_to_Hunt(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)
| [
"colour.utilities.domain_range_scale",
"colour.utilities.as_float_array",
"numpy.array",
"colour.appearance.XYZ_to_Hunt",
"itertools.permutations",
"colour.appearance.InductionFactors_Hunt",
"colour.utilities.tstack"
] | [((1584, 1625), 'colour.utilities.tstack', 'tstack', (["[data['X'], data['Y'], data['Z']]"], {}), "([data['X'], data['Y'], data['Z']])\n", (1590, 1625), False, 'from colour.utilities import as_float_array, domain_range_scale, ignore_numpy_errors, tstack\n'), ((1642, 1689), 'colour.utilities.tstack', 'tstack', (["[data['X_w'], data['Y_w'], data['Z_w']]"], {}), "([data['X_w'], data['Y_w'], data['Z_w']])\n", (1648, 1689), False, 'from colour.utilities import as_float_array, domain_range_scale, ignore_numpy_errors, tstack\n'), ((1706, 1759), 'colour.utilities.tstack', 'tstack', (["[data['X_w'], 0.2 * data['Y_w'], data['Z_w']]"], {}), "([data['X_w'], 0.2 * data['Y_w'], data['Z_w']])\n", (1712, 1759), False, 'from colour.utilities import as_float_array, domain_range_scale, ignore_numpy_errors, tstack\n'), ((2197, 2227), 'numpy.array', 'np.array', (['[19.01, 20.0, 21.78]'], {}), '([19.01, 20.0, 21.78])\n', (2205, 2227), True, 'import numpy as np\n'), ((2245, 2277), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (2253, 2277), True, 'import numpy as np\n'), ((2295, 2327), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (2303, 2327), True, 'import numpy as np\n'), ((2457, 2515), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {'CCT_w': 'CCT_w'}), '(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)\n', (2468, 2515), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((3440, 3470), 'numpy.array', 'np.array', (['[19.01, 20.0, 21.78]'], {}), '([19.01, 20.0, 21.78])\n', (3448, 3470), True, 'import numpy as np\n'), ((3488, 3520), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (3496, 3520), True, 'import numpy as np\n'), ((3538, 3570), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (3546, 3570), True, 'import 
numpy as np\n'), ((4296, 4326), 'numpy.array', 'np.array', (['[19.01, 20.0, 21.78]'], {}), '([19.01, 20.0, 21.78])\n', (4304, 4326), True, 'import numpy as np\n'), ((4344, 4376), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (4352, 4376), True, 'import numpy as np\n'), ((4402, 4434), 'numpy.array', 'np.array', (['[95.05, 100.0, 108.88]'], {}), '([95.05, 100.0, 108.88])\n', (4410, 4434), True, 'import numpy as np\n'), ((1890, 1937), 'colour.appearance.InductionFactors_Hunt', 'InductionFactors_Hunt', (["data['N_c']", "data['N_b']"], {}), "(data['N_c'], data['N_b'])\n", (1911, 1937), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((3724, 3769), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {}), '(XYZ, XYZ_w, XYZ_b, L_A, surround)\n', (3735, 3769), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((3840, 3903), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {'CCT_w': 'CCT_w', 'S': 'S'}), '(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S=S)\n', (3851, 3903), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((3974, 4041), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {'CCT_w': 'CCT_w', 'S_w': 'S_w'}), '(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w, S_w=S_w)\n', (3985, 4041), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((4593, 4664), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {'XYZ_p': 'XYZ_p', 'CCT_w': 'CCT_w'}), '(XYZ, XYZ_w, XYZ_b, L_A, surround, XYZ_p=XYZ_p, CCT_w=CCT_w)\n', (4604, 4664), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((4805, 4949), 
'numpy.array', 'np.array', (['[30.0462678619607, 0.12105083993635, 269.2737594461446, 0.019909320692942, \n 22.209765491265024, 0.123896438259997, np.nan, np.nan]'], {}), '([30.0462678619607, 0.12105083993635, 269.2737594461446, \n 0.019909320692942, 22.209765491265024, 0.123896438259997, np.nan, np.nan])\n', (4813, 4949), True, 'import numpy as np\n'), ((5290, 5318), 'itertools.permutations', 'permutations', (['(cases * 3)'], {'r': '(3)'}), '(cases * 3, r=3)\n', (5302, 5318), False, 'from itertools import permutations\n'), ((5365, 5379), 'numpy.array', 'np.array', (['case'], {}), '(case)\n', (5373, 5379), True, 'import numpy as np\n'), ((5400, 5414), 'numpy.array', 'np.array', (['case'], {}), '(case)\n', (5408, 5414), True, 'import numpy as np\n'), ((5435, 5449), 'numpy.array', 'np.array', (['case'], {}), '(case)\n', (5443, 5449), True, 'import numpy as np\n'), ((5499, 5538), 'colour.appearance.InductionFactors_Hunt', 'InductionFactors_Hunt', (['case[0]', 'case[0]'], {}), '(case[0], case[0])\n', (5520, 5538), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((5579, 5637), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['XYZ', 'XYZ_w', 'XYZ_b', 'L_A', 'surround'], {'CCT_w': 'CCT_w'}), '(XYZ, XYZ_w, XYZ_b, L_A, surround, CCT_w=CCT_w)\n', (5590, 5637), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((2601, 2651), 'numpy.array', 'np.array', (['[1, 1, 1 / 360, 1, 1, 1, np.nan, np.nan]'], {}), '([1, 1, 1 / 360, 1, 1, 1, np.nan, np.nan])\n', (2609, 2651), True, 'import numpy as np\n'), ((2675, 2727), 'numpy.array', 'np.array', (['[1, 1, 100 / 360, 1, 1, 1, np.nan, np.nan]'], {}), '([1, 1, 100 / 360, 1, 1, 1, np.nan, np.nan])\n', (2683, 2727), True, 'import numpy as np\n'), ((2803, 2828), 'colour.utilities.domain_range_scale', 'domain_range_scale', (['scale'], {}), '(scale)\n', (2821, 2828), False, 'from colour.utilities import as_float_array, 
domain_range_scale, ignore_numpy_errors, tstack\n'), ((2898, 2993), 'colour.appearance.XYZ_to_Hunt', 'XYZ_to_Hunt', (['(XYZ * factor_a)', '(XYZ_w * factor_a)', '(XYZ_b * factor_a)', 'L_A', 'surround'], {'CCT_w': 'CCT_w'}), '(XYZ * factor_a, XYZ_w * factor_a, XYZ_b * factor_a, L_A,\n surround, CCT_w=CCT_w)\n', (2909, 2993), False, 'from colour.appearance import VIEWING_CONDITIONS_HUNT, InductionFactors_Hunt, XYZ_to_Hunt\n'), ((3156, 3185), 'colour.utilities.as_float_array', 'as_float_array', (['specification'], {}), '(specification)\n', (3170, 3185), False, 'from colour.utilities import as_float_array, domain_range_scale, ignore_numpy_errors, tstack\n')] |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from itertools import product
import six
import logging
logger = logging.getLogger(__name__)
from vttools import scrape
from numpy.testing import assert_string_equal, assert_equal, assert_raises
from nose.tools import assert_true
from scrape_test_source import eat_porridge, porridge_for_the_bears, has_defaults
def test_scrape():
res = scrape.scrape_function('porridge_for_the_bears', __name__)
for k in ('input_ports', 'output_ports', 'doc_string',
'f_type', 'func_name', 'module_path'):
assert_true(k in res)
def test_enum():
res = scrape.scrape_function('has_defaults', __name__)
assert_equal(res['input_ports'][-1]['values'], has_defaults.e)
def test_obj_src():
string_result = scrape.obj_src(eat_porridge)
initial_txt_should_be = str(
'def eat_porridge(this_sucks, temperature, wtf):')
initial_txt_actual = str(string_result.split('\n')[0])
assert_string_equal(initial_txt_actual, initial_txt_should_be)
def _optional_test_helper(tst, tar):
assert_equal(scrape._type_optional(tst)[1], tar)
def test_type_optional():
test_string = ('array, optional', 'array', 'array (optional)')
targets = (True, False, True)
for tst, tar in zip(test_string, targets):
yield _optional_test_helper, tst, tar
def test_stacked_output_port():
res = scrape.scrape_function('porridge_for_the_bears', __name__)
assert_equal(3, len(res['output_ports']))
def test_enum_type():
"""
Example function docstrings:
1) numpy.linalg.svd()
Parameters :
a : (..., M, N) array_like
A real or complex matrix of shape (M, N) .
full_matrices : bool, optional
If True (default), u and v have the shapes (M, M) and (N, N),
respectively. Otherwise, the shapes are (M, K) and (K, N),
respectively, where K = min(M, N).
compute_uv : bool, optional
Whether or not to compute u and v in addition to s. True by
default.
Returns :
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
full_matrices. Only returned when compute_uv is True.
s : (..., K) array
The singular values for every matrix, sorted in descending
order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
full_matrices. Only returned when compute_uv is True.
"""
test_str1 = '{True, False, Maybe}'
test_str2 = 'array'
test_str3 = '{true, FALSE, 452}'
test_str4 = '{12.5, 5.3}'
test_str5 = '{ (..., M, M), (..., M, K) } array'
test_str6 = '{ (..., N, N), (..., K, N) } array'
assert_equal(scrape._enum_type(test_str1)[1], True)
assert_equal(scrape._enum_type(test_str1)[2], ['True', 'False',
'Maybe'])
assert_equal(scrape._enum_type(test_str2)[1], False)
assert_raises(ValueError, scrape._enum_type, test_str3)
assert_raises(ValueError, scrape._enum_type, test_str4)
assert_equal(scrape._enum_type(test_str5)[1], True)
assert_equal(scrape._enum_type(test_str6)[1], True)
object_type_strings = ('any', 'object')
array_type_strings = ('array', 'array-like', 'array_like', 'array like',
'Array', 'ndarray', 'ndarray-like', '(N, ) array',
'(N, Maoeu, 8) array', '(,) array', '(, ) array',
'np.array', 'np.ndarray', '(N, M, P) array',
'(..., K) array',
'(..., M, N) array_like', '(N, M, P) ndarray',
'(M,) array_like', '(M) array_like', 'MxN array',
'array_like, shape (M, N)', 'ndarray, float', 'ndarrays',
'2D array', '2-D array',
'array_like (1-D)', 'array_like (1D or 2D)',
'array_like (cast to booleans)',
'int or [int, int] or array-like or [array, array]',
'array_likes')
matrix_type_strings = (tuple('{}matrix'.format(p)
for p in ('np.', 'numpy.', '')) +
('(N, M) matrix', ))
list_type_strings = ('list', 'List', 'list-like', 'list_like',
'list like', 'listlike')
tuple_type_strings = ('tuple'),
seq_type_strings = ('sequence', '1D sequence', '1-D sequence')
dtype_type_strings = ('dtype', 'dtype like', 'np.dtype', 'numpy.dtype',
'data-type', 'data type', 'data type code',
'dtype specifier',
'numpy dtype')
bool_type_strings = ('bool', 'boolean')
file_type_strings = ('file', 'filename', 'file handle',
'file object', 'file handle object')
scalar_type_strings = ('scalar', 'number')
float_type_strings = (tuple('{}float{}'.format(prefix, n)
for prefix, n in product(('np.', 'numpy.', ''),
(16, 32, 64, 128)))
+ ('double', 'single', 'float', 'float (only if)'))
# known fails 'int (cast to 0 or 1)',
int_type_strings = (('integer', 'InTeGeR',) +
tuple('{}{}int{}'.format(prefix, u, n)
for prefix, u, n
in product(('np.', 'numpy.', ''),
('u', ''),
(8, 16, 32, 64))))
complex_type_strings = ('complex', )
dict_type_strings = ('dict', 'dictionary')
str_type_strings = ('str', 'string', 'str-like')
callable_type_strings = ('function', 'func', 'callable',
'callable f(x,*args)', 'function(x) -> f')
def test_normalize_simple():
# Example function docstrings:
# 1) numpy.outer()
# Parameters :
# a : (M,) array_like
# First input vector. Input is flattened if not already
# 1-dimensional.
# b : (N,) array_like
# Second input vector. Input is flattened if not already
# 1-dimensional.
# Returns :
# out : (M, N) ndarray
# 2) numpy.linalg.svd()
# Parameters :
# a : (..., M, N) array_like
# A real or complex matrix of shape (M, N) .
# full_matrices : bool, optional
# If True (default), u and v have the shapes (M, M) and (N, N),
# respectively. Otherwise, the shapes are (M, K) and (K, N),
# respectively, where K = min(M, N).
# compute_uv : bool, optional
# Whether or not to compute u and v in addition to s. True by
# default.
# Returns :
# u : { (..., M, M), (..., M, K) } array
# Unitary matrices. The actual shape depends on the value of
# full_matrices. Only returned when compute_uv is True.
# s : (..., K) array
# The singular values for every matrix, sorted in descending
# order.
# v : { (..., N, N), (..., K, N) } array
# Unitary matrices. The actual shape depends on the value of
# full_matrices. Only returned when compute_uv is True.
test_dict = {
'object': object_type_strings,
'array': array_type_strings,
'matrix': matrix_type_strings,
'list': list_type_strings,
'tuple': tuple_type_strings,
'seq': seq_type_strings,
'dtype': dtype_type_strings,
'bool': bool_type_strings,
'file': file_type_strings,
'scalar': scalar_type_strings,
'float': float_type_strings,
'int': int_type_strings,
'complex': complex_type_strings,
'dict': dict_type_strings,
'str': str_type_strings,
'callable': callable_type_strings,
}
# make sure we test everything!
test_keys = set(six.iterkeys(test_dict))
sig_keys = set(six.iterkeys(scrape.sig_map))
assert_equal(test_keys, sig_keys)
for k, v in six.iteritems(test_dict):
for ts in v:
yield _normalize_test_helper, ts, k
def _normalize_test_helper(tst, targ):
assert_equal(scrape._normalize_type(tst), targ)
def test_check_alt_types():
test_strings = ('float or int',
'scalar or tuple of scalars',
'int or scalar',
'scalar or sequence of scalars',
'MxN ndarray',
'integer value',
'aardvark',
'aardvark of doom',
'list or aardavrk',
'aardvark or integer'
)
targets = ('float',
'tuple',
'scalar',
'seq',
'array',
'int',
None,
None,
'list',
'int')
for ts, tar in zip(test_strings, targets):
yield _normalize_test_helper, ts, tar,
def test_truncate_description():
original_description1 = ['length of three']
original_description2 = ['This object is the original description '
'stripped from the doc string. The object is ',
'actually a list of strings.']
word_count = 6
# Test to make sure descriptions that are smaller than the
# specified word count pass through correctly
assert_equal(scrape._truncate_description(original_description1,
word_count),
'length of three')
# Test that function descriptions less than word_count are cropped and
# passed through correctly
assert_equal(scrape._truncate_description(original_description2,
word_count),
'This object is the original description')
def _func_helper(func, test_string, expected_string):
assert_equal(func(test_string), expected_string)
def test_guess_type():
"""
The function _guess_type() is used in the function _enum_type(). The
initial input is the stripped type string.
e.g. {14, 0.333, 5j, True, False, Maybe}
The input string is then checked to make sure that there are enclosing
curly braces, after which the enum string is separated out using the
commas, any string declarations are then removed (i.e. ' or "), and each
element of the original enum string is converted to an element of a list
of strings. Each of these separated elements are then entered into the
_guess_type() function.
All of these test strings are parameter types that should be caught and
evaluated using the _guess_type() function.
"""
test_strting = ('0.333', '14', '5j', 'Volume')
target_strings = ('float', 'int', 'complex', 'str')
for tst, tar in zip(test_strting, target_strings):
yield _func_helper, scrape._guess_enum_val_type, tst, tar
def test_dicts_match():
    """The regex dict, signature map, and precedence list must share keys."""
    re_keys = set(six.iterkeys(scrape._RE_DICT))
    signature_keys = set(six.iterkeys(scrape.sig_map))
    precedence_keys = set(scrape.precedence_list)
    assert_equal(re_keys, signature_keys)
    assert_equal(re_keys, precedence_keys)
def _default_tester_helper(func, expect_dict):
    """Assert that the defaults extracted from *func* equal *expect_dict*."""
    extracted = scrape._extract_default_vals(func)
    assert_equal(extracted, expect_dict)
def test_default():
    """eat_porridge has no defaults; has_defaults exposes each default value."""
    cases = (
        (eat_porridge, {}),
        (has_defaults, {'a': None, 'b': 1,
                        'c': 'str', 'd': (),
                        'e': None}),
    )
    for func, expected in cases:
        yield _default_tester_helper, func, expected
def test_module_scrape():
    """Exercise scrape_module with black lists, exclude markers, and the
    exclude_private switch."""
    cases = (
        ({}, {'black_list': ['has_defaults']}, ['has_defaults']),
        ({}, {'exclude_markers': ['porridge']},
         ['eat_porridge', 'porridge_for_the_bears']),
        ({'exclude_private': False}, {}, ['_private']),
    )
    for kwargs_with, kwargs_without, members in cases:
        yield (_mod_scrape_test_helper, 'scrape_test_source',
               kwargs_with, kwargs_without, members)
def _mod_scrape_test_helper(mod_name, kwargs_with, kwargs_without,
                            test_members):
    """Scrape *mod_name* twice: kwargs_with must keep every name in
    *test_members*; kwargs_without must exclude all of them."""
    included = scrape.scrape_module(mod_name, **kwargs_with)
    for member in test_members:
        assert_true(member in included)
    excluded = scrape.scrape_module(mod_name, **kwargs_without)
    for member in test_members:
        assert_true(member not in excluded)
| [
"logging.getLogger",
"vttools.scrape._extract_default_vals",
"vttools.scrape._truncate_description",
"vttools.scrape.obj_src",
"numpy.testing.assert_equal",
"vttools.scrape._type_optional",
"vttools.scrape.scrape_function",
"itertools.product",
"numpy.testing.assert_raises",
"vttools.scrape._norma... | [((2657, 2684), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2674, 2684), False, 'import logging\n'), ((2939, 2997), 'vttools.scrape.scrape_function', 'scrape.scrape_function', (['"""porridge_for_the_bears"""', '__name__'], {}), "('porridge_for_the_bears', __name__)\n", (2961, 2997), False, 'from vttools import scrape\n'), ((3169, 3217), 'vttools.scrape.scrape_function', 'scrape.scrape_function', (['"""has_defaults"""', '__name__'], {}), "('has_defaults', __name__)\n", (3191, 3217), False, 'from vttools import scrape\n'), ((3222, 3284), 'numpy.testing.assert_equal', 'assert_equal', (["res['input_ports'][-1]['values']", 'has_defaults.e'], {}), "(res['input_ports'][-1]['values'], has_defaults.e)\n", (3234, 3284), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((3327, 3355), 'vttools.scrape.obj_src', 'scrape.obj_src', (['eat_porridge'], {}), '(eat_porridge)\n', (3341, 3355), False, 'from vttools import scrape\n'), ((3511, 3573), 'numpy.testing.assert_string_equal', 'assert_string_equal', (['initial_txt_actual', 'initial_txt_should_be'], {}), '(initial_txt_actual, initial_txt_should_be)\n', (3530, 3573), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((3933, 3991), 'vttools.scrape.scrape_function', 'scrape.scrape_function', (['"""porridge_for_the_bears"""', '__name__'], {}), "('porridge_for_the_bears', __name__)\n", (3955, 3991), False, 'from vttools import scrape\n'), ((5666, 5721), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'scrape._enum_type', 'test_str3'], {}), '(ValueError, scrape._enum_type, test_str3)\n', (5679, 5721), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((5726, 5781), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'scrape._enum_type', 'test_str4'], {}), '(ValueError, scrape._enum_type, test_str4)\n', (5739, 5781), False, 'from 
numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((10911, 10944), 'numpy.testing.assert_equal', 'assert_equal', (['test_keys', 'sig_keys'], {}), '(test_keys, sig_keys)\n', (10923, 10944), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((10961, 10985), 'six.iteritems', 'six.iteritems', (['test_dict'], {}), '(test_dict)\n', (10974, 10985), False, 'import six\n'), ((13955, 13986), 'numpy.testing.assert_equal', 'assert_equal', (['RE_keys', 'sig_keys'], {}), '(RE_keys, sig_keys)\n', (13967, 13986), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((13991, 14020), 'numpy.testing.assert_equal', 'assert_equal', (['RE_keys', 'p_keys'], {}), '(RE_keys, p_keys)\n', (14003, 14020), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((14080, 14114), 'vttools.scrape._extract_default_vals', 'scrape._extract_default_vals', (['func'], {}), '(func)\n', (14108, 14114), False, 'from vttools import scrape\n'), ((14119, 14149), 'numpy.testing.assert_equal', 'assert_equal', (['res', 'expect_dict'], {}), '(res, expect_dict)\n', (14131, 14149), False, 'from numpy.testing import assert_string_equal, assert_equal, assert_raises\n'), ((14955, 15000), 'vttools.scrape.scrape_module', 'scrape.scrape_module', (['mod_name'], {}), '(mod_name, **kwargs_with)\n', (14975, 15000), False, 'from vttools import scrape\n'), ((15070, 15118), 'vttools.scrape.scrape_module', 'scrape.scrape_module', (['mod_name'], {}), '(mod_name, **kwargs_without)\n', (15090, 15118), False, 'from vttools import scrape\n'), ((3118, 3139), 'nose.tools.assert_true', 'assert_true', (['(k in res)'], {}), '(k in res)\n', (3129, 3139), False, 'from nose.tools import assert_true\n'), ((10833, 10856), 'six.iterkeys', 'six.iterkeys', (['test_dict'], {}), '(test_dict)\n', (10845, 10856), False, 'import six\n'), ((10877, 10905), 'six.iterkeys', 'six.iterkeys', (['scrape.sig_map'], {}), 
'(scrape.sig_map)\n', (10889, 10905), False, 'import six\n'), ((11114, 11141), 'vttools.scrape._normalize_type', 'scrape._normalize_type', (['tst'], {}), '(tst)\n', (11136, 11141), False, 'from vttools import scrape\n'), ((12265, 12328), 'vttools.scrape._truncate_description', 'scrape._truncate_description', (['original_description1', 'word_count'], {}), '(original_description1, word_count)\n', (12293, 12328), False, 'from vttools import scrape\n'), ((12537, 12600), 'vttools.scrape._truncate_description', 'scrape._truncate_description', (['original_description2', 'word_count'], {}), '(original_description2, word_count)\n', (12565, 12600), False, 'from vttools import scrape\n'), ((13828, 13857), 'six.iterkeys', 'six.iterkeys', (['scrape._RE_DICT'], {}), '(scrape._RE_DICT)\n', (13840, 13857), False, 'import six\n'), ((13878, 13906), 'six.iterkeys', 'six.iterkeys', (['scrape.sig_map'], {}), '(scrape.sig_map)\n', (13890, 13906), False, 'import six\n'), ((15037, 15058), 'nose.tools.assert_true', 'assert_true', (['(n in res)'], {}), '(n in res)\n', (15048, 15058), False, 'from nose.tools import assert_true\n'), ((15154, 15179), 'nose.tools.assert_true', 'assert_true', (['(n not in res)'], {}), '(n not in res)\n', (15165, 15179), False, 'from nose.tools import assert_true\n'), ((3630, 3656), 'vttools.scrape._type_optional', 'scrape._type_optional', (['tst'], {}), '(tst)\n', (3651, 3656), False, 'from vttools import scrape\n'), ((5436, 5464), 'vttools.scrape._enum_type', 'scrape._enum_type', (['test_str1'], {}), '(test_str1)\n', (5453, 5464), False, 'from vttools import scrape\n'), ((5492, 5520), 'vttools.scrape._enum_type', 'scrape._enum_type', (['test_str1'], {}), '(test_str1)\n', (5509, 5520), False, 'from vttools import scrape\n'), ((5622, 5650), 'vttools.scrape._enum_type', 'scrape._enum_type', (['test_str2'], {}), '(test_str2)\n', (5639, 5650), False, 'from vttools import scrape\n'), ((5799, 5827), 'vttools.scrape._enum_type', 'scrape._enum_type', (['test_str5'], 
{}), '(test_str5)\n', (5816, 5827), False, 'from vttools import scrape\n'), ((5855, 5883), 'vttools.scrape._enum_type', 'scrape._enum_type', (['test_str6'], {}), '(test_str6)\n', (5872, 5883), False, 'from vttools import scrape\n'), ((7644, 7693), 'itertools.product', 'product', (["('np.', 'numpy.', '')", '(16, 32, 64, 128)'], {}), "(('np.', 'numpy.', ''), (16, 32, 64, 128))\n", (7651, 7693), False, 'from itertools import product\n'), ((8068, 8126), 'itertools.product', 'product', (["('np.', 'numpy.', '')", "('u', '')", '(8, 16, 32, 64)'], {}), "(('np.', 'numpy.', ''), ('u', ''), (8, 16, 32, 64))\n", (8075, 8126), False, 'from itertools import product\n')] |
import os
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
from test import test
import torchvision
class TrainLoop(object):
    """Training loop for IRM (Invariant Risk Minimization) domain adaptation.

    Each batch carries three source domains; the loop accumulates the task
    loss per domain plus an IRM gradient penalty, evaluates on source and
    target validation loaders after every epoch, and checkpoints both
    periodically and whenever target accuracy improves.

    Bug fixes vs. the original:
      * ``load_checkpoint`` referenced nonexistent ``self.save_epoch_fmt_task``
        (AttributeError on resume).
      * the TensorBoard branch in ``train`` used undefined names
        (``cur_loss_task``, ``cur_hypervolume``, ``self.optimizer_task``),
        raising NameError whenever ``logging=True``.
    """

    def __init__(self, model, optimizer, source_loader, test_source_loader, target_loader, patience, l2, penalty_weight, penalty_anneal_epochs, checkpoint_path=None, checkpoint_epoch=None, cuda=True, logging=False):
        if checkpoint_path is None:
            # Save to the current working directory by default.
            self.checkpoint_path = os.getcwd()
        else:
            self.checkpoint_path = checkpoint_path
            if not os.path.isdir(self.checkpoint_path):
                os.mkdir(self.checkpoint_path)
        self.save_epoch_fmt = os.path.join(self.checkpoint_path, 'IRM_{}ep.pt')
        self.cuda_mode = cuda
        self.model = model
        self.device = next(self.model.parameters()).device
        self.optimizer = optimizer
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=patience)
        self.source_loader = source_loader
        self.test_source_loader = test_source_loader
        self.target_loader = target_loader
        self.history = {'loss': [], 'accuracy_source': [], 'accuracy_target': []}
        self.cur_epoch = 0
        # Scalar multiplier whose gradient yields the IRM penalty.
        self.dummy = torch.tensor(1.).to(self.device).requires_grad_()
        self.l2 = l2
        self.penalty_weight = penalty_weight
        self.penalty_anneal_epochs = penalty_anneal_epochs
        self.total_iter = 0
        if checkpoint_epoch is not None:
            self.load_checkpoint(checkpoint_epoch)
        self.logging = logging
        if self.logging:
            from torch.utils.tensorboard import SummaryWriter
            self.writer = SummaryWriter()

    def train(self, n_epochs=1, save_every=1):
        """Train for n_epochs; returns 1 - best target accuracy (an error rate)."""
        while self.cur_epoch < n_epochs:
            print('Epoch {}/{}'.format(self.cur_epoch + 1, n_epochs))
            cur_loss = 0
            source_iter = tqdm(enumerate(self.source_loader))
            for t, batch in source_iter:
                loss_it = self.train_step(batch)
                self.total_iter += 1
                cur_loss += loss_it
            self.history['loss'].append(cur_loss / (t + 1))
            print('Current loss: {}.'.format(cur_loss / (t + 1)))
            print('Current LR: {}'.format(self.optimizer.state_dict()['param_groups'][0]['lr']))
            if self.logging:
                # FIX: original logged undefined names (cur_loss_task,
                # cur_hypervolume, self.optimizer_task) -> NameError.
                self.writer.add_scalar('train/task_loss', cur_loss / (t + 1), self.total_iter)
                self.writer.add_scalar('misc/LR', self.optimizer.param_groups[0]['lr'], self.total_iter)
            self.history['accuracy_source'].append(test(self.test_source_loader, self.model, self.device, source_target='source', epoch=self.cur_epoch, tb_writer=self.writer if self.logging else None))
            self.history['accuracy_target'].append(test(self.target_loader, self.model, self.device, source_target='target', epoch=self.cur_epoch, tb_writer=self.writer if self.logging else None))
            print('Valid. on SOURCE data - Current acc., best acc., and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['accuracy_source'][-1], np.max(self.history['accuracy_source']), 1 + np.argmax(self.history['accuracy_source'])))
            print('Valid. on TARGET data - Current acc., best acc., and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['accuracy_target'][-1], np.max(self.history['accuracy_target']), 1 + np.argmax(self.history['accuracy_target'])))
            # Checkpoint periodically and whenever target accuracy improves.
            if self.cur_epoch % save_every == 0 or self.history['accuracy_target'][-1] > np.max([-np.inf] + self.history['accuracy_target'][:-1]):
                self.checkpointing()
            self.cur_epoch += 1
            self.scheduler.step()
        # Saving final model.
        print('Saving final model...')
        self.checkpointing()
        return 1. - np.max(self.history['accuracy_target'])

    def train_step(self, batch):
        """One optimization step over a batch holding three domains.

        batch[0:3] are the per-domain inputs, batch[3:6] the matching labels.
        Returns the (possibly penalty-rescaled) scalar loss value.
        """
        self.model.train()
        loss_acc = 0
        penalty = 0
        for domain in range(3):
            x = batch[domain].to(self.device)
            y_task = batch[domain + 3].to(self.device)
            out = self.model(x)
            # Multiplying by the dummy scalar lets us read off the IRM
            # penalty as the squared gradient w.r.t. the dummy.
            loss_current = torch.nn.CrossEntropyLoss()(out * self.dummy, y_task)
            penalty += self.penalty(loss_current, self.dummy)
            loss_acc += loss_current
        weight_norm = torch.tensor(0.).to(self.device)
        for w in self.model.parameters():
            weight_norm += w.norm().pow(2)
        loss = loss_acc / 3
        loss += self.l2 * weight_norm
        # Ramp the penalty in only after the annealing phase.
        penalty_weight = (self.penalty_weight if self.cur_epoch >= self.penalty_anneal_epochs else 1.0)
        loss += penalty_weight * penalty
        if penalty_weight > 1.0:
            # Rescale the entire loss to keep gradients in a reasonable range.
            loss /= penalty_weight
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def checkpointing(self):
        """Save model/optimizer/scheduler state plus training history."""
        print('Checkpointing...')
        ckpt = {'model_state': self.model.state_dict(),
                'history': self.history,
                'cur_epoch': self.cur_epoch,
                'optimizer_state': self.optimizer.state_dict(),
                'scheduler_state': self.scheduler.state_dict()}
        torch.save(ckpt, self.save_epoch_fmt.format(self.cur_epoch))

    def load_checkpoint(self, epoch):
        """Restore training state from the checkpoint saved at *epoch*."""
        # FIX: original used nonexistent self.save_epoch_fmt_task.
        ckpt = self.save_epoch_fmt.format(epoch)
        if os.path.isfile(ckpt):
            ckpt = torch.load(ckpt)
            # Load model state
            self.model.load_state_dict(ckpt['model_state'])
            # Load optimizer state
            self.optimizer.load_state_dict(ckpt['optimizer_state'])
            # Load scheduler state
            self.scheduler.load_state_dict(ckpt['scheduler_state'])
            # Load history
            self.history = ckpt['history']
            self.cur_epoch = ckpt['cur_epoch']
        else:
            print('No checkpoint found at: {}'.format(ckpt))

    def print_grad_norms(self, model):
        """Debug helper: print the summed 2-norms of all present gradients."""
        norm = 0.0
        for params in list(filter(lambda p: p.grad is not None, model.parameters())):
            norm += params.grad.norm(2).item()
        print('Sum of grads norms: {}'.format(norm))

    def penalty(self, loss, dummy):
        """IRM penalty: squared gradient of *loss* w.r.t. the dummy scalar."""
        grad = torch.autograd.grad(loss, [dummy], create_graph=True)[0]
        return torch.sum(grad ** 2)
| [
"numpy.argmax",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.CrossEntropyLoss",
"torch.load",
"os.path.join",
"torch.optim.lr_scheduler.StepLR",
"test.test",
"os.getcwd",
"os.path.isfile",
"numpy.max",
"torch.tensor",
"os.path.isdir",
"torch.sum",
"torch.autograd.grad",
"os.mkdir"
... | [((634, 683), 'os.path.join', 'os.path.join', (['self.checkpoint_path', '"""IRM_{}ep.pt"""'], {}), "(self.checkpoint_path, 'IRM_{}ep.pt')\n", (646, 683), False, 'import os\n'), ((831, 898), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['self.optimizer'], {'step_size': 'patience'}), '(self.optimizer, step_size=patience)\n', (862, 898), False, 'import torch\n'), ((4905, 4925), 'os.path.isfile', 'os.path.isfile', (['ckpt'], {}), '(ckpt)\n', (4919, 4925), False, 'import os\n'), ((5673, 5693), 'torch.sum', 'torch.sum', (['(grad ** 2)'], {}), '(grad ** 2)\n', (5682, 5693), False, 'import torch\n'), ((465, 476), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (474, 476), False, 'import os\n'), ((1507, 1522), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1520, 1522), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3493, 3532), 'numpy.max', 'np.max', (["self.history['accuracy_target']"], {}), "(self.history['accuracy_target'])\n", (3499, 3532), True, 'import numpy as np\n'), ((4938, 4954), 'torch.load', 'torch.load', (['ckpt'], {}), '(ckpt)\n', (4948, 4954), False, 'import torch\n'), ((5607, 5660), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', '[dummy]'], {'create_graph': '(True)'}), '(loss, [dummy], create_graph=True)\n', (5626, 5660), False, 'import torch\n'), ((537, 572), 'os.path.isdir', 'os.path.isdir', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (550, 572), False, 'import os\n'), ((578, 608), 'os.mkdir', 'os.mkdir', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (586, 608), False, 'import os\n'), ((2383, 2541), 'test.test', 'test', (['self.test_source_loader', 'self.model', 'self.device'], {'source_target': '"""source"""', 'epoch': 'self.cur_epoch', 'tb_writer': '(self.writer if self.logging else None)'}), "(self.test_source_loader, self.model, self.device, source_target=\n 'source', epoch=self.cur_epoch, tb_writer=self.writer if self.logging else\n 
None)\n", (2387, 2541), False, 'from test import test\n'), ((2582, 2730), 'test.test', 'test', (['self.target_loader', 'self.model', 'self.device'], {'source_target': '"""target"""', 'epoch': 'self.cur_epoch', 'tb_writer': '(self.writer if self.logging else None)'}), "(self.target_loader, self.model, self.device, source_target='target',\n epoch=self.cur_epoch, tb_writer=self.writer if self.logging else None)\n", (2586, 2730), False, 'from test import test\n'), ((3763, 3790), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (3788, 3790), False, 'import torch\n'), ((3915, 3932), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (3927, 3932), False, 'import torch\n'), ((2871, 2910), 'numpy.max', 'np.max', (["self.history['accuracy_source']"], {}), "(self.history['accuracy_source'])\n", (2877, 2910), True, 'import numpy as np\n'), ((3095, 3134), 'numpy.max', 'np.max', (["self.history['accuracy_target']"], {}), "(self.history['accuracy_target'])\n", (3101, 3134), True, 'import numpy as np\n'), ((3264, 3320), 'numpy.max', 'np.max', (["([-np.inf] + self.history['accuracy_target'][:-1])"], {}), "([-np.inf] + self.history['accuracy_target'][:-1])\n", (3270, 3320), True, 'import numpy as np\n'), ((1130, 1147), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (1142, 1147), False, 'import torch\n'), ((2914, 2956), 'numpy.argmax', 'np.argmax', (["self.history['accuracy_source']"], {}), "(self.history['accuracy_source'])\n", (2923, 2956), True, 'import numpy as np\n'), ((3138, 3180), 'numpy.argmax', 'np.argmax', (["self.history['accuracy_target']"], {}), "(self.history['accuracy_target'])\n", (3147, 3180), True, 'import numpy as np\n')] |
import pandas as pd
from rdkit import Chem
import numpy as np
import json
from gensim.models import Word2Vec
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
import re
""" Load trained Word2Vec model - Gensim on full KEGG
Input: None
Output: trained word2vec model
"""
def load_w2v():
    """Return the Word2Vec KeyedVectors trained on the full KEGG corpus."""
    model_path = "../vectors_fullKEGG.kv"
    return KeyedVectors.load(model_path, mmap="r")
""" Return a list of all chemical fragments in SMARTS form """
""" Input: filepath (fp) to txt file """
""" Output: list of SMARTS fragments """
def get_chem_fragments(fp):
    """Read the file at path *fp* and return one stripped SMARTS fragment
    per line, in file order."""
    with open(fp, "r") as handle:
        return [line.strip() for line in handle]
""" Find the fragments within a list of smiles strings
Input: SMILES string
Output: List of lists of fragments within smiles strings
"""
def find_frags_within_SMILES(cpd_list, smiles, frags):
    """For each SMILES string, collect the SMARTS fragments it contains.

    Returns (fragments_per_compound, kept_compound_ids): a list (parallel to
    *smiles*) of deduplicated fragment lists, and *cpd_list* with every
    compound whose substructure search raised an error removed.
    """
    matches_per_cpd = []
    failed = []
    for idx, smi in enumerate(smiles):
        # Turn the SMILES string into an RDKit mol object.
        mol = Chem.MolFromSmiles(smi)
        hits = []
        for pattern in frags:
            try:
                # Record each fragment that occurs in this compound.
                if mol.HasSubstructMatch(Chem.MolFromSmarts(pattern)):
                    hits.append(pattern)
            except:
                # Substructure search failed; mark this compound for removal.
                failed.append(cpd_list[idx])
        matches_per_cpd.append(list(set(hits)))
    return matches_per_cpd, [c for c in cpd_list if c not in failed]
""" Find all SMILES sequences for a random subset of KEGG
Input: kegg dataframe, number of samples to be collected
Output: a list of smiles strings from the random sample
"""
def find_random_SMILES(kegg_df, n_samples, cpds_to_ignore):
    """Randomly sample *n_samples* KEGG compounds with usable SMILES.

    Rows whose "MOL file" is in *cpds_to_ignore* and rows with a missing or
    empty "Original SMILES" are excluded before sampling.

    Returns (smiles_list, mol_file_list) for the sampled rows.
    """
    candidates = kegg_df[~kegg_df["MOL file"].isin(cpds_to_ignore)]
    candidates = candidates.dropna(subset=["Original SMILES"])
    candidates = candidates[candidates["Original SMILES"] != ""]
    sample = candidates.sample(n_samples)
    return sample["Original SMILES"].tolist(), sample["MOL file"].tolist()
""" Find & add all fragment vectors within a compound
Goal is to have a single vector for each compound
Inputs: trained word2vec model, list of lists of fragments within amino acids
Outputs: one vector (sum of all fragment vectors) per amino acid
"""
def add_frag_vectors(cpd_list, word2vec, frags):
    """Sum the fragment vectors found in each compound into one vector.

    *frags* is a list (parallel to *cpd_list*) of fragment lists. Fragments
    missing from *word2vec* are skipped; compounds with no known fragment
    vectors at all are dropped from the returned id list.

    Returns (vectors, kept_compound_ids).
    """
    summed = []
    missing = []
    for idx, frag_set in enumerate(frags):
        found = []
        for frag in frag_set:
            try:
                found.append(word2vec[frag])
            except:
                pass
        if found:
            summed.append(np.sum(found, axis=0).astype("float64"))
        else:
            # No vector could be built for this compound; drop it.
            missing.append(cpd_list[idx])
    return summed, [c for c in cpd_list if c not in missing]
""" Run TSNE visualization
Input: dataframe of compoud vectors (df["label"] is the compound label)
Output: Visualization of the trained vectors
"""
def TSNE_visual(df, n_categories):
    """Project the 100-d compound vectors in *df* to 2-D with t-SNE and show
    a scatter plot colored by df["label"] (n_categories distinct hues)."""
    # Columns 0..99 hold the embedding; everything else is metadata.
    features = df[list(range(0, 100))].values
    embedding = TSNE(n_components=2, verbose=1, perplexity=40,
                     n_iter=300).fit_transform(features)
    df["tsne-2d-one"] = embedding[:, 0]
    df["tsne-2d-two"] = embedding[:, 1]
    hues = sns.color_palette("hls", n_categories)
    plt.figure(figsize=(16, 10))
    sns.scatterplot(
        x="tsne-2d-one",
        y="tsne-2d-two",
        hue="label",
        palette=sns.color_palette(palette=hues),
        data=df,
        legend="full",
    )
    plt.show()
""" Find all compounds associated with a particular class of compounds within KEGG
Input: dataframe of KEGG data, trained W2V model, fragment list, label of class to search for
Output: dataframe of vectors associated with a particular class
"""
def get_class_dataframe(kegg_df, word2vec, frags, class_label, cpd_classes):
    """Build a DataFrame of summed fragment vectors for one compound class.

    FIX: the original called find_frags_within_SMILES/add_frag_vectors with
    stale two-argument signatures and ignored their (value, kept_ids) tuple
    returns, so it raised TypeError at runtime.

    Returns a DataFrame with one vector per row plus a "label" column set to
    *class_label*.
    """
    # Compound IDs annotated with this class label.
    cpd_ids = [k for k, v in cpd_classes.items() if v == class_label]
    class_rows = kegg_df[kegg_df["MOL file"].isin(cpd_ids)]
    cpd_smiles = class_rows["Original SMILES"].tolist()
    cpd_list = class_rows["MOL file"].tolist()
    # Both helpers also return the compound ids that survived each step.
    class_frags, cpd_list = find_frags_within_SMILES(cpd_list, cpd_smiles, frags)
    vectors, cpd_list = add_frag_vectors(cpd_list, word2vec, class_frags)
    class_df = pd.DataFrame(vectors)
    class_df["label"] = [class_label] * len(class_df)
    print("Number of", class_label, "compounds:", len(class_df))
    return class_df
""" Builds a KEGG network, finds the central nodes, calculates distances between all nodes
Input: None (assumes newKEGG_reactionEdges.json exists within the current directory
Output: networkx graph of KEGG (unipartite, consisting only of compounds), the central node of the network, distances between all cpds
"""
def KEGG_network():
    """Build the compound projection of the KEGG reaction network.

    Reads newKEGG_reactionEdges.json from the current directory.

    FIX: the original read ``kegg["products"][rxn]`` on both the "products"
    and "substrates" passes, so substrate edges were never added and product
    edges were collected twice; the file handle was also never closed.

    Returns (cpd_graph, centers, distances):
      * cpd_graph - unipartite networkx graph of compounds,
      * centers   - hand-picked hub compound ids,
      * distances - all-pairs shortest path lengths, distances[a][b].
    """
    with open("newKEGG_reactionEdges.json", "r") as f:
        kegg = json.load(f)
    # Collect every compound-reaction edge from products and substrates alike.
    rxn_list = []
    cpd_list = []
    cpd_rxn_pairs = []
    for option in ["products", "substrates"]:
        for rxn in kegg[option]:
            rxn_list.append(rxn)
            for cpd in kegg[option][rxn]:
                cpd_list.append(cpd)
                cpd_rxn_pairs.append((cpd, rxn))
    # Remove duplicate reactions and compounds.
    rxn_list = list(set(rxn_list))
    cpd_list = list(set(cpd_list))
    # Bipartite graph over reactions (0) and compounds (1).
    KEGG_graph = nx.Graph()
    KEGG_graph.add_nodes_from(rxn_list, bipartite=0)
    KEGG_graph.add_nodes_from(cpd_list, bipartite=1)
    KEGG_graph.add_edges_from(cpd_rxn_pairs)
    # Project onto compounds only.
    KEGG_cpd_graph = nx.bipartite.projected_graph(KEGG_graph, cpd_list)
    ## CENTER(s) ## hand-picked hub metabolites (NADP+, NH3, glutamate, H2O, CO2)
    centers = ['C00006', 'C00014', 'C00025', 'C00001', 'C00011']
    # Shortest-path lengths between all pairs of compounds.
    distances = dict(nx.all_pairs_shortest_path_length(KEGG_cpd_graph))
    return KEGG_cpd_graph, centers, distances
""" Find the maximum distance between a given compound and the centers of the graph
Input: Compound, centers of the largest connected component within the graph
Output: Distance (int), or "NC" if not connected
"""
def find_distance(cpd, centers, distances):
    """Return the max shortest-path distance from *cpd* to any center node.

    *distances* maps center -> {node: distance}. The result is returned as a
    string, or "NC" (not connected) when *cpd* is unreachable from every
    center.

    FIX: the bare ``except`` is narrowed to KeyError (the only expected
    failure: cpd or center missing from the distance map).
    """
    found = []
    for center in centers:
        try:
            found.append(distances[center][cpd])
        except KeyError:
            # cpd is not in this center's component (or center is unknown).
            pass
    if not found:
        return "NC"
    return str(max(found))
def main():
    """Embed a random sample of KEGG compounds and visualize them with t-SNE,
    colored by each compound's network distance from the hub metabolites.

    Expects ../kegg_data.csv, ../frags.txt, ../vectors_fullKEGG.kv, and
    ./newKEGG_reactionEdges.json to exist.
    """
    # Load the trained word2vec model, the KEGG table, and all SMARTS fragments.
    word2vec = load_w2v()
    kegg_df = pd.read_csv("../kegg_data.csv")
    frags = get_chem_fragments("../frags.txt")
    KEGG_cpd_graph, centers, distances = KEGG_network()
    ## RANDOM CPDS ##
    # Sample 1000 random compounds, ignoring no compounds (empty ignore list).
    rand_cpds, cpd_list = find_random_SMILES(kegg_df, 1000, [])
    rand_frags, cpd_list = find_frags_within_SMILES(cpd_list, rand_cpds, frags)
    rand_vectors, cpd_list = add_frag_vectors(cpd_list, word2vec, rand_frags)
    rand_df = pd.DataFrame(rand_vectors)
    rand_df["Cpds"] = cpd_list
    # Label each compound by its max distance from the central compounds;
    # the ".mol" suffix is stripped to recover the bare KEGG compound id.
    cpd_distance = []
    for index, row in rand_df.iterrows():
        cpd_distance.append(find_distance(re.sub(".mol", "", row["Cpds"]), centers, distances))
    rand_df["label"] = cpd_distance
    # Remove all "NC" (not connected) labels for clearer interpretation.
    sub_df = rand_df[rand_df["label"] != "NC"]
    #print("Number of random vectors:", len(rand_df))
    # Run TSNE on the connected compounds only.
    #TSNE_visual(rand_df, len(rand_df["label"].unique()))
    TSNE_visual(sub_df, len(sub_df["label"].unique()))
# Entry point: build vectors for a random KEGG sample and visualize them.
if __name__ == "__main__":
    main()
| [
"networkx.bipartite.projected_graph",
"networkx.all_pairs_shortest_path_length",
"seaborn.color_palette",
"pandas.read_csv",
"rdkit.Chem.MolFromSmiles",
"networkx.Graph",
"sklearn.manifold.TSNE",
"gensim.models.KeyedVectors.load",
"json.load",
"networkx.connected_components",
"matplotlib.pyplot.... | [((447, 500), 'gensim.models.KeyedVectors.load', 'KeyedVectors.load', (['"""../vectors_fullKEGG.kv"""'], {'mmap': '"""r"""'}), "('../vectors_fullKEGG.kv', mmap='r')\n", (464, 500), False, 'from gensim.models import KeyedVectors\n'), ((3701, 3759), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'verbose': '(1)', 'perplexity': '(40)', 'n_iter': '(300)'}), '(n_components=2, verbose=1, perplexity=40, n_iter=300)\n', (3705, 3759), False, 'from sklearn.manifold import TSNE\n'), ((3907, 3945), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', 'n_categories'], {}), "('hls', n_categories)\n", (3924, 3945), True, 'import seaborn as sns\n'), ((3951, 3979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 10)'}), '(figsize=(16, 10))\n', (3961, 3979), True, 'import matplotlib.pyplot as plt\n'), ((4160, 4170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4168, 4170), True, 'import matplotlib.pyplot as plt\n'), ((4853, 4874), 'pandas.DataFrame', 'pd.DataFrame', (['vectors'], {}), '(vectors)\n', (4865, 4874), True, 'import pandas as pd\n'), ((5441, 5453), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5450, 5453), False, 'import json\n'), ((6211, 6221), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (6219, 6221), True, 'import networkx as nx\n'), ((6435, 6485), 'networkx.bipartite.projected_graph', 'nx.bipartite.projected_graph', (['KEGG_graph', 'cpd_list'], {}), '(KEGG_graph, cpd_list)\n', (6463, 6485), True, 'import networkx as nx\n'), ((7711, 7742), 'pandas.read_csv', 'pd.read_csv', (['"""../kegg_data.csv"""'], {}), "('../kegg_data.csv')\n", (7722, 7742), True, 'import pandas as pd\n'), ((8162, 8188), 'pandas.DataFrame', 'pd.DataFrame', (['rand_vectors'], {}), '(rand_vectors)\n', (8174, 8188), True, 'import pandas as pd\n'), ((1131, 1154), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smi'], {}), '(smi)\n', (1149, 1154), False, 'from rdkit import Chem\n'), ((6563, 6602), 
'networkx.connected_components', 'nx.connected_components', (['KEGG_cpd_graph'], {}), '(KEGG_cpd_graph)\n', (6586, 6602), True, 'import networkx as nx\n'), ((6808, 6857), 'networkx.all_pairs_shortest_path_length', 'nx.all_pairs_shortest_path_length', (['KEGG_cpd_graph'], {}), '(KEGG_cpd_graph)\n', (6841, 6857), True, 'import networkx as nx\n'), ((4079, 4109), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': 'pal'}), '(palette=pal)\n', (4096, 4109), True, 'import seaborn as sns\n'), ((8376, 8407), 're.sub', 're.sub', (['""".mol"""', '""""""', "row['Cpds']"], {}), "('.mol', '', row['Cpds'])\n", (8382, 8407), False, 'import re\n'), ((1420, 1441), 'rdkit.Chem.MolFromSmarts', 'Chem.MolFromSmarts', (['f'], {}), '(f)\n', (1438, 1441), False, 'from rdkit import Chem\n'), ((3187, 3205), 'numpy.sum', 'np.sum', (['vs'], {'axis': '(0)'}), '(vs, axis=0)\n', (3193, 3205), True, 'import numpy as np\n')] |
#!/usr/bin/python3
import collections
import contextlib
import itertools
import logging
import math
import os.path
import re
import json
import sys
import types
from typing import Iterable, List, Tuple
import matplotlib
import numpy as np
from absl import app, flags
import scipy.stats
FLAGS = flags.FLAGS
flags.DEFINE_string('pdf_dir', '', 'directory to which PDF files are written')
flags.DEFINE_string('png_dir', '', 'directory to which PNG files are written')
# At least one output directory must be given: each validator accepts its own
# flag being empty as long as the other format's directory is set.
flags.register_validator(
    'pdf_dir',
    lambda value: value or FLAGS.png_dir,
    message='no output directory is specified',
)
flags.register_validator(
    'png_dir',
    lambda value: value or FLAGS.pdf_dir,
    message='no output directory is specified',
)
flags.DEFINE_list('pq_size', '', 'numpy data dump for pq size history')
flags.DEFINE_list(
    'bucket_distribution',
    '',
    'numpy data dump for bucket distribution history',
)
flags.DEFINE_string('metadata', '', 'metadata of pq size history')
# Select the non-interactive Agg backend; must happen before pyplot is imported.
matplotlib.use('Agg')
from matplotlib import pyplot as plt  # isort:skip pylint:disable=all
# Shared kwargs for every savefig call; CreationDate=None presumably drops the
# embedded timestamp so output files are byte-reproducible — TODO confirm.
SAVEFIG_KWARGS = {
    'dpi': 300,
    'bbox_inches': 'tight',
    'pad_inches': 0,
    'metadata': {
        'CreationDate': None,
    },
}
XTICK_ROTATION = 90
def def_var(key, value):
    """Emit a LaTeX ``\\newcommand`` defining *key* as *value* (2 sig. figs.).

    The human-readable form goes to stderr; the LaTeX definition to stdout.
    LaTeX macro names cannot contain underscores, so they are stripped.

    FIX: the original called ``key.incr('_', '')`` — ``str`` has no ``incr``
    method, so every call raised AttributeError; ``replace`` was intended.
    """
    print(f'{key} = {value:.2g}', file=sys.stderr)
    key = key.replace('_', '')
    print(f'\\newcommand\\{key}{{{value:.2g}}}')
@contextlib.contextmanager
def figure(name: str):
    """Context manager around drawing one named figure.

    The body draws onto the current pyplot figure; on exit the figure is
    written to each configured output directory and then closed.
    """
    logging.info('drawing figure %s', name)
    yield
    targets = (
        (FLAGS.pdf_dir, 'pdf', True),   # PDFs keep a transparent background
        (FLAGS.png_dir, 'png', False),
    )
    for out_dir, ext, transparent in targets:
        if out_dir:
            plt.savefig(
                os.path.join(out_dir, f'{name}.{ext}'),
                transparent=transparent,
                **SAVEFIG_KWARGS,
            )
    plt.close()
def main(argv: List[str]):
    """Set global plot styling, then render every figure via draw()."""
    params = plt.rcParams
    params['font.family'] = 'Linux Libertine'
    params['font.size'] = 12
    params['figure.figsize'] = (6, 1.65)
    draw(argv)
# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python
# Matches ANSI escape sequences (terminal colors, cursor control) so they can
# be stripped from captured log lines before parsing.
_ANSI_ESCAPE = re.compile(r'(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]')
class QueryMetric(types.SimpleNamespace):
    """Metrics recorded for a single SSSP query."""
    kernel_time: float
    cycle_count: int
    spill_count: int
    push_count: int
    cvc_idle_count: List[int]
    read_hit: List[float]
    write_hit: List[float]
    visited_vertex_count: int
    discarded_by_update_vertex_count: int
    discarded_by_filter_vertex_count: int
    teps: float
    work_efficiency: float

    @property
    def visited_edge_count(self) -> int:
        """Total vertices examined: visited plus both discard categories."""
        return (self.visited_vertex_count
                + self.discarded_by_filter_vertex_count
                + self.discarded_by_update_vertex_count)
class DataPoints(types.SimpleNamespace):
    """Accumulates, per sample, the harmonic mean and geometric standard
    deviation of an iterable of measurements (via ``+=``)."""
    mean: List[float]
    stdev: List[float]

    def __init__(self):
        super().__init__()
        self.mean = []
        self.stdev = []

    def __iadd__(self, stats: Iterable[float]) -> 'DataPoints':
        """Fold one sample of measurements into the running series."""
        values = list(stats)
        self.mean.append(scipy.stats.hmean(values))
        self.stdev.append(scipy.stats.gstd(values))
        return self
def _sample(history: np.array, size: int = 1000) -> Tuple[np.array, np.array]:
step = history.shape[0] // min(size, history.shape[0])
x = np.arange(0, history.shape[0], step, dtype=np.int64)
y = history[::step]
x[-1] = history.shape[0] - 1
y[-1] = history[-1]
return x, y
def draw(argv: List[str]) -> None:
  """Render all figures from benchmark logs and numpy history dumps.

  Args:
    argv: program arguments; argv[1:] are per-dataset benchmark log files.
      History dumps come from FLAGS.bucket_distribution / FLAGS.pq_size.
  """
  cgpq_chunk_size = 1024
  # NOTE(review): cgpq_chunk_megabytes is computed but never used below.
  cgpq_chunk_megabytes = cgpq_chunk_size * 8 / 1e6
  # Figure: bucket size over traversed edges, one line per dump file.
  with figure('bucket-distribution'):
    with open(FLAGS.metadata) as fp:
      metadata = json.load(fp)
    for filename in FLAGS.bucket_distribution:
      history = np.load(filename)
      logging.info('loaded file "%s"', filename)
      max_of_each_bucket = np.max(history, 0)
      # Over-provisioning ratio if every bucket were sized for the worst one.
      logging.info(
          'budget/actual: %f',
          np.max(max_of_each_bucket) * max_of_each_bucket.size /
          np.sum(max_of_each_bucket))
      plt.plot(*_sample(history), label=filename.split('.', 1)[0])
    plt.xlabel('Number of Traversed Edges')
    plt.ylabel('Bucket Size')
  # Figure: active-vertex (priority queue) size over time, per dataset.
  with figure('pq-size'):
    plt.gcf().set_size_inches(6, 2.05)
    for filename in FLAGS.pq_size:
      history = np.load(filename)
      logging.info('loaded file "%s"', filename)
      name = filename.split('.', 1)[0]
      vertex_count = metadata[name]['nv']
      edge_count = metadata[name]['ne']
      plt.plot(
          *_sample(history),
          label=f'{name} |V|={vertex_count} |E|={edge_count}',
      )
    plt.xlabel('Number of Traversed Edges')
    plt.ylabel('Number of Active Vertices')
    plt.legend(loc='upper right')
  # Per-dataset aggregated statistics, filled in by the parsing loop below.
  datasets = []
  cvc_idle = DataPoints()
  spill_percentage = DataPoints()
  visited_vertices_percentage = DataPoints()
  discarded_by_filter_percentage = DataPoints()
  read_hit = DataPoints()
  write_hit = DataPoints()
  raw_teps = DataPoints()
  uniq_teps = DataPoints()
  work_efficiency = DataPoints()
  # Parse each benchmark log (one file per dataset) into QueryMetric records.
  for filename in argv[1:]:
    datasets.append(os.path.basename(filename).split('.', 1)[0])
    with open(filename) as fp:
      logging.info('reading file "%s"', filename)
      metrics: List[QueryMetric] = []
      for line in fp:
        # Strip terminal color codes, keep only glog 'I...' INFO lines, and
        # drop the log prefix up to '] ' before tokenizing.
        line = _ANSI_ESCAPE.sub('', line)
        if not line.startswith('I'):
          continue
        line = line.split('] ')[-1].strip()
        items = line.split()
        if 'kernel time:' in line:
          # 'kernel time:' starts a new query record; initialize accumulators.
          metrics.append(QueryMetric())
          metrics[-1].kernel_time = float(items[2])
          metrics[-1].cvc_idle_count = []
          metrics[-1].read_hit = []
          metrics[-1].write_hit = []
          metrics[-1].spill_count = 0
        elif 'TEPS:' in line:
          metrics[-1].teps = float(items[1])
        elif '#idle' in line:
          # Idle cycles as a percentage of total cycles; relies on
          # 'cycle count:' having appeared earlier in this record.
          metrics[-1].cvc_idle_count.append(
              int(items[2]) / metrics[-1].cycle_count * 100)
        elif '#edges visited:' in line:
          metrics[-1].work_efficiency = int(items[2]) / int(
              items[-1].rstrip(')'))
        elif '#vertices visited:' in line:
          metrics[-1].visited_vertex_count = int(items[2])
        elif '#discarded by update:' in line:
          metrics[-1].discarded_by_update_vertex_count = int(items[3])
        elif '#discarded by filter:' in line:
          metrics[-1].discarded_by_filter_vertex_count = int(items[3])
        elif '#push:' in line:
          metrics[-1].push_count = int(items[1])
        elif 'cycle count:' in line:
          metrics[-1].cycle_count = int(items[2])
        elif 'read hit :' in line:
          metrics[-1].read_hit.append(int(items[3]) / int(items[5]) * 100)
        elif 'write hit :' in line:
          metrics[-1].write_hit.append(int(items[3]) / int(items[5]) * 100)
        elif 'spill count :' in line:
          metrics[-1].spill_count += int(items[3])
    # Aggregate this dataset's queries into one point per statistic.
    cvc_idle += itertools.chain.from_iterable(
        m.cvc_idle_count for m in metrics)
    # Spilled vertices (spills are counted in CGPQ chunks) as % of pushes;
    # datasets that never spilled are excluded from the mean.
    spill_percentage += (m.spill_count * cgpq_chunk_size * 100 / m.push_count
                         for m in metrics
                         if m.spill_count > 0)
    visited_vertices_percentage += (
        m.visited_vertex_count * 100 / m.visited_edge_count for m in metrics)
    discarded_by_filter_percentage += (m.discarded_by_filter_vertex_count *
                                       100 / m.visited_edge_count
                                       for m in metrics)
    read_hit += itertools.chain.from_iterable(m.read_hit for m in metrics)
    write_hit += itertools.chain.from_iterable(m.write_hit for m in metrics)
    # Raw TEPS counts every traversed edge; uniq TEPS is the reported
    # algorithm-level figure.  Both converted to millions.
    raw_teps += (m.visited_edge_count / m.kernel_time / 1e6 for m in metrics)
    uniq_teps += (m.teps / 1e6 for m in metrics)
    work_efficiency += (m.work_efficiency for m in metrics)
  # Bar/scatter figures over the aggregated per-dataset statistics.
  with figure('cvc-idle'):
    plt.bar(
        datasets,
        cvc_idle.mean,
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('CVC Idling (%)')
  with figure('spill-stack'):
    plt.bar(
        datasets,
        spill_percentage.mean,
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('Spilled Vertices (%)')
  with figure('cache-rate'):
    plt.errorbar(
        datasets,
        read_hit.mean,
        fmt='o',
        label='Read',
    )
    plt.errorbar(
        datasets,
        write_hit.mean,
        fmt='s',
        label='Write',
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('Cache Hit Rate (%)')
    plt.ylim([0, 100])
    plt.gca().yaxis.grid()
    plt.legend()
  # Stacked bars: vertices actually processed vs. discarded by filtering.
  with figure('discarded-vertices'):
    plt.bar(
        datasets,
        visited_vertices_percentage.mean,
        bottom=discarded_by_filter_percentage.mean,
        label='Processed by edge fetcher',
    )
    plt.bar(
        datasets,
        discarded_by_filter_percentage.mean,
        label='Discarded by CVC filtering',
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('Active Vertices (%)')
    plt.legend()
  with figure('teps'):
    plt.errorbar(
        datasets,
        raw_teps.mean,
        fmt='o',
        label='Traversal',
    )
    plt.errorbar(
        datasets,
        uniq_teps.mean,
        fmt='s',
        label='Algorithm',
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('Throughput (MTEPS)')
    plt.gca().yaxis.grid()
    plt.legend()
  with figure('work-efficiency'):
    plt.bar(
        datasets,
        work_efficiency.mean,
    )
    plt.xlabel('Dataset')
    plt.xticks(rotation=XTICK_ROTATION)
    plt.ylabel('Amount of Work')
if __name__ == '__main__':
app.run(main)
| [
"re.compile",
"matplotlib.pyplot.ylabel",
"absl.flags.register_validator",
"matplotlib.pyplot.errorbar",
"logging.info",
"numpy.arange",
"absl.flags.DEFINE_list",
"matplotlib.pyplot.xlabel",
"absl.app.run",
"numpy.max",
"matplotlib.pyplot.close",
"itertools.chain.from_iterable",
"matplotlib.... | [((309, 387), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""pdf_dir"""', '""""""', '"""directory to which PDF files are written"""'], {}), "('pdf_dir', '', 'directory to which PDF files are written')\n", (328, 387), False, 'from absl import app, flags\n'), ((388, 466), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""png_dir"""', '""""""', '"""directory to which PNG files are written"""'], {}), "('png_dir', '', 'directory to which PNG files are written')\n", (407, 466), False, 'from absl import app, flags\n'), ((467, 588), 'absl.flags.register_validator', 'flags.register_validator', (['"""pdf_dir"""', '(lambda value: value or FLAGS.png_dir)'], {'message': '"""no output directory is specified"""'}), "('pdf_dir', lambda value: value or FLAGS.png_dir,\n message='no output directory is specified')\n", (491, 588), False, 'from absl import app, flags\n'), ((600, 721), 'absl.flags.register_validator', 'flags.register_validator', (['"""png_dir"""', '(lambda value: value or FLAGS.pdf_dir)'], {'message': '"""no output directory is specified"""'}), "('png_dir', lambda value: value or FLAGS.pdf_dir,\n message='no output directory is specified')\n", (624, 721), False, 'from absl import app, flags\n'), ((733, 804), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""pq_size"""', '""""""', '"""numpy data dump for pq size history"""'], {}), "('pq_size', '', 'numpy data dump for pq size history')\n", (750, 804), False, 'from absl import app, flags\n'), ((805, 904), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""bucket_distribution"""', '""""""', '"""numpy data dump for bucket distribution history"""'], {}), "('bucket_distribution', '',\n 'numpy data dump for bucket distribution history')\n", (822, 904), False, 'from absl import app, flags\n'), ((916, 982), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""metadata"""', '""""""', '"""metadata of pq size history"""'], {}), "('metadata', '', 'metadata of pq size history')\n", (935, 
982), False, 'from absl import app, flags\n'), ((984, 1005), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (998, 1005), False, 'import matplotlib\n'), ((2101, 2160), 're.compile', 're.compile', (['"""(?:\\\\x1B[@-_]|[\\\\x80-\\\\x9F])[0-?]*[ -/]*[@-~]"""'], {}), "('(?:\\\\x1B[@-_]|[\\\\x80-\\\\x9F])[0-?]*[ -/]*[@-~]')\n", (2111, 2160), False, 'import re\n'), ((1442, 1481), 'logging.info', 'logging.info', (['"""drawing figure %s"""', 'name'], {}), "('drawing figure %s', name)\n", (1454, 1481), False, 'import logging\n'), ((1787, 1798), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1796, 1798), True, 'from matplotlib import pyplot as plt\n'), ((3276, 3328), 'numpy.arange', 'np.arange', (['(0)', 'history.shape[0]', 'step'], {'dtype': 'np.int64'}), '(0, history.shape[0], step, dtype=np.int64)\n', (3285, 3328), True, 'import numpy as np\n'), ((9642, 9655), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (9649, 9655), False, 'from absl import app, flags\n'), ((4538, 4577), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Traversed Edges"""'], {}), "('Number of Traversed Edges')\n", (4548, 4577), True, 'from matplotlib import pyplot as plt\n'), ((4582, 4621), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Active Vertices"""'], {}), "('Number of Active Vertices')\n", (4592, 4621), True, 'from matplotlib import pyplot as plt\n'), ((4626, 4655), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4636, 4655), True, 'from matplotlib import pyplot as plt\n'), ((7791, 7823), 'matplotlib.pyplot.bar', 'plt.bar', (['datasets', 'cvc_idle.mean'], {}), '(datasets, cvc_idle.mean)\n', (7798, 7823), True, 'from matplotlib import pyplot as plt\n'), ((7851, 7872), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (7861, 7872), True, 'from matplotlib import pyplot as plt\n'), ((7877, 7912), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], 
{'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (7887, 7912), True, 'from matplotlib import pyplot as plt\n'), ((7917, 7945), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""CVC Idling (%)"""'], {}), "('CVC Idling (%)')\n", (7927, 7945), True, 'from matplotlib import pyplot as plt\n'), ((7981, 8021), 'matplotlib.pyplot.bar', 'plt.bar', (['datasets', 'spill_percentage.mean'], {}), '(datasets, spill_percentage.mean)\n', (7988, 8021), True, 'from matplotlib import pyplot as plt\n'), ((8049, 8070), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (8059, 8070), True, 'from matplotlib import pyplot as plt\n'), ((8075, 8110), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (8085, 8110), True, 'from matplotlib import pyplot as plt\n'), ((8115, 8149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Spilled Vertices (%)"""'], {}), "('Spilled Vertices (%)')\n", (8125, 8149), True, 'from matplotlib import pyplot as plt\n'), ((8184, 8244), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['datasets', 'read_hit.mean'], {'fmt': '"""o"""', 'label': '"""Read"""'}), "(datasets, read_hit.mean, fmt='o', label='Read')\n", (8196, 8244), True, 'from matplotlib import pyplot as plt\n'), ((8288, 8350), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['datasets', 'write_hit.mean'], {'fmt': '"""s"""', 'label': '"""Write"""'}), "(datasets, write_hit.mean, fmt='s', label='Write')\n", (8300, 8350), True, 'from matplotlib import pyplot as plt\n'), ((8394, 8415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (8404, 8415), True, 'from matplotlib import pyplot as plt\n'), ((8420, 8455), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (8430, 8455), True, 'from matplotlib import pyplot as plt\n'), ((8460, 8492), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cache Hit Rate (%)"""'], {}), 
"('Cache Hit Rate (%)')\n", (8470, 8492), True, 'from matplotlib import pyplot as plt\n'), ((8497, 8515), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 100]'], {}), '([0, 100])\n', (8505, 8515), True, 'from matplotlib import pyplot as plt\n'), ((8547, 8559), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8557, 8559), True, 'from matplotlib import pyplot as plt\n'), ((8602, 8737), 'matplotlib.pyplot.bar', 'plt.bar', (['datasets', 'visited_vertices_percentage.mean'], {'bottom': 'discarded_by_filter_percentage.mean', 'label': '"""Processed by edge fetcher"""'}), "(datasets, visited_vertices_percentage.mean, bottom=\n discarded_by_filter_percentage.mean, label='Processed by edge fetcher')\n", (8609, 8737), True, 'from matplotlib import pyplot as plt\n'), ((8776, 8871), 'matplotlib.pyplot.bar', 'plt.bar', (['datasets', 'discarded_by_filter_percentage.mean'], {'label': '"""Discarded by CVC filtering"""'}), "(datasets, discarded_by_filter_percentage.mean, label=\n 'Discarded by CVC filtering')\n", (8783, 8871), True, 'from matplotlib import pyplot as plt\n'), ((8903, 8924), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (8913, 8924), True, 'from matplotlib import pyplot as plt\n'), ((8929, 8964), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (8939, 8964), True, 'from matplotlib import pyplot as plt\n'), ((8969, 9002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Active Vertices (%)"""'], {}), "('Active Vertices (%)')\n", (8979, 9002), True, 'from matplotlib import pyplot as plt\n'), ((9007, 9019), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9017, 9019), True, 'from matplotlib import pyplot as plt\n'), ((9048, 9113), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['datasets', 'raw_teps.mean'], {'fmt': '"""o"""', 'label': '"""Traversal"""'}), "(datasets, raw_teps.mean, fmt='o', label='Traversal')\n", (9060, 9113), True, 'from matplotlib 
import pyplot as plt\n'), ((9157, 9223), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['datasets', 'uniq_teps.mean'], {'fmt': '"""s"""', 'label': '"""Algorithm"""'}), "(datasets, uniq_teps.mean, fmt='s', label='Algorithm')\n", (9169, 9223), True, 'from matplotlib import pyplot as plt\n'), ((9267, 9288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (9277, 9288), True, 'from matplotlib import pyplot as plt\n'), ((9293, 9328), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (9303, 9328), True, 'from matplotlib import pyplot as plt\n'), ((9333, 9365), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Throughput (MTEPS)"""'], {}), "('Throughput (MTEPS)')\n", (9343, 9365), True, 'from matplotlib import pyplot as plt\n'), ((9397, 9409), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9407, 9409), True, 'from matplotlib import pyplot as plt\n'), ((9449, 9488), 'matplotlib.pyplot.bar', 'plt.bar', (['datasets', 'work_efficiency.mean'], {}), '(datasets, work_efficiency.mean)\n', (9456, 9488), True, 'from matplotlib import pyplot as plt\n'), ((9516, 9537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Dataset"""'], {}), "('Dataset')\n", (9526, 9537), True, 'from matplotlib import pyplot as plt\n'), ((9542, 9577), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': 'XTICK_ROTATION'}), '(rotation=XTICK_ROTATION)\n', (9552, 9577), True, 'from matplotlib import pyplot as plt\n'), ((9582, 9610), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amount of Work"""'], {}), "('Amount of Work')\n", (9592, 9610), True, 'from matplotlib import pyplot as plt\n'), ((3624, 3637), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3633, 3637), False, 'import json\n'), ((3701, 3718), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (3708, 3718), True, 'import numpy as np\n'), ((3725, 3767), 'logging.info', 'logging.info', (['"""loaded file "%s\\""""', 
'filename'], {}), '(\'loaded file "%s"\', filename)\n', (3737, 3767), False, 'import logging\n'), ((3795, 3813), 'numpy.max', 'np.max', (['history', '(0)'], {}), '(history, 0)\n', (3801, 3813), True, 'import numpy as np\n'), ((4041, 4080), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Traversed Edges"""'], {}), "('Number of Traversed Edges')\n", (4051, 4080), True, 'from matplotlib import pyplot as plt\n'), ((4087, 4112), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Bucket Size"""'], {}), "('Bucket Size')\n", (4097, 4112), True, 'from matplotlib import pyplot as plt\n'), ((4230, 4247), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (4237, 4247), True, 'import numpy as np\n'), ((4254, 4296), 'logging.info', 'logging.info', (['"""loaded file "%s\\""""', 'filename'], {}), '(\'loaded file "%s"\', filename)\n', (4266, 4296), False, 'import logging\n'), ((5096, 5139), 'logging.info', 'logging.info', (['"""reading file "%s\\""""', 'filename'], {}), '(\'reading file "%s"\', filename)\n', (5108, 5139), False, 'import logging\n'), ((6837, 6901), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(m.cvc_idle_count for m in metrics)'], {}), '(m.cvc_idle_count for m in metrics)\n', (6866, 6901), False, 'import itertools\n'), ((7428, 7486), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(m.read_hit for m in metrics)'], {}), '(m.read_hit for m in metrics)\n', (7457, 7486), False, 'import itertools\n'), ((7506, 7565), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['(m.write_hit for m in metrics)'], {}), '(m.write_hit for m in metrics)\n', (7535, 7565), False, 'import itertools\n'), ((4144, 4153), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (4151, 4153), True, 'from matplotlib import pyplot as plt\n'), ((3940, 3966), 'numpy.sum', 'np.sum', (['max_of_each_bucket'], {}), '(max_of_each_bucket)\n', (3946, 3966), True, 'import numpy as np\n'), ((8520, 8529), 'matplotlib.pyplot.gca', 
'plt.gca', ([], {}), '()\n', (8527, 8529), True, 'from matplotlib import pyplot as plt\n'), ((9370, 9379), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (9377, 9379), True, 'from matplotlib import pyplot as plt\n'), ((3875, 3901), 'numpy.max', 'np.max', (['max_of_each_bucket'], {}), '(max_of_each_bucket)\n', (3881, 3901), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""
此脚本用于展示spectral embedding的效果
"""
import numpy as np
import matplotlib.pyplot as plt
from spectral_embedding_ import spectral_embedding
def generate_data():
    """Return the adjacency matrix of a small two-community example graph."""
    adjacency = np.array([
        [0, 1, 1, 1, 0, 0, 0],
        [1, 0, 1, 1, 1, 0, 0],
        [1, 1, 0, 1, 0, 0, 0],
        [1, 1, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 1, 1],
        [0, 0, 0, 0, 1, 0, 1],
        [0, 0, 0, 0, 1, 1, 0],
    ])
    return adjacency
def visualize(data):
    """Scatter-plot the 2-D embedding produced by the model."""
    fig = plt.figure(figsize=(6, 6), dpi=80)
    axes = fig.add_subplot(1, 1, 1)
    axes.scatter(data[:, 0], data[:, 1], s=200, edgecolors="k")
    plt.show()
def run():
    """Program entry point: build the graph, embed it, and plot the result."""
    adjacency = generate_data()
    # Spectral embedding maps each node to a 2-D coordinate.
    embedding = spectral_embedding(adjacency, n_components=2, drop_first=False)
    visualize(embedding)
if __name__ == "__main__":
run()
| [
"spectral_embedding_.spectral_embedding",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((226, 406), 'numpy.array', 'np.array', (['[[0, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 0, 0, 0], [1, 1,\n 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 0, 1], [0, 0, 0,\n 0, 1, 1, 0]]'], {}), '([[0, 1, 1, 1, 0, 0, 0], [1, 0, 1, 1, 1, 0, 0], [1, 1, 0, 1, 0, 0, \n 0], [1, 1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 0, 1],\n [0, 0, 0, 0, 1, 1, 0]])\n', (234, 406), True, 'import numpy as np\n'), ((533, 567), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(80)'}), '(figsize=(6, 6), dpi=80)\n', (543, 567), True, 'import matplotlib.pyplot as plt\n'), ((668, 678), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (676, 678), True, 'import matplotlib.pyplot as plt\n'), ((791, 849), 'spectral_embedding_.spectral_embedding', 'spectral_embedding', (['data'], {'n_components': '(2)', 'drop_first': '(False)'}), '(data, n_components=2, drop_first=False)\n', (809, 849), False, 'from spectral_embedding_ import spectral_embedding\n')] |
"""
clustering.py
2018.06.11
"""
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
from scipy import misc
from sklearn.cluster import KMeans
class FaceNet:
    """Wraps a pretrained FaceNet graph for face-embedding extraction."""

    def __init__(self, sess, args):
        """Load the model given by args.model into `sess` and cache tensors."""
        self.session = sess
        facenet.load_model(args.model)
        # Look up the input/output tensors once from the default graph.
        graph = tf.get_default_graph()
        self.images_placeholder = graph.get_tensor_by_name("input:0")
        self.embeddings = graph.get_tensor_by_name("embeddings:0")
        self.phase_train_placeholder = graph.get_tensor_by_name("phase_train:0")
        self.embedding_size = self.embeddings.get_shape()[1]

    def preprocess(self, images):
        """Convert grayscale images to RGB and prewhiten every image."""
        def _prepare(img):
            if img.ndim == 2:
                img = facenet.to_rgb(img)
            return facenet.prewhiten(img)

        return [_prepare(img) for img in images]

    def extract_feature(self, images):
        """Run a forward pass and return the embedding matrix for `images`."""
        feed = {
            self.images_placeholder: images,
            self.phase_train_placeholder: False,
        }
        return self.session.run(self.embeddings, feed_dict=feed)
class FaceClustering:
    """K-means clustering over face embedding vectors."""

    def __init__(self, num_clusters):
        # Fixed random_state keeps cluster assignments reproducible.
        self.kmeans = KMeans(n_clusters=num_clusters, random_state=0)

    def clustering(self, x):
        """Fit on `x` and return one cluster label per row."""
        return self.kmeans.fit(x).labels_
def main(args):
    """Embed aligned face crops, cluster them, and save each image into an
    output sub-directory named after its cluster id.

    Args:
        args: namespace produced by parse_arguments().
    """
    print('Creating networks and loading parameters')
    input_images = facenet.load_data(
        facenet.get_image_paths(args.input_dir),
        do_random_crop=False, do_random_flip=False, image_size=args.image_size)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) as sess:
        print('Loading feature extraction model')
        feature_net = FaceNet(sess, args)

        # Ceiling division so a trailing partial batch is still processed.
        batch_count = (len(input_images) + args.batch_size - 1) // args.batch_size

        # BUG FIX: the original detected the first batch with
        # `all_embeddings == []`, which numpy evaluates elementwise (not a
        # plain bool).  Collect per-batch arrays and concatenate once instead.
        batch_embeddings = []
        for i in range(batch_count):
            batch_images = input_images[i * args.batch_size:(i + 1) * args.batch_size]
            batch_embeddings.append(feature_net.extract_feature(batch_images))
        all_embeddings = np.concatenate(batch_embeddings) if batch_embeddings else []

        if not os.path.exists(args.output_dir):
            os.mkdir(args.output_dir)

        face_cluster = FaceClustering(num_clusters=args.num_clusters)
        labels = face_cluster.clustering(all_embeddings)
        for i, label in enumerate(labels):
            class_dir = os.path.join(args.output_dir, str(label))
            if not os.path.exists(class_dir):
                os.mkdir(class_dir)
            misc.imsave(os.path.join(class_dir, str(i) + '.jpg'), input_images[i])
def parse_arguments(argv):
    """Build the command-line parser and parse `argv` into a namespace."""
    specs = [
        # Positional arguments.
        ('model', dict(type=str,
                       help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')),
        ('input_dir', dict(type=str,
                           help='Directory of input images (cropped and aligned)')),
        ('output_dir', dict(type=str,
                            help='Directory of output images (separated by class id)')),
        # Optional arguments with defaults.
        ('--num_clusters', dict(type=int, default=5,
                                help='Number of face cluster')),
        ('--batch_size', dict(type=int, default=10,
                              help='Number of images to process in a batch.')),
        ('--image_size', dict(type=int, default=160,
                              help='Image size (height, width) in pixels.')),
        ('--gpu_memory_fraction', dict(type=float, default=0.5,
                                       help='Upper bound on the amount of GPU memory that will be used by the process.')),
    ]
    parser = argparse.ArgumentParser()
    for name, kwargs in specs:
        parser.add_argument(name, **kwargs)
    return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| [
"sklearn.cluster.KMeans",
"os.path.exists",
"tensorflow.ConfigProto",
"facenet.get_image_paths",
"argparse.ArgumentParser",
"os.mkdir",
"facenet.prewhiten",
"numpy.concatenate",
"tensorflow.GPUOptions",
"facenet.to_rgb",
"facenet.load_model",
"tensorflow.get_default_graph"
] | [((1630, 1701), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'args.gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n', (1643, 1701), True, 'import tensorflow as tf\n'), ((2952, 2977), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2975, 2977), False, 'import argparse\n'), ((286, 316), 'facenet.load_model', 'facenet.load_model', (['args.model'], {}), '(args.model)\n', (304, 316), False, 'import facenet\n'), ((1247, 1294), 'sklearn.cluster.KMeans', 'KMeans', ([], {'n_clusters': 'num_clusters', 'random_state': '(0)'}), '(n_clusters=num_clusters, random_state=0)\n', (1253, 1294), False, 'from sklearn.cluster import KMeans\n'), ((1498, 1537), 'facenet.get_image_paths', 'facenet.get_image_paths', (['args.input_dir'], {}), '(args.input_dir)\n', (1521, 1537), False, 'import facenet\n'), ((2444, 2475), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (2458, 2475), False, 'import os\n'), ((2485, 2510), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (2493, 2510), False, 'import os\n'), ((869, 891), 'facenet.prewhiten', 'facenet.prewhiten', (['img'], {}), '(img)\n', (886, 891), False, 'import facenet\n'), ((391, 413), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (411, 413), True, 'import tensorflow as tf\n'), ((470, 492), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (490, 492), True, 'import tensorflow as tf\n'), ((567, 589), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (587, 589), True, 'import tensorflow as tf\n'), ((831, 850), 'facenet.to_rgb', 'facenet.to_rgb', (['img'], {}), '(img)\n', (845, 850), False, 'import facenet\n'), ((1729, 1796), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (1743, 
1796), True, 'import tensorflow as tf\n'), ((2387, 2431), 'numpy.concatenate', 'np.concatenate', (['[all_embeddings, embeddings]'], {}), '([all_embeddings, embeddings])\n', (2401, 2431), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import pafy
"""
url = 'https://youtu.be/u68EWmtKZw0?list=TLPQMDkwMzIwMjCcOgKmuF00yg'
vPafy = pafy.new(url)
play = vPafy.getbest(preftype="mp4")
cap = cv2.VideoCapture(play.url)
"""
# Webcam object detection with MobileNet-SSD.
cap = cv2.VideoCapture(0)

CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
           "bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
           "dog", "horse", "motorbike", "person", "pottedplant", "sheep",
           "sofa", "train", "tvmonitor"]
# One random color per class for the detection boxes.
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))

# FIX: load the detector once, before the capture loop.  The original code
# re-read the prototxt into an unused, never-closed file handle and rebuilt
# the whole network on every frame.
net = cv2.dnn.readNetFromCaffe("models/MobileNetSSD_deploy.prototxt", "models/MobileNetSSD_deploy.caffemodel")

while True:
    _, image = cap.read()
    image = cv2.resize(image, (640, 640))
    (h, w) = image.shape[:2]
    # NOTE(review): scale 0.007 looks like it is meant to be 1/127.5
    # (~0.00784); kept as-is to preserve existing behavior -- confirm.
    blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 0.007, (300, 300), 127.5)
    net.setInput(blob)
    preds = net.forward()
    for i in np.arange(0, preds.shape[2]):
        confidence = preds[0, 0, i, 2]
        if confidence > 0.3:
            idx = int(preds[0, 0, i, 1])
            label = "{}: {:.2f}%".format(CLASSES[idx], confidence * 100)
            # Detection box coordinates are normalized; scale to pixels.
            box = preds[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            cv2.rectangle(image, (startX, startY), (endX, endY), COLORS[idx], 2)
            # Keep the label inside the frame when the box touches the top.
            y = startY - 15 if startY - 15 > 15 else startY + 15
            cv2.putText(image, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 1)
    cv2.imshow('MobileSSD', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.dnn.readNetFromCaffe",
"cv2.imshow",
"cv2.putText",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.resize",
"cv2.waitKey",
"numpy.arange"
] | [((217, 236), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (233, 236), False, 'import cv2\n'), ((1711, 1734), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1732, 1734), False, 'import cv2\n'), ((601, 630), 'cv2.resize', 'cv2.resize', (['image', '(640, 640)'], {}), '(image, (640, 640))\n', (611, 630), False, 'import cv2\n'), ((755, 863), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['"""models/MobileNetSSD_deploy.prototxt"""', '"""models/MobileNetSSD_deploy.caffemodel"""'], {}), "('models/MobileNetSSD_deploy.prototxt',\n 'models/MobileNetSSD_deploy.caffemodel')\n", (779, 863), False, 'import cv2\n'), ((1016, 1044), 'numpy.arange', 'np.arange', (['(0)', 'preds.shape[2]'], {}), '(0, preds.shape[2])\n', (1025, 1044), True, 'import numpy as np\n'), ((1611, 1641), 'cv2.imshow', 'cv2.imshow', (['"""MobileSSD"""', 'image'], {}), "('MobileSSD', image)\n", (1621, 1641), False, 'import cv2\n'), ((893, 922), 'cv2.resize', 'cv2.resize', (['image', '(300, 300)'], {}), '(image, (300, 300))\n', (903, 922), False, 'import cv2\n'), ((1377, 1445), 'cv2.rectangle', 'cv2.rectangle', (['image', '(startX, startY)', '(endX, endY)', 'COLORS[idx]', '(2)'], {}), '(image, (startX, startY), (endX, endY), COLORS[idx], 2)\n', (1390, 1445), False, 'import cv2\n'), ((1522, 1611), 'cv2.putText', 'cv2.putText', (['image', 'label', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', 'COLORS[idx]', '(1)'], {}), '(image, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n COLORS[idx], 1)\n', (1533, 1611), False, 'import cv2\n'), ((1648, 1662), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1659, 1662), False, 'import cv2\n'), ((1281, 1303), 'numpy.array', 'np.array', (['[w, h, w, h]'], {}), '([w, h, w, h])\n', (1289, 1303), True, 'import numpy as np\n')] |
"""
Vectorize() with support for decorating methods; for example::
from scipy.stats import rv_continuous
from scipy_ext import vectorize
class dist(rv_continuous):
@vectorize(excluded=('n',), otypes=(float,))
def _cdf(self, x, n):
if n < 5:
return f(x) # One expensive calculation.
else:
return g(x) # A different expensive calculation.
"""
from __future__ import division
from numpy import arange, stack, vectorize as numpy_vectorize
class _vectorize(numpy_vectorize):
    """
    Method decorator, working just like `numpy.vectorize()`.
    """
    def __get__(self, instance, owner):
        # Vectorize stores the decorated function (former "unbound method")
        # as pyfunc. Bound method's __get__ returns the method itself.
        # NOTE(review): this rebinds self.pyfunc on every attribute access,
        # and the decorator object lives on the class (shared by all
        # instances) -- pyfunc stays bound to the most recently accessed
        # instance.  Confirm this is only used with effectively stateless
        # methods or a single instance.
        self.pyfunc = self.pyfunc.__get__(instance, owner)
        return self
def vectorize(*args, **kwargs):
    """
    Decorator usable both bare (`@vectorize`) and with options
    (`@vectorize(excluded=..., otypes=...)`).
    """
    called_bare = bool(args) and callable(args[0])
    if called_bare:
        # `@vectorize`: args[0] is the decorated function itself.
        return _vectorize(args[0])
    # `@vectorize(...)`: return a decorator that waits for the function.
    def decorator(method):
        return _vectorize(method, *args, **kwargs)
    return decorator
def varange(starts, count):
    """
    Vectorized `arange()` taking a sequence of starts and a count of elements.
    For example::
        >>> varange(1, 5)
        array([1, 2, 3, 4, 5])
        >>> varange((1, 3), 5)
        array([[1, 2, 3, 4, 5],
               [3, 4, 5, 6, 7]])
    """
    try:
        rows = [arange(first, first + count) for first in starts]
    except TypeError:
        # `starts` is a scalar, not a sequence: return a single 1-D range.
        return arange(starts, starts + count)
    return stack(rows)
| [
"numpy.arange"
] | [((1635, 1665), 'numpy.arange', 'arange', (['starts', '(starts + count)'], {}), '(starts, starts + count)\n', (1641, 1665), False, 'from numpy import arange, stack, vectorize as numpy_vectorize\n'), ((1560, 1580), 'numpy.arange', 'arange', (['s', '(s + count)'], {}), '(s, s + count)\n', (1566, 1580), False, 'from numpy import arange, stack, vectorize as numpy_vectorize\n')] |
# @title: pbt_trainer.py
# @author: <NAME>
# @date: 02.09.2021
############################################################
# Imports
import torch
import time
import numpy as np
import random
from torch.utils.tensorboard import SummaryWriter
from src.utility.container import (
ModelContainer,
UtilityContainer,
StatisticContainer,
ReinforceContainer,
FrameType,
PopulationContainer,
PopulationModelContainer,
ImageContainer,
ModelSavingContainer,
)
from tqdm import tqdm
import torchvision.transforms as transforms
from src.gridworld.gridworld import Gridworld
import logging
import torchvision
from torch.distributions.categorical import Categorical
from src.gridworld_trainer.reinforce.memory import MemoryReinforce, MemoryReinforce3D
from src.gridworld_trainer.reinforce.model import ReinforceNetwork, ReinforceNetwork3D
import os
import copy
import torch.optim as optim
############################################################
# Code
class PopulationBasedTrainerReinforce:
    def __init__(
        self,
        util_container: UtilityContainer,
        model_container: ModelContainer,
        pbt_container: PopulationContainer,
    ):
        """
        Initializes a trainer for training or population based training with transfer training
        @params:
            util_container      a utility container with needed information for initialization
            model_container     a utility container with information for the model
            pbt_container       a utility container with information for population based training
        """
        # Keep references to the three configuration containers.
        self.model_container = model_container
        self.utility_container = util_container
        self.pbt_container = pbt_container
        self.algorithm_name = "Reinforce"
        self.frameType_name = ""
        # Frame-type-specific callables; expected to be installed later by
        # init_frame_type().
        self.get_state = None
        self.reset_env = None
        self.make_model = None
        self.make_memory = None
        # Unix timestamp; doubles as the default log-dir suffix and seed.
        self.start_time = int(time.time())
        if self.utility_container.path is None:
            self.path = "../../../logs/gridworld/reinforce/logs-" + str(self.start_time)
        else:
            self.path = self.utility_container.path
        if not os.path.exists(self.path):
            os.makedirs(self.path)
        if self.utility_container.init_logging:
            logging.basicConfig(
                filename=self.path + "/info.log", filemode="w", level=logging.DEBUG
            )
        # Pick the GPU when available, otherwise fall back to the CPU.
        # NOTE(review): when model_container.device is already set,
        # self.device is never assigned in this branch -- confirm no later
        # code reads self.device in that case.
        if model_container.device is None:
            if torch.cuda.is_available():
                self.device = torch.device("cuda:0")
                if self.utility_container.logging:
                    logging.info("Running on the GPU")
            else:
                self.device = torch.device("cpu")
                if self.utility_container.logging:
                    logging.info("Running on the CPU")
            model_container.device = self.device
        # Seed torch, numpy and random for reproducible runs; default seed is
        # the start timestamp and is written back into the container.
        if self.utility_container.seed is None:
            self.seed = self.start_time
            self.utility_container.seed = self.seed
        else:
            self.seed = self.utility_container.seed
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)
        random.seed(self.seed)
        if self.utility_container.logging:
            logging.info(f"Seed: {self.seed}")
        self.transformer = transforms.ToTensor()
        # Build the gridworld environment and mirror its dimensions into the
        # model container (square observation; one output per action).
        self.env = Gridworld.make(self.utility_container.environment_id)
        self.obs_size = self.env.obs_size
        self.num_actions = self.env.n_actions
        self.model_container.num_inputs_x = self.obs_size
        self.model_container.num_inputs_y = self.obs_size
        self.model_container.num_outputs = self.num_actions
        # init_frame_type() must install reset_env (among others) before the
        # next line calls it.
        self.init_frame_type()
        self.obs, _ = self.reset_env()
def init_frame_type(self):
"""
Initializes several functions depending on the frame type
"""
if self.utility_container.frame_type == FrameType.Single:
self.frameType_name = "single_frame"
self.get_state = self.get_state_single_frame
self.reset_env = self.reset_env_single_frame
self.model_container.num_channel = 3
self.model_container.num_images = 1
self.make_model = self.make_model_2d
self.make_memory = self.make_memory_2d
if self.utility_container.frame_type == FrameType.Stacked2D:
self.frameType_name = "2D_stacked_frames"
self.get_state = self.get_state_2d_stacked_frame
self.reset_env = self.reset_env_2d_stacked_frame
self.model_container.num_channel = 9
self.model_container.num_images = 1
self.make_model = self.make_model_2d
self.make_memory = self.make_memory_2d
if self.utility_container.frame_type == FrameType.Stacked3D:
self.frameType_name = "3D_stacked_frames"
self.get_state = self.get_state_3d_stacked_frame
self.reset_env = self.reset_env_3d_stacked_frame
self.model_container.num_channel = 3
self.model_container.num_images = 3
self.make_model = self.make_model
self.make_memory = self.make_memory_3d
def make_memory_2d(self, max_memory_size):
"""
Generates a memory for the reinforce algorithm for 2d stacked frames and single image frame type
@param:
max_memory_size the maximum size for the memory
@return:
the generated memory
"""
memory = MemoryReinforce(
max_memory_size,
self.obs_size,
self.obs_size,
self.model_container.num_channel,
self.device,
)
return memory
def make_memory_3d(self, max_memory_size):
"""
Generates a memory for the reinforce algorithm for 3d stacked frames
@param:
max_memory_size the maximum size for the memory
@return:
the generated memory
"""
memory = MemoryReinforce3D(
max_memory_size,
self.obs_size,
self.obs_size,
self.model_container.num_images,
self.model_container.num_channel,
self.device,
)
return memory
def make_model_2d(self):
"""
Method for generating a mode for 2d stacked frames or a single frame
@return
the generated model
"""
actor = ReinforceNetwork(self.model_container, self.obs)
return actor
def make_model_3d(self):
"""
Method for generating a mode for 3d stacked frames
@return
the generated model
"""
actor = ReinforceNetwork3D(self.model_container, self.obs)
return actor
def get_state_single_frame(self, state, images):
"""
Transforms the output of the environment into a input state of the model for single frame type
@params:
state the environment output
images the previous images, not used in single frame type
@return:
the input state for the model
the updated images, None in single frame type
"""
state = self.transformer(state)
state = state.to(self.device)
return state, None
def get_state_2d_stacked_frame(self, next_img, images: ImageContainer):
"""
Transforms the output of the environment into a input state of the model for 2d stacked frames
@params:
state the environment output
images the previous images
@return:
the input state for the model
the updated images
"""
images.img_3 = images.img_2
images.img_2 = images.img_1
state = self.transformer(next_img)
images.img_1 = state.to(self.device)
state = torch.cat([images.img_1, images.img_2, images.img_3])
return state, images
def get_state_3d_stacked_frame(self, next_img, images: ImageContainer):
"""
Transforms the output of the environment into a input state of the model for 3d stacked frames
@params:
state the environment output
images the previous
@return:
the input state for the model
the updated images
"""
images.img_3 = images.img_2
images.img_2 = images.img_1
state = self.transformer(next_img)
images.img_1 = state.to(self.device)
state = torch.cat([images.img_1, images.img_2, images.img_3])
return state, images
def reset_env_single_frame(self):
"""
Resets the environment for the specific frame type
Single frame type
@return:
the next input state for the mode
the updated images, none in case of single frame type
"""
state = self.transformer(self.env.reset()).to(self.device)
return state, None
def reset_env_2d_stacked_frame(self):
"""
Resets the environment for the specific frame type
2d stacked frames
@return:
the next input state for the mode
the updated images
"""
images = ImageContainer()
img_1 = self.transformer(self.env.reset())
images.img_1 = img_1.to(self.device)
images.img_2 = torch.zeros(40 * 40 * 3).view(3, 40, 40).to(self.device)
images.img_3 = torch.zeros(40 * 40 * 3).view(3, 40, 40).to(self.device)
state = torch.cat([images.img_1, images.img_2, images.img_3])
return state, images
def reset_env_3d_stacked_frame(self):
"""
Resets the environment for the specific frame type
3d stacked frames
@return:
the next input state for the mode
the updated images
"""
images = ImageContainer()
img_1 = self.transformer(self.env.reset())
images.img_1 = img_1.to(self.device)
images.img_2 = torch.zeros(40 * 40 * 3).view(3, 40, 40).to(self.device)
images.img_3 = torch.zeros(40 * 40 * 3).view(3, 40, 40).to(self.device)
state = torch.stack([images.img_1, images.img_2, images.img_3])
return state, images
def save_model(self, pbm_conatiner: PopulationModelContainer):
"""
Saving a model to a file
@params:
pbm_container the population model container with the model to save
"""
file = pbm_conatiner.path + str(pbm_conatiner.id) + ".pth"
saves = ModelSavingContainer()
saves.model_state = pbm_conatiner.model.state_dict()
saves.optimizer_state = pbm_conatiner.optimizer.state_dict()
torch.save(saves, file)
def load_model(self, path, model, optimizer):
"""
Loading a model from a file
@params
path the file of the model
model the model to overwrite
optimizer the optimizer to overwrite
@returns
the loaded model
the loaded optimizer
"""
file = path
load = torch.load(file)
model.load_state_dict(load.model_state)
optimizer.load_state_dict(load.optimizer_sate)
return model, optimizer
def update_statistics(self, info, iteration_statistics, statistics):
"""
Updates the given statistics with the new info
@params:
info the new information
iteration_statistics, statistics the statistic container to update
"""
if info.success:
statistics.success += 1
iteration_statistics.success += 1
statistics.avg_steps.update(info.num_steps)
iteration_statistics.avg_steps.update(info.num_steps)
else:
statistics.avg_steps.update(self.env.max_steps)
iteration_statistics.avg_steps.update(self.env.max_steps)
statistics.count += 1
iteration_statistics.count += 1
statistics.avg_reward.update(info.reward)
iteration_statistics.avg_reward.update(info.reward)
statistics.avg_reward_penalty.update(info.reward_penalty)
iteration_statistics.avg_reward_penalty.update(info.reward_penalty)
def log_statistics(self, iteration, pbm_container: PopulationModelContainer):
"""
Logging the statistics to tensorboard
@params:
iteration the current iteration
pbm_container the container with statistics and writer
"""
accuracy = pbm_container.statistics.success / pbm_container.statistics.count
pbm_container.writer.add_scalar("mean accuracy", accuracy, iteration)
pbm_container.writer.add_scalar(
"mean reward", pbm_container.statistics.avg_reward.avg, iteration
)
pbm_container.writer.add_scalar(
"mean steps", pbm_container.statistics.avg_steps.avg, iteration
)
pbm_container.writer.add_scalar(
"mean reward penalty",
pbm_container.statistics.avg_reward_penalty.avg,
iteration,
)
iteration_accuracy = (
pbm_container.iteration_statistics.success
/ pbm_container.iteration_statistics.count
)
pbm_container.writer.add_scalar(
"mean iteration accuracy", iteration_accuracy, iteration
)
pbm_container.writer.add_scalar(
"mean iteration reward",
pbm_container.iteration_statistics.avg_reward.avg,
iteration,
)
pbm_container.writer.add_scalar(
"mean iteration steps",
pbm_container.iteration_statistics.avg_steps.avg,
iteration,
)
pbm_container.writer.add_scalar(
"mean iteration reward penalty",
pbm_container.iteration_statistics.avg_reward_penalty.avg,
iteration,
)
pbm_container.writer.add_scalar(
"learning rate",
pbm_container.hyper_container[-1].learning_rate(),
iteration,
)
pbm_container.writer.add_scalar(
"discount", pbm_container.hyper_container[-1].discount(), iteration
)
pbm_container.writer.add_scalar(
"weight decay", pbm_container.hyper_container[-1].weight_decay(), iteration
)
pbm_container.writer.add_scalar(
"momentum", pbm_container.hyper_container[-1].momentum_sgd(), iteration
)
pbm_container.writer.add_scalar(
"policy epochs",
pbm_container.hyper_container[-1].policy_epochs(),
iteration,
)
pbm_container.writer.add_scalar(
"entropy coefficient",
pbm_container.hyper_container[-1].entropy_coefficient(),
iteration,
)
pbm_container.writer.add_scalar("parent", pbm_container.parent[-1], iteration)
pbm_container.writer.add_scalar("score", pbm_container.score[-1], iteration)
pbm_container.writer.add_scalar(
"score history", pbm_container.score_history, iteration
)
self.make_grid(iteration, pbm_container)
def reset_iteration_statistics(self, pbm_container: PopulationModelContainer):
"""
Resets the statistics for the current iteration
@params:
pbm_container the container of the agent to reset
"""
pbm_container.iteration_statistics.count = 0
pbm_container.iteration_statistics.success = 0
pbm_container.iteration_statistics.avg_reward.reset()
pbm_container.iteration_statistics.avg_steps.reset()
pbm_container.iteration_statistics.avg_reward_penalty.reset()
def make_grid(self, iteration, pbm_container: PopulationModelContainer):
"""
Let the agent play a full episode in the environment and generates an image grid of all the steps
@params
iteration the current iteration
pbm_container agent container
"""
done = False
images = []
state, imgs = self.reset_env()
img = self.env.render()
img = self.transformer(img)
images.append(img)
while not done:
with torch.no_grad():
action = Categorical(logits=pbm_container.model.forward(state))
action = action.sample()
next_img, reward, done, info = self.env.step(action)
state, imgs = self.get_state(next_img, imgs)
img = self.env.render()
img = self.transformer(img)
images.append(img)
img_grid = torchvision.utils.make_grid(images)
pbm_container.writer.add_image("Update", img_grid, global_step=iteration)
def hyper_string(self, pbm_container: PopulationModelContainer):
"""
Generates a formatted string with all the hyperparameters of an agent
@params
pbm_container the agent container
@returns
the generated string
"""
return [
"Hyperparameter:",
"Environment ID: "
+ self.utility_container.environment_id
+ "; Training Type: "
+ self.algorithm_name
+ "; Frame Type: "
+ self.frameType_name
+ "; Device: "
+ str(self.device)
+ "; Policy epochs: "
+ str(pbm_container.hyper_container[-1].policy_epochs())
+ "; Number of updates: "
+ str(pbm_container.hyper_container[-1].num_updates())
+ "; Max memory size: "
+ str(pbm_container.hyper_container[-1].max_memory_size())
+ "; Entropy coefficient: "
+ str(pbm_container.hyper_container[-1].entropy_coefficient())
+ "; Discount: "
+ str(pbm_container.hyper_container[-1].discount())
+ "; Learning rate: "
+ str(pbm_container.hyper_container[-1].learning_rate())
+ "; Batch size: "
+ str(pbm_container.hyper_container[-1].batch_size())
+ "; Optimizer: "
+ pbm_container.hyper_container[-1].optimizer_name
+ "; Weight decay: "
+ str(pbm_container.hyper_container[-1].weight_decay())
+ "; Momentum: "
+ str(pbm_container.hyper_container[-1].momentum_sgd()),
]
def print_hyper(self, pbm_container: PopulationModelContainer, episode=0):
"""
Prints a string with all the hyperparameters in the log and the tensorboard
@params
pbm_container the agent container
episode the current episode, 0 default
"""
hypers = self.hyper_string(pbm_container)
pbm_container.writer.add_text(hypers[0], hypers[1], global_step=episode)
if self.utility_container.logging:
logging.info(f"Model ID: {pbm_container.id}: {hypers[0]} {hypers[1]}")
def fit_optimizer(self, pbm_container: PopulationModelContainer):
"""
Fits the optimizer with the model and the hyperparameters
@params
pbm_container the agent container
@returns
the fitted optimizer
"""
if type(pbm_container.hyper_container[-1].optimizer).__name__ == "SGD":
optimizer = pbm_container.hyper_container[-1].optimizer(
pbm_container.model.parameters(),
lr=pbm_container.hyper_container[-1].learning_rate(),
weight_decay=pbm_container.hyper_container[-1].weight_decay(),
momentum=pbm_container.hyper_container[-1].momentum(),
)
else:
optimizer = pbm_container.hyper_container[-1].optimizer(
pbm_container.model.parameters(),
lr=pbm_container.hyper_container[-1].learning_rate(),
weight_decay=pbm_container.hyper_container[-1].weight_decay(),
)
return optimizer
def make_model_container(self, model_id: int):
"""
Generates a new agent
@params
model_id the id of the agent
@returns
the generated agent
"""
parameter = ReinforceContainer()
trainer_container = PopulationModelContainer()
trainer_container.id = model_id
trainer_container.model = self.make_model()
trainer_container.hyper_container.append(parameter)
trainer_container.path = self.path + "/" + str(model_id)
trainer_container.writer = SummaryWriter(trainer_container.path)
trainer_container.writer.add_graph(
model=trainer_container.model, input_to_model=self.obs
)
trainer_container.optimizer = self.fit_optimizer(trainer_container)
trainer_container.parent = [model_id]
return trainer_container
def make_population(self, opti=False):
"""
Generates a population of agents
@params:
mutate True if the hyperparameters should be mutated, false if not
"""
population = []
for i in range(self.pbt_container.population_size):
container = self.make_model_container(i)
container.hyper_container[0] = self.sample_parameter(
container.hyper_container[0], self.seed + i, opti=opti
)
population.append(container)
return population
def sample_parameter(
self, parameter_container: ReinforceContainer, seed=0, opti=False
):
"""
Samples the hyperparameters
@params
parameter_container the container containing the hyperparameters
seed the random seed
mutate True if the learning rate should be mutate
False if the learning rate should be sampled
@returns:
the hyperparmameter container with the new values
"""
parameter_container.optimizer = optim.SGD
parameter_container.optimizer_name = parameter_container.optimizer.__name__
if opti:
parameter_container.learning_rate.mutate(self.pbt_container.mutation_cut)
else:
parameter_container.learning_rate.set(
parameter_container.learning_rate.sample(seed=seed)
)
parameter_container.policy_epochs.mutate(self.pbt_container.mutation_cut)
parameter_container.entropy_coefficient.mutate(self.pbt_container.mutation_cut)
parameter_container.discount.mutate(self.pbt_container.mutation_cut)
parameter_container.weight_decay.mutate(self.pbt_container.mutation_cut)
parameter_container.momentum_sgd.mutate(self.pbt_container.mutation_cut)
return parameter_container
def mutate_hyper(self, population, step):
"""
Mutates the hyperparameters for a whole population
@params:
population the population to mutate
step the current step
"""
for i in reversed(range(len(population))):
if i >= (len(population) - len(population) * self.pbt_container.cut):
population[i].score_history = 0
population[i].hyper_container.append(
copy.copy(population[0].hyper_container[-1])
)
population[i].model.load_state_dict(population[0].model.state_dict())
population[i].parent.append(population[0].id)
if self.utility_container.logging:
logging.info(
f"Model {population[i].id} extends model {population[0].id}"
)
else:
population[i].score_history += self.pbt_container.history_score_cut
population[i].hyper_container.append(
copy.copy(population[i].hyper_container[-1])
)
population[i].hyper_container.append(
self.mutator(step, population[i].hyper_container[-1])
)
self.fit_optimizer(population[i])
self.log_statistics(step, population[i])
self.reset_iteration_statistics(population[i])
self.print_hyper(population[i], step)
    def mutator(self, step, params: ReinforceContainer):
        """
        Mutates the hyperparameters in a container.

        @params:
            step the current step (currently not used)
            params a hyperparameter container
        @returns:
            a hyperparameter container with mutated hyperparameters
        """
        # NOTE(review): copy.copy is a *shallow* copy -- the individual
        # parameter objects are shared with `params`, so the in-place
        # mutate() calls below also alter the original container (and any
        # history entries referencing the same objects). Confirm whether a
        # deep copy was intended.
        hyper = copy.copy(params)
        hyper.policy_epochs.mutate(self.pbt_container.mutation_cut)
        hyper.entropy_coefficient.mutate(self.pbt_container.mutation_cut)
        hyper.learning_rate.mutate(self.pbt_container.mutation_cut)
        hyper.discount.mutate(self.pbt_container.mutation_cut)
        hyper.weight_decay.mutate(self.pbt_container.mutation_cut)
        hyper.momentum_sgd.mutate(self.pbt_container.mutation_cut)
        return hyper
def evaluate_all(self, population):
"""
Evaluates a whole population of agents
@params:
population the population of agents
"""
for t in tqdm(population):
self.evaluate_trainer(t, 10)
self.calc_values(population)
population.sort(key=lambda tup: tup.score[-1])
def calc_values(self, population):
"""
Calculates the rating of a whole population and resets the evaluation
@params:
population a population of agents
"""
for a in population:
acc = 1 / ((a.evaluation.success / a.evaluation.count) + 0.000001)
step_value = a.evaluation.avg_steps.avg / 201
reward_value = 1 - a.evaluation.avg_reward.avg
summed_values = (
acc + step_value + reward_value + a.evaluation.avg_reward_penalty.avg
)
new_score = summed_values / (4 + a.score_history * (1 - a.score[-1]))
a.score.append(new_score)
a.evaluation = StatisticContainer()
def eval_episodes(self, episodes):
"""
Calculates the number of steps to play until the next evaluation
@params:
episodes the current number of episodes (currently unused)
@returns
the number of steps to play until the next evaluation
"""
return 5
def evaluate_trainer(self, pbm_container: PopulationModelContainer, evaluates=100):
"""
Evaluates a trainer by playing several episodes
@params:
pbm_container agent to evaluate
evaluates the number of episodes to play for evaluation
"""
for episode in range(evaluates):
state, images = self.reset_env()
done = False
while not done:
with torch.no_grad():
action = Categorical(logits=pbm_container.model.forward(state))
action = action.sample()
next_img, reward, done, info = self.env.step(action)
state, images = self.get_state(next_img, images)
if done:
self.update_statistics(
info, pbm_container.evaluation, pbm_container.statistics
)
pbm_container.iteration_statistics = copy.copy(pbm_container.evaluation)
    def generate_memory(
        self,
        model,
        parameter: ReinforceContainer,
        do_stats=False,
        iteration_statistics=None,
        statistics=None,
    ):
        """
        Plays the game for `parameter.max_memory_size()` steps and stores
        the transitions in a fresh memory. Episodes are stitched back to
        back: when one ends, the environment is reset and play continues.

        @params:
            model the model used to choose actions
            parameter the hyperparameter container
            do_stats True if episode statistics should be recorded
                     False otherwise
            iteration_statistics statistics for one iteration (used when do_stats)
            statistics all time statistics (used when do_stats)
        @returns:
            the memory with generated transitions
        """
        memory = self.make_memory(parameter.max_memory_size())
        done = False
        info = None
        state, images = self.reset_env()
        for episode in tqdm(range(parameter.max_memory_size())):
            if done:
                # Previous step ended an episode: restart and (optionally)
                # record its outcome before continuing.
                next_state, images = self.reset_env()
                if do_stats:
                    self.update_statistics(info, iteration_statistics, statistics)
            else:
                next_state = state
            action, log_prob = self.get_action(next_state, model)
            next_img, reward, done, info = self.env.step(action)
            next_state, images = self.get_state(next_img, images)
            # NOTE(review): the stored observation is `state` (the input of
            # the previous slot) while action/log_prob/reward belong to the
            # current step -- verify this one-step offset is what the
            # memory's compute_returns expects.
            memory.insert(
                episode,
                torch.tensor(done, dtype=torch.float32),
                action,
                log_prob,
                torch.tensor(reward, dtype=torch.float32),
                state,
            )
            state = next_state
        return memory
def get_action(self, state, model):
"""
Determines the next action
@params:
state the current input state
model the model to determine the action
@returns:
the next action
the logarithmic probability
"""
with torch.no_grad():
logits = model.forward(state)
dist = Categorical(logits=logits)
action = dist.sample()
log_prob = dist.log_prob(action)
return action, log_prob
def update(self, rollouts, model, optimizer, parameter: ReinforceContainer):
"""
Calculates the loss and updates the model
@params:
rollouts memory filled with transitions
model the model to update
optimizer the optimizer
parameter a hyperparameter container
"""
for epoch in tqdm(range(parameter.policy_epochs())):
data = rollouts.batch_sampler(parameter.batch_size())
for sample in data:
actions_batch, returns_batch, obs_batch = sample
actions_batch = actions_batch.to(self.device)
log_probs_batch, entropy_batch = self.evaluate_actions(
obs_batch, actions_batch, model
)
log_probs_batch = log_probs_batch.to(self.device)
returns_batch = returns_batch.to(self.device)
policy_loss = -(log_probs_batch * returns_batch).mean()
entropy_loss = -entropy_batch.mean()
loss = policy_loss + parameter.entropy_coefficient() * entropy_loss
optimizer.zero_grad()
loss.backward(retain_graph=False)
optimizer.step()
def evaluate_actions(self, state, action, model):
"""
Evaluates the taken actions
@params:
state the state to evaluate with
action the action to evaluate
model the model to evaluate with
@returns
logarithmic probability of the action
the entropy of the distribution
"""
logits = model.forward(state)
dist = Categorical(logits=logits)
log_prob = dist.log_prob(action.squeeze(-1)).view(-1, 1)
entropy = dist.entropy().view(-1, 1)
return log_prob, entropy
def init_single(self):
"""
Initializes a baseline run
@returns:
a trained agent
"""
pbm = self.make_model_container(0)
pbm.hyper_container[0].weight_decay.set(0.000001)
pbm.optimizer = self.fit_optimizer(pbm)
self.print_hyper(pbm)
self.train_single(pbm)
return pbm
def train_single(self, pbm: PopulationModelContainer):
"""
Trains a single agent
@params:
pbm the agent to train
@returns:
the trained agent
"""
parameter = pbm.hyper_container[0]
model = pbm.model
optimizer = pbm.optimizer
for updates in range(parameter.num_updates()):
memory = self.generate_memory(
model, parameter, True, pbm.iteration_statistics, pbm.statistics
)
memory.compute_returns(parameter.discount())
self.update(memory, model, optimizer, parameter)
memory.reset()
self.log_statistics(updates * parameter.max_memory_size(), pbm)
self.reset_iteration_statistics(pbm)
return pbm
def init_population_training(self, opti=False):
"""
Initializes a population based training
@returns:
a trained population
"""
population = self.make_population(opti=opti)
for i in population:
i.hyper_container[0] = self.sample_parameter(
i.hyper_container[0], seed=self.seed + i.id
)
population = self.train_population(population)
return population
    def train_population(self, population):
        """
        Trains a whole population with shared rollouts: memory is
        generated once per round with the current best agent's policy and
        every member trains on a copy of it; after a fixed number of
        rounds the population is evaluated and mutated.

        @params:
            population the population to train
        @returns:
            the trained population
        """
        episodes = 0
        done_training = False
        while not done_training:
            # Number of shared-rollout rounds before the next evaluation.
            next_episodes = self.eval_episodes(episodes)
            for eps in range(next_episodes):
                # All members learn from rollouts of population[0]'s policy.
                memory = self.generate_memory(
                    population[0].model, population[0].hyper_container[-1]
                )
                for t in population:
                    # NOTE(review): copy.copy is shallow -- t_memory likely
                    # shares its buffers with `memory`; verify that
                    # compute_returns/reset do not clobber the shared data
                    # between members.
                    t_memory = copy.copy(memory)
                    t_memory.compute_returns(t.hyper_container[-1].discount())
                    self.update(t_memory, t.model, t.optimizer, t.hyper_container[-1])
                    t_memory.reset()
                memory.reset()
                # Stop once the configured number of episodes is reached;
                # the evaluation/mutation below still runs one last time.
                if episodes == self.pbt_container.evaluation_steps:
                    done_training = True
                episodes += 1
            self.evaluate_all(population)
            self.mutate_hyper(population, episodes)
        return population
def transfer_trainer(self):
"""
Trains a whole population and transfers the best agent to another environment
"""
population = self.init_population_training()
self.evaluate_all(population)
self.env = Gridworld.make("hardcore-10x10-random")
best = population[0]
best.writer = SummaryWriter(best.path + "-transfer")
best.writer.add_graph(model=best.model, input_to_model=self.obs)
self.train_single(best)
def population_trainer_transfer(self, best_pop=False, comment=None):
"""
Trains a population and transfers the whole population to another environment
@params:
best_pop True if the transfer population should be copies of the best agent
False if the whole population should be transferred
comment comment for logging
"""
if comment is not None:
logging.info(comment)
population = self.init_population_training(opti=True)
self.evaluate_all(population)
self.env = Gridworld.make("hardcore-10x10-random")
logging.info("pb transfer training")
transfer = []
if best_pop:
for i in range(self.pbt_container.population_size):
transfer.append(copy.copy(population[0]))
transfer[i].writer = SummaryWriter(
transfer[i].path + "-transfer" + str(i)
)
transfer[i].writer.add_graph(
model=transfer[i].model, input_to_model=self.obs
)
transfer[i].optimizer = self.fit_optimizer(transfer[i])
transfer[i].evaluation = StatisticContainer()
transfer[i].statistics = StatisticContainer()
transfer[i].score = [0.0]
transfer[i].score_histoy = 0
transfer[i].parent = [i]
else:
for i in range(self.pbt_container.population_size):
transfer.append(copy.copy(population[i]))
transfer[i].writer = SummaryWriter(
transfer[i].path + "-transfer" + str(i)
)
transfer[i].writer.add_graph(
model=transfer[i].model, input_to_model=self.obs
)
transfer[i].optimizer = self.fit_optimizer(transfer[i])
transfer[i].evaluation = StatisticContainer()
transfer[i].statistics = StatisticContainer()
transfer[i].score = [0.0]
transfer[i].score_histoy = 0
transfer[i].parent = [i]
self.train_population(transfer)
def init_single_transfer(self, comment=None):
"""
Initializes a single baseline agent
@param:
comment a logging comment
"""
if comment is not None:
logging.info(comment)
logging.info("Single transfer training")
pbm = self.make_model_container(0)
pbm.hyper_container[0].weight_decay.set(0.000001)
pbm.optimizer = self.fit_optimizer(pbm)
self.print_hyper(pbm)
pbm = self.train_single(pbm)
self.env = Gridworld.make("hardcore-10x10-random")
pbm.writer = SummaryWriter(pbm.path + "-transfer")
pbm.writer.add_graph(model=pbm.model, input_to_model=self.obs)
pbm.evaluation = StatisticContainer()
pbm.statistics = StatisticContainer()
pbm.score = [0.0]
pbm.score_history = 0
self.train_single(pbm)
def init_single_transfer_repeat(self, agents, comment=None):
"""
Generates multiple baseline agents
@params:
agents the number of agents to train
comment a logging comment
"""
if comment is not None:
logging.info(comment)
logging.info("Single transfer training")
for i in range(agents):
self.env = Gridworld.make("empty-10x10-random")
pbm = self.make_model_container(i)
pbm.hyper_container[0].weight_decay.set(0.000001)
pbm.optimizer = self.fit_optimizer(pbm)
self.print_hyper(pbm)
pbm = self.train_single(pbm)
self.env = Gridworld.make("hardcore-10x10-random")
pbm.writer = SummaryWriter(pbm.path + "-transfer")
pbm.writer.add_graph(model=pbm.model, input_to_model=self.obs)
pbm.evaluation = StatisticContainer()
pbm.statistics = StatisticContainer()
pbm.score = [0.0]
pbm.score_history = 0
self.train_single(pbm)
"""
model_container = ModelContainer()
util_container = UtilityContainer()
util_container.logging = True
util_container.init_logging = True
util_container.environment_id = "empty-10x10-random"
# util_container.environment_id = "hardcore-10x10-random"
util_container.save = False
util_container.loading = False
pbt = PopulationContainer()
pbt.population_size = 10
trainer = PopulationBasedTrainerReinforce(util_container, model_container, pbt)
# trainer.init_single()
# trainer.init_population_training()
# trainer.transfer_trainer()
# trainer.init_single_transfer()
# trainer.population_trainer_transfer(best_pop=False, comment="\ntransfer type: best agent \n")
# trainer.population_trainer_transfer(best_pop=True, comment="\ntransfer type: whole pop \n")
# trainer.init_single_transfer_repeat(10)
"""
| [
"src.gridworld_trainer.reinforce.model.ReinforceNetwork3D",
"src.gridworld_trainer.reinforce.memory.MemoryReinforce",
"torch.cuda.is_available",
"torchvision.utils.make_grid",
"copy.copy",
"logging.info",
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"src.utility.container.StatisticCont... | [((3125, 3153), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (3142, 3153), False, 'import torch\n'), ((3162, 3187), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (3176, 3187), True, 'import numpy as np\n'), ((3196, 3218), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (3207, 3218), False, 'import random\n'), ((3338, 3359), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3357, 3359), True, 'import torchvision.transforms as transforms\n'), ((3380, 3433), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['self.utility_container.environment_id'], {}), '(self.utility_container.environment_id)\n', (3394, 3433), False, 'from src.gridworld.gridworld import Gridworld\n'), ((5522, 5636), 'src.gridworld_trainer.reinforce.memory.MemoryReinforce', 'MemoryReinforce', (['max_memory_size', 'self.obs_size', 'self.obs_size', 'self.model_container.num_channel', 'self.device'], {}), '(max_memory_size, self.obs_size, self.obs_size, self.\n model_container.num_channel, self.device)\n', (5537, 5636), False, 'from src.gridworld_trainer.reinforce.memory import MemoryReinforce, MemoryReinforce3D\n'), ((6017, 6166), 'src.gridworld_trainer.reinforce.memory.MemoryReinforce3D', 'MemoryReinforce3D', (['max_memory_size', 'self.obs_size', 'self.obs_size', 'self.model_container.num_images', 'self.model_container.num_channel', 'self.device'], {}), '(max_memory_size, self.obs_size, self.obs_size, self.\n model_container.num_images, self.model_container.num_channel, self.device)\n', (6034, 6166), False, 'from src.gridworld_trainer.reinforce.memory import MemoryReinforce, MemoryReinforce3D\n'), ((6462, 6510), 'src.gridworld_trainer.reinforce.model.ReinforceNetwork', 'ReinforceNetwork', (['self.model_container', 'self.obs'], {}), '(self.model_container, self.obs)\n', (6478, 6510), False, 'from src.gridworld_trainer.reinforce.model 
import ReinforceNetwork, ReinforceNetwork3D\n'), ((6709, 6759), 'src.gridworld_trainer.reinforce.model.ReinforceNetwork3D', 'ReinforceNetwork3D', (['self.model_container', 'self.obs'], {}), '(self.model_container, self.obs)\n', (6727, 6759), False, 'from src.gridworld_trainer.reinforce.model import ReinforceNetwork, ReinforceNetwork3D\n'), ((7885, 7938), 'torch.cat', 'torch.cat', (['[images.img_1, images.img_2, images.img_3]'], {}), '([images.img_1, images.img_2, images.img_3])\n', (7894, 7938), False, 'import torch\n'), ((8531, 8584), 'torch.cat', 'torch.cat', (['[images.img_1, images.img_2, images.img_3]'], {}), '([images.img_1, images.img_2, images.img_3])\n', (8540, 8584), False, 'import torch\n'), ((9248, 9264), 'src.utility.container.ImageContainer', 'ImageContainer', ([], {}), '()\n', (9262, 9264), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((9537, 9590), 'torch.cat', 'torch.cat', (['[images.img_1, images.img_2, images.img_3]'], {}), '([images.img_1, images.img_2, images.img_3])\n', (9546, 9590), False, 'import torch\n'), ((9883, 9899), 'src.utility.container.ImageContainer', 'ImageContainer', ([], {}), '()\n', (9897, 9899), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((10172, 10227), 'torch.stack', 'torch.stack', (['[images.img_1, images.img_2, images.img_3]'], {}), '([images.img_1, images.img_2, images.img_3])\n', (10183, 10227), False, 'import torch\n'), ((10564, 10586), 'src.utility.container.ModelSavingContainer', 'ModelSavingContainer', ([], {}), '()\n', (10584, 10586), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, 
PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((10725, 10748), 'torch.save', 'torch.save', (['saves', 'file'], {}), '(saves, file)\n', (10735, 10748), False, 'import torch\n'), ((11134, 11150), 'torch.load', 'torch.load', (['file'], {}), '(file)\n', (11144, 11150), False, 'import torch\n'), ((16673, 16708), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['images'], {}), '(images)\n', (16700, 16708), False, 'import torchvision\n'), ((20249, 20269), 'src.utility.container.ReinforceContainer', 'ReinforceContainer', ([], {}), '()\n', (20267, 20269), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((20299, 20325), 'src.utility.container.PopulationModelContainer', 'PopulationModelContainer', ([], {}), '()\n', (20323, 20325), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((20578, 20615), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['trainer_container.path'], {}), '(trainer_container.path)\n', (20591, 20615), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((24636, 24653), 'copy.copy', 'copy.copy', (['params'], {}), '(params)\n', (24645, 24653), False, 'import copy\n'), ((25280, 25296), 'tqdm.tqdm', 'tqdm', (['population'], {}), '(population)\n', (25284, 25296), False, 'from tqdm import tqdm\n'), ((27445, 27480), 'copy.copy', 'copy.copy', (['pbm_container.evaluation'], {}), '(pbm_container.evaluation)\n', (27454, 27480), False, 'import copy\n'), ((29481, 29507), 'torch.distributions.categorical.Categorical', 'Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (29492, 29507), False, 'from torch.distributions.categorical import Categorical\n'), 
((31312, 31338), 'torch.distributions.categorical.Categorical', 'Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (31323, 31338), False, 'from torch.distributions.categorical import Categorical\n'), ((34504, 34543), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['"""hardcore-10x10-random"""'], {}), "('hardcore-10x10-random')\n", (34518, 34543), False, 'from src.gridworld.gridworld import Gridworld\n'), ((34595, 34633), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (["(best.path + '-transfer')"], {}), "(best.path + '-transfer')\n", (34608, 34633), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((35336, 35375), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['"""hardcore-10x10-random"""'], {}), "('hardcore-10x10-random')\n", (35350, 35375), False, 'from src.gridworld.gridworld import Gridworld\n'), ((35384, 35420), 'logging.info', 'logging.info', (['"""pb transfer training"""'], {}), "('pb transfer training')\n", (35396, 35420), False, 'import logging\n'), ((37183, 37223), 'logging.info', 'logging.info', (['"""Single transfer training"""'], {}), "('Single transfer training')\n", (37195, 37223), False, 'import logging\n'), ((37460, 37499), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['"""hardcore-10x10-random"""'], {}), "('hardcore-10x10-random')\n", (37474, 37499), False, 'from src.gridworld.gridworld import Gridworld\n'), ((37521, 37558), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (["(pbm.path + '-transfer')"], {}), "(pbm.path + '-transfer')\n", (37534, 37558), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((37655, 37675), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (37673, 37675), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((37701, 
37721), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (37719, 37721), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((38121, 38161), 'logging.info', 'logging.info', (['"""Single transfer training"""'], {}), "('Single transfer training')\n", (38133, 38161), False, 'import logging\n'), ((1966, 1977), 'time.time', 'time.time', ([], {}), '()\n', (1975, 1977), False, 'import time\n'), ((2199, 2224), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (2213, 2224), False, 'import os\n'), ((2238, 2260), 'os.makedirs', 'os.makedirs', (['self.path'], {}), '(self.path)\n', (2249, 2260), False, 'import os\n'), ((2322, 2415), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': "(self.path + '/info.log')", 'filemode': '"""w"""', 'level': 'logging.DEBUG'}), "(filename=self.path + '/info.log', filemode='w', level=\n logging.DEBUG)\n", (2341, 2415), False, 'import logging\n'), ((2500, 2525), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2523, 2525), False, 'import torch\n'), ((3275, 3309), 'logging.info', 'logging.info', (['f"""Seed: {self.seed}"""'], {}), "(f'Seed: {self.seed}')\n", (3287, 3309), False, 'import logging\n'), ((18913, 18983), 'logging.info', 'logging.info', (['f"""Model ID: {pbm_container.id}: {hypers[0]} {hypers[1]}"""'], {}), "(f'Model ID: {pbm_container.id}: {hypers[0]} {hypers[1]}')\n", (18925, 18983), False, 'import logging\n'), ((26139, 26159), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (26157, 26159), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((29407, 29422), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (29420, 29422), False, 'import torch\n'), ((35195, 35216), 'logging.info', 'logging.info', (['comment'], {}), '(comment)\n', (35207, 35216), False, 'import logging\n'), ((37153, 37174), 'logging.info', 'logging.info', (['comment'], {}), '(comment)\n', (37165, 37174), False, 'import logging\n'), ((38091, 38112), 'logging.info', 'logging.info', (['comment'], {}), '(comment)\n', (38103, 38112), False, 'import logging\n'), ((38217, 38253), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['"""empty-10x10-random"""'], {}), "('empty-10x10-random')\n", (38231, 38253), False, 'from src.gridworld.gridworld import Gridworld\n'), ((38513, 38552), 'src.gridworld.gridworld.Gridworld.make', 'Gridworld.make', (['"""hardcore-10x10-random"""'], {}), "('hardcore-10x10-random')\n", (38527, 38552), False, 'from src.gridworld.gridworld import Gridworld\n'), ((38578, 38615), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (["(pbm.path + '-transfer')"], {}), "(pbm.path + '-transfer')\n", (38591, 38615), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((38720, 38740), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (38738, 38740), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((38770, 38790), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (38788, 38790), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((2557, 2579), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2569, 2579), False, 'import torch\n'), ((2734, 2753), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2746, 
2753), False, 'import torch\n'), ((16290, 16305), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16303, 16305), False, 'import torch\n'), ((28851, 28890), 'torch.tensor', 'torch.tensor', (['done'], {'dtype': 'torch.float32'}), '(done, dtype=torch.float32)\n', (28863, 28890), False, 'import torch\n'), ((28958, 28999), 'torch.tensor', 'torch.tensor', (['reward'], {'dtype': 'torch.float32'}), '(reward, dtype=torch.float32)\n', (28970, 28999), False, 'import torch\n'), ((35962, 35982), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (35980, 35982), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((36024, 36044), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (36042, 36044), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((36685, 36705), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (36703, 36705), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((36747, 36767), 'src.utility.container.StatisticContainer', 'StatisticContainer', ([], {}), '()\n', (36765, 36767), False, 'from src.utility.container import ModelContainer, UtilityContainer, StatisticContainer, ReinforceContainer, FrameType, PopulationContainer, PopulationModelContainer, ImageContainer, ModelSavingContainer\n'), ((2651, 2685), 'logging.info', 'logging.info', (['"""Running on the GPU"""'], {}), "('Running on the GPU')\n", (2663, 2685), False, 'import logging\n'), ((2825, 2859), 'logging.info', 
'logging.info', (['"""Running on the CPU"""'], {}), "('Running on the CPU')\n", (2837, 2859), False, 'import logging\n'), ((23285, 23329), 'copy.copy', 'copy.copy', (['population[0].hyper_container[-1]'], {}), '(population[0].hyper_container[-1])\n', (23294, 23329), False, 'import copy\n'), ((23567, 23641), 'logging.info', 'logging.info', (['f"""Model {population[i].id} extends model {population[0].id}"""'], {}), "(f'Model {population[i].id} extends model {population[0].id}')\n", (23579, 23641), False, 'import logging\n'), ((23865, 23909), 'copy.copy', 'copy.copy', (['population[i].hyper_container[-1]'], {}), '(population[i].hyper_container[-1])\n', (23874, 23909), False, 'import copy\n'), ((26950, 26965), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (26963, 26965), False, 'import torch\n'), ((33737, 33754), 'copy.copy', 'copy.copy', (['memory'], {}), '(memory)\n', (33746, 33754), False, 'import copy\n'), ((35560, 35584), 'copy.copy', 'copy.copy', (['population[0]'], {}), '(population[0])\n', (35569, 35584), False, 'import copy\n'), ((36283, 36307), 'copy.copy', 'copy.copy', (['population[i]'], {}), '(population[i])\n', (36292, 36307), False, 'import copy\n'), ((9384, 9408), 'torch.zeros', 'torch.zeros', (['(40 * 40 * 3)'], {}), '(40 * 40 * 3)\n', (9395, 9408), False, 'import torch\n'), ((9464, 9488), 'torch.zeros', 'torch.zeros', (['(40 * 40 * 3)'], {}), '(40 * 40 * 3)\n', (9475, 9488), False, 'import torch\n'), ((10019, 10043), 'torch.zeros', 'torch.zeros', (['(40 * 40 * 3)'], {}), '(40 * 40 * 3)\n', (10030, 10043), False, 'import torch\n'), ((10099, 10123), 'torch.zeros', 'torch.zeros', (['(40 * 40 * 3)'], {}), '(40 * 40 * 3)\n', (10110, 10123), False, 'import torch\n')] |
""" Batched Render file. """
import dirt
import numpy as np
import tensorflow as tf
from dirt import matrices
import dirt.lighting as lighting
from tensorflow.python.framework import ops
def orthgraphic_projection(w, h, near=0.1, far=10., name=None):
    """Construct an orthographic projection matrix.

    Follows the OpenGL convention: the camera looks along the negative-z
    axis in view/camera space and along the positive-z axis in clip space.
    Multiplying view-space homogeneous coordinates by this matrix maps
    them into clip space.

    Args:
        w, h: width and height of the (symmetric) viewing volume.
        near, far: clipping plane distances.
        name: optional TF name scope.
    Returns:
        A constant 4x4 float32 tensor (transposed for right-multiplication).
    """
    with ops.name_scope(name, 'OrthographicProjection', [w, h, near, far]) as scope:
        # Symmetric viewing volume, centred on the optical axis.
        right, top = w / 2, h / 2
        left, bottom = -right, -top
        rows = [
            [2. / (right - left), 0., 0, -(right + left) / (right - left)],
            [0., 2. / (top - bottom), 0, -(top + bottom) / (top - bottom)],
            [0., 0., -2. / (far - near), -(far + near) / (far - near)],
            [0., 0., 0., 1.],
        ]
        return tf.transpose(tf.convert_to_tensor(rows, dtype=tf.float32))
def perspective_projection(f, c, w, h, near=0.1, far=10., name=None):
    """Construct a perspective projection matrix.

    Follows the OpenGL convention: the camera looks along the negative-z
    axis in view/camera space and along the positive-z axis in clip space.
    Multiplying view-space homogeneous coordinates by this matrix maps
    them into clip space.

    Args:
        f: focal lengths (fx, fy); their mean is used as a single focal.
        c: principal point (cx, cy) in pixels.
        w, h: image width and height in pixels.
        near, far: clipping plane distances.
        name: optional TF name scope.
    Returns:
        A constant 4x4 float32 tensor (transposed for right-multiplication).
    """
    with ops.name_scope(name, 'PerspectiveProjection', [f, c, w, h, near, far]) as scope:
        focal = 0.5 * (f[0] + f[1])
        offset = 0.5  # sample at pixel centres
        scale = near / focal
        # Frustum extents on the near plane, relative to the principal point.
        right = (w - (c[0] + offset)) * scale
        left = -(c[0] + offset) * scale
        top = (c[1] + offset) * scale
        bottom = -(h - c[1] + offset) * scale
        rows = [
            [2. * near / (right - left), 0., (right + left) / (right - left), 0.],
            [0., 2. * near / (top - bottom), (top + bottom) / (top - bottom), 0.],
            [0., 0., -(far + near) / (far - near), -2. * far * near / (far - near)],
            [0., 0., -1., 0.],
        ]
        return tf.transpose(tf.convert_to_tensor(rows, dtype=tf.float32))
def render_colored_batch(m_v, m_f, m_vc, width, height, camera_f, camera_c, bgcolor=None,
                         num_channels=3, camera_t=None,
                         camera_rt=None, name=None, batch_size=None, cam_pred=None):
    """Render a batch of meshes over a fixed background colour.

    Uses an orthographic projection over a width x height x width cube
    (the perspective path is kept, commented out, for reference).

    Args:
        m_v: vertex positions, shape (batch, num_verts, 3).
        m_f: face vertex indices, shared across the batch.
        m_vc: per-vertex colours; last axis must equal `num_channels`.
        width, height: output image size in pixels.
        camera_f, camera_c: intrinsics — only used by the (disabled)
            perspective branch.
        bgcolor: background colour with `num_channels` entries; defaults to
            black. None-sentinel instead of a numpy-array default so the
            mutable default is not shared between calls.
        num_channels: number of colour channels.
        camera_t, camera_rt: camera translation / Rodrigues rotation
            vectors; default to zero (identity extrinsics).
        name: optional TF name scope.
        batch_size, cam_pred: unused here; kept for interface compatibility.
    Returns:
        Rasterised image batch from dirt.
    """
    # Avoid mutable (numpy array) default arguments.
    if bgcolor is None:
        bgcolor = np.zeros(3, dtype=np.float32)
    if camera_t is None:
        camera_t = np.zeros(3, dtype=np.float32)
    if camera_rt is None:
        camera_rt = np.zeros(3, dtype=np.float32)
    with ops.name_scope(name, "render_batch", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])
        #projection_matrix = perspective_projection(camera_f, camera_c, width, height, .1, 10)
        projection_matrix = orthgraphic_projection(width, height, -(width/2), (width/2)) ### im_w x im_h x im_w cube
        ## Camera Extrinsics: rotation (Rodrigues vector), then translation
        view_matrix = matrices.compose(matrices.rodrigues(camera_rt.astype(np.float32)),
                                       matrices.translation(camera_t.astype(np.float32)),
                                       )
        m_v = tf.cast(m_v, tf.float32)
        batch = tf.shape(m_v)[0]  # dynamic batch size, hoisted for reuse below
        ## Fixed colour BG, tiled to (batch, width, height, channels)
        bg = tf.tile(bgcolor[tf.newaxis, tf.newaxis, tf.newaxis, ...], [batch, width, height, 1])
        # homogeneous coordinates
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)
        ## Extrinsic multiplication
        m_v = tf.matmul(m_v, tf.tile(view_matrix[np.newaxis, ...], (batch, 1, 1)))
        ## Intrinsic Camera projection
        m_v = tf.matmul(m_v, tf.tile(projection_matrix[np.newaxis, ...], (batch, 1, 1)))
        m_f = tf.tile(tf.cast(m_f, tf.int32)[tf.newaxis, ...], (batch, 1, 1))
        ## Rasterize
        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
def render_overlay_colored_batch(m_v, m_f, m_vc, width, height, camera_f, camera_c, bgcolor=None,
                                 num_channels=3, camera_t=None,
                                 camera_rt=None, name=None, batch_size=None, cam_pred=None):
    """Render a batch of meshes over per-sample background images.

    Same pipeline as `render_colored_batch`, except `bgcolor` is passed
    through unchanged as the background, so callers supply a batch of
    background images rather than a single colour.

    Args:
        m_v: vertex positions, shape (batch, num_verts, 3).
        m_f: face vertex indices, shared across the batch.
        m_vc: per-vertex colours.
        width, height: output image size in pixels.
        camera_f, camera_c: intrinsics — only used by the (disabled)
            perspective branch.
        bgcolor: background image batch; the zero-vector fallback mirrors
            the original default and is presumably only a placeholder —
            callers are expected to pass real images (TODO confirm).
            None-sentinel avoids a shared mutable numpy default.
        num_channels: number of colour channels (unused here).
        camera_t, camera_rt: camera translation / Rodrigues rotation
            vectors; default to zero (identity extrinsics).
        name: optional TF name scope.
        batch_size, cam_pred: unused here; kept for interface compatibility.
    Returns:
        Rasterised image batch from dirt.
    """
    # Avoid mutable (numpy array) default arguments.
    if bgcolor is None:
        bgcolor = np.zeros(3, dtype=np.float32)
    if camera_t is None:
        camera_t = np.zeros(3, dtype=np.float32)
    if camera_rt is None:
        camera_rt = np.zeros(3, dtype=np.float32)
    with ops.name_scope(name, "render_batch", [m_v]) as name:
        #projection_matrix = perspective_projection(camera_f, camera_c, width, height, .1, 10)
        projection_matrix = orthgraphic_projection(width, height, -(width/2), (width/2)) ### im_w x im_h x im_w cube
        ## Camera Extrinsics: rotation (Rodrigues vector), then translation
        view_matrix = matrices.compose(matrices.rodrigues(camera_rt.astype(np.float32)),
                                       matrices.translation(camera_t.astype(np.float32)),
                                       )
        ## Image BG: used as-is (one background per sample)
        bg = bgcolor
        m_v = tf.cast(m_v, tf.float32)
        batch = tf.shape(m_v)[0]  # dynamic batch size, hoisted for reuse below
        # homogeneous coordinates
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)
        ## Extrinsic multiplication
        m_v = tf.matmul(m_v, tf.tile(view_matrix[np.newaxis, ...], (batch, 1, 1)))
        ## Intrinsic Camera projection
        m_v = tf.matmul(m_v, tf.tile(projection_matrix[np.newaxis, ...], (batch, 1, 1)))
        m_f = tf.tile(tf.cast(m_f, tf.int32)[tf.newaxis, ...], (batch, 1, 1))
        ## Rasterize
        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
| [
"dirt.rasterise_batch",
"tensorflow.shape",
"numpy.zeros",
"tensorflow.ones_like",
"tensorflow.convert_to_tensor",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.cast"
] | [((2464, 2493), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2472, 2493), True, 'import numpy as np\n'), ((2545, 2574), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2553, 2574), True, 'import numpy as np\n'), ((2611, 2640), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (2619, 2640), True, 'import numpy as np\n'), ((4200, 4229), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (4208, 4229), True, 'import numpy as np\n'), ((4281, 4310), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (4289, 4310), True, 'import numpy as np\n'), ((4347, 4376), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'np.float32'}), '(3, dtype=np.float32)\n', (4355, 4376), True, 'import numpy as np\n'), ((618, 683), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""OrthographicProjection"""', '[w, h, near, far]'], {}), "(name, 'OrthographicProjection', [w, h, near, far])\n", (632, 683), False, 'from tensorflow.python.framework import ops\n'), ((1598, 1668), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""PerspectiveProjection"""', '[f, c, w, h, near, far]'], {}), "(name, 'PerspectiveProjection', [f, c, w, h, near, far])\n", (1612, 1668), False, 'from tensorflow.python.framework import ops\n'), ((2806, 2849), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""render_batch"""', '[m_v]'], {}), "(name, 'render_batch', [m_v])\n", (2820, 2849), False, 'from tensorflow.python.framework import ops\n'), ((3553, 3577), 'tensorflow.cast', 'tf.cast', (['m_v', 'tf.float32'], {}), '(m_v, tf.float32)\n', (3560, 3577), True, 'import tensorflow as tf\n'), ((4055, 4106), 'dirt.rasterise_batch', 'dirt.rasterise_batch', (['bg', 'm_v', 'm_vc', 'm_f'], {'name': 'name'}), '(bg, m_v, m_vc, m_f, name=name)\n', (4075, 
4106), False, 'import dirt\n'), ((4558, 4601), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['name', '"""render_batch"""', '[m_v]'], {}), "(name, 'render_batch', [m_v])\n", (4572, 4601), False, 'from tensorflow.python.framework import ops\n'), ((5153, 5177), 'tensorflow.cast', 'tf.cast', (['m_v', 'tf.float32'], {}), '(m_v, tf.float32)\n', (5160, 5177), True, 'import tensorflow as tf\n'), ((5655, 5706), 'dirt.rasterise_batch', 'dirt.rasterise_batch', (['bg', 'm_v', 'm_vc', 'm_f'], {'name': 'name'}), '(bg, m_v, m_vc, m_f, name=name)\n', (5675, 5706), False, 'import dirt\n'), ((1116, 1164), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['elements'], {'dtype': 'tf.float32'}), '(elements, dtype=tf.float32)\n', (1136, 1164), True, 'import tensorflow as tf\n'), ((2329, 2377), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['elements'], {'dtype': 'tf.float32'}), '(elements, dtype=tf.float32)\n', (2349, 2377), True, 'import tensorflow as tf\n'), ((3608, 3636), 'tensorflow.ones_like', 'tf.ones_like', (['m_v[:, :, -1:]'], {}), '(m_v[:, :, -1:])\n', (3620, 3636), True, 'import tensorflow as tf\n'), ((3951, 3973), 'tensorflow.cast', 'tf.cast', (['m_f', 'tf.int32'], {}), '(m_f, tf.int32)\n', (3958, 3973), True, 'import tensorflow as tf\n'), ((5208, 5236), 'tensorflow.ones_like', 'tf.ones_like', (['m_v[:, :, -1:]'], {}), '(m_v[:, :, -1:])\n', (5220, 5236), True, 'import tensorflow as tf\n'), ((5551, 5573), 'tensorflow.cast', 'tf.cast', (['m_f', 'tf.int32'], {}), '(m_f, tf.int32)\n', (5558, 5573), True, 'import tensorflow as tf\n'), ((3504, 3517), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (3512, 3517), True, 'import tensorflow as tf\n'), ((3993, 4006), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (4001, 4006), True, 'import tensorflow as tf\n'), ((5593, 5606), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (5601, 5606), True, 'import tensorflow as tf\n'), ((3753, 3766), 'tensorflow.shape', 'tf.shape', 
(['m_v'], {}), '(m_v)\n', (3761, 3766), True, 'import tensorflow as tf\n'), ((3894, 3907), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (3902, 3907), True, 'import tensorflow as tf\n'), ((5354, 5367), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (5362, 5367), True, 'import tensorflow as tf\n'), ((5495, 5508), 'tensorflow.shape', 'tf.shape', (['m_v'], {}), '(m_v)\n', (5503, 5508), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8
import unicodedata
import math
import logging
import pickle
import numpy as np
import h5py
from .alignment import Alignment, Edits
GAP = '\a' # reserved character that does not get mapped (for gap repairs)
class Sequence2Sequence(object):
'''Sequence to sequence (character-level) error correction with Keras.
Adapted from examples/lstm_seq2seq.py (tutorial by <NAME>
"A ten-minute introduction...") with changes as follows:
- use early stopping to prevent overfitting
- use Dense instead of Embedding to allow input other than indexes
(unit vectors): confidence and alternatives
- use weight tying for output projection
- add underspecification to character projection by conditioning
index zero to lie in the center of other character vectors and
randomly degrading input characters to zero during training
- measure results not only on training set, but validation set as well
- extend for use of large datasets: training uses generators on files
with same generator function called twice (training vs validation),
splitting lines via shared random variable
- efficient preprocessing
- use true zero for encoder padding and decoder start-of-sequence,
use newline character for decoder padding (learned/not masked in training,
treated like end-of-sequence in inference)
- add runtime preprocessing function for convenient single-line testing
- change first layer to bidirectional, stack unidirectional LSTM layers
on top (with HL depth and HL width configurable)
- add beam search decoding (A*)
- detect CPU vs GPU mode automatically
- save/load weights separate from configuration (by recompiling model)
in order to share weights between CPU and GPU model,
and between fixed and variable batchsize/length
- evaluate word and character error rates on separate dataset
- use line-global additive-linear soft attention to connect encoder
(top-HL) outputs to decoder (top-HL) inputs (instead of mere
final-initial state transfer)
- add topology variant: deep bi-directional encoder
- add topology variant: residual connections
- add topology variant: dense bridging final-initial state transfer
- add training variant: scheduled sampling
- add training variant: parallel LM loss
- allow incremental training (e.g. pretraining on clean text)
- allow weight transfer from shallower model (fixing shallow layer
weights) or from language model (as unconditional decoder),
update character mapping and re-use character embeddings
- allow resetting encoder after load/init transfer
Features still (very much) wanting of implementation:
- stateful decoder mode (in non-transfer part of state function)
- attention decoding with (linear-time) hard monotonic alignment
instead of softmax alignment (with quadratic-time complexity)
- context conditioning (with meta-data inputs like OCR engine)
- methods to avoid exposure bias and label/myopic bias:
generalized adversarial training (Huszár 2015),
beam search optimization (Wiseman & Rush 2016),
professor forcing (Lamb & Goyal et al 2016), or
prospective performance network (Wang et al 2018)
- systematic hyperparameter treatment (deviations from Sutskever
should be founded on empirical analysis):
HL width and depth, optimiser choice (RMSprop/SGD) and parameters,
gradient clipping, decay and rate control, initialisers
# Summary of the algorithm
- In the learning phase, we have source sequences from OCR,
      and corresponding target sequences from GT. We train:
- a stacked LSTM encoder to turn the source sequences
to output sequences and final hidden layer states.
        - a stacked LSTM decoder to turn the target sequences
into the same sequence but offset by one timestep in the future,
(a setup called "teacher forcing" in this context),
based on the initial state vectors and the output sequences
from the encoder.
Effectively, the encoder-decoder learns to generate a sequence
`targets[t+1...]` given `targets[...t]`, conditioned
on the source sequence.
- In inference mode, to decode unknown target sequences, we:
- encode the source sequence into encoded sequence and state,
- start with a target sequence of size 1
(just the start-of-sequence character)
- feed-back the state vectors and 1-character target sequence
to the decoder to produce predictions for the next character
- sample the next character using these predictions
(using argmax for greedy and argsort for beam search)
- append the sampled character to the target sequence
- repeat until we generate the end-of-sequence character,
or we hit a character length limit.
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
    def __init__(self, logger=None, progbars=True):
        '''Initialise with default hyperparameters and empty runtime state.

        No model is built here; call `configure` (and then training or
        weight loading) to populate the Keras models.

        Args:
            logger: `logging.Logger` to use; defaults to this module's logger.
            progbars: whether to show progress bars.
        '''
        ### model parameters
        # How many samples are trained/decoded together (in parallel)?
        self.batch_size = 64
        # stateful decoder (implicit state transfer between batches)?
        self.stateful = False
        # number of nodes in the hidden layer (dimensionality of the encoding space)?
        self.width = 512
        # number of encoder and decoder layers stacked above each other?
        self.depth = 2
        # indexation of (known/allowed) input and output characters (i.e. vocabulary)
        # note: character mapping includes nul for unknown/underspecification,
        # and newline for end-of-sequence;
        # mapping/voc_size is set by loading or training
        # (forward dict char->index, backward dict index->char)
        self.mapping = ({'': 0}, {0: ''})
        self.voc_size = 1 # size of mapping (0 reserved for unknown)
        # add input to output in each encoder and decoder hidden layer?
        self.residual_connections = False
        # encoder hidden layers are all bidirectional LSTMs,
        # cross-summarizing forward and backward outputs
        # (like -encoder_type bdrnn in Open-NMT)?
        self.deep_bidirectional_encoder = False
        # use a fully connected non-linear layer to transfer
        # encoder final states to decoder initial states instead of copy?
        self.bridge_dense = False
        ### training parameters
        # maximum number of epochs to train
        # (unless stopping early via validation loss)?
        self.epochs = 100
        # train with additional output (unweighted sum loss) from LM,
        # defined with tied decoder weights and same input, but
        # not conditioned on encoder output
        # (applies to encoder_decoder_model only, i.e. does not affect
        # encoder_model and decoder_model during inference):
        self.lm_loss = False
        # predict likewise, and use during beam search such that
        # decoder scores control entry of local alternatives and
        # LM scores rate global alternatives of the beam
        # (applies to decoder_model only, but should be used on models
        # with lm_loss during training):
        self.lm_predict = False
        # randomly train with decoder output from self-loop (softmax feedback)
        # instead of teacher forcing (with ratio given curve across epochs),
        # defined with tied weights and same encoder output
        # (applies to encoder_decoder_model only, i.e. does not affect
        # encoder_model and decoder_model during inference)?
        self.scheduled_sampling = None # 'linear'/'sigmoid'/'exponential'/None
        # rate of dropped output connections in encoder and decoder HL?
        self.dropout = 0.2
        ### beam decoder inference parameters
        # probability of the input character candidate in each hypothesis
        # (unless already misaligned); helps balance precision/recall trade-off
        self.rejection_threshold = 0.3
        # up to how many new candidates can enter the beam per context/node?
        self.beam_width_in = 15
        # how much worse relative to the probability of the best candidate
        # may new candidates be to enter the beam?
        self.beam_threshold_in = 0.2
        # up to how many results can be drawn from result generator?
        self.beam_width_out = 16
        ### runtime variables
        self.logger = logger or logging.getLogger(__name__)
        self.graph = None # for tf access from multiple threads
        self.encoder_decoder_model = None # combined model for training
        self.encoder_model = None # separate model for inference
        self.decoder_model = None # separate model for inference (but see _resync_decoder)
        # aligner (for training) with internal state; created after self.logger
        # so it can share the same logger
        self.aligner = Alignment(0, logger=self.logger) # aligner (for training) with internal state
        self.progbars = progbars
        self.status = 0 # empty / configured / trained?
def __repr__(self):
return (__name__ +
" (width: %d)" % self.width +
" (depth: %d)" % self.depth +
" (chars: %d)" % self.voc_size +
" (attention)" +
(" (stateful)" if self.stateful else " (stateless)") +
" status: %s" % ("empty" if self.status < 1 else "configured" if self.status < 2 else "trained"))
    def configure(self, batch_size=None):
        '''Define encoder and decoder models for the configured parameters.
        Use given `batch_size` for encoder input if stateful:
        configure once for training phase (with parallel lines),
        then reconfigure for prediction (with only 1 line each).

        Builds and stores three Keras models as attributes:
        - `encoder_decoder_model` (combined, for training),
        - `encoder_model` (separate, for inference),
        - `decoder_model` (separate, for inference, weight-synced
          via `_resync_decoder`);
        also stores the default tf graph and sets `status` to 1 (configured).
        '''
        from keras.initializers import RandomNormal
        from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda
        from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional
        from keras.layers import concatenate, average, add
        from keras.models import Model
        #from keras.utils import plot_model
        from keras import backend as K
        import tensorflow as tf
        from .attention import DenseAnnotationAttention
        if batch_size:
            self.batch_size = batch_size
        # do not pre-allocate all GPU memory at once (allow device sharing):
        config = tf.compat.v1.ConfigProto()
        config.gpu_options.allow_growth = True
        K.set_session(tf.compat.v1.Session(config=config))
        # self.sess = tf.compat.v1.Session()
        # K.set_session(self.sess)
        # automatically switch to CuDNNLSTM if CUDA GPU is available:
        has_cuda = K.backend() == 'tensorflow' and K.tensorflow_backend._get_available_gpus()
        self.logger.info('using %s LSTM implementation to compile %s model '
                         'of depth %d width %d size %d with attention',
                         'GPU' if has_cuda else 'CPU',
                         'stateful' if self.stateful else 'stateless',
                         self.depth, self.width, self.voc_size)
        if self.residual_connections:
            self.logger.info('encoder and decoder LSTM outputs are added to inputs in all hidden layers'
                             '(residual_connections)')
        if self.deep_bidirectional_encoder:
            self.logger.info('encoder LSTM is bidirectional in all hidden layers, '
                             'with fw/bw cross-summation between layers (deep_bidirectional_encoder)')
        if self.bridge_dense:
            self.logger.info('state transfer between encoder and decoder LSTM uses '
                             'non-linear Dense layer as bridge in all hidden layers (bridge_dense)')
        lstm = CuDNNLSTM if has_cuda else LSTM
        ### Define training phase model
        # encoder part:
        encoder_input = Input(shape=(None, self.voc_size),
                              name='encoder_input')
        char_embedding = Dense(self.width, use_bias=False,
                               kernel_initializer=RandomNormal(stddev=0.001),
                               kernel_regularizer=self._regularise_chars,
                               name='char_embedding')
        char_input_proj = TimeDistributed(char_embedding, name='char_input_projection')
        encoder_output = char_input_proj(encoder_input)
        if self.deep_bidirectional_encoder:
            # cross-summary here means: i_next_fw[k] = i_next_bw[k] = o_fw[k-1]+o_bw[k-1]
            # i.e. add flipped fw/bw outputs by reshaping last axis into half-width axis and 2-dim axis,
            # then reversing the last and reshaping back;
            # in numpy this would be:
            # x + np.flip(x.reshape(x.shape[:-1] + (int(x.shape[-1]/2),2)), -1).reshape(x.shape))
            # in keras this would be something like this (but reshape requires TensorShape no list/tuple):
            # x + K.reshape(K.reverse(K.reshape(x, K.int_shape(x)[:-1] + (x.shape[-1].value//2,2)), axes=-1), x.shape)
            # in tensorflow this would be (but does not work with batch_shape None):
            # x + tf.reshape(tf.reverse(tf.reshape(x, tf.TensorShape(x.shape.as_list()[:-1] + [x.shape[-1].value//2, 2])), [-1]), x.shape)
            # it finally works by replacing all None dimensions with -1:
            cross_sum = Lambda(lambda x: x + tf.reshape(
                tf.reverse(tf.reshape(x, [-1, x.shape[1].value, x.shape[2].value//2, 2]), [-1]),
                [-1] + x.shape.as_list()[1:]))
        # Set up encoder HL to return output activation (to be attended to by decoder),
        # return final states as well (as initial states for the decoder).
        # Only the base hidden layer is bidirectional (unless deep_bidirectional_encoder).
        encoder_state_outputs = []
        for n in range(self.depth):
            args = {'name': 'encoder_lstm_%d' % (n+1),
                    'return_state': True,
                    'return_sequences': True}
            if not has_cuda:
                # instead of default 'hard_sigmoid' which deviates from CuDNNLSTM:
                args['recurrent_activation'] = 'sigmoid'
            layer = lstm(self.width, **args)
            if n == 0 or self.deep_bidirectional_encoder:
                encoder_output, fw_state_h, fw_state_c, bw_state_h, bw_state_c = (
                    Bidirectional(layer, name=layer.name)(
                        encoder_output if n == 0 else cross_sum(encoder_output)))
                # prepare for base layer decoder initial_state:
                # (the final states of the backward-LSTM, closest to the start of the line,
                # in the encoder are used to initialise the state of the decoder)
                state_h = bw_state_h # ignore final fw state
                state_c = bw_state_c # ignore final fw state
            else:
                encoder_output2, state_h, state_c = layer(encoder_output)
                if self.residual_connections:
                    # add residual connections:
                    if n == 1:
                        #encoder_output = add([encoder_output2, average([encoder_output[:,:,::2], encoder_output[:,:,1::2]])]) # does not work (no _inbound_nodes)
                        encoder_output = encoder_output2
                    else:
                        encoder_output = add([encoder_output2, encoder_output])
                else:
                    encoder_output = encoder_output2
            constant_shape = (1, self.width * 2
                              if n == 0 or self.deep_bidirectional_encoder
                              else self.width)
            # variational dropout (time-constant) – LSTM (but not CuDNNLSTM)
            # has the (non-recurrent) dropout keyword option for this:
            encoder_output = Dropout(self.dropout, noise_shape=constant_shape)(encoder_output)
            if self.bridge_dense:
                state_h = Dense(self.width, activation='tanh', name='bridge_h_%d' % (n+1))(state_h)
                state_c = Dense(self.width, activation='tanh', name='bridge_c_%d' % (n+1))(state_c)
            encoder_state_outputs.extend([state_h, state_c])
        # just for convenience:
        # include zero as initial attention state in encoder state output
        # (besides final encoder state as initial cell state):
        attention_init = Lambda(lambda x: K.zeros_like(x)[:, :, 0],
                                name='attention_state_init')
        encoder_state_outputs.append(attention_init(encoder_output))
        # decoder-independent half of the encoder annotation
        # can be computed for the complete encoder sequence
        # at once (independent of the RNN state):
        attention_dense = TimeDistributed(Dense(self.width, use_bias=False),
                                          name='attention_dense')
        # decoder part:
        decoder_input = Input(shape=(None, self.voc_size),
                              name='decoder_input')
        decoder_input0 = char_input_proj(decoder_input)
        decoder_output = decoder_input0
        if self.lm_loss:
            lm_output = decoder_input0
        # Set up decoder HL to return full output sequences (so we can train in parallel),
        # to use encoder_state_outputs as initial state and return final states as well.
        # We don't use those states in the training model, but will use them for inference
        # (see further below).
        decoder_lstms = []
        for n in range(self.depth):
            args = {'name': 'decoder_lstm_%d' % (n+1),
                    'return_state': True,
                    'return_sequences': True}
            if n < self.depth - 1:
                if not has_cuda:
                    # instead of default 'hard_sigmoid' which deviates from CuDNNLSTM:
                    args['recurrent_activation'] = 'sigmoid'
                layer = lstm(self.width, **args)
                decoder_output2, _, _ = layer(decoder_output,
                                              initial_state=encoder_state_outputs[2*n:2*n+2])
                if self.lm_loss:
                    lm_output, _, _ = layer(lm_output)
            else:
                cell = DenseAnnotationAttention(
                    LSTMCell(self.width,
                             dropout=self.dropout,
                             recurrent_activation='sigmoid'),
                    window_width=5, # use local attention with 10 characters context
                    input_mode="concatenate", # concat(input, context) when entering cell
                    output_mode="cell_output") # drop context when leaving cell
                layer = RNN(cell, **args)
                decoder_output2, _, _, _ = layer(decoder_output,
                                                 initial_state=encoder_state_outputs[2*n:2*n+3],
                                                 constants=[encoder_output,
                                                            attention_dense(encoder_output)])
                if self.lm_loss:
                    lm_output, _, _, _ = layer(lm_output)
            decoder_lstms.append(layer)
            # add residual connections:
            if n > 0 and self.residual_connections:
                decoder_output = add([decoder_output2, decoder_output])
            else:
                decoder_output = decoder_output2
            if n < self.depth - 1: # only hidden-to-hidden layer:
                constant_shape = (1, self.width)
                # variational dropout (time-constant) – LSTM (but not CuDNNLSTM)
                # has the (non-recurrent) dropout keyword option for this:
                decoder_output = Dropout(self.dropout, noise_shape=constant_shape)(decoder_output)
        def char_embedding_transposed(x):
            # re-use input embedding (weight tying), but add a bias vector,
            # and also add a linear projection in hidden space
            # (see Press & Wolf 2017)
            # y = softmax( V * P * h + b ) with V=U the input embedding;
            # initialise P as identity matrix and b as zero
            #proj = K.variable(np.eye(self.width), name='char_output_projection') # trainable=True by default
            #bias = K.variable(np.zeros((self.voc_size,)), name='char_output_bias') # trainable=True by default
            #return K.softmax(K.dot(h, K.transpose(K.dot(char_embedding.embeddings, proj))) + bias)
            # simplified variant with no extra weights (50% faster, equally accurate):
            return K.softmax(K.dot(x, K.transpose(char_embedding.kernel)))
        char_output_proj = TimeDistributed(Lambda(char_embedding_transposed, name='transpose+softmax'),
                                           name='char_output_projection')
        decoder_output = char_output_proj(decoder_output)
        if self.lm_loss:
            lm_output = char_output_proj(lm_output)
            decoder_output = [decoder_output, lm_output] # 2 outputs, 1 combined loss
        # Bundle the model that will turn
        # `encoder_input_data` and `decoder_input_data` into `decoder_output_data`
        self.encoder_decoder_model = Model([encoder_input, decoder_input], decoder_output,
                                           name='encoder_decoder_model')
        ## Define inference phase model:
        # 1) encode source to retrieve output sequence
        #    (attended) and initial decoder states
        #    (bw h/c, h/c, attention state)
        # 2) run one step of decoder with this initial state
        #    and a "start of sequence" as target token.
        # 3) repeat from 2, feeding back the target token
        #    from output to input, and passing states
        # Re-use the training phase encoder unchanged
        # (with sequence and final states as output):
        self.encoder_model = Model(
            encoder_input,
            [encoder_output] + encoder_state_outputs,
            name='encoder_model')
        # Set up decoder differently:
        # - with additional input for encoder output
        #   (attended sequence)
        # - with additional input for initial states
        #   (not just encoder_state_outputs at first step)
        # - keeping and concatenating final states
        #   (instead of discarding)
        # so we can pass states explicitly:
        decoder_state_inputs = []
        decoder_state_outputs = []
        decoder_output = decoder_input0
        if self.lm_predict:
            lm_output = decoder_input0
        for n in range(self.depth):
            state_h_in = Input(shape=(self.width,),
                               name='initial_h_%d_input' % (n+1))
            state_c_in = Input(shape=(self.width,),
                               name='initial_c_%d_input' % (n+1))
            decoder_state_inputs.extend([state_h_in, state_c_in])
            layer = decoder_lstms[n] # tied weights
            if n < self.depth - 1:
                decoder_output, state_h_out, state_c_out = layer(
                    decoder_output,
                    initial_state=decoder_state_inputs[2*n:2*n+2])
                decoder_state_outputs.extend([state_h_out,
                                              state_c_out])
                if self.lm_predict:
                    lm_output, _, _ = layer(
                        lm_output,
                        initial_state=decoder_state_inputs[2*n:2*n+2])
            else:
                attention_input = Input(shape=(None, self.width),
                                        name='attention_input')
                attention_state_in = Input(shape=(None,),
                                           name='attention_state_input')
                decoder_state_inputs.append(attention_state_in)
                # for some obscure reason, layer sharing is impossible
                # with DenseAnnotationAttention; so we must redefine
                # and then resync weights after training/loading
                # (see _resync_decoder):
                cell = DenseAnnotationAttention(
                    LSTMCell(self.width,
                             dropout=self.dropout,
                             recurrent_activation='sigmoid'),
                    window_width=5, # use local attention with 10 characters context
                    input_mode="concatenate", # concat(input, context) when entering cell
                    output_mode="cell_output") # drop context when leaving cell
                # NOTE(review): `args` is re-used from the training-phase decoder
                # loop above, so this layer gets the same name ('decoder_lstm_%d'
                # for the top layer) that _resync_decoder looks up by name
                layer = RNN(cell, **args)
                decoder_output, state_h_out, state_c_out, attention_state_out = layer(
                    decoder_output,
                    initial_state=decoder_state_inputs[2*n:2*n+3],
                    constants=[attention_input,
                               attention_dense(attention_input)])
                decoder_state_outputs.extend([state_h_out,
                                              state_c_out,
                                              attention_state_out])
                if self.lm_predict:
                    attention_zero = Lambda(lambda x: K.zeros_like(x))(attention_input)
                    lm_output, _, _, _ = layer(
                        lm_output,
                        initial_state=decoder_state_inputs[2*n:2*n+3],
                        constants=[attention_zero, attention_zero])
        decoder_output = char_output_proj(decoder_output)
        if self.lm_predict:
            lm_output = char_output_proj(lm_output)
            decoder_output = [decoder_output, lm_output] # 2 outputs (1 for local, 1 for global scores)
        else:
            decoder_output = [decoder_output]
        # must be resynced each time encoder_decoder_model changes:
        self.decoder_model = Model(
            [decoder_input, attention_input] + decoder_state_inputs,
            decoder_output + decoder_state_outputs,
            name='decoder_model')
        ## Compile model
        self._recompile()
        # for tf access from multiple threads
        # self.encoder_model._make_predict_function()
        # self.decoder_model._make_predict_function()
        # self.sess.run(tf.global_variables_initializer())
        self.graph = tf.compat.v1.get_default_graph()
        self.status = 1
def _recompile(self):
from keras.optimizers import Adam
self.encoder_decoder_model.compile(
loss='categorical_crossentropy', # loss_weights=[1.,1.] if self.lm_loss
optimizer=Adam(clipnorm=5), #'adam',
sample_weight_mode='temporal') # sample_weight slows down training slightly (20%)
    def _reconfigure_for_mapping(self):
        '''Reconfigure character embedding layer after change of mapping (possibly transferring previous weights).

        Compares the current `voc_size` against the input dimension of the
        existing embedding layer; if the vocabulary grew, rebuilds the model
        via `configure()`. When trained weights exist already (incremental
        training), they are copied over layer by layer, with the embedding
        matrix of the old (smaller) vocabulary written into the top-left
        part of the freshly initialised (larger) one.
        '''
        assert self.status >= 1
        embedding = self.encoder_decoder_model.get_layer(name='char_input_projection').layer # cannot get char_embedding directly
        input_dim = embedding.input_spec.axes[-1]
        if input_dim < self.voc_size: # more chars than during last training?
            if self.status >= 2: # weights exist already (i.e. incremental training)?
                self.logger.warning('transferring weights from previous model with only %d character types', input_dim)
                # get old weights:
                layer_weights = [layer.get_weights() for layer in self.encoder_decoder_model.layers]
                # reconfigure with new mapping size (and new initializers):
                self.configure()
                # set old weights:
                for layer, weights in zip(self.encoder_decoder_model.layers, layer_weights):
                    self.logger.debug('transferring weights for layer %s %s', layer.name, str([w.shape for w in weights]))
                    if layer.name == 'char_input_projection':
                        # transfer weights from previous Embedding layer to new one:
                        new_weights = layer.get_weights() # freshly initialised
                        #new_weights[0][input_dim:, 0:embedding.units] = weights[0][0,:] # repeat zero vector instead
                        new_weights[0][0:input_dim, 0:embedding.units] = weights[0]
                        layer.set_weights(new_weights)
                    else:
                        # use old weights:
                        layer.set_weights(weights)
            else:
                # not trained yet: a fresh configure is enough
                self.configure()
def _resync_decoder(self):
self.decoder_model.get_layer('decoder_lstm_%d' % self.depth).set_weights(
self.encoder_decoder_model.get_layer('decoder_lstm_%d' % self.depth).get_weights())
def _regularise_chars(self, embedding_matrix):
'''Calculate L2 loss of the char embedding weights
to control for underspecification at zero
(by interpolating between other embedding vectors).
'''
from keras import backend as K
em_dims = embedding_matrix.shape.as_list()
if em_dims[0] == 0: # voc_size starts with 0 before first training
return 0
vec0 = K.slice(embedding_matrix, [0, 0], [1, em_dims[1]]) # zero vector only,
#vec0 = K.repeat_elements(vec0, em_dims[0]-1, axis=0) # repeated
vecs = K.slice(embedding_matrix, [1, 0], [em_dims[0]-1, em_dims[1]]) # all vectors except zero
# make sure only vec0 is affected, i.e. vecs change only via global loss:
vecs = K.stop_gradient(K.mean(vecs, axis=0))
# scale to make gradients benign:
underspecification = 1 * K.sum(K.square(vec0 - vecs)) # c='\0' ~ mean of others
#lowrank = K.sum(0.01 * K.square(embedding_matrix)) # generalization/sparsity
norms = K.sum(K.square(embedding_matrix), axis=1)
norm0 = K.ones_like(norms) # square of target (non-zero) weight norm
lowrank = 0.01 * K.sum(K.square(norm0 - norms))
return K.in_train_phase(lowrank + underspecification, 0.)
def map_files(self, filenames):
num_lines = 0
chars = set(self.mapping[0].keys()) # includes '' (0)
for filename in filenames:
# todo: there must be a better way to detect this:
with_confidence = filename.endswith('.pkl')
with open(filename, 'rb' if with_confidence else 'r') as file:
if with_confidence:
file = pickle.load(file) # read once
for line in file:
if with_confidence:
source_conf, target_text = line
if not source_conf: # empty
line = target_text
elif type(source_conf[0]) is tuple: # prob line
line = ''.join([char for char, prob in source_conf]) + target_text
else: # confmat
line = ''.join([chars for chunk in source_conf
for chars, prob in chunk]) + target_text
line = unicodedata.normalize('NFC', line)
chars.update(set(line))
if GAP in chars:
self.logger.warning('ignoring gap character "%s" in input file "%s"', GAP, filename)
chars.remove(GAP)
num_lines += 1
chars = sorted(list(chars))
if len(chars) > self.voc_size:
# incremental training
c_i = dict((c, i) for i, c in enumerate(chars))
i_c = dict((i, c) for i, c in enumerate(chars))
self.mapping = (c_i, i_c)
self.voc_size = len(c_i)
self._reconfigure_for_mapping()
return num_lines
    def train(self, filenames, val_filenames=None):
        '''train model on given text files.
        Pass the character sequences of lines in `filenames`, paired into
        source and target (and possibly, source confidence values),
        to the loop training model weights with stochastic gradient descent.
        The generator will open each file, looping over the complete set (epoch)
        as long as validation error does not increase in between (early stopping).
        Validate on a random fraction of lines automatically separated before,
        unless `val_filenames` is given, in which case only those files are used
        for validation.

        Sets `status` to 2 (trained) on success, (back) to 1 on failure.
        '''
        from keras.callbacks import EarlyStopping, TerminateOnNaN
        from .callbacks import StopSignalCallback, ResetStatesCallback
        from .keras_train import fit_generator_autosized, evaluate_generator_autosized
        # extend character mapping to cover training (and validation) data:
        num_lines = self.map_files(filenames)
        self.logger.info('Training on "%d" files with %d lines', len(filenames), num_lines)
        if val_filenames:
            num_lines = self.map_files(val_filenames)
            self.logger.info('Validating on "%d" files with %d lines', len(val_filenames), num_lines)
            split_rand = None
        else:
            self.logger.info('Validating on random 20% lines from those files')
            split_rand = np.random.uniform(0, 1, (num_lines,)) # reserve split fraction at random line numbers
        # Run training
        earlystopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1,
                                      mode='min', restore_best_weights=True)
        callbacks = [earlystopping, TerminateOnNaN(),
                     StopSignalCallback(logger=self.logger)]
        history = fit_generator_autosized(
            self.encoder_decoder_model,
            self.gen_data(filenames, split_rand, train=True),
            epochs=self.epochs,
            workers=1,
            # (more than 1 would effectively increase epoch size)
            use_multiprocessing=not self.scheduled_sampling,
            # (cannot access session/graph for scheduled sampling in other process,
            # cannot access model for reset callback in other process)
            validation_data=self.gen_data(val_filenames or filenames, split_rand, train=False),
            verbose=1 if self.progbars else 0,
            callbacks=callbacks)
        if 'val_loss' in history.history:
            self.logger.info('training finished with val_loss %f',
                             min(history.history['val_loss']))
            if (np.isnan(history.history['val_loss'][-1]) or
                earlystopping.stopped_epoch == 0):
                # recover weights (which TerminateOnNaN prevented EarlyStopping from doing)
                self.encoder_decoder_model.set_weights(earlystopping.best_weights)
            # propagate the trained weights to the separate inference decoder:
            self._resync_decoder()
            self.status = 2
        else:
            self.logger.critical('training failed')
            self.status = 1
def evaluate(self, filenames, fast=False, normalization='historic_latin', charmap={}, gt_level=1,
confusion=10, histogram=True):
'''evaluate model on text files
Pass the character sequence of lines in ``filenames``, paired into
source and target (and possibly, source confidence values),
to a loop predicting outputs with decoder feedback and greedy+beam search.
The generator will open each file, looping over the complete set once,
printing source/target and predicted lines,
and the overall calculated character and word error rates of source (OCR)
and prediction (greedy/beamed) against target (GT).
If ``fast``, then skip beam search for single lines, and process all batches
in parallel greedily.
For ``normalization`` and ``gt_level``, see ``Alignment.get_adjusted_distance()``.
If ``charmap`` is non-empty, use it (as in str.maketrans) before processing.
If ``confusion`` is greater than zero, then aggregate (non-identity) edits
on the character level, and show this many most-frequent confusions in the end.
'''
# FIXME: stop using both greedy and beamed in 1 function
assert self.status == 2
c_origin_counts = Edits(self.logger, histogram=histogram)
w_origin_counts = Edits(self.logger)
c_greedy_counts = Edits(self.logger, histogram=histogram)
w_greedy_counts = Edits(self.logger)
c_beamed_counts = Edits(self.logger, histogram=histogram)
w_beamed_counts = Edits(self.logger)
c_origin_aligner = Alignment(0, logger=self.logger, confusion=confusion > 0)
w_origin_aligner = Alignment(0, logger=self.logger)
c_greedy_aligner = Alignment(0, logger=self.logger, confusion=confusion > 0)
w_greedy_aligner = Alignment(0, logger=self.logger)
c_beamed_aligner = Alignment(0, logger=self.logger, confusion=confusion > 0)
w_beamed_aligner = Alignment(0, logger=self.logger)
for batch_no, batch in enumerate(self.gen_lines(filenames, False, charmap=charmap)):
lines_source, lines_sourceconf, lines_target, lines_filename = batch
#bar.update(1)
lines_greedy, probs_greedy, scores_greedy, _ = (
self.correct_lines(lines_source, lines_sourceconf,
fast=fast, greedy=True))
if fast:
lines_beamed, probs_beamed, scores_beamed = (
lines_greedy, probs_greedy, scores_greedy)
else:
lines_beamed, probs_beamed, scores_beamed, _ = (
self.correct_lines(lines_source, lines_sourceconf,
fast=False, greedy=False))
for j in range(len(lines_source)):
if not lines_source[j] or not lines_target[j]:
continue # from partially filled batch
self.logger.info('Source input : %s',
lines_source[j].rstrip(u'\n'))
self.logger.info('Target output : %s',
lines_target[j].rstrip(u'\n'))
self.logger.info('Target prediction (greedy): %s [%.2f]',
lines_greedy[j].rstrip(u'\n'), scores_greedy[j])
self.logger.info('Target prediction (beamed): %s [%.2f]',
lines_beamed[j].rstrip(u'\n'), scores_beamed[j])
#metric = get_levenshtein_distance
c_origin_dist = c_origin_aligner.get_adjusted_distance(lines_source[j], lines_target[j],
normalization=normalization,
gtlevel=gt_level)
c_greedy_dist = c_greedy_aligner.get_adjusted_distance(greedy_lines[j], target_lines[j],
normalization=normalization,
gtlevel=gt_level)
c_beamed_dist = c_beamed_aligner.get_adjusted_distance(beamed_lines[j], target_lines[j],
normalization=normalization,
gtlevel=gt_level)
c_origin_counts.add(c_origin_dist, lines_source[j], lines_target[j])
c_greedy_counts.add(c_greedy_dist, lines_greedy[j], lines_target[j])
c_beamed_counts.add(c_beamed_dist, lines_beamed[j], lines_target[j])
tokens_greedy = lines_greedy[j].split(" ")
tokens_beamed = lines_beamed[j].split(" ")
tokens_source = lines_source[j].split(" ")
tokens_target = lines_target[j].split(" ")
w_origin_dist = w_origin_aligner.get_adjusted_distance(tokens_source, tokens_target,
normalization=normalization,
gtlevel=gt_level)
w_greedy_dist = w_greedy_aligner.get_adjusted_distance(tokens_greedy, tokens_target,
normalization=normalization,
gtlevel=gt_level)
w_beamed_dist = w_beamed_aligner.get_adjusted_distance(tokens_beamed, tokens_target,
normalization=normalization,
gtlevel=gt_level)
w_origin_counts.add(w_origin_dist, tokens_source, tokens_target)
w_greedy_counts.add(w_greedy_dist, tokens_greedy, tokens_target)
w_beamed_counts.add(w_beamed_dist, tokens_beamed, tokens_target)
c_greedy_counts.score += sum(scores_greedy)
c_beamed_counts.score += sum(scores_beamed)
self.logger.info('finished %d lines', c_origin_counts.length)
if confusion > 0:
self.logger.info('OCR confusion: %s', c_origin_aligner.get_confusion(confusion))
self.logger.info('greedy confusion: %s', c_greedy_aligner.get_confusion(confusion))
self.logger.info('beamed confusion: %s', c_beamed_aligner.get_confusion(confusion))
if histogram:
self.logger.info('OCR histogram: %s', repr(c_origin_counts.hist()))
self.logger.info('greedy histogram: %s', repr(c_greedy_counts.hist()))
self.logger.info('beamed histogram: %s', repr(c_beamed_counts.hist()))
self.logger.info('ppl greedy: %.3f', math.exp(c_greedy_counts.score/c_greedy_counts.length))
self.logger.info('ppl beamed: %.3f', math.exp(c_beamed_counts.score/c_beamed_counts.length))
self.logger.info("CER OCR: %.3f±%.3f", c_origin_counts.mean, math.sqrt(c_origin_counts.varia))
self.logger.info("CER greedy: %.3f±%.3f", c_greedy_counts.mean, math.sqrt(c_greedy_counts.varia))
self.logger.info("CER beamed: %.3f±%.3f", c_beamed_counts.mean, math.sqrt(c_beamed_counts.varia))
self.logger.info("WER OCR: %.3f±%.3f", w_origin_counts.mean, math.sqrt(w_origin_counts.varia))
self.logger.info("WER greedy: %.3f±%.3f", w_greedy_counts.mean, math.sqrt(w_greedy_counts.varia))
self.logger.info("WER beamed: %.3f±%.3f", w_beamed_counts.mean, math.sqrt(w_beamed_counts.varia))
def predict(self, filenames, fast=False, greedy=False, charmap={}):
'''apply model on text files
Pass the character sequence of lines in ``filenames``, paired into
source and target (and possibly, source confidence values),
to a loop predicting outputs with decoder feedback and greedy/beam search.
The generator will open each file, looping over the complete set once,
yielding predicted lines (along with their filename).
If ``fast``, then skip beam search for single lines, and process all batches
in parallel greedily.
If ``charmap`` is non-empty, use it (as in str.maketrans) before processing.
'''
assert self.status == 2
for batch_no, batch in enumerate(self.gen_lines(filenames,
repeat=False,
unsupervised=True,
charmap=charmap)):
lines_source, lines_sourceconf, _, lines_filename = batch
lines_result, probs_result, scores_result, _ = (
self.correct_lines(lines_source, lines_sourceconf,
fast=fast, greedy=greedy))
yield (lines_filename, lines_result, scores_result)
    def correct_lines(self, lines, conf=None, fast=True, greedy=True):
        '''apply correction model on text strings
        Pass the character sequences `lines` (optionally complemented by
        respective confidence values), to a loop predicting outputs with
        decoder feedback and greedy or beam search. Each line must end
        with a newline character.
        If `fast`, process all lines in parallel and all characters at once
        greedily.
        Otherwise, if `greedy`, process each line greedily (i.e. without
        beam search).
        Return a 4-tuple of the corrected lines, probability lists,
        perplexity scores, and input-output alignments.
        '''
        assert not fast or greedy, "cannot decode in fast mode with beam search enabled"
        if not lines:
            # nothing to do: keep the 4-tuple shape for callers
            return [], [], [], []
        # vectorize:
        encoder_input_data, _, _, _ = self.vectorize_lines(lines, lines, conf)
        if fast:
            # encode and decode in batch (all lines at once):
            _, output_lines, output_probs, output_scores, alignments = self.decode_batch_greedy(encoder_input_data)
        else:
            # encode lines in batch (all lines at once):
            encoder_outputs = self.encoder_model.predict_on_batch(encoder_input_data)
            # decode lines and characters individually:
            output_lines, output_probs, output_scores, alignments = [], [], [], []
            for j, input_line in enumerate(lines):
                if not input_line:
                    # empty input line: empty result
                    line, probs, score, alignment = '', [], 0, []
                elif greedy:
                    line, probs, score, alignment = self.decode_sequence_greedy(
                        encoder_outputs=[encoder_output[j:j+1] for encoder_output in encoder_outputs])
                else:
                    # query only 1-best
                    try:
                        line, probs, score, alignment = next(self.decode_sequence_beam(
                            source_seq=encoder_input_data[j], # needed for rejection fallback
                            encoder_outputs=[encoder_output[j:j+1] for encoder_output in encoder_outputs]))
                    except StopIteration:
                        # beam search yielded no complete hypothesis at all:
                        # fall back to the unchanged input with identity alignment
                        self.logger.error('cannot beam-decode input line %d: "%s"', j, input_line)
                        line = input_line
                        probs = [1.0] * len(line)
                        score = 0
                        alignment = np.eye(len(line)).tolist()
                line = line.replace(GAP, '') # remove if rejected (i.e. not corrected despite underspecification)
                output_lines.append(line)
                output_probs.append(probs)
                output_scores.append(score)
                alignments.append(alignment)
        return output_lines, output_probs, output_scores, alignments
# for fit_generator()/predict_generator()/evaluate_generator()/standalone
# -- looping, but not shuffling
def gen_data(self, filenames, split=None, train=False, unsupervised=False, charmap={}, reset_cb=None):
'''generate batches of vector data from text file
Open `filenames` in text mode, loop over them producing `batch_size`
lines at a time. Pad lines into the longest line of the batch.
If stateful, call `reset_cb` at the start of each batch (if given)
or reset model directly (otherwise).
Skip lines at `split` positions (if given), depending on `train`
(upper vs lower partition).
Yield vector data batches (for fit_generator/evaluate_generator).
'''
epoch = 0
if train and self.scheduled_sampling:
sample_ratio = 0
for batch in self.gen_lines(filenames, True, split, train, unsupervised, charmap):
if not batch:
epoch += 1
yield False # signal end of epoch to autosized fit/evaluate
if train and self.scheduled_sampling:
# prepare empirical scheduled sampling (i.e. without proper gradient)
attenuation = 3 # 10 enters saturation at about 10 percent of self.epochs
if self.scheduled_sampling == 'linear':
sample_ratio = attenuation * (epoch - 1) / (self.epochs - 1)
elif self.scheduled_sampling == 'sigmoid':
sample_ratio = 1 / (1 + math.exp(5 - 10 * attenuation * epoch / self.epochs))
elif self.scheduled_sampling == 'exponential':
sample_ratio = 1 - 0.9 ** (50 * attenuation * epoch / self.epochs)
else:
raise Exception('unknown function "%s" for scheduled sampling' % self.scheduled_sampling)
#self.logger.debug('sample ratio for this epoch:', sample_ratio)
with self.graph.as_default():
self._resync_decoder()
else:
lines_source, lines_sourceconf, lines_target, lines_filename = batch
if train and self.scheduled_sampling:
line_schedules = np.random.uniform(0, 1, self.batch_size)
else:
line_schedules = None
# vectorize:
encoder_input_data, decoder_input_data, decoder_output_data, decoder_output_weights = (
self.vectorize_lines(lines_source, lines_target, lines_sourceconf))
# yield source/target data to keras consumer loop (fit/evaluate)
if line_schedules is not None: # and epoch > 1:
# calculate greedy/beamed decoder output to yield as as decoder input
indexes = line_schedules < sample_ratio # respect current schedule
if np.count_nonzero(indexes) > 0:
# ensure the generator thread gets to see the same tf graph:
# with self.sess.as_default():
with self.graph.as_default():
decoder_input_data_sampled, _, _, _, _ = self.decode_batch_greedy(encoder_input_data)
# overwrite scheduled lines with data sampled from decoder instead of GT:
decoder_input_data.resize( # zero-fill larger time-steps (in-place)
decoder_input_data_sampled.shape)
decoder_output_data.resize( # zero-fill larger time-steps (in-place)
decoder_input_data_sampled.shape)
decoder_output_weights.resize( # zero-fill larger time-steps (in-place)
decoder_input_data_sampled.shape[:2])
indexes_condition = np.broadcast_to(indexes, # broadcast to data shape
tuple(reversed(decoder_input_data.shape))).transpose()
decoder_input_data = np.where(indexes_condition,
decoder_input_data_sampled,
decoder_input_data)
if train:
# encoder degradation to index zero for learning character underspecification
rand = np.random.uniform(0, 1, self.batch_size)
line_length = encoder_input_data[0].shape[0]
rand = (line_length * rand / 0.01).astype(np.int) # effective degradation ratio
encoder_input_data[np.arange(self.batch_size)[rand < line_length],
rand[rand < line_length], :] = np.eye(self.voc_size)[0]
yield ([encoder_input_data, decoder_input_data],
decoder_output_data, decoder_output_weights)
    def gen_lines(self, filenames, repeat=True, split=None, train=False, unsupervised=False, charmap={}):
        """Generate batches of lines from the given files.

        Open each file in `filenames` and iterate over its lines:
        - Files ending in '.pkl' are unpickled into a list of
          (source, target) pairs, where source is either a plain string,
          a list of (char, confidence) tuples ("prob line"), or a full
          confusion network (list of chunks of (string, confidence)
          alternatives, of which the top alternative yields the text).
        - Other files are read as plain text with source and target
          separated by a tab; if `unsupervised` and no tab is present,
          the whole line is used as both source and target.
        If `split` is a random array, skip lines which belong to the
        other generator: with `train` true, lines whose split value is
        below 0.2 are skipped; with `train` false, the complement
        (so a train/validation pair sharing `split` sees disjoint lines).
        If `charmap` is a non-empty dict, translate characters through it.
        Source and target are NFC-normalized. During training, pairs
        which `self.aligner` deems too badly aligned are dropped.

        Yield 4-tuples (source lines, confidence lists or None,
        target lines, filenames) for each complete batch of
        `self.batch_size` lines. If `repeat`, iterate endlessly,
        yielding False between epochs (dropping any partial batch);
        otherwise pad and flush a final partial batch, then stop.
        """
        split_ratio = 0.2
        epoch = 0
        if charmap:
            # compile the translation table once
            # (NOTE: the mutable default `charmap={}` is never modified
            #  here, only rebound locally, so it is benign)
            charmap = str.maketrans(charmap)
        while True:
            lines_source = []
            lines_sourceconf = []
            lines_target = []
            lines_filename = []
            for filename in filenames:
                with_confidence = filename.endswith('.pkl')
                with open(filename, 'rb' if with_confidence else 'r') as file:
                    if with_confidence:
                        file = pickle.load(file) # read once
                    #if (repeat and not with_confidence):
                    #    file.seek(0) # read again
                    for line_no, line in enumerate(file):
                        if (isinstance(split, np.ndarray) and
                            (split[line_no] < split_ratio) == train):
                            # data shared between training and validation: belongs to other generator, resp.
                            #print('skipping line %d in favour of other generator' % line_no)
                            continue
                        if with_confidence: # binary input with OCR confidence?
                            source_text, target_text = line # already includes end-of-sequence
                            if not source_text: # empty
                                source_text, source_conf = '', []
                            elif type(source_text[0]) is tuple: # prob line
                                source_text, source_conf = map(list, zip(*source_text))
                                source_text = ''.join(source_text)
                            else: # confmat
                                source_conf = source_text
                                # take the top alternative of each chunk as plain text:
                                source_text = ''.join(chunk[0][0] if chunk else '' for chunk in source_conf)
                            # start-of-sequence will be added by vectorisation
                            # end-of-sequence already preserved by pickle format
                        elif unsupervised and '\t' not in line:
                            source_text = target_text = line
                        else:
                            source_text, target_text = line.split('\t')
                            # start-of-sequence will be added by vectorisation
                            # add end-of-sequence:
                            source_text = source_text + '\n'
                            # end-of-sequence already preserved by file iterator
                        if unsupervised:
                            target_text = source_text
                        if charmap:
                            source_text = source_text.translate(charmap)
                            target_text = target_text.translate(charmap)
                        source_text = unicodedata.normalize('NFC', source_text)
                        target_text = unicodedata.normalize('NFC', target_text)
                        if train:
                            # align source and target text line:
                            self.aligner.set_seqs(source_text, target_text)
                            if self.aligner.is_bad():
                                # only log in the first epoch to avoid spam:
                                if epoch == 0:
                                    self.logger.debug('%s' 'ignoring bad line "%s\t%s"',
                                                      '\x1b[2K\x1b[G' if self.progbars else '',
                                                      source_text.rstrip(), target_text.rstrip())
                                continue # avoid training if OCR was too bad
                        lines_source.append(source_text)
                        lines_target.append(target_text)
                        if with_confidence:
                            lines_sourceconf.append(source_conf)
                        lines_filename.append(filename)
                        if len(lines_source) == self.batch_size: # end of batch
                            yield (lines_source, lines_sourceconf if with_confidence else None,
                                   lines_target, lines_filename)
                            lines_source = []
                            lines_sourceconf = []
                            lines_target = []
                            lines_filename = []
            epoch += 1
            if repeat:
                yield False
                # bury remaining lines (partially filled batch)
            else:
                if lines_source:
                    # a partially filled batch remains: pad it up to batch_size
                    lines_source.extend((self.batch_size-len(lines_source))*[''])
                    lines_target.extend((self.batch_size-len(lines_target))*[''])
                    if with_confidence:
                        lines_sourceconf.extend((self.batch_size-len(lines_sourceconf))*[[]])
                    lines_filename.extend((self.batch_size-len(lines_filename))*[None])
                    yield (lines_source, lines_sourceconf if with_confidence else None,
                           lines_target, lines_filename)
                break
    def vectorize_lines(self, encoder_input_sequences, decoder_input_sequences, encoder_conf_sequences=None):
        '''Convert a batch of source and target sequences to arrays.
        Take the given (line) lists of encoder and decoder input strings,
        `encoder_input_sequences` and `decoder_input_sequences`, map them
        to indexes in the input dimension, and turn them into unit vectors,
        padding each string to the longest line using zero vectors.
        This gives numpy arrays of shape (batch_size, max_length, voc_size).
        When `encoder_conf_sequences` is also given, use floating point
        probability values instead of integer ones. This can come in either
        of two forms: simple lists of probabilities (of equal length as the
        strings themselves), or full confusion networks, where every line
        is a list of chunks, and each chunk is a list of alternatives, which
        is a tuple of a string and its probability. (Chunks/alternatives may
        have different length.)
        Special cases:
        - true zero (no index): padding for encoder and decoder (masked),
          and start "symbol" for decoder input
        - empty character (index zero): underspecified encoder input
          (not allowed in decoder)

        Return a 4-tuple of encoder input, decoder input, decoder output
        (i.e. decoder input shifted left by one time-step, for teacher
        forcing), and decoder output weights (0.0 at padding positions);
        with `lm_loss`, the latter two are duplicated into 2-element lists.
        '''
        # Note: padding and confidence indexing need Dense/dot instead of Embedding/gather.
        # Used both for training (teacher forcing) and inference (ignore decoder input/output/weights).
        max_encoder_input_length = max(map(len, encoder_input_sequences))
        max_decoder_input_length = max(map(len, decoder_input_sequences))
        assert len(encoder_input_sequences) == len(decoder_input_sequences)
        batch_size = len(encoder_input_sequences)
        with_confmat = False
        if encoder_conf_sequences:
            assert len(encoder_conf_sequences) == len(encoder_input_sequences)
            if type(encoder_conf_sequences[0][0]) is list:
                # full confusion network: each line is a list of chunks,
                # each chunk a list of (string, probability) alternatives
                with_confmat = True
                # array width: longest alternative per chunk, summed over chunks
                max_encoder_input_length = max(
                    [sum(max([len(x[0]) for x in chunk]) if chunk else 0
                         for chunk in sequence)
                     for sequence in encoder_conf_sequences])
                encoder_input_sequences = encoder_conf_sequences
        encoder_input_data = np.zeros((batch_size, max_encoder_input_length, self.voc_size),
                                      dtype=np.float32 if encoder_conf_sequences else np.uint32)
        # +1 time-step for the initial start "symbol" (kept all-zero):
        decoder_input_data = np.zeros((batch_size, max_decoder_input_length+1, self.voc_size),
                                      dtype=np.uint32)
        decoder_output_data = np.zeros((batch_size, max_decoder_input_length+1, self.voc_size),
                                       dtype=np.uint32)
        for i, (enc_seq, dec_seq) in enumerate(zip(encoder_input_sequences, decoder_input_sequences)):
            j = 0 # to declare scope outside loop
            if with_confmat:
                for chunk in enc_seq:
                    # all alternatives of one chunk start at the same time-step j:
                    max_chars = max([len(x[0]) for x in chunk]) if chunk else 0
                    for chars, conf in chunk:
                        for k, char in enumerate(chars):
                            if char not in self.mapping[0]:
                                if char != GAP:
                                    self.logger.error('unmapped character "%s" at encoder input sequence %d position %d',
                                                      char, i, j+k)
                                idx = 0 # underspecification
                            else:
                                idx = self.mapping[0][char]
                            encoder_input_data[i, j+k, idx] = conf
                            # ...other k for input: padding (keep zero)
                    j += max_chars
                    # ...other j for input: padding (keep zero)
            else:
                for j, char in enumerate(enc_seq):
                    if char not in self.mapping[0]:
                        if char != GAP:
                            self.logger.error('unmapped character "%s" at encoder input sequence %d', char, i)
                        idx = 0 # underspecification
                    else:
                        idx = self.mapping[0][char]
                    encoder_input_data[i, j, idx] = 1
                    if encoder_conf_sequences: # binary input with OCR confidence?
                        # replace the unit value by the character's probability:
                        encoder_input_data[i, j, idx] = encoder_conf_sequences[i][j]
                # ...other j for encoder input: padding (keep zero)
            # j == 0 for decoder input: start symbol (keep zero)
            for j, char in enumerate(dec_seq):
                if char not in self.mapping[0]:
                    if char != GAP:
                        self.logger.error('unmapped character "%s" at decoder input sequence %d', char, i)
                    idx = 0
                else:
                    idx = self.mapping[0][char]
                decoder_input_data[i, j+1, idx] = 1
                # teacher forcing: output is decoder input shifted left by 1
                decoder_output_data[i, j, idx] = 1
            # j == len(dec_seq) for decoder output: padding (keep zero)
            # ...other j for decoder input and output: padding (keep zero)
        # index of padded samples, so we can mask them
        # with the sample_weight parameter during fit() below
        decoder_output_weights = np.ones(decoder_output_data.shape[:-1], dtype=np.float32)
        decoder_output_weights[np.all(decoder_output_data == 0, axis=2)] = 0. # true zero (padding)
        #sklearn.preprocessing.normalize(decoder_output_weights, norm='l1', copy=False) # since Keras 2.3
        if self.lm_loss:
            # 2 outputs, 1 combined loss:
            decoder_output_data = [decoder_output_data, decoder_output_data]
            decoder_output_weights = [decoder_output_weights, decoder_output_weights]
        return encoder_input_data, decoder_input_data, decoder_output_data, decoder_output_weights
def save(self, filename):
'''Save model weights and configuration parameters.
Save configured model parameters into `filename`.
(This preserves weights across CPU/GPU implementations or input shape configurations.)
'''
assert self.status > 1 # already trained
self.logger.info('Saving model under "%s"', filename)
self.encoder_decoder_model.save_weights(filename)
with h5py.File(filename, 'a') as file:
config = file.create_group('config')
config.create_dataset('width', data=np.array(self.width))
config.create_dataset('depth', data=np.array(self.depth))
config.create_dataset('stateful', data=np.array(self.stateful))
config.create_dataset('residual_connections', data=np.array(self.residual_connections))
config.create_dataset('deep_bidirectional_encoder', data=np.array(self.deep_bidirectional_encoder))
config.create_dataset('bridge_dense', data=np.array(self.bridge_dense))
config.create_dataset('mapping',
data=np.fromiter((ord(self.mapping[1][i])
if i in self.mapping[1] and self.mapping[1][i] else 0
for i in range(self.voc_size)), dtype=np.uint32))
def load_config(self, filename):
'''Load parameters to prepare configuration/compilation.
Load model configuration from `filename`.
'''
with h5py.File(filename, 'r') as file:
config = file['config']
self.width = config['width'][()]
self.depth = config['depth'][()]
self.stateful = config['stateful'][()]
self.residual_connections = config['residual_connections'][()] \
if 'residual_connections' in config else False # old default
self.deep_bidirectional_encoder = config['deep_bidirectional_encoder'][()] \
if 'deep_bidirectional_encoder' in config else False # old default
self.bridge_dense = config['bridge_dense'][()] \
if 'bridge_dense' in config else False # old default
c_i = dict((chr(c), i) if c > 0 else ('', 0) for i, c in enumerate(config['mapping'][()]))
i_c = dict((i, chr(c)) if c > 0 else (0, '') for i, c in enumerate(config['mapping'][()]))
self.mapping = (c_i, i_c)
self.voc_size = len(c_i)
    def load_weights(self, filename):
        '''Load weights into the configured/compiled model.
        Load weights from `filename` into the compiled and configured model.
        (This preserves weights across CPU/GPU implementations or input shape configurations.)
        '''
        assert self.status > 0 # already compiled
        self.logger.info('Loading model from "%s"', filename)
        # by_name matches layers via their names, so a savefile from a
        # differently shaped (but equally named) model can still be loaded:
        self.encoder_decoder_model.load_weights(filename, by_name=True)
        # re-sync the separate decoder inference model with the freshly loaded weights:
        self._resync_decoder()
        self.status = 2 # now counts as trained (cf. assertion in save)
    def load_transfer_weights(self, filename):
        '''Load weights from another model into the configured/compiled model.
        Load weights from `filename` into the matching layers of the compiled and configured model.
        The other model need not have exactly the same configuration.
        (This preserves weights across CPU/GPU implementations or input shape configurations.)
        '''
        from keras.engine.saving import load_weights_from_hdf5_group_by_name
        assert self.status > 0 # already compiled
        assert self.depth > 1
        with h5py.File(filename, mode='r') as file:
            # weights-only savefiles have 'layer_names' in the top-level attrs;
            # full-model savefiles nest the weights under 'model_weights':
            if 'layer_names' not in file.attrs and 'model_weights' in file:
                file = file['model_weights']
            was_shallow = False
            if 'config' in file:
                # adopt the character mapping of the source model:
                config = file['config']
                c_i = dict((chr(c), i) if c > 0 else ('', 0) for i, c in enumerate(config['mapping'][()]))
                i_c = dict((i, chr(c)) if c > 0 else (0, '') for i, c in enumerate(config['mapping'][()]))
                self.mapping = (c_i, i_c)
                self.voc_size = len(c_i)
                self._reconfigure_for_mapping()
                if config['depth'][()] == self.depth - 1:
                    # transfer from a model with exactly one hidden layer less
                    was_shallow = True
            self.logger.info('Transferring model from "%s"', filename)
            load_weights_from_hdf5_group_by_name(file,
                [layer.cell # LM does not have attention wrapper in top HL
                 if layer.name == 'decoder_lstm_%d' % self.depth
                 else layer
                 for layer in self.encoder_decoder_model.layers],
                skip_mismatch=True, reshape=False)
            if was_shallow:
                # freeze the transferred lower layers, so that (initially)
                # only the new top layer gets trained:
                self.logger.info('fixing weights from shallower model')
                for i in range(1, self.depth): # fix previous layer weights
                    self.encoder_decoder_model.get_layer(name='encoder_lstm_%d'%i).trainable = False
                    self.encoder_decoder_model.get_layer(name='decoder_lstm_%d'%i).trainable = False
                self._recompile() # necessary for trainable to take effect
        self._resync_decoder()
        self.status = 1 # transferred, but not yet trained on target data
    def decode_batch_greedy(self, encoder_input_data):
        '''Predict from one batch of lines array without alternatives.
        Use encoder input lines array `encoder_input_data` (in a full batch)
        to produce some encoder output to attend to.
        Start decoder with start-of-sequence, then keep decoding until
        end-of-sequence is found or output length is way off.
        Decode by using the full output distribution as next input.
        Pass decoder initial/final states from character to character.
        Return a 5-tuple of the full output array (for training phase),
        output strings, output probability lists, entropies, and soft
        alignments (input-output matrices as list of list of vectors).
        '''
        encoder_outputs = self.encoder_model.predict_on_batch(encoder_input_data)
        encoder_output_data = encoder_outputs[0]
        states_values = encoder_outputs[1:]
        batch_size = encoder_input_data.shape[0]
        batch_length = encoder_input_data.shape[1]
        # all-zero first time-step == start-of-sequence "symbol":
        decoder_input_data = np.zeros((batch_size, 1, self.voc_size), dtype=np.uint32)
        # output length is limited to twice the input length:
        decoder_output_data = np.zeros((batch_size, batch_length * 2, self.voc_size), dtype=np.uint32)
        decoder_output_sequences = [''] * batch_size
        decoder_output_probs = [[] for _ in range(batch_size)]
        decoder_output_scores = [0.] * batch_size
        #decoder_output_alignments = [[]] * batch_size # does not copy!!
        decoder_output_alignments = [[] for _ in range(batch_size)]
        for i in range(batch_length * 2):
            # record the previous decoder input (for the training phase):
            decoder_output_data[:, i] = decoder_input_data[:, -1]
            output = self.decoder_model.predict_on_batch(
                [decoder_input_data, encoder_output_data] + states_values)
            scores = output[0]
            states_values = list(output[1:])
            alignment = states_values[-1]
            indexes = np.nanargmax(scores[:, :, 1:], axis=2) # without index zero (underspecification)
            #decoder_input_data = np.eye(self.voc_size, dtype=np.uint32)[indexes+1] # unit vectors
            decoder_input_data = scores # soft/confidence input (much better)
            logscores = -np.log(scores)
            for j, idx in enumerate(indexes[:, -1] + 1):
                # skip lines that are already finished (end in newline)
                # or were completely empty (all-zero) to begin with:
                if decoder_output_sequences[j].endswith('\n') or not np.any(encoder_input_data[j]):
                    continue
                decoder_output_sequences[j] += self.mapping[1][idx]
                decoder_output_probs[j].append(scores[j, -1, idx])
                decoder_output_scores[j] += logscores[j, -1, idx]
                decoder_output_alignments[j].append(alignment[j])
        for j in range(batch_size):
            if decoder_output_sequences[j]:
                # normalise accumulated -log(p) by output length:
                decoder_output_scores[j] /= len(decoder_output_sequences[j])
        # # calculate rejection scores (decoder input = encoder input):
        # decoder_input_data = np.insert(encoder_input_data, 0, 0., axis=1) # add start-of-sequence
        # decoder_rej_sequences = [''] * batch_size
        # decoder_rej_scores = [0.] * batch_size
        # output = self.decoder_model.predict_on_batch([decoder_input_data, encoder_output_data] + encoder_outputs[1:])
        # logscores = np.log(output[0])
        # for i in range(batch_length):
        #     indexes = np.nanargmax(encoder_input_data[:, i], axis=1)
        #     for j, idx in enumerate(indexes):
        #         if decoder_rej_sequences[j].endswith('\n') or not np.any(encoder_input_data[j]):
        #             continue
        #         decoder_rej_sequences[j] += self.mapping[1][int(idx)]
        #         decoder_rej_scores[j] -= logscores[j, i, idx]
        # for j in range(batch_size):
        #     if len(decoder_rej_sequences[j]) > 0:
        #         decoder_rej_scores[j] /= len(decoder_rej_sequences[j])
        #         # select rejection if better than decoder output:
        #         if decoder_rej_scores[j] < decoder_output_scores[j]:
        #             decoder_output_sequences[j] = decoder_rej_sequences[j]
        #             decoder_output_scores[j] = decoder_rej_scores[j]
        return (decoder_output_data,
                decoder_output_sequences, decoder_output_probs,
                decoder_output_scores, decoder_output_alignments)
def decode_sequence_greedy(self, source_seq=None, encoder_outputs=None):
'''Predict from one line vector without alternatives.
Use encoder input line vector `source_seq` (in a batch of size 1)
to produce some encoder output to attend to.
If `encoder_outputs` is given, then bypass that step.
Start decoder with start-of-sequence, then keep decoding until
end-of-sequence is found or output length is way off.
Decode by using the full output distribution as next input.
Pass decoder initial/final states from character to character.
Return a 4-tuple of output string, output probabilities, entropy,
and soft alignment (input-output matrix as list of vectors).
'''
# Encode the source as state vectors.
if encoder_outputs is None:
encoder_outputs = self.encoder_model.predict_on_batch(np.expand_dims(source_seq, axis=0))
attended_seq = encoder_outputs[0]
states_values = encoder_outputs[1:]
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, self.voc_size), dtype=np.uint32)
# The first character (start symbol) stays empty.
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
decoded_text = ''
decoded_probs = []
decoded_score = 0
alignments = []
for i in range(attended_seq.shape[1] * 2):
output = self.decoder_model.predict_on_batch([target_seq, attended_seq] + states_values)
scores = output[0]
if self.lm_predict:
states = output[2:]
else:
states = output[1:]
# Sample a token:
idx = np.nanargmax(scores[0, -1, :])
prob = scores[0, -1, idx]
score = -np.log(prob)
char = self.mapping[1][idx]
if char == '': # underspecification
scores[0, -1, idx] = np.nan
idx = np.nanargmax(scores[0, -1, :])
prob = scores[0, -1, idx]
score = -np.log(prob)
char = self.mapping[1][idx]
decoded_text += char
decoded_probs.append(prob)
decoded_score += score
alignments.append(states[-1][0])
# Exit condition: end-of-sequence character.
if char == '\n':
break
# Update the target sequence (of length 1):
#target_seq = np.eye(self.voc_size, dtype=np.uint32)[[[[idx]]]]
target_seq = scores # soft/confidence input (better)
# Update states:
states_values = list(states)
return (decoded_text, decoded_probs,
decoded_score / len(decoded_text), alignments)
    def decode_sequence_beam(self, source_seq=None, encoder_outputs=None):
        '''Predict from one line vector with alternatives.
        Use encoder input line vector `source_seq` (in a batch of size 1)
        to produce some encoder output to attend to.
        If `encoder_outputs` is given, then bypass that step.
        Start decoder with start-of-sequence, then keep decoding until
        end-of-sequence is found or output length is way off, repeatedly.
        Decode by using the best predicted output characters and several next-best
        alternatives (up to some degradation threshold) as next input.
        Follow-up on the N best overall candidates (estimated by accumulated
        score, normalized by length and prospective cost), i.e. do A*-like
        breadth-first search, with N equal `batch_size`.
        Pass decoder initial/final states from character to character,
        for each candidate respectively.
        Reserve 1 candidate per iteration for running through `source_seq`
        (as a rejection fallback) to ensure that path does not fall off the
        beam and at least one solution can be found within the search limits.
        For each solution, yield a 4-tuple of output string, output probabilities,
        entropy, and soft alignment (input-output matrix as list of vectors).
        '''
        from bisect import insort_left
        # Encode the source as state vectors.
        if encoder_outputs is None:
            encoder_outputs = self.encoder_model.predict_on_batch(np.expand_dims(source_seq, axis=0))
        attended_seq = encoder_outputs[0] # constant
        attended_len = attended_seq.shape[1]
        states_values = encoder_outputs[1:]
        # Start with an empty beam (no input, only state):
        next_beam = [Node(state=states_values,
                          value='', scores=np.zeros(self.voc_size),
                          prob=[], cost=0.0,
                          alignment=[],
                          length0=attended_len,
                          cost0=3.0)] # quite pessimistic
        final_beam = []
        # how many batches (i.e. char hypotheses) will be processed per line at maximum?
        max_batches = attended_len * 2 # (usually) safe limit
        for l in range(max_batches):
            # pull the best active hypotheses from the fringe into one batch:
            beam = []
            while next_beam:
                node = next_beam.pop()
                if node.value == '\n': # end-of-sequence symbol?
                    insort_left(final_beam, node)
                    # self.logger.debug('%02d found new solution %.2f/"%s"',
                    #                   l, node.pro_cost(), str(node).strip('\n'))
                else: # normal step
                    beam.append(node)
                    if node.length > 1.5 * attended_len:
                        self.logger.warning('found overlong hypothesis "%s" in "%s"',
                                            str(node),
                                            ''.join(self.mapping[1][np.nanargmax(step)] for step in source_seq))
                    # self.logger.debug('%02d new hypothesis %.2f/"%s"',
                    #                   l, node.pro_cost(), str(node).strip('\n'))
                if len(beam) >= self.batch_size:
                    break # enough for one batch
            if not beam:
                break # will yield StopIteration unless we have some results already
            if (len(final_beam) > self.beam_width_out and
                final_beam[-1].pro_cost() > beam[0].pro_cost()):
                break # it is unlikely that later iterations will find better top n results
            # use fringe leaves as minibatch, but with only 1 timestep
            target_seq = np.expand_dims(
                np.vstack([node.scores for node in beam]),
                axis=1) # add time dimension
            states_val = [np.vstack([node.state[layer] for node in beam])
                          for layer in range(len(beam[0].state))] # stack layers across batch
            output = self.decoder_model.predict_on_batch(
                [target_seq, attended_seq] + states_val)
            scores_output = output[0][:, -1] # only last timestep
            if self.lm_predict:
                lmscores_output = output[1][:, -1]
                states_output = list(output[2:])
            else:
                states_output = list(output[1:]) # from (layers) tuple
            for i, node in enumerate(beam): # iterate over batch (1st dim)
                # unstack layers for current sample:
                states = [layer[i:i+1] for layer in states_output]
                scores = scores_output[i]
                #
                # estimate current alignment target:
                alignment = states[-1][0]
                misalignment = 0.0
                if node.length > 1:
                    # expected source position = attention-weighted index:
                    prev_alignment = node.alignment
                    prev_source_pos = np.matmul(prev_alignment, np.arange(attended_len))
                    source_pos = np.matmul(alignment, np.arange(attended_len))
                    misalignment = np.abs(source_pos - prev_source_pos - 1)
                    if np.max(prev_alignment) == 1.0:
                        # previous choice was rejection
                        source_pos = int(prev_source_pos) + 1
                    else:
                        source_pos = int(source_pos.round())
                else:
                    source_pos = 0
                #
                # add fallback/rejection candidates regardless of beam threshold:
                source_scores = source_seq[source_pos]
                if (self.rejection_threshold
                    and (misalignment < 0.1 or np.max(node.alignment) == 1.0)
                    and np.any(source_scores)):
                    rej_idx = np.nanargmax(source_scores)
                    # use a fixed minimum probability
                    if scores[rej_idx] < self.rejection_threshold:
                        #scores *= self.rejection_threshold - scores[rej_idx] # renormalize
                        scores[rej_idx] = self.rejection_threshold # overwrite
                        # self.logger.debug('%s: rej=%s (%.2f)', str(node),
                        #                   self.mapping[1][rej_idx], scores[rej_idx])
                else:
                    rej_idx = None
                #
                # determine beam width from beam threshold to add normal candidates:
                scores_order = np.argsort(scores) # still in reverse order (worst first)
                highest = scores[scores_order[-1]]
                beampos = self.voc_size - np.searchsorted(
                    scores[scores_order],
                    #highest - self.beam_threshold_in) # variable beam width (absolute)
                    highest * self.beam_threshold_in) # variable beam width (relative)
                #beampos = self.beam_width_in # fixed beam width
                beampos = min(beampos, self.beam_width_in) # mixed beam width
                pos = 0
                #
                # follow up on best predictions, in true order (best first):
                for idx in reversed(scores_order):
                    pos += 1
                    score = scores[idx]
                    logscore = -np.log(score)
                    if self.lm_predict:
                        # use probability from LM instead of decoder for beam ratings
                        logscore = -np.log(lmscores_output[i][idx])
                    alignment1 = alignment
                    if idx == rej_idx:
                        # self.logger.debug('adding rejection candidate "%s" [%.2f]',
                        #                   self.mapping[1][rej_idx], logscore)
                        # mark rejection by a hard (one-hot) alignment:
                        alignment1 = np.eye(attended_len)[source_pos]
                        rej_idx = None
                    elif pos > beampos:
                        # NOTE(review): `if rej_idx` is falsy for index 0, too
                        # (not just None) — presumably never hit, since index 0
                        # means underspecification; confirm against mapping
                        if rej_idx: # not yet in beam
                            continue # search for rejection candidate
                        else:
                            break # ignore further alternatives
                    #
                    # decode into string:
                    value = self.mapping[1][idx]
                    if (np.isnan(logscore) or
                        value == ''): # underspecification
                        continue # ignore this alternative
                    #
                    # add new hypothesis to the beam:
                    # for decoder feedback, use a compromise between
                    # - raw predictions (used in greedy decoder,
                    #   still informative of ambiguity), and
                    # - argmax unit vectors (allowing alternatives,
                    #   but introducing label bias)
                    scores1 = np.copy(scores)
                    # already slightly better than unit vectors:
                    # scores1 *= scores[idx] / highest
                    # scores1[idx] = scores[idx] # keep
                    # only disable maxima iteratively:
                    scores[idx] = 0
                    new_node = Node(parent=node, state=states,
                                    value=value, scores=scores1,
                                    prob=score, cost=logscore,
                                    alignment=alignment1)
                    # self.logger.debug('pro_cost: %3.3f, cum_cost: %3.1f, "%s"',
                    #                   new_node.pro_cost(),
                    #                   new_node.cum_cost,
                    #                   str(new_node).strip('\n'))
                    insort_left(next_beam, new_node)
            # sanitize overall beam size:
            if len(next_beam) > max_batches * self.batch_size: # more than can ever be processed within limits?
                next_beam = next_beam[-max_batches*self.batch_size:] # to save memory, keep only best
        # after max_batches, we still have active hypotheses but too few finished ones?
        if next_beam and len(final_beam) < self.beam_width_out:
            self.logger.warning('max_batches %d is not enough for beam_width_out %d: got only %d, still %d left for: "%s"',
                                max_batches, self.beam_width_out, len(final_beam), len(next_beam),
                                ''.join(self.mapping[1][np.nanargmax(step)] for step in source_seq))
        # yield solutions, best first (skipping the empty root node):
        while final_beam:
            node = final_beam.pop()
            nodes = node.to_sequence()[1:]
            yield (''.join(n.value for n in nodes),
                   [n.prob for n in nodes],
                   node.cum_cost / (node.length - 1),
                   [n.alignment for n in nodes])
class Node(object):
    """One hypothesis in the character beam search (a node of the trie)."""
    def __init__(self, state, value, scores, cost, parent=None, prob=1.0, alignment=None, length0=None, cost0=None):
        super(Node, self).__init__()
        self._sequence = None # memoized root-to-node path (see to_sequence)
        self.value = value # character hypothesized at this step
        self.parent = parent # preceding hypothesis Node, or None at the root
        self.state = state # recurrent layer hidden state after this step
        self.prob = prob # probability of this character
        self.scores = scores # output distribution to feed back into the decoder
        if parent is None:
            self.cum_cost = cost # e.g. -log(p) of the sequence up to here
            self.length = 1
            # source sequence length (for A* prospective cost estimation):
            self.length0 = length0 or 1
            # additional (average) per-node cost (for A* estimation):
            self.cost0 = cost0 or 0
            self.alignment = [] if alignment is None else alignment
        else:
            # accumulate along the path, inheriting estimation parameters:
            self.cum_cost = parent.cum_cost + cost
            self.length = parent.length + 1
            self.length0 = length0 or parent.length0
            self.cost0 = cost0 or parent.cost0
            self.alignment = parent.alignment if alignment is None else alignment
    def to_sequence(self):
        """Return (and memoize) the list of nodes from the root down to here."""
        if not self._sequence:
            path = []
            current = self
            while current:
                path.insert(0, current)
                current = current.parent
            self._sequence = path
        return self._sequence
    def __str__(self):
        # concatenate the characters, skipping the empty root node
        return ''.join(node.value for node in self.to_sequence()[1:])
    def pro_cost(self):
        # Prospective (A*-like) total cost: cumulative cost so far plus a
        # pessimistic estimate for the remaining length difference — negated,
        # so an ascending sorted list keeps the best candidate last
        # (allowing the faster bisect() insertion and pop() retrieval).
        return - (self.cum_cost + self.cost0 * np.abs(self.length - self.length0))
    # rich comparisons order nodes by prospective cost only:
    def __lt__(self, other):
        return self.pro_cost() < other.pro_cost()
    def __le__(self, other):
        return self.pro_cost() <= other.pro_cost()
    def __eq__(self, other):
        return self.pro_cost() == other.pro_cost()
    def __ne__(self, other):
        return self.pro_cost() != other.pro_cost()
    def __gt__(self, other):
        return self.pro_cost() > other.pro_cost()
    def __ge__(self, other):
        return self.pro_cost() >= other.pro_cost()
| [
"logging.getLogger",
"numpy.nanargmax",
"numpy.log",
"keras.callbacks.TerminateOnNaN",
"keras.layers.TimeDistributed",
"keras.backend.slice",
"math.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.count_nonzero",
"keras.layers.Dense",
"tensorflow.compat.v1.get_default_graph",
"math.exp",
"ten... | [((10507, 10533), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (10531, 10533), True, 'import tensorflow as tf\n'), ((12030, 12086), 'keras.layers.Input', 'Input', ([], {'shape': '(None, self.voc_size)', 'name': '"""encoder_input"""'}), "(shape=(None, self.voc_size), name='encoder_input')\n", (12035, 12086), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((12408, 12469), 'keras.layers.TimeDistributed', 'TimeDistributed', (['char_embedding'], {'name': '"""char_input_projection"""'}), "(char_embedding, name='char_input_projection')\n", (12423, 12469), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((17103, 17159), 'keras.layers.Input', 'Input', ([], {'shape': '(None, self.voc_size)', 'name': '"""decoder_input"""'}), "(shape=(None, self.voc_size), name='decoder_input')\n", (17108, 17159), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((21373, 21461), 'keras.models.Model', 'Model', (['[encoder_input, decoder_input]', 'decoder_output'], {'name': '"""encoder_decoder_model"""'}), "([encoder_input, decoder_input], decoder_output, name=\n 'encoder_decoder_model')\n", (21378, 21461), False, 'from keras.models import Model\n'), ((22075, 22164), 'keras.models.Model', 'Model', (['encoder_input', '([encoder_output] + encoder_state_outputs)'], {'name': '"""encoder_model"""'}), "(encoder_input, [encoder_output] + encoder_state_outputs, name=\n 'encoder_model')\n", (22080, 22164), False, 'from keras.models import Model\n'), ((25973, 26102), 'keras.models.Model', 'Model', (['([decoder_input, attention_input] + decoder_state_inputs)', '(decoder_output + decoder_state_outputs)'], {'name': '"""decoder_model"""'}), "([decoder_input, attention_input] + decoder_state_inputs, \n decoder_output + decoder_state_outputs, name='decoder_model')\n", (25978, 26102), False, 'from keras.models import Model\n'), ((26429, 26461), 
'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), '()\n', (26459, 26461), True, 'import tensorflow as tf\n'), ((29314, 29364), 'keras.backend.slice', 'K.slice', (['embedding_matrix', '[0, 0]', '[1, em_dims[1]]'], {}), '(embedding_matrix, [0, 0], [1, em_dims[1]])\n', (29321, 29364), True, 'from keras import backend as K\n'), ((29500, 29563), 'keras.backend.slice', 'K.slice', (['embedding_matrix', '[1, 0]', '[em_dims[0] - 1, em_dims[1]]'], {}), '(embedding_matrix, [1, 0], [em_dims[0] - 1, em_dims[1]])\n', (29507, 29563), True, 'from keras import backend as K\n'), ((30014, 30032), 'keras.backend.ones_like', 'K.ones_like', (['norms'], {}), '(norms)\n', (30025, 30032), True, 'from keras import backend as K\n'), ((30155, 30206), 'keras.backend.in_train_phase', 'K.in_train_phase', (['(lowrank + underspecification)', '(0.0)'], {}), '(lowrank + underspecification, 0.0)\n', (30171, 30206), True, 'from keras import backend as K\n'), ((33495, 33594), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(3)', 'verbose': '(1)', 'mode': '"""min"""', 'restore_best_weights': '(True)'}), "(monitor='val_loss', patience=3, verbose=1, mode='min',\n restore_best_weights=True)\n", (33508, 33594), False, 'from keras.callbacks import EarlyStopping, TerminateOnNaN\n'), ((59928, 60055), 'numpy.zeros', 'np.zeros', (['(batch_size, max_encoder_input_length, self.voc_size)'], {'dtype': '(np.float32 if encoder_conf_sequences else np.uint32)'}), '((batch_size, max_encoder_input_length, self.voc_size), dtype=np.\n float32 if encoder_conf_sequences else np.uint32)\n', (59936, 60055), True, 'import numpy as np\n'), ((60120, 60209), 'numpy.zeros', 'np.zeros', (['(batch_size, max_decoder_input_length + 1, self.voc_size)'], {'dtype': 'np.uint32'}), '((batch_size, max_decoder_input_length + 1, self.voc_size), dtype=\n np.uint32)\n', (60128, 60209), True, 'import numpy as np\n'), ((60272, 60361), 'numpy.zeros', 'np.zeros', 
(['(batch_size, max_decoder_input_length + 1, self.voc_size)'], {'dtype': 'np.uint32'}), '((batch_size, max_decoder_input_length + 1, self.voc_size), dtype=\n np.uint32)\n', (60280, 60361), True, 'import numpy as np\n'), ((63018, 63075), 'numpy.ones', 'np.ones', (['decoder_output_data.shape[:-1]'], {'dtype': 'np.float32'}), '(decoder_output_data.shape[:-1], dtype=np.float32)\n', (63025, 63075), True, 'import numpy as np\n'), ((70191, 70248), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, self.voc_size)'], {'dtype': 'np.uint32'}), '((batch_size, 1, self.voc_size), dtype=np.uint32)\n', (70199, 70248), True, 'import numpy as np\n'), ((70279, 70351), 'numpy.zeros', 'np.zeros', (['(batch_size, batch_length * 2, self.voc_size)'], {'dtype': 'np.uint32'}), '((batch_size, batch_length * 2, self.voc_size), dtype=np.uint32)\n', (70287, 70351), True, 'import numpy as np\n'), ((74573, 74621), 'numpy.zeros', 'np.zeros', (['(1, 1, self.voc_size)'], {'dtype': 'np.uint32'}), '((1, 1, self.voc_size), dtype=np.uint32)\n', (74581, 74621), True, 'import numpy as np\n'), ((8675, 8702), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (8692, 8702), False, 'import logging\n'), ((10603, 10638), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'config': 'config'}), '(config=config)\n', (10623, 10638), True, 'import tensorflow as tf\n'), ((10850, 10892), 'keras.backend.tensorflow_backend._get_available_gpus', 'K.tensorflow_backend._get_available_gpus', ([], {}), '()\n', (10890, 10892), True, 'from keras import backend as K\n'), ((16945, 16978), 'keras.layers.Dense', 'Dense', (['self.width'], {'use_bias': '(False)'}), '(self.width, use_bias=False)\n', (16950, 16978), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((20847, 20906), 'keras.layers.Lambda', 'Lambda', (['char_embedding_transposed'], {'name': '"""transpose+softmax"""'}), "(char_embedding_transposed, name='transpose+softmax')\n", (20853, 20906), False, 
'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((22809, 22872), 'keras.layers.Input', 'Input', ([], {'shape': '(self.width,)', 'name': "('initial_h_%d_input' % (n + 1))"}), "(shape=(self.width,), name='initial_h_%d_input' % (n + 1))\n", (22814, 22872), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((22927, 22990), 'keras.layers.Input', 'Input', ([], {'shape': '(self.width,)', 'name': "('initial_c_%d_input' % (n + 1))"}), "(shape=(self.width,), name='initial_c_%d_input' % (n + 1))\n", (22932, 22990), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((29701, 29721), 'keras.backend.mean', 'K.mean', (['vecs'], {'axis': '(0)'}), '(vecs, axis=0)\n', (29707, 29721), True, 'from keras import backend as K\n'), ((29962, 29988), 'keras.backend.square', 'K.square', (['embedding_matrix'], {}), '(embedding_matrix)\n', (29970, 29988), True, 'from keras import backend as K\n'), ((33353, 33390), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(num_lines,)'], {}), '(0, 1, (num_lines,))\n', (33370, 33390), True, 'import numpy as np\n'), ((33665, 33681), 'keras.callbacks.TerminateOnNaN', 'TerminateOnNaN', ([], {}), '()\n', (33679, 33681), False, 'from keras.callbacks import EarlyStopping, TerminateOnNaN\n'), ((42050, 42106), 'math.exp', 'math.exp', (['(c_greedy_counts.score / c_greedy_counts.length)'], {}), '(c_greedy_counts.score / c_greedy_counts.length)\n', (42058, 42106), False, 'import math\n'), ((42151, 42207), 'math.exp', 'math.exp', (['(c_beamed_counts.score / c_beamed_counts.length)'], {}), '(c_beamed_counts.score / c_beamed_counts.length)\n', (42159, 42207), False, 'import math\n'), ((42280, 42312), 'math.sqrt', 'math.sqrt', (['c_origin_counts.varia'], {}), '(c_origin_counts.varia)\n', (42289, 42312), False, 'import math\n'), ((42386, 42418), 'math.sqrt', 'math.sqrt', (['c_greedy_counts.varia'], {}), '(c_greedy_counts.varia)\n', (42395, 42418), False, 
'import math\n'), ((42492, 42524), 'math.sqrt', 'math.sqrt', (['c_beamed_counts.varia'], {}), '(c_beamed_counts.varia)\n', (42501, 42524), False, 'import math\n'), ((42598, 42630), 'math.sqrt', 'math.sqrt', (['w_origin_counts.varia'], {}), '(w_origin_counts.varia)\n', (42607, 42630), False, 'import math\n'), ((42704, 42736), 'math.sqrt', 'math.sqrt', (['w_greedy_counts.varia'], {}), '(w_greedy_counts.varia)\n', (42713, 42736), False, 'import math\n'), ((42810, 42842), 'math.sqrt', 'math.sqrt', (['w_beamed_counts.varia'], {}), '(w_beamed_counts.varia)\n', (42819, 42842), False, 'import math\n'), ((63107, 63147), 'numpy.all', 'np.all', (['(decoder_output_data == 0)'], {'axis': '(2)'}), '(decoder_output_data == 0, axis=2)\n', (63113, 63147), True, 'import numpy as np\n'), ((64063, 64087), 'h5py.File', 'h5py.File', (['filename', '"""a"""'], {}), "(filename, 'a')\n", (64072, 64087), False, 'import h5py\n'), ((65170, 65194), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (65179, 65194), False, 'import h5py\n'), ((67294, 67323), 'h5py.File', 'h5py.File', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (67303, 67323), False, 'import h5py\n'), ((68084, 68293), 'keras.engine.saving.load_weights_from_hdf5_group_by_name', 'load_weights_from_hdf5_group_by_name', (['file', "[(layer.cell if layer.name == 'decoder_lstm_%d' % self.depth else layer) for\n layer in self.encoder_decoder_model.layers]"], {'skip_mismatch': '(True)', 'reshape': '(False)'}), "(file, [(layer.cell if layer.name == \n 'decoder_lstm_%d' % self.depth else layer) for layer in self.\n encoder_decoder_model.layers], skip_mismatch=True, reshape=False)\n", (68120, 68293), False, 'from keras.engine.saving import load_weights_from_hdf5_group_by_name\n'), ((71040, 71078), 'numpy.nanargmax', 'np.nanargmax', (['scores[:, :, 1:]'], {'axis': '(2)'}), '(scores[:, :, 1:], axis=2)\n', (71052, 71078), True, 'import numpy as np\n'), ((75266, 75296), 'numpy.nanargmax', 
'np.nanargmax', (['scores[0, -1, :]'], {}), '(scores[0, -1, :])\n', (75278, 75296), True, 'import numpy as np\n'), ((10818, 10829), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (10827, 10829), True, 'from keras import backend as K\n'), ((12226, 12252), 'keras.initializers.RandomNormal', 'RandomNormal', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (12238, 12252), False, 'from keras.initializers import RandomNormal\n'), ((16003, 16052), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {'noise_shape': 'constant_shape'}), '(self.dropout, noise_shape=constant_shape)\n', (16010, 16052), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((18877, 18894), 'keras.layers.RNN', 'RNN', (['cell'], {}), '(cell, **args)\n', (18880, 18894), False, 'from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional\n'), ((19483, 19521), 'keras.layers.add', 'add', (['[decoder_output2, decoder_output]'], {}), '([decoder_output2, decoder_output])\n', (19486, 19521), False, 'from keras.layers import concatenate, average, add\n'), ((23700, 23755), 'keras.layers.Input', 'Input', ([], {'shape': '(None, self.width)', 'name': '"""attention_input"""'}), "(shape=(None, self.width), name='attention_input')\n", (23705, 23755), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((23833, 23883), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)', 'name': '"""attention_state_input"""'}), "(shape=(None,), name='attention_state_input')\n", (23838, 23883), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((24720, 24737), 'keras.layers.RNN', 'RNN', (['cell'], {}), '(cell, **args)\n', (24723, 24737), False, 'from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional\n'), ((26718, 26734), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(5)'}), '(clipnorm=5)\n', (26722, 26734), False, 'from keras.optimizers import Adam\n'), ((29804, 29825), 
'keras.backend.square', 'K.square', (['(vec0 - vecs)'], {}), '(vec0 - vecs)\n', (29812, 29825), True, 'from keras import backend as K\n'), ((30106, 30129), 'keras.backend.square', 'K.square', (['(norm0 - norms)'], {}), '(norm0 - norms)\n', (30114, 30129), True, 'from keras import backend as K\n'), ((34600, 34641), 'numpy.isnan', 'np.isnan', (["history.history['val_loss'][-1]"], {}), "(history.history['val_loss'][-1])\n", (34608, 34641), True, 'import numpy as np\n'), ((71323, 71337), 'numpy.log', 'np.log', (['scores'], {}), '(scores)\n', (71329, 71337), True, 'import numpy as np\n'), ((74367, 74401), 'numpy.expand_dims', 'np.expand_dims', (['source_seq'], {'axis': '(0)'}), '(source_seq, axis=0)\n', (74381, 74401), True, 'import numpy as np\n'), ((75356, 75368), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (75362, 75368), True, 'import numpy as np\n'), ((75523, 75553), 'numpy.nanargmax', 'np.nanargmax', (['scores[0, -1, :]'], {}), '(scores[0, -1, :])\n', (75535, 75553), True, 'import numpy as np\n'), ((77908, 77942), 'numpy.expand_dims', 'np.expand_dims', (['source_seq'], {'axis': '(0)'}), '(source_seq, axis=0)\n', (77922, 77942), True, 'import numpy as np\n'), ((80142, 80183), 'numpy.vstack', 'np.vstack', (['[node.scores for node in beam]'], {}), '([node.scores for node in beam])\n', (80151, 80183), True, 'import numpy as np\n'), ((80256, 80303), 'numpy.vstack', 'np.vstack', (['[node.state[layer] for node in beam]'], {}), '([node.state[layer] for node in beam])\n', (80265, 80303), True, 'import numpy as np\n'), ((82856, 82874), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (82866, 82874), True, 'import numpy as np\n'), ((14556, 14593), 'keras.layers.Bidirectional', 'Bidirectional', (['layer'], {'name': 'layer.name'}), '(layer, name=layer.name)\n', (14569, 14593), False, 'from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional\n'), ((16129, 16195), 'keras.layers.Dense', 'Dense', (['self.width'], {'activation': '"""tanh"""', 
'name': "('bridge_h_%d' % (n + 1))"}), "(self.width, activation='tanh', name='bridge_h_%d' % (n + 1))\n", (16134, 16195), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((16229, 16295), 'keras.layers.Dense', 'Dense', (['self.width'], {'activation': '"""tanh"""', 'name': "('bridge_c_%d' % (n + 1))"}), "(self.width, activation='tanh', name='bridge_c_%d' % (n + 1))\n", (16234, 16295), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((16576, 16591), 'keras.backend.zeros_like', 'K.zeros_like', (['x'], {}), '(x)\n', (16588, 16591), True, 'from keras import backend as K\n'), ((18463, 18537), 'keras.layers.LSTMCell', 'LSTMCell', (['self.width'], {'dropout': 'self.dropout', 'recurrent_activation': '"""sigmoid"""'}), "(self.width, dropout=self.dropout, recurrent_activation='sigmoid')\n", (18471, 18537), False, 'from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional\n'), ((19893, 19942), 'keras.layers.Dropout', 'Dropout', (['self.dropout'], {'noise_shape': 'constant_shape'}), '(self.dropout, noise_shape=constant_shape)\n', (19900, 19942), False, 'from keras.layers import Input, Dense, TimeDistributed, Dropout, Lambda\n'), ((20767, 20801), 'keras.backend.transpose', 'K.transpose', (['char_embedding.kernel'], {}), '(char_embedding.kernel)\n', (20778, 20801), True, 'from keras import backend as K\n'), ((24306, 24380), 'keras.layers.LSTMCell', 'LSTMCell', (['self.width'], {'dropout': 'self.dropout', 'recurrent_activation': '"""sigmoid"""'}), "(self.width, dropout=self.dropout, recurrent_activation='sigmoid')\n", (24314, 24380), False, 'from keras.layers import RNN, LSTMCell, LSTM, CuDNNLSTM, Bidirectional\n'), ((30623, 30640), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (30634, 30640), False, 'import pickle\n'), ((31276, 31310), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFC"""', 'line'], {}), "('NFC', line)\n", (31297, 31310), False, 'import unicodedata\n'), 
((49419, 49459), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.batch_size'], {}), '(0, 1, self.batch_size)\n', (49436, 49459), True, 'import numpy as np\n'), ((51636, 51676), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.batch_size'], {}), '(0, 1, self.batch_size)\n', (51653, 51676), True, 'import numpy as np\n'), ((64194, 64214), 'numpy.array', 'np.array', (['self.width'], {}), '(self.width)\n', (64202, 64214), True, 'import numpy as np\n'), ((64264, 64284), 'numpy.array', 'np.array', (['self.depth'], {}), '(self.depth)\n', (64272, 64284), True, 'import numpy as np\n'), ((64337, 64360), 'numpy.array', 'np.array', (['self.stateful'], {}), '(self.stateful)\n', (64345, 64360), True, 'import numpy as np\n'), ((64425, 64460), 'numpy.array', 'np.array', (['self.residual_connections'], {}), '(self.residual_connections)\n', (64433, 64460), True, 'import numpy as np\n'), ((64531, 64572), 'numpy.array', 'np.array', (['self.deep_bidirectional_encoder'], {}), '(self.deep_bidirectional_encoder)\n', (64539, 64572), True, 'import numpy as np\n'), ((64629, 64656), 'numpy.array', 'np.array', (['self.bridge_dense'], {}), '(self.bridge_dense)\n', (64637, 64656), True, 'import numpy as np\n'), ((75621, 75633), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (75627, 75633), True, 'import numpy as np\n'), ((78244, 78267), 'numpy.zeros', 'np.zeros', (['self.voc_size'], {}), '(self.voc_size)\n', (78252, 78267), True, 'import numpy as np\n'), ((78847, 78876), 'bisect.insort_left', 'insort_left', (['final_beam', 'node'], {}), '(final_beam, node)\n', (78858, 78876), False, 'from bisect import insort_left\n'), ((81476, 81516), 'numpy.abs', 'np.abs', (['(source_pos - prev_source_pos - 1)'], {}), '(source_pos - prev_source_pos - 1)\n', (81482, 81516), True, 'import numpy as np\n'), ((82135, 82156), 'numpy.any', 'np.any', (['source_scores'], {}), '(source_scores)\n', (82141, 82156), True, 'import numpy as np\n'), ((82189, 82216), 'numpy.nanargmax', 
'np.nanargmax', (['source_scores'], {}), '(source_scores)\n', (82201, 82216), True, 'import numpy as np\n'), ((83007, 83078), 'numpy.searchsorted', 'np.searchsorted', (['scores[scores_order]', '(highest * self.beam_threshold_in)'], {}), '(scores[scores_order], highest * self.beam_threshold_in)\n', (83022, 83078), True, 'import numpy as np\n'), ((85180, 85195), 'numpy.copy', 'np.copy', (['scores'], {}), '(scores)\n', (85187, 85195), True, 'import numpy as np\n'), ((86001, 86033), 'bisect.insort_left', 'insort_left', (['next_beam', 'new_node'], {}), '(next_beam, new_node)\n', (86012, 86033), False, 'from bisect import insort_left\n'), ((89406, 89440), 'numpy.abs', 'np.abs', (['(self.length - self.length0)'], {}), '(self.length - self.length0)\n', (89412, 89440), True, 'import numpy as np\n'), ((15542, 15580), 'keras.layers.add', 'add', (['[encoder_output2, encoder_output]'], {}), '([encoder_output2, encoder_output])\n', (15545, 15580), False, 'from keras.layers import concatenate, average, add\n'), ((50090, 50115), 'numpy.count_nonzero', 'np.count_nonzero', (['indexes'], {}), '(indexes)\n', (50106, 50115), True, 'import numpy as np\n'), ((51999, 52020), 'numpy.eye', 'np.eye', (['self.voc_size'], {}), '(self.voc_size)\n', (52005, 52020), True, 'import numpy as np\n'), ((52944, 52961), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (52955, 52961), False, 'import pickle\n'), ((55226, 55267), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFC"""', 'source_text'], {}), "('NFC', source_text)\n", (55247, 55267), False, 'import unicodedata\n'), ((55306, 55347), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFC"""', 'target_text'], {}), "('NFC', target_text)\n", (55327, 55347), False, 'import unicodedata\n'), ((71464, 71493), 'numpy.any', 'np.any', (['encoder_input_data[j]'], {}), '(encoder_input_data[j])\n', (71470, 71493), True, 'import numpy as np\n'), ((81337, 81360), 'numpy.arange', 'np.arange', (['attended_len'], {}), '(attended_len)\n', 
(81346, 81360), True, 'import numpy as np\n'), ((81416, 81439), 'numpy.arange', 'np.arange', (['attended_len'], {}), '(attended_len)\n', (81425, 81439), True, 'import numpy as np\n'), ((81540, 81562), 'numpy.max', 'np.max', (['prev_alignment'], {}), '(prev_alignment)\n', (81546, 81562), True, 'import numpy as np\n'), ((83655, 83668), 'numpy.log', 'np.log', (['score'], {}), '(score)\n', (83661, 83668), True, 'import numpy as np\n'), ((84615, 84633), 'numpy.isnan', 'np.isnan', (['logscore'], {}), '(logscore)\n', (84623, 84633), True, 'import numpy as np\n'), ((51293, 51368), 'numpy.where', 'np.where', (['indexes_condition', 'decoder_input_data_sampled', 'decoder_input_data'], {}), '(indexes_condition, decoder_input_data_sampled, decoder_input_data)\n', (51301, 51368), True, 'import numpy as np\n'), ((82080, 82102), 'numpy.max', 'np.max', (['node.alignment'], {}), '(node.alignment)\n', (82086, 82102), True, 'import numpy as np\n'), ((83831, 83862), 'numpy.log', 'np.log', (['lmscores_output[i][idx]'], {}), '(lmscores_output[i][idx])\n', (83837, 83862), True, 'import numpy as np\n'), ((84148, 84168), 'numpy.eye', 'np.eye', (['attended_len'], {}), '(attended_len)\n', (84154, 84168), True, 'import numpy as np\n'), ((86715, 86733), 'numpy.nanargmax', 'np.nanargmax', (['step'], {}), '(step)\n', (86727, 86733), True, 'import numpy as np\n'), ((13587, 13650), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, x.shape[1].value, x.shape[2].value // 2, 2]'], {}), '(x, [-1, x.shape[1].value, x.shape[2].value // 2, 2])\n', (13597, 13650), True, 'import tensorflow as tf\n'), ((25318, 25333), 'keras.backend.zeros_like', 'K.zeros_like', (['x'], {}), '(x)\n', (25330, 25333), True, 'from keras import backend as K\n'), ((51881, 51907), 'numpy.arange', 'np.arange', (['self.batch_size'], {}), '(self.batch_size)\n', (51890, 51907), True, 'import numpy as np\n'), ((48691, 48743), 'math.exp', 'math.exp', (['(5 - 10 * attenuation * epoch / self.epochs)'], {}), '(5 - 10 * attenuation * epoch / 
self.epochs)\n', (48699, 48743), False, 'import math\n'), ((79377, 79395), 'numpy.nanargmax', 'np.nanargmax', (['step'], {}), '(step)\n', (79389, 79395), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import torch
from groupy.gconv.tensorflow_gconv.transform_filter import transform_filter_2d_nchw, transform_filter_2d_nhwc
from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices,\
make_d4_z2_indices, make_d4_p4m_indices, flatten_indices
from groupy.gconv.pytorch_gconv.splitgconv2d import trans_filter as pytorch_trans_filter_
# Comparing tensorflow and pytorch filter transformation
def check_c4_z2():
    """Checks that tensorflow and pytorch agree on the C4 Z2 filter transform."""
    indices = make_c4_z2_indices(ksize=3)
    filters = np.random.randn(6, 7, 1, 3, 3)
    result_tf = tf_trans_filter(filters, indices)
    result_torch = pytorch_trans_filter(filters, indices)
    total_diff = np.abs(result_tf - result_torch).sum()
    print('>>>>> DIFFERENCE:', total_diff)
    assert total_diff == 0
def check_c4_p4():
    """Checks that tensorflow and pytorch agree on the C4 P4 filter transform."""
    indices = make_c4_p4_indices(ksize=3)
    filters = np.random.randn(6, 7, 4, 3, 3)
    result_tf = tf_trans_filter(filters, indices)
    result_torch = pytorch_trans_filter(filters, indices)
    total_diff = np.abs(result_tf - result_torch).sum()
    print('>>>>> DIFFERENCE:', total_diff)
    assert total_diff == 0
def check_d4_z2():
    """Checks that tensorflow and pytorch agree on the D4 Z2 filter transform."""
    indices = make_d4_z2_indices(ksize=3)
    filters = np.random.randn(6, 7, 1, 3, 3)
    result_tf = tf_trans_filter(filters, indices)
    result_torch = pytorch_trans_filter(filters, indices)
    total_diff = np.abs(result_tf - result_torch).sum()
    print('>>>>> DIFFERENCE:', total_diff)
    assert total_diff == 0
def check_d4_p4m():
    """Checks that tensorflow and pytorch agree on the D4 P4M filter transform."""
    indices = make_d4_p4m_indices(ksize=3)
    filters = np.random.randn(6, 7, 8, 3, 3)
    result_tf = tf_trans_filter(filters, indices)
    result_torch = pytorch_trans_filter(filters, indices)
    total_diff = np.abs(result_tf - result_torch).sum()
    print('>>>>> DIFFERENCE:', total_diff)
    assert total_diff == 0
def tf_trans_filter(w, inds):
    """Applies the tensorflow NHWC filter transformation to a filter bank.

    Args:
        w: Filter bank of shape (n_out, n_in, n_trafo_in, ksize, ksize).
        inds: Group transformation index array.

    Returns:
        Transformed filters reshaped back to
        (n_out, n_trafo_out, n_in, n_trafo_in, ksize, ksize).
    """
    flat_inds = flatten_indices(inds)
    n_out, n_in, n_trafo_in, ksize, _ = w.shape
    shape_info = (n_out, inds.shape[0], n_in, n_trafo_in, ksize)
    # Re-layout filters into NHWC order expected by the tensorflow op.
    w_nhwc = w.transpose((3, 4, 2, 1, 0)).reshape(
        (ksize, ksize, n_trafo_in * n_in, n_out))
    filter_tensor = tf.constant(w_nhwc)
    transformed = transform_filter_2d_nhwc(filter_tensor, flat_inds, shape_info)
    session = tf.Session()
    result = session.run(transformed)
    session.close()
    n_trafo_out = inds.shape[0]
    # Back to the pytorch-style (no, nto, ni, nti, n, n) layout.
    return result.transpose(3, 2, 0, 1).reshape(
        n_out, n_trafo_out, n_in, n_trafo_in, ksize, ksize)
def tf_trans_filter2(w, inds):
    """Applies the tensorflow NCHW filter transformation to a filter bank.

    Args:
        w: Filter bank of shape (n_out, n_in, n_trafo_in, ksize, ksize).
        inds: Group transformation index array.

    Returns:
        Transformed filters reshaped back to
        (n_out, n_trafo_out, n_in, n_trafo_in, ksize, ksize).
    """
    flat_inds = flatten_indices(inds)
    n_out, n_in, n_trafo_in, ksize, _ = w.shape
    shape_info = (n_out, inds.shape[0], n_in, n_trafo_in, ksize)
    # Collapse input and input-transformation axes into the channel axis.
    w_nchw = w.reshape(n_out, n_in * n_trafo_in, ksize, ksize)
    filter_tensor = tf.constant(w_nchw)
    transformed = transform_filter_2d_nchw(filter_tensor, flat_inds, shape_info)
    session = tf.Session()
    result = session.run(transformed)
    session.close()
    n_trafo_out = inds.shape[0]
    return result.reshape(n_out, n_trafo_out, n_in, n_trafo_in, ksize, ksize)
def pytorch_trans_filter(w, inds):
    """Applies the pytorch filter transformation and returns a numpy array."""
    tensor = torch.DoubleTensor(w)
    transformed = pytorch_trans_filter_(tensor, inds)
    return transformed.numpy()
| [
"numpy.abs",
"groupy.gconv.pytorch_gconv.splitgconv2d.trans_filter",
"groupy.gconv.make_gconv_indices.make_c4_z2_indices",
"tensorflow.Session",
"groupy.gconv.tensorflow_gconv.transform_filter.transform_filter_2d_nhwc",
"tensorflow.constant",
"groupy.gconv.make_gconv_indices.flatten_indices",
"groupy.... | [((493, 520), 'groupy.gconv.make_gconv_indices.make_c4_z2_indices', 'make_c4_z2_indices', ([], {'ksize': '(3)'}), '(ksize=3)\n', (511, 520), False, 'from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((529, 559), 'numpy.random.randn', 'np.random.randn', (['(6)', '(7)', '(1)', '(3)', '(3)'], {}), '(6, 7, 1, 3, 3)\n', (544, 559), True, 'import numpy as np\n'), ((758, 785), 'groupy.gconv.make_gconv_indices.make_c4_p4_indices', 'make_c4_p4_indices', ([], {'ksize': '(3)'}), '(ksize=3)\n', (776, 785), False, 'from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((794, 824), 'numpy.random.randn', 'np.random.randn', (['(6)', '(7)', '(4)', '(3)', '(3)'], {}), '(6, 7, 4, 3, 3)\n', (809, 824), True, 'import numpy as np\n'), ((1024, 1051), 'groupy.gconv.make_gconv_indices.make_d4_z2_indices', 'make_d4_z2_indices', ([], {'ksize': '(3)'}), '(ksize=3)\n', (1042, 1051), False, 'from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((1060, 1090), 'numpy.random.randn', 'np.random.randn', (['(6)', '(7)', '(1)', '(3)', '(3)'], {}), '(6, 7, 1, 3, 3)\n', (1075, 1090), True, 'import numpy as np\n'), ((1291, 1319), 'groupy.gconv.make_gconv_indices.make_d4_p4m_indices', 'make_d4_p4m_indices', ([], {'ksize': '(3)'}), '(ksize=3)\n', (1310, 1319), False, 'from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((1328, 1358), 'numpy.random.randn', 'np.random.randn', (['(6)', '(7)', '(8)', '(3)', '(3)'], {}), '(6, 7, 8, 3, 3)\n', (1343, 1358), True, 'import numpy as np\n'), ((1575, 1596), 'groupy.gconv.make_gconv_indices.flatten_indices', 'flatten_indices', (['inds'], {}), '(inds)\n', (1590, 1596), False, 'from 
groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((1756, 1770), 'tensorflow.constant', 'tf.constant', (['w'], {}), '(w)\n', (1767, 1770), True, 'import tensorflow as tf\n'), ((1781, 1832), 'groupy.gconv.tensorflow_gconv.transform_filter.transform_filter_2d_nhwc', 'transform_filter_2d_nhwc', (['wt', 'flat_inds', 'shape_info'], {}), '(wt, flat_inds, shape_info)\n', (1805, 1832), False, 'from groupy.gconv.tensorflow_gconv.transform_filter import transform_filter_2d_nchw, transform_filter_2d_nhwc\n'), ((1845, 1857), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1855, 1857), True, 'import tensorflow as tf\n'), ((2057, 2078), 'groupy.gconv.make_gconv_indices.flatten_indices', 'flatten_indices', (['inds'], {}), '(inds)\n', (2072, 2078), False, 'from groupy.gconv.make_gconv_indices import make_c4_z2_indices, make_c4_p4_indices, make_d4_z2_indices, make_d4_p4m_indices, flatten_indices\n'), ((2209, 2223), 'tensorflow.constant', 'tf.constant', (['w'], {}), '(w)\n', (2220, 2223), True, 'import tensorflow as tf\n'), ((2234, 2285), 'groupy.gconv.tensorflow_gconv.transform_filter.transform_filter_2d_nchw', 'transform_filter_2d_nchw', (['wt', 'flat_inds', 'shape_info'], {}), '(wt, flat_inds, shape_info)\n', (2258, 2285), False, 'from groupy.gconv.tensorflow_gconv.transform_filter import transform_filter_2d_nchw, transform_filter_2d_nhwc\n'), ((2298, 2310), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2308, 2310), True, 'import tensorflow as tf\n'), ((2483, 2504), 'torch.DoubleTensor', 'torch.DoubleTensor', (['w'], {}), '(w)\n', (2501, 2504), False, 'import torch\n'), ((2514, 2544), 'groupy.gconv.pytorch_gconv.splitgconv2d.trans_filter', 'pytorch_trans_filter_', (['w', 'inds'], {}), '(w, inds)\n', (2535, 2544), True, 'from groupy.gconv.pytorch_gconv.splitgconv2d import trans_filter as pytorch_trans_filter_\n'), ((645, 660), 'numpy.abs', 'np.abs', (['(rt - rp)'], {}), '(rt 
- rp)\n', (651, 660), True, 'import numpy as np\n'), ((911, 926), 'numpy.abs', 'np.abs', (['(rt - rp)'], {}), '(rt - rp)\n', (917, 926), True, 'import numpy as np\n'), ((1177, 1192), 'numpy.abs', 'np.abs', (['(rt - rp)'], {}), '(rt - rp)\n', (1183, 1192), True, 'import numpy as np\n'), ((1445, 1460), 'numpy.abs', 'np.abs', (['(rt - rp)'], {}), '(rt - rp)\n', (1451, 1460), True, 'import numpy as np\n')] |
import numpy as np
import nanocut.common as nc
from nanocut.output import error, printstatus
__all__ = [ "Periodicity", ]
def gcd(numbers):
    """Returns the greatest common divisor of a sequence of numbers.

    Works with floats as well (Euclidean algorithm via the '%' operator),
    which callers rely on when passing float arrays.
    """
    def _gcd2(first, second):
        # Two-argument Euclidean algorithm.
        while second:
            first, second = second, first % second
        return first

    result = numbers[0]
    for number in numbers[1:]:
        result = _gcd2(result, number)
    return result
def plane_axis_from_miller(miller):
    """Returns two vectors in a plane with given miller index.

    Args:
        miller: Miller indices of the plane (array of 3 integers)

    Returns:
        Two 3D vectors in relative coordinates, both being vectors
        in the plane. It returns the shortest possible vectors.
    """
    # Separate zero and nonzero components of Miller vector
    nonzero = np.flatnonzero(np.not_equal(miller, 0))
    zero = np.flatnonzero(np.equal(miller, 0))
    miller_nonzero = miller[nonzero]
    # Get smallest possible intersections along lattice vectors
    factor = np.prod(miller_nonzero)
    # NOTE(review): under Python 3 this is true division, so relintersecs is a
    # float array; after dividing by the gcd the values should be exact
    # integers again before being assigned into the int 'axis' array below
    # (which truncates) -- confirm for pathological Miller indices.
    relintersecs = factor / miller_nonzero
    relintersecs /= gcd(relintersecs)
    # Two plane vectors in relative (lattice) coordinates.
    axis = np.zeros((2, 3), dtype=int)
    if len(relintersecs) == 1:
        # 2 zero components in Miller indices: plane parallel to the
        # corresponding lattice vectors
        axis[0, zero[0]] = 1
        axis[1, zero[1]] = 1
    elif len(relintersecs) == 2:
        # 1 zero component: plane contains corresponding lattice vector.
        # other vector: difference of intersection points along the
        # other two lattice vectors.
        axis[0, zero[0]] = 1
        axis[1, nonzero[0]] = -relintersecs[0]
        axis[1, nonzero[1]] = relintersecs[1]
    else:
        # all components non-zero: Vectors from intersection point along
        # first lattice vector to the intersection points along the second and
        # third lattice vectors will spawn the plane.
        axis[0, nonzero[0]] = -relintersecs[0]
        axis[0, nonzero[1]] = relintersecs[1]
        axis[1, nonzero[0]] = -relintersecs[0]
        axis[1, nonzero[2]] = relintersecs[2]
    return axis
def cell_axis_from_superlattice(superlattice, latvecs):
    """Returns three vectors spawning a supercell similar to a given one.

    Args:
        superlattice: Lattice vectors of the supercell.
        latvecs: Original lattice vectors.

    Returns:
        Three vectors in relative coordinates (respective the original lattice
        vectors) which spawn a superlattice similar to the specified one.
    """
    # Transformation to build superlattice from current lattice vectors.
    trans = np.dot(superlattice, np.linalg.inv(latvecs))
    # Rescale transformation matrix to contain only elements >= 1.0.
    nonzero = np.nonzero(np.greater(np.abs(trans), nc.EPSILON))
    trans_nonzero = trans[nonzero]
    minelemind = np.argmin(np.abs(trans_nonzero))
    trans_nonzero /= abs(trans_nonzero[minelemind])
    # If biggest element greater tolerance: we would leave 64 bit integer range.
    if np.any(np.greater(np.abs(trans_nonzero), 11.0)):
        error("Target lattice coefficients too big")
    # Scale up transformation to ensure that all components are very close to
    # integers, if superlattice and lattice are commensurable
    factor = np.prod(np.arange(2, 11 + 1, dtype=int))
    trans_nonzero *= factor
    trans_nonzero_int = np.around(trans_nonzero).astype(int)
    # Check, whether all coefficients are close to integers.
    if np.any(np.abs(trans_nonzero_int - trans_nonzero)
            > nc.RELATIVE_PERIODIC_TOLERANCE):
        error("Target lattice and source lattices probably incompatible")
    # Simplify transformation matrix with greatest common divisor.
    factor = gcd(abs(trans_nonzero_int.flatten()))
    # BUGFIX: use floor division. In-place true division ('/=') on an integer
    # numpy array raises a TypeError under Python 3 / modern numpy. The gcd
    # divides every element evenly, so '//' is exact here.
    trans_nonzero_int //= factor
    # Fill nonzero components into axis.
    axis = np.zeros((3, 3), dtype=int)
    axis[nonzero] = trans_nonzero_int
    return axis
class Periodicity:
"""Holds information about type of periodicity, and axes.
Attributes:
period_type: Type of the periodicity ("0D", "1D", "2D" or "3D")
axis: Axis vector(s) in relatvie coordinates.
axis_cart: Axis vector(s) in cartesian coordinates.
"""
def __init__(self, geometry, period_type, axis=None):
"""Initialized Periodicity instance.
Args:
geometry: Geometry object to provide transformation.
period_type: Periodicity type ("0D", "1D", "2D", "3D").
axis: (3, -1) array with periodicity vectors in relative coords.
"""
self.period_type = period_type
if self.period_type not in [ "0D", "1D", "2D", "3D" ]:
raise ValueError("Value of period_type is invalid.")
if self.period_type == "0D":
self.axis = None
self.axis_cart = None
return
self.axis = np.array(axis, dtype=int)
self.axis.shape = (-1, 3)
self.axis_cart = geometry.coord_transform(self.axis, "lattice")
    def rotate_coordsys(self, atoms_coords):
        """Rotates coordinate system to have standardized z-axis.

        Args:
            atom_coords: Cordinate to rotate.

        Returns:
            New translational vectors and rotated coordinates. For 0D systems it
            returns an empty list and the original coordinates.
            For 1D systems periodicity will be along the z-axis,
            for 2D sytems z-axis will be orthogonal to the periodic directions
            and the first lattice vector will be along the x-axis.
            For 3D systems it returns lattice vectors and atom coordinates
            unchanged.
        """
        # Choose the vector which should end up along the cartesian z-axis.
        if self.period_type == "0D":
            return [], atoms_coords
        elif self.period_type == "1D":
            z_axis = self.axis_cart[0]
        elif self.period_type == "2D":
            # Plane normal of the two periodic directions.
            z_axis = np.cross(self.axis_cart[0], self.axis_cart[1])
        elif self.period_type == "3D":
            return self.axis_cart, atoms_coords
        # Calculate rotation angle and rotation axis
        z_axis= z_axis / np.linalg.norm(z_axis)
        angle = np.arccos(np.dot(z_axis, np.array([0,0,1])))
        rot = np.cross(z_axis, np.array([0,0,1]))
        norm = np.linalg.norm(rot)
        # If z_axis is (anti)parallel to z, the cross product is ~0 and the
        # axis is left unnormalized (rotation is identity or 180 degrees).
        if norm > nc.EPSILON:
            rot /= norm
        sin = np.sin(angle)
        cos = np.cos(angle)
        # Calculate rotation matrix
        # NOTE(review): this looks like the Rodrigues rotation matrix for a
        # rotation by 'angle' about 'rot', laid out for right-multiplication
        # of row vectors (coords are multiplied as np.dot(coords, matrix)).
        rotation_matrix = np.array([
            [ cos + rot[0] * rot[0] * (1 - cos),
             rot[1] * rot[0] * (1 - cos) + rot[2] * sin,
             rot[2] * rot[0] * (1 - cos) - rot[1] * sin ],
            [ rot[0] * rot[1] * (1 - cos)- rot[2] * sin,
             cos + rot[1] * rot[1] * (1 - cos),
             rot[2] * rot[1] * (1-cos) + rot[0] * sin, ],
            [ rot[0] * rot[2] * (1 - cos) + rot[1] * sin,
             rot[1] * rot[2] * (1 - cos) - rot[0] * sin,
             cos + rot[2] * rot[2] * (1 - cos) ]])
        # If 2D rotate first lattice vector to the x-axis
        axis = np.dot(self.axis_cart, rotation_matrix)
        if self.period_type == "2D":
            # In-plane rotation about z bringing axis[0] onto the x-axis:
            # new x-component becomes |axis[0]|, new y-component zero.
            cos2 = axis[0,0] / np.linalg.norm(axis[0])
            sin2 = axis[0,1] / np.linalg.norm(axis[0])
            rot2 = np.array([[ cos2, -sin2, 0.0 ],
                [ sin2, cos2, 0.0 ],
                [ 0.0, 0.0, 1.0 ]], dtype=float)
            axis = np.dot(axis, rot2)
            rotation_matrix = np.dot(rotation_matrix, rot2)
        # Rotate atoms
        atoms_coords = np.dot(atoms_coords, rotation_matrix)
        return axis, atoms_coords
def get_axis(self, coordsys="lattice"):
"""Returns axis.
Args:
coordsys: Coordinate system type ("lattice" or "cartesian").
Returns:
Periodicity axis in the given coordinate system.
"""
if self.period_type in [ "1D", "2D", "3D" ]:
if coordsys == "lattice":
return self.axis
elif coordsys == "cartesian":
return self.axis_cart
else:
raise ValueError("Value of coordsys is invalid.")
else:
raise ValueError("get_axis() called, but period_type is not 1D,"
" 2D or 3D.")
def splitcoords(self, coords):
"""Splits Cartesian coordinates into relative and absolute parts.
Args:
coords: Cartesian coordinates to split.
Returns:
Tuple of relative and absolute coordinates. The original Cartesian
coordinates can be obtained by calculating matrix product of the
relative coordinates with the Cartesian axis vectors and adding the
absolute coordinates to it.
"""
coords = np.array(coords)
if self.period_type == "0D":
return np.empty(( len(coords), 0 ), dtype=float), coords
elif self.period_type == "1D":
axis_norm = np.linalg.norm(self.axis_cart[0])
relcoords = np.dot(coords, np.transpose(self.axis_cart[0]))
relcoords /= axis_norm**2
relcoords.shape = (len(relcoords), 1)
elif self.period_type == "2D":
axis_3D = np.array(
[ self.axis_cart[0], self.axis_cart[1],
np.cross(self.axis_cart[0], self.axis_cart[1]) ])
invbasis = np.linalg.inv(axis_3D)
relcoords = np.dot(coords, invbasis)[:,0:2]
elif self.period_type == "3D":
invbasis = np.linalg.inv(self.axis_cart)
relcoords = np.dot(coords, invbasis)
shifts = coords - np.dot(relcoords, self.axis_cart)
return relcoords, shifts
def fold_to_unitcell(self, atoms_coords):
"""Folds atoms in the central unit cell, with relative coordinates
between 0.0 and 1.0.
Args:
atoms_coords: Cartesian coordinates of the atoms.
Returns:
Cartesian coordinates of the atoms in the unit cell.
"""
atoms_coords = np.array(atoms_coords)
if self.period_type == "0D":
return atoms_coords
relcoords, shifts = self.splitcoords(atoms_coords)
relcoords_folded = relcoords % 1.0
atoms_coords = shifts + np.dot(relcoords_folded, self.axis_cart)
return atoms_coords
    def mask_unique(self, coords, mask=None):
        """Masks points being unique in the unit cell.

        Args:
            coords: Cartesian coordinates of the points (atoms).
            mask: Only those coordinates are considered, where mask is True

        Returns:
            Logical list containing True for unique atoms and False otherwise.
        """
        # Start from the caller's mask (or all-True) and switch duplicates off.
        if mask is not None:
            unique = np.array(mask, dtype=bool)
        else:
            unique = np.ones(( coords.shape[0], ), dtype=bool)
        if self.period_type == "0D":
            # No periodic images possible in a finite system.
            return unique
        relcoords, shifts = self.splitcoords(coords)
        # Map relative coordinates just below 1.0 back to ~0.0 so that atoms
        # on opposite cell faces end up on the same boundary.
        relcoords = np.where(
            np.greater(relcoords, 1.0 - nc.RELATIVE_PERIODIC_TOLERANCE),
            relcoords - 1.0, relcoords)
        # Only atoms close to a cell boundary can have a periodic duplicate.
        onbounds = np.flatnonzero(np.any(np.less(relcoords, 0.01), axis=1))
        onbounds_rel = relcoords[onbounds]
        onbounds_cart = np.dot(onbounds_rel, self.axis_cart) + shifts[onbounds]
        # Pairwise distance check among the boundary atoms: later atoms within
        # DISTANCE_TOLERANCE of an earlier (still unique) one are duplicates.
        for ii in range(len(onbounds_cart)):
            if not unique[onbounds[ii]]:
                continue
            diff = onbounds_cart[ii+1:] - onbounds_cart[ii]
            equiv = np.flatnonzero(np.less(np.sum(diff**2, axis=1),
                nc.DISTANCE_TOLERANCE**2))
            unique[onbounds[equiv + ii + 1]] = False
        return unique
@classmethod
def fromdict(cls, geometry, inidict):
    """Builds instance from dictionary.

    Args:
        geometry: Geometry object providing ``bravais_cell`` and ``latvecs``.
        inidict: Dictionary (parsed ini section) with the key "period_type"
            and, depending on its value, "axis", "miller_indices",
            "superlattice" and "axis_repetition".

    Returns:
        New class instance configured according to inidict.
    """
    period_type = inidict.get("period_type", "0D")
    if period_type not in [ "0D", "1D", "2D", "3D"]:
        # NOTE(review): "periodicty" is misspelled in this user-facing message.
        error("Invalid periodicty type '" + period_type + "'")
    # 0D periodicity: nothing further to parse.
    if period_type == "0D":
        return cls(geometry, "0D")
    # 1D periodicity: one integer lattice vector is required.
    # NOTE(review): control flow after error(...) assumes error() aborts
    # execution — confirm in nanocut.output.
    if period_type == "1D":
        axis = inidict.get("axis", None)
        if axis is None:
            error("Missing axis specification for 1D periodicity.")
        try:
            axis = np.array([ int(s) for s in axis.split() ])
            axis.shape = (1, 3)
        except ValueError:
            error("Invalid axis specification.")
        if np.all(axis == 0):
            error("Invalid axis direction.")
    # 2D periodicity: either two explicit axis vectors or Miller indices
    # (mutually exclusive).
    elif period_type == "2D":
        axis = inidict.get("axis", None)
        miller = inidict.get("miller_indices", None)
        if axis is None and miller is None:
            error("Either 'axis' or 'miller_indices' needed for "
                  "periodicity specification.")
        elif axis is not None and miller is not None:
            error("Only one of the keywords 'axis' or 'miller_indices' can "
                  "be used for periodicity specification.")
        if miller:
            try:
                miller = np.array([ int(s) for s in miller.split() ])
                miller.shape = (3, )
            except ValueError:
                error("Invalid miller index for 2D periodicity")
            if np.all(miller == 0):
                error("Invalid miller index for 2D periodicity")
            # Derive two in-plane lattice vectors from the Miller indices.
            axis = plane_axis_from_miller(miller)
        else:
            try:
                axis = np.array([ int(s) for s in axis.split() ])
                axis.shape = (2, 3)
            except ValueError:
                error("Invalid axis specification.")
            if np.all(axis == 0):
                error("Invalid axis direction.")
            # Zero cross product means the two axis vectors are collinear.
            if np.all(np.cross(axis[0], axis[1]) == 0):
                error("Axis are parallel.")
    # 3D periodicity: three axis vectors or a superlattice matrix
    # (mutually exclusive).
    else:
        axis = inidict.get("axis", None)
        superlattice = inidict.get("superlattice", None)
        if axis is None and superlattice is None:
            error("Either 'axis' or 'superlattice' needed for "
                  "periodicity specification.")
        elif axis is not None and superlattice is not None:
            error("Only one of the keywords 'axis' or 'superlattice'"
                  "can be used for periodicity specification.")
        if superlattice:
            try:
                superlattice = np.array([ float(s)
                                           for s in superlattice.split() ])
                superlattice.shape = (3, 3)
            except ValueError:
                error("Invalid superlattice specification")
            # Vanishing determinant -> vectors do not span a 3D cell.
            if np.abs(np.linalg.det(superlattice)) < nc.EPSILON:
                error("Linearly dependent superlattice vectors")
            axis = cell_axis_from_superlattice(superlattice,
                np.dot(geometry.bravais_cell, geometry.latvecs))
        else:
            try:
                axis = np.array([ int(s) for s in axis.split() ])
                axis.shape = (3, 3)
            except ValueError:
                error("Invalid axis specification.")
            if np.all(axis == 0):
                error("Invalid axis direction.")
            if np.abs(np.linalg.det(axis)) < nc.EPSILON:
                error("Linearly dependent axis")
    # Switch back to primitive lattice, if necessary
    if np.any(geometry.bravais_cell != np.eye(3, dtype=int)):
        printstatus("Axis with respect to Bravais lattice:")
        for vec in axis:
            printstatus("{:3d} {:3d} {:3d}".format(
                *[ int(ss) for ss in vec ]), indentlevel=1)
        axis = np.dot(axis, geometry.bravais_cell)
    # Get smallest possible unit cell: divide each axis vector by the
    # greatest common divisor of its components.
    for ii in range(len(axis)):
        divisor = gcd(abs(axis[ii]))
        axis[ii] = axis[ii] // divisor
    printstatus("Axis with respect to primitive lattice:")
    for vec in axis:
        printstatus("{:3d} {:3d} {:3d}".format(
            *[ int(ss) for ss in vec ]), indentlevel=1)
    # Repeat unit cell along each periodic direction if requested.
    cellrep = inidict.get("axis_repetition", None)
    if cellrep:
        try:
            cellrep = np.array([ float(s) for s in cellrep.split() ])
            # First character of period_type ("1"/"2"/"3") gives the
            # number of periodic directions.
            cellrep.shape = (int(period_type[0]), )
        except ValueError:
            error("Invalid axis repetition specification.")
        axis = np.array(axis * cellrep[:,np.newaxis], int)
        printstatus("Axis repetition:"
            + " ".join([ "{:3d}".format(int(s)) for s in cellrep ]))
    return cls(geometry, period_type, axis)
| [
"nanocut.output.printstatus",
"numpy.prod",
"numpy.equal",
"numpy.not_equal",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"nanocut.output.error",
"numpy.arange",
"numpy.greater",
"numpy.less",
"numpy.cross",
"numpy.dot",
"numpy.abs",
"numpy.eye",
"numpy.ones",
"numpy.cos",
"nu... | [((945, 968), 'numpy.prod', 'np.prod', (['miller_nonzero'], {}), '(miller_nonzero)\n', (952, 968), True, 'import numpy as np\n'), ((1061, 1088), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {'dtype': 'int'}), '((2, 3), dtype=int)\n', (1069, 1088), True, 'import numpy as np\n'), ((3785, 3812), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {'dtype': 'int'}), '((3, 3), dtype=int)\n', (3793, 3812), True, 'import numpy as np\n'), ((759, 782), 'numpy.not_equal', 'np.not_equal', (['miller', '(0)'], {}), '(miller, 0)\n', (771, 782), True, 'import numpy as np\n'), ((810, 829), 'numpy.equal', 'np.equal', (['miller', '(0)'], {}), '(miller, 0)\n', (818, 829), True, 'import numpy as np\n'), ((2575, 2597), 'numpy.linalg.inv', 'np.linalg.inv', (['latvecs'], {}), '(latvecs)\n', (2588, 2597), True, 'import numpy as np\n'), ((2794, 2815), 'numpy.abs', 'np.abs', (['trans_nonzero'], {}), '(trans_nonzero)\n', (2800, 2815), True, 'import numpy as np\n'), ((3014, 3058), 'nanocut.output.error', 'error', (['"""Target lattice coefficients too big"""'], {}), "('Target lattice coefficients too big')\n", (3019, 3058), False, 'from nanocut.output import error, printstatus\n'), ((3220, 3251), 'numpy.arange', 'np.arange', (['(2)', '(11 + 1)'], {'dtype': 'int'}), '(2, 11 + 1, dtype=int)\n', (3229, 3251), True, 'import numpy as np\n'), ((3517, 3582), 'nanocut.output.error', 'error', (['"""Target lattice and source lattices probably incompatible"""'], {}), "('Target lattice and source lattices probably incompatible')\n", (3522, 3582), False, 'from nanocut.output import error, printstatus\n'), ((4824, 4849), 'numpy.array', 'np.array', (['axis'], {'dtype': 'int'}), '(axis, dtype=int)\n', (4832, 4849), True, 'import numpy as np\n'), ((6219, 6238), 'numpy.linalg.norm', 'np.linalg.norm', (['rot'], {}), '(rot)\n', (6233, 6238), True, 'import numpy as np\n'), ((6307, 6320), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6313, 6320), True, 'import numpy as np\n'), ((6335, 6348), 'numpy.cos', 'np.cos', 
(['angle'], {}), '(angle)\n', (6341, 6348), True, 'import numpy as np\n'), ((6412, 6819), 'numpy.array', 'np.array', (['[[cos + rot[0] * rot[0] * (1 - cos), rot[1] * rot[0] * (1 - cos) + rot[2] *\n sin, rot[2] * rot[0] * (1 - cos) - rot[1] * sin], [rot[0] * rot[1] * (1 -\n cos) - rot[2] * sin, cos + rot[1] * rot[1] * (1 - cos), rot[2] * rot[1] *\n (1 - cos) + rot[0] * sin], [rot[0] * rot[2] * (1 - cos) + rot[1] * sin,\n rot[1] * rot[2] * (1 - cos) - rot[0] * sin, cos + rot[2] * rot[2] * (1 -\n cos)]]'], {}), '([[cos + rot[0] * rot[0] * (1 - cos), rot[1] * rot[0] * (1 - cos) +\n rot[2] * sin, rot[2] * rot[0] * (1 - cos) - rot[1] * sin], [rot[0] *\n rot[1] * (1 - cos) - rot[2] * sin, cos + rot[1] * rot[1] * (1 - cos), \n rot[2] * rot[1] * (1 - cos) + rot[0] * sin], [rot[0] * rot[2] * (1 -\n cos) + rot[1] * sin, rot[1] * rot[2] * (1 - cos) - rot[0] * sin, cos + \n rot[2] * rot[2] * (1 - cos)]])\n', (6420, 6819), True, 'import numpy as np\n'), ((6994, 7033), 'numpy.dot', 'np.dot', (['self.axis_cart', 'rotation_matrix'], {}), '(self.axis_cart, rotation_matrix)\n', (7000, 7033), True, 'import numpy as np\n'), ((7497, 7534), 'numpy.dot', 'np.dot', (['atoms_coords', 'rotation_matrix'], {}), '(atoms_coords, rotation_matrix)\n', (7503, 7534), True, 'import numpy as np\n'), ((8793, 8809), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (8801, 8809), True, 'import numpy as np\n'), ((10079, 10101), 'numpy.array', 'np.array', (['atoms_coords'], {}), '(atoms_coords)\n', (10087, 10101), True, 'import numpy as np\n'), ((16347, 16401), 'nanocut.output.printstatus', 'printstatus', (['"""Axis with respect to primitive lattice:"""'], {}), "('Axis with respect to primitive lattice:')\n", (16358, 16401), False, 'from nanocut.output import error, printstatus\n'), ((2704, 2717), 'numpy.abs', 'np.abs', (['trans'], {}), '(trans)\n', (2710, 2717), True, 'import numpy as np\n'), ((2975, 2996), 'numpy.abs', 'np.abs', (['trans_nonzero'], {}), '(trans_nonzero)\n', (2981, 2996), True, 
'import numpy as np\n'), ((3305, 3329), 'numpy.around', 'np.around', (['trans_nonzero'], {}), '(trans_nonzero)\n', (3314, 3329), True, 'import numpy as np\n'), ((3417, 3458), 'numpy.abs', 'np.abs', (['(trans_nonzero_int - trans_nonzero)'], {}), '(trans_nonzero_int - trans_nonzero)\n', (3423, 3458), True, 'import numpy as np\n'), ((6070, 6092), 'numpy.linalg.norm', 'np.linalg.norm', (['z_axis'], {}), '(z_axis)\n', (6084, 6092), True, 'import numpy as np\n'), ((6185, 6204), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6193, 6204), True, 'import numpy as np\n'), ((7200, 7279), 'numpy.array', 'np.array', (['[[cos2, -sin2, 0.0], [sin2, cos2, 0.0], [0.0, 0.0, 1.0]]'], {'dtype': 'float'}), '([[cos2, -sin2, 0.0], [sin2, cos2, 0.0], [0.0, 0.0, 1.0]], dtype=float)\n', (7208, 7279), True, 'import numpy as np\n'), ((7363, 7381), 'numpy.dot', 'np.dot', (['axis', 'rot2'], {}), '(axis, rot2)\n', (7369, 7381), True, 'import numpy as np\n'), ((7412, 7441), 'numpy.dot', 'np.dot', (['rotation_matrix', 'rot2'], {}), '(rotation_matrix, rot2)\n', (7418, 7441), True, 'import numpy as np\n'), ((9636, 9669), 'numpy.dot', 'np.dot', (['relcoords', 'self.axis_cart'], {}), '(relcoords, self.axis_cart)\n', (9642, 9669), True, 'import numpy as np\n'), ((10313, 10353), 'numpy.dot', 'np.dot', (['relcoords_folded', 'self.axis_cart'], {}), '(relcoords_folded, self.axis_cart)\n', (10319, 10353), True, 'import numpy as np\n'), ((10842, 10868), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (10850, 10868), True, 'import numpy as np\n'), ((10904, 10943), 'numpy.ones', 'np.ones', (['(coords.shape[0],)'], {'dtype': 'bool'}), '((coords.shape[0],), dtype=bool)\n', (10911, 10943), True, 'import numpy as np\n'), ((11120, 11179), 'numpy.greater', 'np.greater', (['relcoords', '(1.0 - nc.RELATIVE_PERIODIC_TOLERANCE)'], {}), '(relcoords, 1.0 - nc.RELATIVE_PERIODIC_TOLERANCE)\n', (11130, 11179), True, 'import numpy as np\n'), ((11365, 11401), 'numpy.dot', 
'np.dot', (['onbounds_rel', 'self.axis_cart'], {}), '(onbounds_rel, self.axis_cart)\n', (11371, 11401), True, 'import numpy as np\n'), ((12038, 12092), 'nanocut.output.error', 'error', (['("Invalid periodicty type \'" + period_type + "\'")'], {}), '("Invalid periodicty type \'" + period_type + "\'")\n', (12043, 12092), False, 'from nanocut.output import error, printstatus\n'), ((12628, 12645), 'numpy.all', 'np.all', (['(axis == 0)'], {}), '(axis == 0)\n', (12634, 12645), True, 'import numpy as np\n'), ((15886, 15938), 'nanocut.output.printstatus', 'printstatus', (['"""Axis with respect to Bravais lattice:"""'], {}), "('Axis with respect to Bravais lattice:')\n", (15897, 15938), False, 'from nanocut.output import error, printstatus\n'), ((16107, 16142), 'numpy.dot', 'np.dot', (['axis', 'geometry.bravais_cell'], {}), '(axis, geometry.bravais_cell)\n', (16113, 16142), True, 'import numpy as np\n'), ((16919, 16963), 'numpy.array', 'np.array', (['(axis * cellrep[:, np.newaxis])', 'int'], {}), '(axis * cellrep[:, np.newaxis], int)\n', (16927, 16963), True, 'import numpy as np\n'), ((6134, 6153), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6142, 6153), True, 'import numpy as np\n'), ((7102, 7125), 'numpy.linalg.norm', 'np.linalg.norm', (['axis[0]'], {}), '(axis[0])\n', (7116, 7125), True, 'import numpy as np\n'), ((7157, 7180), 'numpy.linalg.norm', 'np.linalg.norm', (['axis[0]'], {}), '(axis[0])\n', (7171, 7180), True, 'import numpy as np\n'), ((8979, 9012), 'numpy.linalg.norm', 'np.linalg.norm', (['self.axis_cart[0]'], {}), '(self.axis_cart[0])\n', (8993, 9012), True, 'import numpy as np\n'), ((11263, 11287), 'numpy.less', 'np.less', (['relcoords', '(0.01)'], {}), '(relcoords, 0.01)\n', (11270, 11287), True, 'import numpy as np\n'), ((12354, 12409), 'nanocut.output.error', 'error', (['"""Missing axis specification for 1D periodicity."""'], {}), "('Missing axis specification for 1D periodicity.')\n", (12359, 12409), False, 'from nanocut.output import 
error, printstatus\n'), ((12663, 12695), 'nanocut.output.error', 'error', (['"""Invalid axis direction."""'], {}), "('Invalid axis direction.')\n", (12668, 12695), False, 'from nanocut.output import error, printstatus\n'), ((15851, 15871), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (15857, 15871), True, 'import numpy as np\n'), ((5845, 5891), 'numpy.cross', 'np.cross', (['self.axis_cart[0]', 'self.axis_cart[1]'], {}), '(self.axis_cart[0], self.axis_cart[1])\n', (5853, 5891), True, 'import numpy as np\n'), ((9052, 9083), 'numpy.transpose', 'np.transpose', (['self.axis_cart[0]'], {}), '(self.axis_cart[0])\n', (9064, 9083), True, 'import numpy as np\n'), ((9390, 9412), 'numpy.linalg.inv', 'np.linalg.inv', (['axis_3D'], {}), '(axis_3D)\n', (9403, 9412), True, 'import numpy as np\n'), ((11635, 11660), 'numpy.sum', 'np.sum', (['(diff ** 2)'], {'axis': '(1)'}), '(diff ** 2, axis=1)\n', (11641, 11660), True, 'import numpy as np\n'), ((12576, 12612), 'nanocut.output.error', 'error', (['"""Invalid axis specification."""'], {}), "('Invalid axis specification.')\n", (12581, 12612), False, 'from nanocut.output import error, printstatus\n'), ((12930, 13015), 'nanocut.output.error', 'error', (['"""Either \'axis\' or \'miller_indices\' needed for periodicity specification."""'], {}), '("Either \'axis\' or \'miller_indices\' needed for periodicity specification."\n )\n', (12935, 13015), False, 'from nanocut.output import error, printstatus\n'), ((13521, 13540), 'numpy.all', 'np.all', (['(miller == 0)'], {}), '(miller == 0)\n', (13527, 13540), True, 'import numpy as np\n'), ((13942, 13959), 'numpy.all', 'np.all', (['(axis == 0)'], {}), '(axis == 0)\n', (13948, 13959), True, 'import numpy as np\n'), ((14354, 14432), 'nanocut.output.error', 'error', (['"""Either \'axis\' or \'superlattice\' needed for periodicity specification."""'], {}), '("Either \'axis\' or \'superlattice\' needed for periodicity specification.")\n', (14359, 14432), False, 'from 
nanocut.output import error, printstatus\n'), ((15544, 15561), 'numpy.all', 'np.all', (['(axis == 0)'], {}), '(axis == 0)\n', (15550, 15561), True, 'import numpy as np\n'), ((16852, 16899), 'nanocut.output.error', 'error', (['"""Invalid axis repetition specification."""'], {}), "('Invalid axis repetition specification.')\n", (16857, 16899), False, 'from nanocut.output import error, printstatus\n'), ((9437, 9461), 'numpy.dot', 'np.dot', (['coords', 'invbasis'], {}), '(coords, invbasis)\n', (9443, 9461), True, 'import numpy as np\n'), ((9531, 9560), 'numpy.linalg.inv', 'np.linalg.inv', (['self.axis_cart'], {}), '(self.axis_cart)\n', (9544, 9560), True, 'import numpy as np\n'), ((9585, 9609), 'numpy.dot', 'np.dot', (['coords', 'invbasis'], {}), '(coords, invbasis)\n', (9591, 9609), True, 'import numpy as np\n'), ((13110, 13223), 'nanocut.output.error', 'error', (['"""Only one of the keywords \'axis\' or \'miller_indices\' can be used for periodicity specification."""'], {}), '(\n "Only one of the keywords \'axis\' or \'miller_indices\' can be used for periodicity specification."\n )\n', (13115, 13223), False, 'from nanocut.output import error, printstatus\n'), ((13562, 13610), 'nanocut.output.error', 'error', (['"""Invalid miller index for 2D periodicity"""'], {}), "('Invalid miller index for 2D periodicity')\n", (13567, 13610), False, 'from nanocut.output import error, printstatus\n'), ((13981, 14013), 'nanocut.output.error', 'error', (['"""Invalid axis direction."""'], {}), "('Invalid axis direction.')\n", (13986, 14013), False, 'from nanocut.output import error, printstatus\n'), ((14094, 14121), 'nanocut.output.error', 'error', (['"""Axis are parallel."""'], {}), "('Axis are parallel.')\n", (14099, 14121), False, 'from nanocut.output import error, printstatus\n'), ((14538, 14648), 'nanocut.output.error', 'error', (['"""Only one of the keywords \'axis\' or \'superlattice\'can be used for periodicity specification."""'], {}), '(\n "Only one of the keywords \'axis\' 
or \'superlattice\'can be used for periodicity specification."\n )\n', (14543, 14648), False, 'from nanocut.output import error, printstatus\n'), ((15100, 15148), 'nanocut.output.error', 'error', (['"""Linearly dependent superlattice vectors"""'], {}), "('Linearly dependent superlattice vectors')\n", (15105, 15148), False, 'from nanocut.output import error, printstatus\n'), ((15234, 15281), 'numpy.dot', 'np.dot', (['geometry.bravais_cell', 'geometry.latvecs'], {}), '(geometry.bravais_cell, geometry.latvecs)\n', (15240, 15281), True, 'import numpy as np\n'), ((15583, 15615), 'nanocut.output.error', 'error', (['"""Invalid axis direction."""'], {}), "('Invalid axis direction.')\n", (15588, 15615), False, 'from nanocut.output import error, printstatus\n'), ((15697, 15729), 'nanocut.output.error', 'error', (['"""Linearly dependent axis"""'], {}), "('Linearly dependent axis')\n", (15702, 15729), False, 'from nanocut.output import error, printstatus\n'), ((9317, 9363), 'numpy.cross', 'np.cross', (['self.axis_cart[0]', 'self.axis_cart[1]'], {}), '(self.axis_cart[0], self.axis_cart[1])\n', (9325, 9363), True, 'import numpy as np\n'), ((13453, 13501), 'nanocut.output.error', 'error', (['"""Invalid miller index for 2D periodicity"""'], {}), "('Invalid miller index for 2D periodicity')\n", (13458, 13501), False, 'from nanocut.output import error, printstatus\n'), ((13886, 13922), 'nanocut.output.error', 'error', (['"""Invalid axis specification."""'], {}), "('Invalid axis specification.')\n", (13891, 13922), False, 'from nanocut.output import error, printstatus\n'), ((14040, 14066), 'numpy.cross', 'np.cross', (['axis[0]', 'axis[1]'], {}), '(axis[0], axis[1])\n', (14048, 14066), True, 'import numpy as np\n'), ((14967, 15010), 'nanocut.output.error', 'error', (['"""Invalid superlattice specification"""'], {}), "('Invalid superlattice specification')\n", (14972, 15010), False, 'from nanocut.output import error, printstatus\n'), ((15037, 15064), 'numpy.linalg.det', 
'np.linalg.det', (['superlattice'], {}), '(superlattice)\n', (15050, 15064), True, 'import numpy as np\n'), ((15488, 15524), 'nanocut.output.error', 'error', (['"""Invalid axis specification."""'], {}), "('Invalid axis specification.')\n", (15493, 15524), False, 'from nanocut.output import error, printstatus\n'), ((15642, 15661), 'numpy.linalg.det', 'np.linalg.det', (['axis'], {}), '(axis)\n', (15655, 15661), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
import numpy as np
from sklearn.neighbors import NearestNeighbors
from computeNeighborWeights import computeNeighborWeights
from computeWeightedMRecons import computeWeightedMRecons
from computeFeatures import computeFeatures
def computeFullERD(MeasuredValues, MeasuredIdxs, UnMeasuredIdxs, Theta, SizeImage, TrainingInfo, Resolution, ImageType):
    """Compute the ERD for every unmeasured pixel from scratch.

    Finds the measured neighbors of all unmeasured locations, reconstructs
    the image from them, extracts features and evaluates the trained
    regressor Theta on those features.

    Returns:
        Tuple of (ERD values, reconstructed values at unmeasured pixels,
        full reconstructed image).
    """
    neighbor_values, neighbor_weights, neighbor_distances = FindNeighbors(
        TrainingInfo, MeasuredIdxs, UnMeasuredIdxs, MeasuredValues, Resolution)
    recon_values, recon_image = ComputeRecons(
        TrainingInfo, neighbor_values, neighbor_weights, SizeImage,
        UnMeasuredIdxs, MeasuredIdxs, MeasuredValues)
    # Feature vector per unmeasured pixel, fed to the trained model.
    features = computeFeatures(
        MeasuredValues, MeasuredIdxs, UnMeasuredIdxs, SizeImage,
        neighbor_values, neighbor_weights, neighbor_distances,
        TrainingInfo, recon_values, recon_image, Resolution, ImageType)
    erd_values = Theta.predict(features)
    return erd_values, recon_values, recon_image
def updateERD(Mask,MeasuredIdxs,UnMeasuredIdxs,MeasuredValues,Theta,SizeImage,TrainingInfo,Resolution,ImageType,NewIdxs,NumSamples,UpdateERDParams,ReconValues,ReconImage,ERDValues,MaxIdxsVect,BatchSamplingParams):
    """Incrementally update ERD and reconstruction around new measurements.

    Instead of recomputing the full ERD (computeFullERD), only the unmeasured
    pixels inside a window around the newly measured location(s) are
    recomputed; all other entries are kept.

    Returns:
        Tuple (ERDValues, ReconValues) for the remaining unmeasured pixels.
    """
    # Drop the entries belonging to the pixels that were just measured.
    ERDValues=np.delete(ERDValues,(MaxIdxsVect))
    ReconValues=np.delete(ReconValues,(MaxIdxsVect))
    # Radius of a circle whose area equals (image area * NumNbrs / NumSamples),
    # i.e. large enough to contain ~NumNbrs measurements on average.
    SuggestedRadius = int(np.sqrt((1/np.pi)*(SizeImage[0]*SizeImage[1]*TrainingInfo.NumNbrs/NumSamples)))
    # Clamp the suggested radius into [MinRadius, MaxRadius].
    UpdateRadiusTemp=np.max([SuggestedRadius,UpdateERDParams.MinRadius]);
    UpdateRadius=int(np.min([UpdateERDParams.MaxRadius,UpdateRadiusTemp]));
    updateRadiusMat = np.zeros((SizeImage[0],SizeImage[1]))
    Done=0
    # Grow the update window until it covers at least one unmeasured pixel.
    while(Done==0):
        if BatchSamplingParams.Do == 'N':
            # Single new measurement: mark a clipped square window around it.
            updateRadiusMat[max(NewIdxs[0]-UpdateRadius,0):min(NewIdxs[0]+UpdateRadius,SizeImage[0])][:,max(NewIdxs[1]-UpdateRadius,0):min(NewIdxs[1]+UpdateRadius,SizeImage[1])]=1
        else:
            # Batch sampling: mark a window around every new measurement.
            for b in range(0,BatchSamplingParams.NumSamplesPerIter):
                updateRadiusMat[max(NewIdxs[b][0]-UpdateRadius,0):min(NewIdxs[b][0]+UpdateRadius,SizeImage[0])][:,max(NewIdxs[b][1]-UpdateRadius,0):min(NewIdxs[b][1]+UpdateRadius,SizeImage[1])]=1
        # Positions (within the unmeasured-pixel vectors) inside the window.
        updateIdxs = np.where(updateRadiusMat[Mask==0]==1)
        SmallUnMeasuredIdxs = np.transpose(np.where(np.logical_and(Mask==0,updateRadiusMat==1)))
        if SmallUnMeasuredIdxs.size==0:
            # Window contained no unmeasured pixels: enlarge and retry.
            UpdateRadius=int(UpdateRadius*UpdateERDParams.IncreaseRadiusBy)
        else:
            Done=1
    # Find neighbors of unmeasured locations inside the window only.
    SmallNeighborValues,SmallNeighborWeights,SmallNeighborDistances = FindNeighbors(TrainingInfo,MeasuredIdxs,SmallUnMeasuredIdxs,MeasuredValues,Resolution)
    # Perform reconstruction for those pixels.
    SmallReconValues=computeWeightedMRecons(SmallNeighborValues,SmallNeighborWeights,TrainingInfo)
    ReconImage[(np.logical_and(Mask==0,updateRadiusMat==1))]=SmallReconValues
    # Measured pixels always keep their measured values.
    ReconImage[MeasuredIdxs[:,0],MeasuredIdxs[:,1]]=MeasuredValues
    # Compute features for the windowed pixels.
    SmallPolyFeatures=computeFeatures(MeasuredValues,MeasuredIdxs,SmallUnMeasuredIdxs,SizeImage,SmallNeighborValues,SmallNeighborWeights,SmallNeighborDistances,TrainingInfo,SmallReconValues,ReconImage,Resolution,ImageType)
    # Compute ERD via the trained model.
    # SmallERDValues = SmallPolyFeatures.dot(Theta)
    SmallERDValues = Theta.predict(SmallPolyFeatures)
    # Splice the locally recomputed values back into the global vectors.
    ReconValues[updateIdxs] = SmallReconValues
    ERDValues[updateIdxs] = SmallERDValues
    return(ERDValues,ReconValues)
def FindNeighbors(TrainingInfo, MeasuredIdxs, UnMeasuredIdxs, MeasuredValues, Resolution):
    """Locate the nearest measured pixels of every unmeasured location.

    Returns:
        Tuple of (neighbor values, neighbor weights, neighbor distances),
        one row per unmeasured location; distances are scaled by Resolution.
    """
    finder = NearestNeighbors(n_neighbors=TrainingInfo.NumNbrs)
    finder.fit(MeasuredIdxs)
    distances, indices = finder.kneighbors(UnMeasuredIdxs)
    # Convert pixel-space distances into physical units.
    distances = distances * Resolution
    values = MeasuredValues[indices]
    weights = computeNeighborWeights(distances, TrainingInfo)
    return values, weights, distances
def ComputeRecons(TrainingInfo, NeighborValues, NeighborWeights, SizeImage, UnMeasuredIdxs, MeasuredIdxs, MeasuredValues):
    """Reconstruct the image from measured values and neighbor weights.

    Returns:
        Tuple of (reconstructed values at the unmeasured locations,
        full reconstructed image of shape SizeImage).
    """
    recon_values = computeWeightedMRecons(NeighborValues, NeighborWeights, TrainingInfo)
    recon_image = np.zeros((SizeImage[0], SizeImage[1]))
    # Unmeasured pixels get reconstructions; measured pixels keep their data.
    recon_image[UnMeasuredIdxs[:, 0], UnMeasuredIdxs[:, 1]] = recon_values
    recon_image[MeasuredIdxs[:, 0], MeasuredIdxs[:, 1]] = MeasuredValues
    return recon_values, recon_image
| [
"numpy.sqrt",
"numpy.logical_and",
"numpy.where",
"numpy.delete",
"computeNeighborWeights.computeNeighborWeights",
"computeFeatures.computeFeatures",
"numpy.max",
"numpy.zeros",
"computeWeightedMRecons.computeWeightedMRecons",
"sklearn.neighbors.NearestNeighbors",
"numpy.min"
] | [((690, 884), 'computeFeatures.computeFeatures', 'computeFeatures', (['MeasuredValues', 'MeasuredIdxs', 'UnMeasuredIdxs', 'SizeImage', 'NeighborValues', 'NeighborWeights', 'NeighborDistances', 'TrainingInfo', 'ReconValues', 'ReconImage', 'Resolution', 'ImageType'], {}), '(MeasuredValues, MeasuredIdxs, UnMeasuredIdxs, SizeImage,\n NeighborValues, NeighborWeights, NeighborDistances, TrainingInfo,\n ReconValues, ReconImage, Resolution, ImageType)\n', (705, 884), False, 'from computeFeatures import computeFeatures\n'), ((1254, 1287), 'numpy.delete', 'np.delete', (['ERDValues', 'MaxIdxsVect'], {}), '(ERDValues, MaxIdxsVect)\n', (1263, 1287), True, 'import numpy as np\n'), ((1305, 1340), 'numpy.delete', 'np.delete', (['ReconValues', 'MaxIdxsVect'], {}), '(ReconValues, MaxIdxsVect)\n', (1314, 1340), True, 'import numpy as np\n'), ((1469, 1521), 'numpy.max', 'np.max', (['[SuggestedRadius, UpdateERDParams.MinRadius]'], {}), '([SuggestedRadius, UpdateERDParams.MinRadius])\n', (1475, 1521), True, 'import numpy as np\n'), ((1621, 1659), 'numpy.zeros', 'np.zeros', (['(SizeImage[0], SizeImage[1])'], {}), '((SizeImage[0], SizeImage[1]))\n', (1629, 1659), True, 'import numpy as np\n'), ((2773, 2852), 'computeWeightedMRecons.computeWeightedMRecons', 'computeWeightedMRecons', (['SmallNeighborValues', 'SmallNeighborWeights', 'TrainingInfo'], {}), '(SmallNeighborValues, SmallNeighborWeights, TrainingInfo)\n', (2795, 2852), False, 'from computeWeightedMRecons import computeWeightedMRecons\n'), ((3047, 3270), 'computeFeatures.computeFeatures', 'computeFeatures', (['MeasuredValues', 'MeasuredIdxs', 'SmallUnMeasuredIdxs', 'SizeImage', 'SmallNeighborValues', 'SmallNeighborWeights', 'SmallNeighborDistances', 'TrainingInfo', 'SmallReconValues', 'ReconImage', 'Resolution', 'ImageType'], {}), '(MeasuredValues, MeasuredIdxs, SmallUnMeasuredIdxs,\n SizeImage, SmallNeighborValues, SmallNeighborWeights,\n SmallNeighborDistances, TrainingInfo, SmallReconValues, ReconImage,\n Resolution, 
ImageType)\n', (3062, 3270), False, 'from computeFeatures import computeFeatures\n'), ((3645, 3695), 'sklearn.neighbors.NearestNeighbors', 'NearestNeighbors', ([], {'n_neighbors': 'TrainingInfo.NumNbrs'}), '(n_neighbors=TrainingInfo.NumNbrs)\n', (3661, 3695), False, 'from sklearn.neighbors import NearestNeighbors\n'), ((4023, 4078), 'computeNeighborWeights.computeNeighborWeights', 'computeNeighborWeights', (['NeighborDistances', 'TrainingInfo'], {}), '(NeighborDistances, TrainingInfo)\n', (4045, 4078), False, 'from computeNeighborWeights import computeNeighborWeights\n'), ((4312, 4381), 'computeWeightedMRecons.computeWeightedMRecons', 'computeWeightedMRecons', (['NeighborValues', 'NeighborWeights', 'TrainingInfo'], {}), '(NeighborValues, NeighborWeights, TrainingInfo)\n', (4334, 4381), False, 'from computeWeightedMRecons import computeWeightedMRecons\n'), ((4397, 4435), 'numpy.zeros', 'np.zeros', (['(SizeImage[0], SizeImage[1])'], {}), '((SizeImage[0], SizeImage[1]))\n', (4405, 4435), True, 'import numpy as np\n'), ((1368, 1458), 'numpy.sqrt', 'np.sqrt', (['(1 / np.pi * (SizeImage[0] * SizeImage[1] * TrainingInfo.NumNbrs / NumSamples))'], {}), '(1 / np.pi * (SizeImage[0] * SizeImage[1] * TrainingInfo.NumNbrs /\n NumSamples))\n', (1375, 1458), True, 'import numpy as np\n'), ((1543, 1596), 'numpy.min', 'np.min', (['[UpdateERDParams.MaxRadius, UpdateRadiusTemp]'], {}), '([UpdateERDParams.MaxRadius, UpdateRadiusTemp])\n', (1549, 1596), True, 'import numpy as np\n'), ((2217, 2258), 'numpy.where', 'np.where', (['(updateRadiusMat[Mask == 0] == 1)'], {}), '(updateRadiusMat[Mask == 0] == 1)\n', (2225, 2258), True, 'import numpy as np\n'), ((2872, 2919), 'numpy.logical_and', 'np.logical_and', (['(Mask == 0)', '(updateRadiusMat == 1)'], {}), '(Mask == 0, updateRadiusMat == 1)\n', (2886, 2919), True, 'import numpy as np\n'), ((2316, 2363), 'numpy.logical_and', 'np.logical_and', (['(Mask == 0)', '(updateRadiusMat == 1)'], {}), '(Mask == 0, updateRadiusMat == 1)\n', (2330, 
2363), True, 'import numpy as np\n')] |
# 10-fold stratified cross-validation of a small dense network on the
# Pima Indians diabetes dataset (8 numeric features, binary label).
from keras.models import Sequential
from keras.layers import Dense
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# on modern versions use sklearn.model_selection.StratifiedKFold instead.
from sklearn.cross_validation import StratifiedKFold
import numpy as np
# init seed for reproducibility
seed = 7
np.random.seed(seed)
# load data (CSV)
dataset = np.loadtxt('pima-indians-diabetes.data', delimiter=',')
# split into input features (first 8 columns) and output label (last column)
X = dataset[:, 0:8]
Y = dataset[:, 8]
# NOTE(review): old-API StratifiedKFold — iterating it yields
# (train_index, test_index) pairs directly.
kfold = StratifiedKFold( y = Y, n_folds=10, shuffle=True, random_state=seed)
cvscores = []
for i, (train_index, test_index) in enumerate(kfold):
    # make model (rebuilt from scratch for every fold)
    # NOTE(review): init= and nb_epoch= are Keras 1 argument names
    # (kernel_initializer= / epochs= in Keras 2).
    model = Sequential()
    model.add( Dense( 12, input_dim = 8, init = 'uniform', activation = 'relu' ) )
    model.add( Dense( 8, init = 'uniform', activation = 'relu' ) )
    model.add( Dense( 1, init = 'uniform', activation = 'sigmoid' ) )
    # compile model
    model.compile( loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'] )
    # fit on the training fold; a further 33% is held out for validation
    model.fit(X[train_index], Y[train_index], validation_split=0.33, nb_epoch = 150, batch_size = 10, verbose=0)
    # evaluate on the test fold
    scores = model.evaluate(X[test_index], Y[test_index], verbose=0)
    # scores[1] is the accuracy metric declared in compile()
    print( '%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1] * 100)
# mean and spread of the per-fold accuracies
print('%.2f%% (+/- %.2f%%)' % (np.mean(cvscores), np.std(cvscores)))
| [
"numpy.mean",
"keras.models.Sequential",
"sklearn.cross_validation.StratifiedKFold",
"numpy.random.seed",
"numpy.std",
"keras.layers.Dense",
"numpy.loadtxt"
] | [((161, 181), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (175, 181), True, 'import numpy as np\n'), ((211, 266), 'numpy.loadtxt', 'np.loadtxt', (['"""pima-indians-diabetes.data"""'], {'delimiter': '""","""'}), "('pima-indians-diabetes.data', delimiter=',')\n", (221, 266), True, 'import numpy as np\n'), ((341, 406), 'sklearn.cross_validation.StratifiedKFold', 'StratifiedKFold', ([], {'y': 'Y', 'n_folds': '(10)', 'shuffle': '(True)', 'random_state': 'seed'}), '(y=Y, n_folds=10, shuffle=True, random_state=seed)\n', (356, 406), False, 'from sklearn.cross_validation import StratifiedKFold\n'), ((508, 520), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (518, 520), False, 'from keras.models import Sequential\n'), ((536, 593), 'keras.layers.Dense', 'Dense', (['(12)'], {'input_dim': '(8)', 'init': '"""uniform"""', 'activation': '"""relu"""'}), "(12, input_dim=8, init='uniform', activation='relu')\n", (541, 593), False, 'from keras.layers import Dense\n'), ((619, 662), 'keras.layers.Dense', 'Dense', (['(8)'], {'init': '"""uniform"""', 'activation': '"""relu"""'}), "(8, init='uniform', activation='relu')\n", (624, 662), False, 'from keras.layers import Dense\n'), ((686, 732), 'keras.layers.Dense', 'Dense', (['(1)'], {'init': '"""uniform"""', 'activation': '"""sigmoid"""'}), "(1, init='uniform', activation='sigmoid')\n", (691, 732), False, 'from keras.layers import Dense\n'), ((1201, 1218), 'numpy.mean', 'np.mean', (['cvscores'], {}), '(cvscores)\n', (1208, 1218), True, 'import numpy as np\n'), ((1220, 1236), 'numpy.std', 'np.std', (['cvscores'], {}), '(cvscores)\n', (1226, 1236), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
from PIL import Image
import os
from crawl_HHU.cfg import MAX_CAPTCHA, CHAR_SET_LEN, model_path
from crawl_HHU.cnn_sys import crack_captcha_cnn, X, keep_prob
from crawl_HHU.utils import vec2text, get_clear_bin_image
def hack_function(sess, predict, captcha_image):
    """Run the prediction op on one captcha image and decode the text.

    :param sess: TensorFlow session with the trained model restored.
    :param predict: prediction tensor (character indices per position).
    :param captcha_image: flattened image data to recognise.
    :return: the decoded captcha text.
    """
    predictions = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1})
    char_indices = predictions[0].tolist()
    # Rebuild the one-hot vector expected by vec2text: one CHAR_SET_LEN-wide
    # slot per captcha position, with the predicted character set to 1.
    one_hot = np.zeros(MAX_CAPTCHA * CHAR_SET_LEN)
    for position, char_idx in enumerate(char_indices):
        one_hot[position * CHAR_SET_LEN + char_idx] = 1
    return vec2text(one_hot)
def batch_hack_captcha(output, predict, saver, image):
    """Restore the trained model and recognise a single captcha image.

    :param output: CNN output tensor (unused here; kept for the caller).
    :param predict: prediction tensor evaluated by hack_function.
    :param saver: tf.train.Saver used to restore the latest checkpoint.
    :param image: PIL image containing the captcha.
    :return: recognised text, stripped of surrounding whitespace.
    """
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(model_path))
        image.show()  # NOTE(review): opens an image viewer as a side effect
        # Clean/binarise the image before recognition.
        image = get_clear_bin_image(image)
        image.show()
        image = np.array(image)
        # Model expects a flattened pixel vector.
        test_X = image.flatten()
        predict_text = hack_function(sess, predict, test_X).strip()
        return predict_text
if __name__ == '__main__':
    # Build the prediction graph: reshape the CNN output to
    # (batch, MAX_CAPTCHA, CHAR_SET_LEN) and take the argmax over the
    # character axis to get one character index per position.
    output = crack_captcha_cnn()
    predict = tf.argmax(tf.reshape(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN]), 2)
    saver = tf.train.Saver()
    # Recognise the bundled test captchas 0.jpg .. 9.jpg.
    for j in range(10):
        # root = "./captcha/"
        # filePath = os.listdir(root)
        # image = Image.open(root + filePath[j])
        image = Image.open("./captcha_test/" + str(j) + ".jpg")
        predict_text = batch_hack_captcha(output, predict, saver, image)
        print(predict_text)
    print('end...')
print('end...') | [
"tensorflow.Session",
"tensorflow.train.Saver",
"crawl_HHU.utils.vec2text",
"numpy.array",
"numpy.zeros",
"crawl_HHU.cnn_sys.crack_captcha_cnn",
"tensorflow.reshape",
"tensorflow.train.latest_checkpoint",
"crawl_HHU.utils.get_clear_bin_image"
] | [((566, 602), 'numpy.zeros', 'np.zeros', (['(MAX_CAPTCHA * CHAR_SET_LEN)'], {}), '(MAX_CAPTCHA * CHAR_SET_LEN)\n', (574, 602), True, 'import numpy as np\n'), ((699, 715), 'crawl_HHU.utils.vec2text', 'vec2text', (['vector'], {}), '(vector)\n', (707, 715), False, 'from crawl_HHU.utils import vec2text, get_clear_bin_image\n'), ((1222, 1241), 'crawl_HHU.cnn_sys.crack_captcha_cnn', 'crack_captcha_cnn', ([], {}), '()\n', (1239, 1241), False, 'from crawl_HHU.cnn_sys import crack_captcha_cnn, X, keep_prob\n'), ((1334, 1350), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1348, 1350), True, 'import tensorflow as tf\n'), ((834, 846), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (844, 846), True, 'import tensorflow as tf\n'), ((961, 987), 'crawl_HHU.utils.get_clear_bin_image', 'get_clear_bin_image', (['image'], {}), '(image)\n', (980, 987), False, 'from crawl_HHU.utils import vec2text, get_clear_bin_image\n'), ((1025, 1040), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1033, 1040), True, 'import numpy as np\n'), ((1266, 1317), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, MAX_CAPTCHA, CHAR_SET_LEN]'], {}), '(output, [-1, MAX_CAPTCHA, CHAR_SET_LEN])\n', (1276, 1317), True, 'import tensorflow as tf\n'), ((884, 922), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_path'], {}), '(model_path)\n', (910, 922), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import classification_report
# In[3]:
def plot_confusion_matrix(y_true, y_pred, classes,
normalize = False,
title = None,
cmap = plt.cm.Purples):
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis = 1)[: , np.newaxis]
print("Normalized confusion matrix")
else :
print('Confusion matrix, without normalization')
print(cm)
classes = ['Painting', 'Thank You', 'Sorry']
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation = 'nearest', cmap = cmap)
ax.figure.colorbar(im, ax = ax)# We want to show all ticks...
ax.set(xticks = np.arange(cm.shape[1]),
yticks = np.arange(cm.shape[0]),
xticklabels = classes, yticklabels = classes,
title = title,
ylabel = 'True label',
xlabel = 'Predicted label')
plt.setp(ax.get_xticklabels(), rotation = 0, ha = "right",
rotation_mode = "anchor")
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha = "center", va = "center",
color = "white"
if cm[i, j] > thresh
else "black")
plt.show()
return ax
# In[4]:
#X = np.array(pd.read_csv('feature_vector.csv'))
#y = np.array(pd.read_csv('window_labels.csv'))
X = np.array(pd.read_csv('dataset_backup/train_data.csv'))
y = np.array(pd.read_csv('dataset_backup/train_labels.csv'))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
y_train = y_train.ravel()
y_test = y_test.ravel()
# In[19]:
clf1 = KNeighborsClassifier(n_neighbors=7);
clf1.fit(X_train, y_train);
print("KNN :", clf1.score(X_test, y_test)*100 , "%")
y_pred = clf1.predict(X_test)
print("Precision: ", precision_score(y_test, y_pred, average=None))
print("Recall: ", recall_score(y_test, y_pred, average=None))
print("BCR: ", balanced_accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=['Painting', 'Thank You', 'Sorry'], title='KNN Confusion Matrix')
# In[6]:
clf2 = SVC(kernel='rbf', gamma='scale');
clf2.fit(X_train, y_train);
print("SVM :", clf2.score(X_test, y_test)*100 , "%")
y_pred = clf2.predict(X_test)
print("Precision: ", precision_score(y_test, y_pred, average=None))
print("Recall: ", recall_score(y_test, y_pred, average=None))
print("BCR: ", balanced_accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=['Painting', 'Thank You', 'Sorry'], title='SVM Confusion Matrix')
# In[15]:
clf3 = RandomForestClassifier(n_estimators=10, max_depth=7, random_state=0);
clf3.fit(X_train, y_train);
print("RnF :", clf3.score(X_test, y_test)*100 , "%")
y_pred = clf3.predict(X_test)
print("Precision: ", precision_score(y_test, y_pred, average=None))
print("Recall: ", recall_score(y_test, y_pred, average=None))
print("BCR: ", balanced_accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=['Painting', 'Thank You', 'Sorry'], title='RnF Confusion Matrix')
# In[16]:
clf4 = DecisionTreeClassifier();
clf4.fit(X_train, y_train);
print("DT :", clf4.score(X_test, y_test)*100 , "%")
y_pred = clf4.predict(X_test)
print("Precision: ", precision_score(y_test, y_pred, average=None))
print("Recall: ", recall_score(y_test, y_pred, average=None))
print("BCR: ", balanced_accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=['Painting', 'Thank You', 'Sorry'], title='DT Confusion Matrix')
# In[17]:
clf5 = LogisticRegression();
clf4.fit(X_train, y_train);
print("LR :", clf4.score(X_test, y_test)*100 , "%")
y_pred = clf4.predict(X_test)
print("Precision: ", precision_score(y_test, y_pred, average=None))
print("Recall: ", recall_score(y_test, y_pred, average=None))
print("BCR: ", balanced_accuracy_score(y_test, y_pred))
plot_confusion_matrix(y_test, y_pred, classes=['Painting', 'Thank You', 'Sorry'], title='LR Confusion Matrix')
| [
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.show",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.balanced_accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.neighbors.KNeighborsClassifier",
"sklearn.ensemble.RandomForestClassifier",
"sklearn... | [((2282, 2336), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (2298, 2336), False, 'from sklearn.model_selection import train_test_split\n'), ((2408, 2443), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(7)'}), '(n_neighbors=7)\n', (2428, 2443), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((2874, 2906), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'gamma': '"""scale"""'}), "(kernel='rbf', gamma='scale')\n", (2877, 2906), False, 'from sklearn.svm import SVC\n'), ((3337, 3405), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)', 'max_depth': '(7)', 'random_state': '(0)'}), '(n_estimators=10, max_depth=7, random_state=0)\n', (3359, 3405), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3837, 3861), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (3859, 3861), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4290, 4310), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4308, 4310), False, 'from sklearn.linear_model import LogisticRegression\n'), ((877, 909), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (893, 909), False, 'from sklearn.metrics import confusion_matrix\n'), ((1188, 1202), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1200, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1992, 2002), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2000, 2002), True, 'import matplotlib.pyplot as plt\n'), ((2140, 2184), 'pandas.read_csv', 'pd.read_csv', (['"""dataset_backup/train_data.csv"""'], {}), "('dataset_backup/train_data.csv')\n", (2151, 2184), True, 'import pandas as pd\n'), ((2199, 2245), 'pandas.read_csv', 
'pd.read_csv', (['"""dataset_backup/train_labels.csv"""'], {}), "('dataset_backup/train_labels.csv')\n", (2210, 2245), True, 'import pandas as pd\n'), ((2577, 2622), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (2592, 2622), False, 'from sklearn.metrics import precision_score\n'), ((2642, 2684), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (2654, 2684), False, 'from sklearn.metrics import recall_score\n'), ((2701, 2740), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (2724, 2740), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((3040, 3085), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (3055, 3085), False, 'from sklearn.metrics import precision_score\n'), ((3105, 3147), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (3117, 3147), False, 'from sklearn.metrics import recall_score\n'), ((3164, 3203), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3187, 3203), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((3539, 3584), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (3554, 3584), False, 'from sklearn.metrics import precision_score\n'), ((3604, 3646), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (3616, 3646), False, 'from sklearn.metrics import recall_score\n'), ((3663, 3702), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), 
'(y_test, y_pred)\n', (3686, 3702), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((3994, 4039), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (4009, 4039), False, 'from sklearn.metrics import precision_score\n'), ((4059, 4101), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (4071, 4101), False, 'from sklearn.metrics import recall_score\n'), ((4118, 4157), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4141, 4157), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((4443, 4488), 'sklearn.metrics.precision_score', 'precision_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (4458, 4488), False, 'from sklearn.metrics import precision_score\n'), ((4508, 4550), 'sklearn.metrics.recall_score', 'recall_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (4520, 4550), False, 'from sklearn.metrics import recall_score\n'), ((4567, 4606), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4590, 4606), False, 'from sklearn.metrics import balanced_accuracy_score\n'), ((1352, 1374), 'numpy.arange', 'np.arange', (['cm.shape[1]'], {}), '(cm.shape[1])\n', (1361, 1374), True, 'import numpy as np\n'), ((1393, 1415), 'numpy.arange', 'np.arange', (['cm.shape[0]'], {}), '(cm.shape[0])\n', (1402, 1415), True, 'import numpy as np\n')] |
import random
import gym
from gym import Env, logger, spaces
import numpy as np
np.random.seed(0)
class MultiArmedBanditEnv(Env):
def __init__(self, n=3, info={}):
"""
n - number of arms in the bandit
"""
self.num_bandits = n
self.action_space = spaces.Discrete(self.num_bandits)
self.observation_space = spaces.Discrete(1) # just the reward of the last action
self.bandit_success_prob = np.random.uniform(size=self.num_bandits) # Pick some random success probabilities
self.info = info
def step(self, action):
reward = 0
done = True
result_prob = np.random.random()
if result_prob < self.bandit_success_prob[action]:
reward = 1
else:
reward = 0
return [0], reward, done, self.info
def reset(self):
# Get some new bandit success probs
self.bandit_success_prob = np.random.uniform(size=self.num_bandits) # Pick some random success probabilities
def render(self, mode='human'):
print('bandits success prob:')
for i in range(self.num_bandits):
print("arm {num} reward prob: {prob}".format(num=i, prob=self.bandit_success_prob[i]))
| [
"numpy.random.random",
"gym.spaces.Discrete",
"numpy.random.seed",
"numpy.random.uniform"
] | [((81, 98), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (95, 98), True, 'import numpy as np\n'), ((293, 326), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.num_bandits'], {}), '(self.num_bandits)\n', (308, 326), False, 'from gym import Env, logger, spaces\n'), ((360, 378), 'gym.spaces.Discrete', 'spaces.Discrete', (['(1)'], {}), '(1)\n', (375, 378), False, 'from gym import Env, logger, spaces\n'), ((451, 491), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.num_bandits'}), '(size=self.num_bandits)\n', (468, 491), True, 'import numpy as np\n'), ((653, 671), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (669, 671), True, 'import numpy as np\n'), ((936, 976), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'self.num_bandits'}), '(size=self.num_bandits)\n', (953, 976), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from autograd.blocks.trigo import sin
from autograd.blocks.trigo import cos
from autograd.blocks.trigo import tan
from autograd.blocks.trigo import arcsin
from autograd.blocks.trigo import arccos
from autograd.blocks.trigo import arctan
from autograd.variable import Variable
import numpy as np
import autograd as ad
def test_sin_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
sin_block=sin()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=sin_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.sin(data)
gradient_true=np.diag(np.cos(data))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong sin data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong sin gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_sin_reverse():
ad.set_mode('reverse')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
sin_block=sin()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=sin_block(x)
# =============================================================================
# Compute gradient backwards
# =============================================================================
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.sin(data)
gradient_true=np.diag(np.cos(data))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong sin data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong sin gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_cos_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
cos_block=cos()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=cos_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.cos(data)
gradient_true=np.diag(-np.sin(data))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong cos data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong cos gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_cos_reverse():
ad.set_mode('reverse')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
cos_block=cos()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=cos_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.cos(data)
gradient_true=np.diag(-np.sin(data))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong cos data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong cos gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_tan_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
tan_block=tan()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=tan_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.tan(data)
gradient_true=np.diag(1/np.cos(data)**2)
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong tan data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong tan gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_tan_reverse():
ad.set_mode('reverse')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
tan_block=tan()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=tan_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.tan(data)
gradient_true=np.diag(1/np.cos(data)**2)
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong tan data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong tan gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arcsin_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
arcsin_block=arcsin()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=arcsin_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.arcsin(data)
gradient_true= np.diag(1/(np.sqrt(1 - data**2)))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong arcsin data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arcsin gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arcsin_reverse():
ad.set_mode('reverse')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
arcsin_block=arcsin()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=arcsin_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.arcsin(data)
gradient_true= np.diag(1/(np.sqrt(1 - data**2)))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong arcsin data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arcsin gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arccos_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
arccos_block=arccos()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=arccos_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.arccos(data)
gradient_true= np.diag(-1/(np.sqrt(1 - data**2)))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong arccos data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arccos gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arccos_reverse():
ad.set_mode('reverse')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
arccos_block=arccos()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=arccos_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.arccos(data)
gradient_true= np.diag(-1/(np.sqrt(1 - data**2)))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong arccos data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arccos gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arctan_forward():
ad.set_mode('forward')
# =============================================================================
# define the input variable
# =============================================================================
data=np.random.random(5)
x=Variable(data)
# =============================================================================
# define custom block
# =============================================================================
arctan_block=arctan()
# =============================================================================
# compute output of custom block
# =============================================================================
y_block=arctan_block(x)
y_block.compute_gradients()
# =============================================================================
# define expected output
# =============================================================================
data_true=np.arctan(data)
gradient_true= np.diag(1/(1 + data**2))
# =============================================================================
# assert data pass
# =============================================================================
assert np.equal(data_true, y_block.data).all(), 'wrong arctan data pass. expected {}, given{}'.format(data_true, y_block.data)
# =============================================================================
# assert gradient forward pass
# =============================================================================
assert np.equal(gradient_true, y_block.gradient).all(), 'wrong arctan gradient forward pass. expected {}, given{}'.format(gradient_true,y_block.gradient)
def test_arctan_reverse():
    """Check data and gradient of the arctan block in reverse mode."""
    ad.set_mode('reverse')
    # random input vector wrapped in a Variable
    vals = np.random.random(5)
    var = Variable(vals)
    # build the custom block and run it
    block = arctan()
    out = block(var)
    out.compute_gradients()
    # analytic reference: arctan(x), with d/dx arctan(x) = 1/(1+x^2)
    expected_data = np.arctan(vals)
    expected_grad = np.diag(1 / (1 + vals ** 2))
    # data pass
    assert np.equal(expected_data, out.data).all(), 'wrong arctan data pass. expected {}, given{}'.format(expected_data, out.data)
    # gradient pass
    assert np.equal(expected_grad, out.gradient).all(), 'wrong arctan gradient forward pass. expected {}, given{}'.format(expected_grad, out.gradient)
# restore the default mode so tests that run afterwards start from 'forward'
ad.set_mode('forward')
| [
"autograd.blocks.trigo.arcsin",
"numpy.tan",
"numpy.arccos",
"numpy.sqrt",
"numpy.random.random",
"autograd.blocks.trigo.cos",
"numpy.arcsin",
"autograd.blocks.trigo.arccos",
"numpy.diag",
"autograd.blocks.trigo.sin",
"autograd.set_mode",
"autograd.blocks.trigo.tan",
"numpy.equal",
"numpy.... | [((370, 392), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (381, 392), True, 'import autograd as ad\n'), ((592, 611), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (608, 611), True, 'import numpy as np\n'), ((618, 632), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (626, 632), False, 'from autograd.variable import Variable\n'), ((832, 837), 'autograd.blocks.trigo.sin', 'sin', ([], {}), '()\n', (835, 837), False, 'from autograd.blocks.trigo import sin\n'), ((1292, 1304), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (1298, 1304), True, 'import numpy as np\n'), ((2035, 2057), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (2046, 2057), True, 'import autograd as ad\n'), ((2257, 2276), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (2273, 2276), True, 'import numpy as np\n'), ((2283, 2297), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (2291, 2297), False, 'from autograd.variable import Variable\n'), ((2497, 2502), 'autograd.blocks.trigo.sin', 'sin', ([], {}), '()\n', (2500, 2502), False, 'from autograd.blocks.trigo import sin\n'), ((3149, 3161), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (3155, 3161), True, 'import numpy as np\n'), ((3893, 3915), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (3904, 3915), True, 'import autograd as ad\n'), ((4115, 4134), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (4131, 4134), True, 'import numpy as np\n'), ((4141, 4155), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (4149, 4155), False, 'from autograd.variable import Variable\n'), ((4355, 4360), 'autograd.blocks.trigo.cos', 'cos', ([], {}), '()\n', (4358, 4360), False, 'from autograd.blocks.trigo import cos\n'), ((4815, 4827), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (4821, 4827), True, 'import numpy as np\n'), ((5559, 
5581), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (5570, 5581), True, 'import autograd as ad\n'), ((5781, 5800), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (5797, 5800), True, 'import numpy as np\n'), ((5807, 5821), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (5815, 5821), False, 'from autograd.variable import Variable\n'), ((6021, 6026), 'autograd.blocks.trigo.cos', 'cos', ([], {}), '()\n', (6024, 6026), False, 'from autograd.blocks.trigo import cos\n'), ((6481, 6493), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (6487, 6493), True, 'import numpy as np\n'), ((7225, 7247), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (7236, 7247), True, 'import autograd as ad\n'), ((7448, 7467), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (7464, 7467), True, 'import numpy as np\n'), ((7474, 7488), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (7482, 7488), False, 'from autograd.variable import Variable\n'), ((7688, 7693), 'autograd.blocks.trigo.tan', 'tan', ([], {}), '()\n', (7691, 7693), False, 'from autograd.blocks.trigo import tan\n'), ((8148, 8160), 'numpy.tan', 'np.tan', (['data'], {}), '(data)\n', (8154, 8160), True, 'import numpy as np\n'), ((8895, 8917), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (8906, 8917), True, 'import autograd as ad\n'), ((9118, 9137), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (9134, 9137), True, 'import numpy as np\n'), ((9144, 9158), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (9152, 9158), False, 'from autograd.variable import Variable\n'), ((9358, 9363), 'autograd.blocks.trigo.tan', 'tan', ([], {}), '()\n', (9361, 9363), False, 'from autograd.blocks.trigo import tan\n'), ((9818, 9830), 'numpy.tan', 'np.tan', (['data'], {}), '(data)\n', (9824, 9830), True, 'import numpy as np\n'), ((10568, 
10590), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (10579, 10590), True, 'import autograd as ad\n'), ((10790, 10809), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (10806, 10809), True, 'import numpy as np\n'), ((10816, 10830), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (10824, 10830), False, 'from autograd.variable import Variable\n'), ((11033, 11041), 'autograd.blocks.trigo.arcsin', 'arcsin', ([], {}), '()\n', (11039, 11041), False, 'from autograd.blocks.trigo import arcsin\n'), ((11499, 11514), 'numpy.arcsin', 'np.arcsin', (['data'], {}), '(data)\n', (11508, 11514), True, 'import numpy as np\n'), ((12268, 12290), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (12279, 12290), True, 'import autograd as ad\n'), ((12490, 12509), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (12506, 12509), True, 'import numpy as np\n'), ((12516, 12530), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (12524, 12530), False, 'from autograd.variable import Variable\n'), ((12733, 12741), 'autograd.blocks.trigo.arcsin', 'arcsin', ([], {}), '()\n', (12739, 12741), False, 'from autograd.blocks.trigo import arcsin\n'), ((13199, 13214), 'numpy.arcsin', 'np.arcsin', (['data'], {}), '(data)\n', (13208, 13214), True, 'import numpy as np\n'), ((13969, 13991), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (13980, 13991), True, 'import autograd as ad\n'), ((14191, 14210), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (14207, 14210), True, 'import numpy as np\n'), ((14217, 14231), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (14225, 14231), False, 'from autograd.variable import Variable\n'), ((14434, 14442), 'autograd.blocks.trigo.arccos', 'arccos', ([], {}), '()\n', (14440, 14442), False, 'from autograd.blocks.trigo import arccos\n'), ((14900, 14915), 'numpy.arccos', 
'np.arccos', (['data'], {}), '(data)\n', (14909, 14915), True, 'import numpy as np\n'), ((15669, 15691), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (15680, 15691), True, 'import autograd as ad\n'), ((15891, 15910), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (15907, 15910), True, 'import numpy as np\n'), ((15917, 15931), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (15925, 15931), False, 'from autograd.variable import Variable\n'), ((16134, 16142), 'autograd.blocks.trigo.arccos', 'arccos', ([], {}), '()\n', (16140, 16142), False, 'from autograd.blocks.trigo import arccos\n'), ((16600, 16615), 'numpy.arccos', 'np.arccos', (['data'], {}), '(data)\n', (16609, 16615), True, 'import numpy as np\n'), ((17370, 17392), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (17381, 17392), True, 'import autograd as ad\n'), ((17592, 17611), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (17608, 17611), True, 'import numpy as np\n'), ((17618, 17632), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', (17626, 17632), False, 'from autograd.variable import Variable\n'), ((17835, 17843), 'autograd.blocks.trigo.arctan', 'arctan', ([], {}), '()\n', (17841, 17843), False, 'from autograd.blocks.trigo import arctan\n'), ((18301, 18316), 'numpy.arctan', 'np.arctan', (['data'], {}), '(data)\n', (18310, 18316), True, 'import numpy as np\n'), ((18336, 18364), 'numpy.diag', 'np.diag', (['(1 / (1 + data ** 2))'], {}), '(1 / (1 + data ** 2))\n', (18343, 18364), True, 'import numpy as np\n'), ((19059, 19081), 'autograd.set_mode', 'ad.set_mode', (['"""reverse"""'], {}), "('reverse')\n", (19070, 19081), True, 'import autograd as ad\n'), ((19281, 19300), 'numpy.random.random', 'np.random.random', (['(5)'], {}), '(5)\n', (19297, 19300), True, 'import numpy as np\n'), ((19307, 19321), 'autograd.variable.Variable', 'Variable', (['data'], {}), '(data)\n', 
(19315, 19321), False, 'from autograd.variable import Variable\n'), ((19524, 19532), 'autograd.blocks.trigo.arctan', 'arctan', ([], {}), '()\n', (19530, 19532), False, 'from autograd.blocks.trigo import arctan\n'), ((19990, 20005), 'numpy.arctan', 'np.arctan', (['data'], {}), '(data)\n', (19999, 20005), True, 'import numpy as np\n'), ((20025, 20053), 'numpy.diag', 'np.diag', (['(1 / (1 + data ** 2))'], {}), '(1 / (1 + data ** 2))\n', (20032, 20053), True, 'import numpy as np\n'), ((20719, 20741), 'autograd.set_mode', 'ad.set_mode', (['"""forward"""'], {}), "('forward')\n", (20730, 20741), True, 'import autograd as ad\n'), ((1331, 1343), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (1337, 1343), True, 'import numpy as np\n'), ((3188, 3200), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (3194, 3200), True, 'import numpy as np\n'), ((1538, 1571), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (1546, 1571), True, 'import numpy as np\n'), ((1860, 1901), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (1868, 1901), True, 'import numpy as np\n'), ((3395, 3428), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (3403, 3428), True, 'import numpy as np\n'), ((3717, 3758), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (3725, 3758), True, 'import numpy as np\n'), ((4855, 4867), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (4861, 4867), True, 'import numpy as np\n'), ((5062, 5095), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (5070, 5095), True, 'import numpy as np\n'), ((5384, 5425), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (5392, 5425), True, 'import numpy as np\n'), ((6521, 6533), 'numpy.sin', 'np.sin', (['data'], {}), 
'(data)\n', (6527, 6533), True, 'import numpy as np\n'), ((6728, 6761), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (6736, 6761), True, 'import numpy as np\n'), ((7050, 7091), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (7058, 7091), True, 'import numpy as np\n'), ((8399, 8432), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (8407, 8432), True, 'import numpy as np\n'), ((8721, 8762), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (8729, 8762), True, 'import numpy as np\n'), ((10069, 10102), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (10077, 10102), True, 'import numpy as np\n'), ((10391, 10432), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (10399, 10432), True, 'import numpy as np\n'), ((11545, 11567), 'numpy.sqrt', 'np.sqrt', (['(1 - data ** 2)'], {}), '(1 - data ** 2)\n', (11552, 11567), True, 'import numpy as np\n'), ((11761, 11794), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (11769, 11794), True, 'import numpy as np\n'), ((12086, 12127), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (12094, 12127), True, 'import numpy as np\n'), ((13245, 13267), 'numpy.sqrt', 'np.sqrt', (['(1 - data ** 2)'], {}), '(1 - data ** 2)\n', (13252, 13267), True, 'import numpy as np\n'), ((13461, 13494), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (13469, 13494), True, 'import numpy as np\n'), ((13786, 13827), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (13794, 13827), True, 'import numpy as np\n'), ((14947, 
14969), 'numpy.sqrt', 'np.sqrt', (['(1 - data ** 2)'], {}), '(1 - data ** 2)\n', (14954, 14969), True, 'import numpy as np\n'), ((15163, 15196), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (15171, 15196), True, 'import numpy as np\n'), ((15488, 15529), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (15496, 15529), True, 'import numpy as np\n'), ((16647, 16669), 'numpy.sqrt', 'np.sqrt', (['(1 - data ** 2)'], {}), '(1 - data ** 2)\n', (16654, 16669), True, 'import numpy as np\n'), ((16863, 16896), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (16871, 16896), True, 'import numpy as np\n'), ((17188, 17229), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (17196, 17229), True, 'import numpy as np\n'), ((18554, 18587), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (18562, 18587), True, 'import numpy as np\n'), ((18879, 18920), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (18887, 18920), True, 'import numpy as np\n'), ((20243, 20276), 'numpy.equal', 'np.equal', (['data_true', 'y_block.data'], {}), '(data_true, y_block.data)\n', (20251, 20276), True, 'import numpy as np\n'), ((20568, 20609), 'numpy.equal', 'np.equal', (['gradient_true', 'y_block.gradient'], {}), '(gradient_true, y_block.gradient)\n', (20576, 20609), True, 'import numpy as np\n'), ((8189, 8201), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (8195, 8201), True, 'import numpy as np\n'), ((9859, 9871), 'numpy.cos', 'np.cos', (['data'], {}), '(data)\n', (9865, 9871), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Converts the 3D simulation data from carpet output (for every Node, that for every
variable name creates a list of .h5 files) into a single profile.h5 for a given iteration using `scidata`.
options:
-i : str : path to the simulation dir that contains `output-xxxx` directories with /data/files structure
-o : str : path to where to dump the output
    --eos : str : file of the Hydro eos to compute additional quantities, e.g. enthalpy
-t : str : either `prof` or `nuprof` -- what type of data to collect, hydro and GR data or neutrino M0 data.
    -m : str : if `times` then the code expects to be given the timesteps for which to extract profiles,
if `iterations` then code expects a list of iterations for which to extract profiles
    -it : list of ints : is expected if chosen `-m iterations`. Provide the list of iterations
-time : list of floats : is expected if chosen `-m times`. Provide list of timesteps
NOTE:
In order to know what timestep corresponds to what output in the simulation, i.e., what data to
process for user required iteration or a timestep, the code loads additional files where
there is mapping between iteration and timesteps.
This file is `"dens.norm1.asc"` that is usually present in simulation.
"""
from __future__ import division
from glob import glob
import numpy as np
import h5py
import argparse
from math import sqrt
import gc
from scidata.utils import locate
import scidata.carpet.hdf5 as h5
from scidata import units as ut
from scipy.interpolate import RegularGridInterpolator
import os
import click
import time
# from uutils import Paths
import config as Paths
class Names(object):
    """Registry of naming conventions used by the profile extraction.

    Maps short variable keys (also used to build intermediate file names)
    to the fully qualified Cactus/THC grid-function names, to the EOS
    table dataset names, and to the final names used in the output file.
    """
    # subdirectory (relative to the output path) where profiles are dumped
    outdir = "profiles/"
    # hydro + spacetime grid functions, -- content of the .dat.tar
    dattar = {
        'alp'        : 'ADMBASE::alp',
        'betax'      : 'ADMBASE::betax',
        'betay'      : 'ADMBASE::betay',
        'betaz'      : 'ADMBASE::betaz',
        'gxx'        : 'ADMBASE::gxx',
        'gxy'        : 'ADMBASE::gxy',
        'gxz'        : 'ADMBASE::gxz',
        'gyy'        : 'ADMBASE::gyy',
        'gyz'        : 'ADMBASE::gyz',
        'gzz'        : 'ADMBASE::gzz',
        'rho'        : 'HYDROBASE::rho',
        'vel[0]'     : 'HYDROBASE::vel[0]',
        'vel[1]'     : 'HYDROBASE::vel[1]',
        'Y_e'        : 'HYDROBASE::Y_e',
        'temperature': 'HYDROBASE::temperature',
        'w_lorentz'  : 'HYDROBASE::w_lorentz',
        'volform'    : 'THC_CORE::volform',
        'vel[1]' : 'HYDROBASE::vel[1]',
        'vel[2]' : 'HYDROBASE::vel[2]',
    }
    # neutrino M0 leakage grid functions
    nu_dattar = {
        'thc_M0_abs_energy': 'THC_LEAKAGEM0::thc_M0_abs_energy',
        'thc_M0_abs_nua': 'THC_LEAKAGEM0::thc_M0_abs_nua',
        'thc_M0_abs_nue': 'THC_LEAKAGEM0::thc_M0_abs_nue',
        'thc_M0_abs_number': 'THC_LEAKAGEM0::thc_M0_abs_number',
        'thc_M0_eave_nua': 'THC_LEAKAGEM0::thc_M0_eave_nua',
        'thc_M0_eave_nue': 'THC_LEAKAGEM0::thc_M0_eave_nue',
        'thc_M0_eave_nux': 'THC_LEAKAGEM0::thc_M0_eave_nux',
        'thc_M0_E_nua': 'THC_LEAKAGEM0::thc_M0_E_nua',
        'thc_M0_E_nue': 'THC_LEAKAGEM0::thc_M0_E_nue',
        'thc_M0_E_nux': 'THC_LEAKAGEM0::thc_M0_E_nux',
        'thc_M0_flux_fac': 'THC_LEAKAGEM0::thc_M0_flux_fac',
        'thc_M0_ndens_nua': 'THC_LEAKAGEM0::thc_M0_ndens_nua',
        'thc_M0_ndens_nue': 'THC_LEAKAGEM0::thc_M0_ndens_nue',
        'thc_M0_ndens_nux': 'THC_LEAKAGEM0::thc_M0_ndens_nux',
        'thc_M0_N_nua': 'THC_LEAKAGEM0::thc_M0_N_nua',
        'thc_M0_N_nue': 'THC_LEAKAGEM0::thc_M0_N_nue',
        'thc_M0_N_nux': 'THC_LEAKAGEM0::thc_M0_N_nux',
    }
    # naming conventions, -- content of the EOS table file
    eos = {
        'eps': "internalEnergy",
        'press': "pressure",
        'entropy': "entropy"
    }
    # naming conventions, -- renames applied when writing the output module_profile
    out = {
        'alp': 'lapse',
        'vel[0]': 'velx',
        'vel[1]': 'vely',
        'vel[2]': 'velz',
        'volform': 'vol',
        'temperature': 'temp',
        'Y_e': 'Ye',
        'entropy': 'entr',
        'thc_M0_abs_energy': 'abs_energy',
        'thc_M0_abs_nua': 'abs_nua',
        'thc_M0_abs_nue': 'abs_nue',
        'thc_M0_abs_number': 'abs_number',
        'thc_M0_eave_nua': 'eave_nua',
        'thc_M0_eave_nue': 'eave_nue',
        'thc_M0_eave_nux': 'eave_nux',
        'thc_M0_E_nua': 'E_nua',
        'thc_M0_E_nue': 'E_nue',
        'thc_M0_E_nux': 'E_nux',
        'thc_M0_flux_fac': 'flux_fac',
        'thc_M0_ndens_nua': 'ndens_nua',
        'thc_M0_ndens_nue': 'ndens_nue',
        'thc_M0_ndens_nux': 'ndens_nux',
        'thc_M0_N_nua': 'N_nua',
        'thc_M0_N_nue': 'N_nue',
        'thc_M0_N_nux': 'N_nux',
    }
    # @staticmethod
    # def get_dattar_conent():
    #     if gen_set["content"] == "m0":
    #         return Names.nu_dattar
    #     else:
    #         return Names.dattar
# DAVID RADICE A tabulated nuclear equation of state
class EOSTable(object):
    """A tabulated nuclear equation of state in EOS_Thermal format.

    The table is read from an HDF5 file and individual thermodynamical
    quantities are evaluated by trilinear interpolation in
    (ye, log10(temp), log10(rho)).
    """
    def __init__(self):
        """
        Create an empty table
        """
        self.log_rho = None    # log10 of the density grid (cgs)
        self.log_temp = None   # log10 of the temperature grid (MeV)
        self.ye = None         # electron-fraction grid
        self.table = {}        # dataset name -> ndarray
        self.interp = {}       # dataset name -> cached interpolator
    def read_table(self, fname):
        """
        Initialize the EOS object from a table file

        * fname : must be the filename of a table in EOS_Thermal format
        """
        assert os.path.isfile(fname)
        dfile = h5py.File(fname, "r")
        try:
            for k in dfile.keys():
                self.table[k] = np.array(dfile[k])
        finally:
            # close the handle explicitly instead of relying on `del`
            dfile.close()
        self.log_rho = np.log10(self.table["density"])
        self.log_temp = np.log10(self.table["temperature"])
        self.ye = self.table["ye"]
    def get_names(self):
        """Return the names of the datasets available in the table."""
        return self.table.keys()
    def evaluate(self, prop, rho, temp, ye):
        """
        Interpolate a thermodynamical quantity on the given state arrays.

        * prop : name of the thermodynamical quantity to compute
        * rho  : rest mass density (in Msun^-2)
        * temp : temperature (in MeV)
        * ye   : electron fraction
        """
        # `in` instead of the Python-2-only dict.has_key()
        assert prop in self.table
        assert self.log_rho is not None
        assert self.log_temp is not None
        assert self.ye is not None
        assert rho.shape == temp.shape
        assert temp.shape == ye.shape
        # the table density grid is in cgs, the input rho in cactus units
        log_rho = np.log10(ut.conv_dens(ut.cactus, ut.cgs, rho))
        log_temp = np.log10(temp)
        xi = np.array((ye.flatten(), log_temp.flatten(),
                       log_rho.flatten())).transpose()
        if prop not in self.interp:
            # build the interpolator once and cache it; extrapolate
            # outside the table (fill_value=None)
            self.interp[prop] = RegularGridInterpolator(
                (self.ye, self.log_temp, self.log_rho), self.table[prop],
                method="linear", bounds_error=False, fill_value=None)
        else:
            print("EOS interp. object already exists")
        return self.interp[prop](xi).reshape(rho.shape)
class FileWork:
    """Helpers for locating the per-node carpet HDF5 output files."""
    def __init__(self):
        pass
    @staticmethod
    def get_number(file_):
        """Return the node number N encoded in a '<var>.file_N.h5' filename."""
        return int(str(file_.split('.file_')[-1]).split('.h5')[0])
    @staticmethod
    def get_filelist(key, inpath, output, clean = True):
        """Locate all '<key>.file_*.h5' files for one output directory.

        * key    : grid-function key, e.g. 'rho'
        * inpath : path to the simulation directory
        * output : name of the 'output-xxxx' directory to search
        * clean  : unused; kept for backward compatibility of the signature

        Returns the files sorted by node number; raises ValueError when
        nothing is found.
        """
        fname = key + '.file_*.h5'
        fullpath = inpath + output + '/data/'
        files = locate(fname, root=fullpath, followlinks=True)
        files = sorted(files, key=FileWork.get_number)
        if len(files) == 0:
            raise ValueError("For '{}' in {} found NO files searched:{}"
                             .format(key, fullpath, fname))
        return files
class ExtractProfile:
    """Extract hydro + GR data for one iteration from multi-node carpet
    HDF5 output, add EOS-derived quantities, and combine everything into
    a single '<it>.h5' profile file.

    All of the work is driven from __init__.
    """
    def __init__(self, it, output, inpath, outpath, eos_fpath, def_v_n ="rho", overwrite=False):
        # it        : iteration to extract
        # output    : name of the 'output-xxxx' directory with the data
        # inpath    : path to the simulation directory
        # outpath   : where intermediate and final .h5 files are dumped
        # eos_fpath : path to the tabulated EOS file
        # def_v_n   : variable used to read the carpet grid
        # overwrite : redo the extraction even if the result already exists
        self.it = it
        self.output = output
        self.inpath = inpath
        self.outpath = outpath
        self.description = None
        self.nlevels = 7  # has to be updated for every file
        self.eos_fpath = eos_fpath
        self.overwrite = overwrite
        # extract grid for a given iteration (this is time consuming, so better to do it once)
        outfname = self.outpath + str(self.it) + ".h5"
        if self.overwrite or not os.path.isfile(outfname):
            print("Extracting carpet grid for future use")
            self.dset_rho, self.grid = self.get_grid_for_it(def_v_n)
            self.nlevels = len(self.grid.levels)
            print("\tfound {} ref.levels".format(self.nlevels))
            # for every var. name, load, create dataset, save only for a given iteration
            print("Processing available data")
            # items() instead of the Python-2-only iteritems()
            for key, val in Names.dattar.items():
                print("\tkey:'{}' val:'{}' ...".format(key, val))
                self.process_datasets_for_it(key, val)
            # interpolate and save the EOS quantities (returns the 'rho' dataset)
            print("Processing EOS data...")
            self.default_data = self.inter_save_eos_vars()
            # load all variables iteration_v_n.h5 and combine them into the module_profile.h5
            print("Saving the result as a single file")
            self.load_combine_save()
            print("DONE")
            print("ITERATION {} WAS SAVED".format(str(self.it) + ".h5"))
        else:
            # was printing the bare placeholder: the filename was never formatted in
            print("File: {} already exists. Skipping.".format(outfname))
    def get_grid_for_it(self, key):
        """Parse the multi-file dataset for `key` and read the carpet
        grid for self.it. Returns (dset, grid); the dset stays open."""
        files = FileWork.get_filelist(key, self.inpath, self.output)
        print("\t Parsing the metadata..."),
        start_t = time.time()
        dset = h5.dataset(files)
        if not self.it in dset.iterations:
            raise ValueError("Required it: {} is not in dset.iterations() {}"
                             .format(self.it, dset.iterations))
        print("done! (%.2f sec)" % (time.time() - start_t))
        # Get the grid
        print("\t Reading the grid..."),
        start_t = time.time()
        grid = dset.get_grid(iteration=self.it)
        print("done! (%.2f sec)" % (time.time() - start_t))
        return dset, grid
    def process_datasets_for_it(self, key, val):
        """Read grid function `val` for self.it from the carpet files and
        save it as '<outpath><it>_<key>.h5', one group per ref. level."""
        files = FileWork.get_filelist(key, self.inpath, self.output)
        print("\t Parsing the metadata..."),
        start_t = time.time()
        dset = h5.dataset(files)
        print("done! (%.2f sec)" % (time.time() - start_t))
        if not self.it in dset.iterations:
            raise ValueError("it: {} is missing in dset for v_n: {}\n{}"
                             .format(self.it, key, dset.iterations))
        # saving data for iteration
        outfname = self.outpath + str(self.it) + '_' + key + ".h5"
        dfile = h5py.File(outfname, "w")
        if self.description is not None:
            dfile.create_dataset("description", data=np.string_(self.description))
        print("\t Saving {}...".format(outfname)),
        for rl in range(len(self.grid)):
            gname = "reflevel=%d" % rl
            dfile.create_group(gname)
            dfile[gname].attrs.create("delta", self.grid[rl].delta)
            dfile[gname].attrs.create("extent", self.grid[rl].extent())
            dfile[gname].attrs.create("iteration", self.it)
            dfile[gname].attrs.create("reflevel", rl)
            dfile[gname].attrs.create("time", dset.get_time(self.it))
            # single guarded read: the original performed an identical,
            # unguarded call first, which both duplicated the work and
            # made the except clause below unreachable
            try:
                data = dset.get_reflevel_data(self.grid[rl], iteration=int(self.it),
                                              variable=val, timelevel=0, dtype=np.float32)
            except KeyError:
                raise KeyError("Failed to extract data from {} file \n"
                               "Data: rl: {} it: {} v_n: {}\n"
                               .format(files[0], rl, self.it, val))
            dfile[gname].create_dataset(key, data=data)
        dfile.close()
        print("done! (%.2f sec)" % (time.time() - start_t))
        dset.close_files()
        gc.collect()
    def interpolate_save_eos_quantity(self, v_n, dset_rho, dset_temp, dset_ye, eostable):
        """Evaluate EOS quantity `v_n` on every ref. level from the saved
        rho/temperature/Y_e files and save it as '<outpath><it>_<v_n>.h5'."""
        print("\t Insterpolating/saving {} ...".format(v_n))
        start_t = time.time()
        dfile = h5py.File(self.outpath + str(self.it) + '_' + v_n + ".h5", "w")
        if self.description is not None:
            dfile.create_dataset("description", data=np.string_(self.description))
        for rl in range(self.nlevels):
            print("\t\trl:{}".format(rl))
            print("\t\t extracting rho, temp, ye...")
            group_rho = dset_rho["reflevel={}".format(rl)]
            group_temp = dset_temp["reflevel={}".format(rl)]
            group_ye = dset_ye["reflevel={}".format(rl)]
            arr_rho = np.array(group_rho["rho"])
            arr_temp = np.array(group_temp["temperature"])
            arr_ye = np.array(group_ye["Y_e"])
            print("\t\t evaluating {}".format(Names.eos[v_n]))
            data_arr = eostable.evaluate(Names.eos[v_n], arr_rho, arr_temp, arr_ye)
            print("\t\t converting units for {}".format(Names.eos[v_n]))
            # the EOS table returns cgs quantities; convert back to cactus units
            if v_n == 'eps':
                data_arr = ut.conv_spec_energy(ut.cgs, ut.cactus, data_arr)
            elif v_n == 'press':
                data_arr = ut.conv_press(ut.cgs, ut.cactus, data_arr)
            elif v_n == 'entropy':
                data_arr = data_arr
            else:
                raise NameError("EOS quantity: {}".format(v_n))
            gname = "reflevel=%d" % rl
            dfile.create_group(gname)
            dfile[gname].attrs.create("delta", group_rho.attrs["delta"])
            dfile[gname].attrs.create("extent", group_rho.attrs["extent"])
            dfile[gname].attrs.create("iteration", group_rho.attrs["iteration"])
            dfile[gname].attrs.create("reflevel", rl)
            dfile[gname].attrs.create("time", group_rho.attrs["time"])
            dfile[gname].create_dataset(v_n, data=data_arr, dtype=np.float32)
            del arr_rho
            del group_temp
            del group_ye
        dfile.close()
        print("done! (%.2f sec)" % (time.time() - start_t))
        gc.collect()
    def inter_save_eos_vars(self):
        """Compute and save every EOS quantity in Names.eos; returns the
        (still open) 'rho' dataset for later use by load_combine_save()."""
        o_eos = EOSTable()
        o_eos.read_table(self.eos_fpath)
        data_rho = h5py.File(self.outpath + str(self.it) + '_' + "rho" + ".h5", "r")
        data_temp = h5py.File(self.outpath + str(self.it) + '_' + "temperature" + ".h5", "r")
        data_ye = h5py.File(self.outpath + str(self.it) + '_' + "Y_e" + ".h5", "r")
        for v_n in Names.eos.keys():
            print("\t{}...".format(v_n))
            self.interpolate_save_eos_quantity(v_n, data_rho, data_temp, data_ye, o_eos)
        # close the handles we no longer need; 'rho' stays open on purpose
        data_temp.close()
        data_ye.close()
        return data_rho
    @staticmethod
    def merge_two_dicts(x, y):
        """Return a new dict with x's entries updated by y's (y wins)."""
        z = x.copy()  # start with x's keys and values
        z.update(y)   # modifies z with y's keys and values & returns None
        return z
    def load_combine_save(self):
        """Combine all per-variable '<it>_<key>.h5' files into the final
        '<it>.h5' profile, renaming keys according to Names.out."""
        all_in_names = self.merge_two_dicts(Names.dattar, Names.eos)
        print("\t Combining data into the module_profile {}.h5...".format(self.it)),
        start_t = time.time()
        dfile = h5py.File(self.outpath + str(self.it) + ".h5", "w")
        if self.description is not None:
            dfile.create_dataset("description", data=np.string_(self.description))
        for rl in range(self.nlevels):
            gname = "reflevel=%d" % rl
            dfile.create_group(gname)
            dfile[gname].attrs.create("delta", self.default_data["reflevel={}".format(rl)].attrs["delta"])
            dfile[gname].attrs.create("extent", self.default_data["reflevel={}".format(rl)].attrs["extent"])
            dfile[gname].attrs.create("iteration", self.default_data["reflevel={}".format(rl)].attrs["iteration"])
            dfile[gname].attrs.create("reflevel", rl)
            dfile[gname].attrs.create("time", self.default_data["reflevel={}".format(rl)].attrs["time"])
            # items() instead of the Python-2-only iteritems()
            for key, val in all_in_names.items():
                # loading the input h5 read-only and closing it (was opened
                # with the default mode and leaked)
                dfile__ = h5py.File(self.outpath + str(self.it) + '_' + key + ".h5", "r")
                data = np.array(dfile__["reflevel={}".format(rl)][key])
                dfile__.close()
                if key in Names.out.keys():
                    key = Names.out[key]
                dfile[gname].create_dataset(key, data=data, dtype=np.float32)
        dfile.close()
        print("done! (%.2f sec)" % (time.time() - start_t))
class ExtractNuProfile:
def __init__(self, it, output, inpath, outpath, def_nu_v_n ="thc_M0_abs_energy", overwrite=False):
self.it = it
self.output = output
self.inpath = inpath
self.outpath = outpath
self.description = None
self.overwrite = overwrite
outfname = self.outpath + str(self.it) + "nu.h5"
if (not os.path.isfile(outfname)) or \
(os.path.isfile(outfname) and self.overwrite):
# get reflevel for future use
default_dset = h5.dataset(FileWork.get_filelist(def_nu_v_n, self.inpath, self.output))
reflevel = default_dset.get_reflevel()
nrad = reflevel.n[0]
ntheta = int(round(sqrt(float(reflevel.n[1] / 2))))
nphi = 2 * ntheta
if ntheta * nphi != reflevel.n[1]:
raise ValueError("The leakage grid is inconsistent")
for key, val in Names.nu_dattar.iteritems():
print("\tProcessing key'{}' val:'{}'".format(key, val))
files = FileWork.get_filelist(key, self.inpath, self.output)
assert len(files)
dset = h5.dataset(files)
data = dset.get_reflevel_data(reflevel=reflevel, iteration=int(self.it),
variable=val, timelevel=0, dtype=np.float32)
# print(data)
# output
fname = self.outpath + str(self.it) + '_' + key + ".h5"
dfile = h5py.File(fname, "w")
# dfile.attrs.create("delta", reflevel.delta)
# dfile.attrs.create("extent", reflevel.extent())
dfile.attrs.create("iteration", self.it)
dfile.attrs.create("time", default_dset.get_time(self.it))
dfile.attrs.create("nrad", nrad)
dfile.attrs.create("ntheta", ntheta)
dfile.attrs.create("nphi", nphi)
print(data.shape)
# print('delta: {}'.format(reflevel.delta))
# print('extent:{}'.format(reflevel.extent()))
# print('iteration:{}'.format(self.it))
# print('time:{}'.format(dset.get_time(self.it)))
# print('nrad:{}'.format(nrad))
# print('ntheta:{}'.format(ntheta))
# print('nphi:{}'.format(nphi))
# exit(1)
dfile.create_dataset(key, data=data)
dset.close_files()
dfile.close()
print("\tFinished key'{}' val:'{}'".format(key, val))
# print("done! (%.2f sec)" % (time.time() - start_t))
default_dset.close_files()
# load extracted data and save as one file:
all_in_names = Names.nu_dattar
dfile = h5py.File(outfname, "w")
for key, val in all_in_names.iteritems():
print("\tLoading and appending {}".format(key))
dfile__ = h5py.File(self.outpath + str(self.it) + '_' + key + ".h5")
data = np.array(dfile__[key])
if key in Names.out.keys():
key = Names.out[key]
dfile.create_dataset(key, data=data, dtype=np.float32)
dfile.attrs.create("iteration", self.it)
dfile.attrs.create("time", default_dset.get_time(self.it))
dfile.attrs.create("nrad", nrad)
dfile.attrs.create("ntheta", ntheta)
dfile.attrs.create("nphi", nphi)
dfile.close()
print("\tDONE")
else:
print("File: {} already exists. Skipping."
.format(outfname))
""" ==================================| independent output-it-time mapping methods |================================="""
def find_nearest_index(array, value):
''' Finds index of the value in the array that is the closest to the provided one '''
idx = (np.abs(array - value)).argmin()
return idx
def get_output_for_time(time, output_it_time_dic, it_time):
it_time = np.array(it_time)
if time > it_time[:,1].max():
raise ValueError("time {:.3f}s beyond the simulation length ({:.3f}s)".format(time, it_time[:,1].max()))
if time < it_time[:,1].min():
raise ValueError("time {:.3f}s is too small, minimum is ({:.3f}s)".format(time, it_time[:,1].min()))
closest_iteration = it_time[find_nearest_index(it_time[:,1], time), 0]
output = ''
for output_dir, it_time in output_it_time_dic.iteritems():
if closest_iteration in it_time[:, 0]:
output = output_dir
if output == '':
raise ValueError("output was not found")
print("\t required time:{} found in {} output".format(time, output))
return output
def load_one_dset_to_get_iter(time_, key, inpath, output):
files = FileWork.get_filelist(key, inpath, output)
# files = get_filelist(key, output_dir)
dset = h5.dataset(files[0]) # fastest way
dataset_iterations = dset.iterations
dataset_times = []
for it in dataset_iterations:
dataset_times.append(float(dset.get_time(it)))
print("\t Iterations {}".format(dataset_iterations))
print("\t Times "),
print([("{:.3f}, ".format(i_time)) for i_time in dataset_times])
# print(' ')
# selecting the iteration that has the closest time to the required
idx = find_nearest_index(np.array(dataset_times), time_ / (0.004925794970773136 * 1e-3))
iteration = dataset_iterations[idx]
closest_time = dataset_times[idx]
print("\t it:{} with time:{:.3f} is the closest to required time:{:.3f}"
.format(iteration, closest_time * 0.004925794970773136 * 1e-3, time_))
return iteration, closest_time * 0.004925794970773136 * 1e-3
def set_it_output_map(inpath, outpath, it_time_fname = "dens.norm1.asc"):
"""
Loads set of it_time_files that have '1:it 2:time ...' structure to get a map
of what output-xxxx contains what iteration (and time)
"""
output_it_time_dic = {}
# if not os.path.isdir(gen_set["inpath"] + "profiles/"):
# print("creating output dir: {}".format(gen_set["inpath"] + "profiles/"))
# os.mkdir(gen_set["inpath"] + "profiles/")
#
# it_time_files = glob(gen_set["inpath"] + "output-*" + "/data/" + gen_set["it_map_file"])
#
# print('-' * 25 + 'LOADING it list ({})'
# .format(gen_set["it_map_file"]) + '-' * 25)
# print("\t loading from: {}, {} it_time_files".format(gen_set["inpath"], len(it_time_files)))
assert os.path.isdir(inpath)
assert os.path.isdir(outpath)
it_time_files = glob(inpath + "output-*" + "/data/" + it_time_fname)
assert len(it_time_files) > 0
print('-' * 25 + 'LOADING it list ({})'
.format(it_time_fname) + '-' * 25)
print("\t loading from: {}, {} it_time_files".format(inpath, len(it_time_files)))
it_time = np.zeros(2)
for file in it_time_files:
o_name = file.split('/')
o_dir = ''
for o_part in o_name:
if o_part.__contains__('output-'):
o_dir = o_part
if o_dir == '':
raise NameError("Did not find output-xxxx in {}".format(o_name))
it_time_i = np.loadtxt(file, usecols=(0, 1))
it_time_i[:, 1] *= 0.004925794970773136 * 1e-3 # time is seconds
output_it_time_dic[o_dir] = it_time_i
it_time = np.vstack((it_time, it_time_i))
it_time = np.delete(it_time, 0, 0)
print('outputs:{} its:{} [{}->{}] time:[{}->{:.3f}]'.format(len(it_time_files),
len(it_time[:, 0]),
int(it_time[:, 0].min()),
int(it_time[:, 0].max()),
float(it_time[:, 1].min()),
float(it_time[:, 1].max())))
if len(it_time[:, 0]) != len(set(it_time[:, 0])):
print("Warning: repetitions found in the loaded iterations")
iterations = np.unique(it_time[:, 0])
timestpes = np.unique(it_time[:, 1])
if not len(iterations) == len(timestpes):
raise ValueError("Failed attmept to remove repetitions from "
"\t it and time lists. Wrong lengths: {} {}"
.format(len(iterations), len(timestpes)))
else:
print("\t repetitions are not found in loaded it list, continue nurmally")
iterations = np.unique(it_time[:, 0])
timestpes = np.unique(it_time[:, 1])
print('-' * 30 + '------DONE-----' + '-' * 30)
return output_it_time_dic, np.vstack((iterations, timestpes)).T
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", default='./', required=False, help="path/to/input/data/")
parser.add_argument("-o", "--output", dest="output", default='same', required=False, help="path/to/output/dir")
parser.add_argument("-t", "--tasklist", nargs='+', dest="tasklist", default=[], required=True, help="tasklist to perform")
parser.add_argument("-m", "--mode", dest="mode", default="times", required=True, help="times or iterations")
parser.add_argument("--it", dest="iterations", nargs='+', default=[], required=False, help="iterations to postprocess")
parser.add_argument("--time", dest="times", nargs='+', default=[], required=False, help="times to postprocess [ms]")
parser.add_argument("--eos", dest="eos", required=False, default="auto", help="Hydro EOS file to use")
args = parser.parse_args()
#
glob_input_dir = args.input
glob_output_dir = args.output
glob_tasklist = args.tasklist
glob_mode = args.mode
glob_iterations =args.iterations
glob_times = args.times
glob_eosfpath = args.eos
#
assert len(glob_tasklist) > 0
assert os.path.isdir(glob_input_dir)
if glob_output_dir == "same": glob_output_dir = glob_input_dir
assert os.path.isdir(glob_input_dir)
assert os.path.isdir(glob_output_dir)
#
if glob_mode == "iterations":
glob_iterations = np.array(glob_iterations, dtype=int)
assert len(glob_iterations) > 0
elif glob_mode == "times":
glob_times = np.array(glob_times, dtype=float) / 1e3 # back to [s]
assert len(glob_times) > 0
else:
raise NameError("mode {} is not recognized".format(glob_mode))
#
print("Setting output-iteration-time map")
output_it_time_dic, it_time = set_it_output_map(glob_input_dir, glob_output_dir)
#
if not os.path.isdir(glob_output_dir+Names.outdir):
os.mkdir(glob_output_dir+Names.outdir)
#
if glob_mode == "times":
assert len(glob_times) > 0
if glob_times.min() < it_time[:,1].min():
raise ValueError("Given time: {} is below minimum in the data: {}"
.format(glob_times.min()*1e3, it_time[:,1].min()*1e3))
if glob_times.max() > it_time[:,1].max():
raise ValueError("Given time: {} is above maximum in the data: {}"
.format(glob_times.max()*1e3, it_time[:, 1].max()*1e3))
print("Required times are: {}".format(glob_times))
outputs = []
iterations = []
closest_times = []
for required_time in glob_times:
# find in what output the required time is located
print("Locating the required output for time:{:.3f}".format(required_time))
output = get_output_for_time(required_time, output_it_time_dic, it_time)
outputs.append(output)
iteration, closest_time = load_one_dset_to_get_iter(required_time, "rho", glob_input_dir, output)
iterations.append(iteration)
closest_times.append(closest_time)
print("\n")
print(" < tmin: {:.3f} tmax: {:.3f} >".format(it_time[:, 1].min(), it_time[:, 1].max()))
print(" --------------- TASK -------------------")
print(" t_req | t_aval | it | output ")
for required_time, closest_time, iteration, output in zip(glob_times, closest_times, iterations, outputs):
print(" {:.3f} | {:.3f} | {} | {} ".format(required_time, closest_time, iteration, output))
print(" --------------- DONE -------------------")
print("\n")
print("Mode: {}".format(glob_mode))
print("Task: {}".format(glob_tasklist))
# get the EOS file (if hydro is to be computed)
if "prof" in glob_tasklist:
if glob_eosfpath == 'auto':
glob_eosfpath = Paths.get_eos_fname_from_curr_dir(glob_input_dir)
else:
glob_eosfpath = glob_eosfpath
assert os.path.isfile(glob_eosfpath)
if click.confirm('Is it right EOS: {}'.format(glob_eosfpath.split('/')[-1]), default=True):
pass
else:
exit(1)
if click.confirm('Do you wish to start?', default=True):
print("Initializing...")
# main loop (here, it can be parallelized)
n = 1
for output, iteration in zip(outputs, iterations):
print("it:{} ({}/{})".format(iteration, n, len(iterations)))
if "prof" in glob_tasklist:
#
ExtractProfile(iteration, output, glob_input_dir, glob_output_dir + Names.outdir, glob_eosfpath, "rho",
overwrite=False)
#
# try:
# ExtractProfile(iteration, output, glob_input_dir, glob_output_dir+Names.outdir, glob_eosfpath, "rho", overwrite=False)
# except KeyboardInterrupt:
# exit(0)
# except:
# print("ERROR HYDRO output:{} iteration:{}".format(output, iteration))
if "nuprof" in glob_tasklist:
if True:
ExtractNuProfile(iteration, output, glob_input_dir, glob_output_dir+Names.outdir, "thc_M0_abs_energy", overwrite=False)
else:
print("ERROR NU output:{} iteration:{}".format(output, iteration))
n=n+1
elif glob_mode == "iterations":
raise AttributeError("this mode is not done yet...")
else:
raise NameError("mode (-m {}) is not recognized".format(glob_mode))
print(" ------------- ALL DONE ----------------- ")
print(" remove the temporary files ( rm *_* ) ")
| [
"numpy.log10",
"scidata.units.conv_dens",
"numpy.array",
"scidata.utils.locate",
"scidata.units.conv_press",
"argparse.ArgumentParser",
"scipy.interpolate.RegularGridInterpolator",
"numpy.delete",
"config.get_eos_fname_from_curr_dir",
"os.path.isdir",
"numpy.vstack",
"os.mkdir",
"glob.glob",... | [((24415, 24432), 'numpy.array', 'np.array', (['it_time'], {}), '(it_time)\n', (24423, 24432), True, 'import numpy as np\n'), ((25294, 25314), 'scidata.carpet.hdf5.dataset', 'h5.dataset', (['files[0]'], {}), '(files[0])\n', (25304, 25314), True, 'import scidata.carpet.hdf5 as h5\n'), ((26901, 26922), 'os.path.isdir', 'os.path.isdir', (['inpath'], {}), '(inpath)\n', (26914, 26922), False, 'import os\n'), ((26934, 26956), 'os.path.isdir', 'os.path.isdir', (['outpath'], {}), '(outpath)\n', (26947, 26956), False, 'import os\n'), ((26978, 27030), 'glob.glob', 'glob', (["(inpath + 'output-*' + '/data/' + it_time_fname)"], {}), "(inpath + 'output-*' + '/data/' + it_time_fname)\n", (26982, 27030), False, 'from glob import glob\n'), ((27256, 27267), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (27264, 27267), True, 'import numpy as np\n'), ((27796, 27820), 'numpy.delete', 'np.delete', (['it_time', '(0)', '(0)'], {}), '(it_time, 0, 0)\n', (27805, 27820), True, 'import numpy as np\n'), ((29135, 29160), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (29158, 29160), False, 'import argparse\n'), ((30290, 30319), 'os.path.isdir', 'os.path.isdir', (['glob_input_dir'], {}), '(glob_input_dir)\n', (30303, 30319), False, 'import os\n'), ((30398, 30427), 'os.path.isdir', 'os.path.isdir', (['glob_input_dir'], {}), '(glob_input_dir)\n', (30411, 30427), False, 'import os\n'), ((30439, 30469), 'os.path.isdir', 'os.path.isdir', (['glob_output_dir'], {}), '(glob_output_dir)\n', (30452, 30469), False, 'import os\n'), ((5648, 5669), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (5662, 5669), False, 'import os\n'), ((5687, 5708), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (5696, 5708), False, 'import h5py\n'), ((5829, 5860), 'numpy.log10', 'np.log10', (["self.table['density']"], {}), "(self.table['density'])\n", (5837, 5860), True, 'import numpy as np\n'), ((5885, 5920), 'numpy.log10', 'np.log10', 
(["self.table['temperature']"], {}), "(self.table['temperature'])\n", (5893, 5920), True, 'import numpy as np\n'), ((6594, 6608), 'numpy.log10', 'np.log10', (['temp'], {}), '(temp)\n', (6602, 6608), True, 'import numpy as np\n'), ((7600, 7646), 'scidata.utils.locate', 'locate', (['fname'], {'root': 'fullpath', 'followlinks': '(True)'}), '(fname, root=fullpath, followlinks=True)\n', (7606, 7646), False, 'from scidata.utils import locate\n'), ((12403, 12414), 'time.time', 'time.time', ([], {}), '()\n', (12412, 12414), False, 'import time\n'), ((12430, 12447), 'scidata.carpet.hdf5.dataset', 'h5.dataset', (['files'], {}), '(files)\n', (12440, 12447), True, 'import scidata.carpet.hdf5 as h5\n'), ((12776, 12787), 'time.time', 'time.time', ([], {}), '()\n', (12785, 12787), False, 'import time\n'), ((13149, 13160), 'time.time', 'time.time', ([], {}), '()\n', (13158, 13160), False, 'import time\n'), ((13176, 13193), 'scidata.carpet.hdf5.dataset', 'h5.dataset', (['files'], {}), '(files)\n', (13186, 13193), True, 'import scidata.carpet.hdf5 as h5\n'), ((13560, 13584), 'h5py.File', 'h5py.File', (['outfname', '"""w"""'], {}), "(outfname, 'w')\n", (13569, 13584), False, 'import h5py\n'), ((15646, 15658), 'gc.collect', 'gc.collect', ([], {}), '()\n', (15656, 15658), False, 'import gc\n'), ((15830, 15841), 'time.time', 'time.time', ([], {}), '()\n', (15839, 15841), False, 'import time\n'), ((18001, 18013), 'gc.collect', 'gc.collect', ([], {}), '()\n', (18011, 18013), False, 'import gc\n'), ((19017, 19028), 'time.time', 'time.time', ([], {}), '()\n', (19026, 19028), False, 'import time\n'), ((25756, 25779), 'numpy.array', 'np.array', (['dataset_times'], {}), '(dataset_times)\n', (25764, 25779), True, 'import numpy as np\n'), ((27580, 27612), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'usecols': '(0, 1)'}), '(file, usecols=(0, 1))\n', (27590, 27612), True, 'import numpy as np\n'), ((27750, 27781), 'numpy.vstack', 'np.vstack', (['(it_time, it_time_i)'], {}), '((it_time, 
it_time_i))\n', (27759, 27781), True, 'import numpy as np\n'), ((28448, 28472), 'numpy.unique', 'np.unique', (['it_time[:, 0]'], {}), '(it_time[:, 0])\n', (28457, 28472), True, 'import numpy as np\n'), ((28493, 28517), 'numpy.unique', 'np.unique', (['it_time[:, 1]'], {}), '(it_time[:, 1])\n', (28502, 28517), True, 'import numpy as np\n'), ((28901, 28925), 'numpy.unique', 'np.unique', (['it_time[:, 0]'], {}), '(it_time[:, 0])\n', (28910, 28925), True, 'import numpy as np\n'), ((28946, 28970), 'numpy.unique', 'np.unique', (['it_time[:, 1]'], {}), '(it_time[:, 1])\n', (28955, 28970), True, 'import numpy as np\n'), ((30536, 30572), 'numpy.array', 'np.array', (['glob_iterations'], {'dtype': 'int'}), '(glob_iterations, dtype=int)\n', (30544, 30572), True, 'import numpy as np\n'), ((30990, 31035), 'os.path.isdir', 'os.path.isdir', (['(glob_output_dir + Names.outdir)'], {}), '(glob_output_dir + Names.outdir)\n', (31003, 31035), False, 'import os\n'), ((31043, 31083), 'os.mkdir', 'os.mkdir', (['(glob_output_dir + Names.outdir)'], {}), '(glob_output_dir + Names.outdir)\n', (31051, 31083), False, 'import os\n'), ((33378, 33430), 'click.confirm', 'click.confirm', (['"""Do you wish to start?"""'], {'default': '(True)'}), "('Do you wish to start?', default=True)\n", (33391, 33430), False, 'import click\n'), ((5768, 5786), 'numpy.array', 'np.array', (['dfile[k]'], {}), '(dfile[k])\n', (5776, 5786), True, 'import numpy as np\n'), ((6537, 6573), 'scidata.units.conv_dens', 'ut.conv_dens', (['ut.cactus', 'ut.cgs', 'rho'], {}), '(ut.cactus, ut.cgs, rho)\n', (6549, 6573), True, 'from scidata import units as ut\n'), ((6785, 6925), 'scipy.interpolate.RegularGridInterpolator', 'RegularGridInterpolator', (['(self.ye, self.log_temp, self.log_rho)', 'self.table[prop]'], {'method': '"""linear"""', 'bounds_error': '(False)', 'fill_value': 'None'}), "((self.ye, self.log_temp, self.log_rho), self.table[\n prop], method='linear', bounds_error=False, fill_value=None)\n", (6808, 6925), False, 'from 
scipy.interpolate import RegularGridInterpolator\n'), ((16384, 16410), 'numpy.array', 'np.array', (["group_rho['rho']"], {}), "(group_rho['rho'])\n", (16392, 16410), True, 'import numpy as np\n'), ((16434, 16469), 'numpy.array', 'np.array', (["group_temp['temperature']"], {}), "(group_temp['temperature'])\n", (16442, 16469), True, 'import numpy as np\n'), ((16491, 16516), 'numpy.array', 'np.array', (["group_ye['Y_e']"], {}), "(group_ye['Y_e'])\n", (16499, 16516), True, 'import numpy as np\n'), ((23156, 23180), 'h5py.File', 'h5py.File', (['outfname', '"""w"""'], {}), "(outfname, 'w')\n", (23165, 23180), False, 'import h5py\n'), ((24292, 24313), 'numpy.abs', 'np.abs', (['(array - value)'], {}), '(array - value)\n', (24298, 24313), True, 'import numpy as np\n'), ((29055, 29089), 'numpy.vstack', 'np.vstack', (['(iterations, timestpes)'], {}), '((iterations, timestpes))\n', (29064, 29089), True, 'import numpy as np\n'), ((33169, 33198), 'os.path.isfile', 'os.path.isfile', (['glob_eosfpath'], {}), '(glob_eosfpath)\n', (33183, 33198), False, 'import os\n'), ((9610, 9634), 'os.path.isfile', 'os.path.isfile', (['outfname'], {}), '(outfname)\n', (9624, 9634), False, 'import os\n'), ((9658, 9682), 'os.path.isfile', 'os.path.isfile', (['outfname'], {}), '(outfname)\n', (9672, 9682), False, 'import os\n'), ((17016, 17064), 'scidata.units.conv_spec_energy', 'ut.conv_spec_energy', (['ut.cgs', 'ut.cactus', 'data_arr'], {}), '(ut.cgs, ut.cactus, data_arr)\n', (17035, 17064), True, 'from scidata import units as ut\n'), ((20709, 20733), 'os.path.isfile', 'os.path.isfile', (['outfname'], {}), '(outfname)\n', (20723, 20733), False, 'import os\n'), ((20757, 20781), 'os.path.isfile', 'os.path.isfile', (['outfname'], {}), '(outfname)\n', (20771, 20781), False, 'import os\n'), ((21504, 21521), 'scidata.carpet.hdf5.dataset', 'h5.dataset', (['files'], {}), '(files)\n', (21514, 21521), True, 'import scidata.carpet.hdf5 as h5\n'), ((21853, 21874), 'h5py.File', 'h5py.File', (['fname', 
'"""w"""'], {}), "(fname, 'w')\n", (21862, 21874), False, 'import h5py\n'), ((23407, 23429), 'numpy.array', 'np.array', (['dfile__[key]'], {}), '(dfile__[key])\n', (23415, 23429), True, 'import numpy as np\n'), ((30665, 30698), 'numpy.array', 'np.array', (['glob_times'], {'dtype': 'float'}), '(glob_times, dtype=float)\n', (30673, 30698), True, 'import numpy as np\n'), ((33036, 33085), 'config.get_eos_fname_from_curr_dir', 'Paths.get_eos_fname_from_curr_dir', (['glob_input_dir'], {}), '(glob_input_dir)\n', (33069, 33085), True, 'import config as Paths\n'), ((12669, 12680), 'time.time', 'time.time', ([], {}), '()\n', (12678, 12680), False, 'import time\n'), ((12872, 12883), 'time.time', 'time.time', ([], {}), '()\n', (12881, 12883), False, 'import time\n'), ((13230, 13241), 'time.time', 'time.time', ([], {}), '()\n', (13239, 13241), False, 'import time\n'), ((13680, 13708), 'numpy.string_', 'np.string_', (['self.description'], {}), '(self.description)\n', (13690, 13708), True, 'import numpy as np\n'), ((15587, 15598), 'time.time', 'time.time', ([], {}), '()\n', (15596, 15598), False, 'import time\n'), ((16018, 16046), 'numpy.string_', 'np.string_', (['self.description'], {}), '(self.description)\n', (16028, 16046), True, 'import numpy as np\n'), ((17125, 17167), 'scidata.units.conv_press', 'ut.conv_press', (['ut.cgs', 'ut.cactus', 'data_arr'], {}), '(ut.cgs, ut.cactus, data_arr)\n', (17138, 17167), True, 'from scidata import units as ut\n'), ((17968, 17979), 'time.time', 'time.time', ([], {}), '()\n', (17977, 17979), False, 'import time\n'), ((19192, 19220), 'numpy.string_', 'np.string_', (['self.description'], {}), '(self.description)\n', (19202, 19220), True, 'import numpy as np\n'), ((20303, 20314), 'time.time', 'time.time', ([], {}), '()\n', (20312, 20314), False, 'import time\n')] |
'''
Plotting tools relevant for illustrating and comparing clustering results
can be found in this module.
'''
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def scatter_plot_two_dim_group_data(
two_dim_data,
labels,
markers=None,
colors=None,
figsize=(10, 6),
xlim=None,
ylim=None,
alpha=0.8,
bbox_to_anchor=(1.01, 1),
loc=2,
grid=True,
show=True,
filepath=None,
**kwargs
):
'''
Plot the distribution of a two dimensional data against clustering groups
in a scatter plot.
A point represents an instance in the dataset. Points in a same cluster
are painted with a same colour.
This tool is useful to check the clustering impact in this two-dimensional
sub-space.
Parameters
----------
two_dim_data: Pandas DataFrame
A dataframe with two columns. The first column goes to the x-axis,
and the second column goes to the y-axis.
labels: list, Pandas Series, Numpy Array, or any iterable
The segment label for each sample in ``two_dim_data``.
markers: list
Marker names for each group.
bbox_to_anchor: tuple
Instruction to placing the legend box relative to the axes. Details
refer to ``Matplotlib`` document.
colors: list, default None
Colours for each group. Use equally distanced colours on colour map
if not supplied.
figsize: tuple
Figure size (width, height).
xlim: tuple
X-axis limits.
ylim: tuple
Y-axis limits.
alpha: float, between 0 and 1
Marker transparency. From 0 to 1: from transparent to opaque.
loc: int
The corner of the legend box to anchor. Details refer to ``Matplotlib``
document.
grid: boolean, default True
Show grid.
show: boolean, default True
Show figure in pop-up windows if true. Save to files if False.
filepath: str
File name to saving the plot. Must be assigned a valid filepath if
``show`` is False.
**kwargs: keyword arguments
Other keyword arguemnts passed on to ``matplotlib.pyplot.scatter``.
Note
----
Instances in a same cluster does not necessarily assemble together in
all two dimensional sub-spaces. There can be possibly no clustering
capaility for certain features. Additionally certain features play a
secondary role in clustering as having less importance in
``field_importance`` in ``clusteror`` module.
'''
assert isinstance(two_dim_data, pd.core.frame.DataFrame)
assert two_dim_data.shape[1] == 2, 'Two_dim_data must have two columns!'
if isinstance(labels, pd.core.series.Series):
labels = labels.values
grouped = two_dim_data.groupby(labels)
n_groups = grouped.ngroups
# there should be enough markers
if markers is not None:
error_msg = 'There should be one marker for each group!'
assert len(markers) == n_groups, error_msg
# get color for each group from the spectrum
if colors is None:
colors = plt.cm.Spectral(np.linspace(0, 1, n_groups))
plt.figure(figsize=figsize)
ax = plt.subplot(111)
if markers is None:
# do a for loop to plot one by one
# if markers not given, default circles
for (name, group), color in zip(grouped, colors):
ax.scatter(
x=group.values[:, 0],
y=group.values[:, 1],
color=color,
label=str(name),
alpha=alpha,
**kwargs)
else:
for (name, group), color, marker in zip(grouped, colors, markers):
ax.scatter(
x=group.values[:, 0],
y=group.values[:, 1],
color=color,
marker=marker,
label=str(name),
alpha=alpha,
ax=ax,
**kwargs)
# place the legend at the right hand side of the chart
plt.legend(bbox_to_anchor=bbox_to_anchor, loc=loc)
# get the axes names
x_label, y_label = tuple(two_dim_data.columns)
plt.xlabel(x_label, size=17)
plt.ylabel(y_label, size=17)
# get lim for x and y axes
if xlim is None:
xlim = (two_dim_data.iloc[:, 0].min(), two_dim_data.iloc[:, 0].max())
if ylim is None:
ylim = (two_dim_data.iloc[:, 1].min(), two_dim_data.iloc[:, 1].max())
plt.xlim(xlim)
plt.ylim(ylim)
if grid:
plt.grid()
if show:
plt.show()
else:
assert filepath
plt.savefig(filepath)
def hist_plot_one_dim_group_data(
one_dim_data,
labels,
bins=11,
colors=None,
figsize=(10, 6),
xlabel='Dimension Reduced Data',
ylabel='Occurance',
bbox_to_anchor=(1.01, 1),
loc=2,
grid=True,
show=True,
filepath=None,
**kwargs):
'''
Plot the distribution of a one dimensional numerical data in a histogram.
This tool is useful to check the clustering impact in this one-dimensional
sub-space.
Parameters
----------
one_dim_data: list, Pandas Series, Numpy Array, or any iterable
A sequence of data. Each element if for an instance.
labels: list, Pandas Series, Numpy Array, or any iterable
The segment label for each sample in ``one_dim_data``.
bins: int or iterable
If an integer, bins - 1 bins created or a list of the delimiters.
colors: list, default None
Colours for each group. Use equally distanced colours on colour map
if not supplied.
figsize: tuple
Figure size (width, height).
xlabel: str
Plot xlabel.
ylabel: str
Plot ylabel.
bbox_to_anchor: tuple
Instruction to placing the legend box relative to the axes. Details
refer to ``Matplotlib`` document.
loc: int
The corner of the legend box to anchor. Details refer to ``Matplotlib``
document.
grid: boolean, default True
Show grid.
show: boolean, default True
Show figure in pop-up windows if true. Save to files if False.
filepath: str
File name to saving the plot. Must be assigned a valid filepath if
``show`` is False.
**kwargs: keyword arguments
Other keyword arguemnts passed on to ``matplotlib.pyplot.scatter``.
Note
----
Instances in a same cluster does not necessarily assemble together in
all one dimensional sub-spaces. There can be possibly no clustering
capaility for certain features. Additionally certain features play a
secondary role in clustering as having less importance in
``field_importance`` in ``clusteror`` module.
'''
if not isinstance(one_dim_data, pd.core.series.Series):
one_dim_data = pd.Series(one_dim_data)
if isinstance(labels, pd.core.series.Series):
labels = labels.values
grouped = one_dim_data.groupby(labels)
n_groups = grouped.ngroups
# get color for each group from the spectrum
if colors is None:
colors = plt.cm.Spectral(np.linspace(0, 1, n_groups))
plt.figure(figsize=figsize)
ax = plt.subplot(111)
# do a for loop to plot one by one
for (name, group), color in zip(grouped, colors):
ax.hist(
group.values,
bins=bins,
color=color,
label=str(name),
alpha=0.5,
**kwargs
)
# place the legend at the right hand side of the chart
plt.legend(bbox_to_anchor=bbox_to_anchor, loc=loc)
plt.xlabel(xlabel, size=17)
plt.ylabel(ylabel, size=17)
if grid:
plt.grid()
if show:
plt.show()
else:
assert filepath
plt.savefig(filepath)
def group_occurance_plot(
one_dim_data,
cat_label,
labels,
group_label,
colors=None,
figsize=(10, 6),
bbox_to_anchor=(1.01, 1),
loc=2,
grid=True,
show=True,
filepath=None,
**kwargs):
'''
Plot the distribution of a one dimensional **ordinal or categorical** data
in a bar chart. This tool is useful to check the clustering impact in this
one-dimensional sub-space.
Parameters
----------
one_dim_data: list, Pandas Series, Numpy Array, or any iterable
A sequence of data. Each element if for an instance.
cat_label: str
Field name will be used for the one dimensional data.
labels: list, Pandas Series, Numpy Array, or any iterable
The segment label for each sample in one_dim_data.
group_label: str
Field name will be used for the cluster ID.
colors: list, default None
Colours for each category existing in this one dimensional data.
Default colour scheme used if not supplied.
figsize: tuple
Figure size (width, height).
bbox_to_anchor: tuple
Instruction to placing the legend box relative to the axes. Details
refer to ``Matplotlib`` document.
loc: int
The corner of the legend box to anchor. Details refer to ``Matplotlib``
document.
grid: boolean, default True
Show grid.
show: boolean, default True
Show figure in pop-up windows if true. Save to files if False.
filepath: str
File name to saving the plot. Must be assigned a valid filepath if
``show`` is False.
**kwargs: keyword arguments
Other keyword arguemnts passed on to ``matplotlib.pyplot.scatter``.
Note
----
Instances in a same cluster does not necessarily assemble together in
all one dimensional sub-spaces. There can be possibly no clustering
capaility for certain features. Additionally certain features play a
secondary role in clustering as having less importance in
``field_importance`` in ``clusteror`` module.
'''
if not isinstance(one_dim_data, pd.core.series.Series):
one_dim_data = pd.Series(one_dim_data)
df = pd.DataFrame({cat_label: one_dim_data, group_label: labels})
df_to_plot = df.pivot_table(
index=group_label,
columns=cat_label,
aggfunc=len
)
plt.figure(figsize=figsize)
ax = plt.subplot(111)
df_to_plot.plot.bar(color=colors, ax=ax, **kwargs)
plt.legend(bbox_to_anchor=bbox_to_anchor, loc=loc)
if grid:
plt.grid()
if show:
plt.show()
else:
assert filepath
plt.savefig(filepath)
| [
"pandas.Series",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure",
"numpy.linspace",
"pandas.DataFrame",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.... | [((3189, 3216), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3199, 3216), True, 'import matplotlib.pyplot as plt\n'), ((3226, 3242), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (3237, 3242), True, 'import matplotlib.pyplot as plt\n'), ((4052, 4102), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': 'bbox_to_anchor', 'loc': 'loc'}), '(bbox_to_anchor=bbox_to_anchor, loc=loc)\n', (4062, 4102), True, 'import matplotlib.pyplot as plt\n'), ((4183, 4211), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x_label'], {'size': '(17)'}), '(x_label, size=17)\n', (4193, 4211), True, 'import matplotlib.pyplot as plt\n'), ((4216, 4244), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {'size': '(17)'}), '(y_label, size=17)\n', (4226, 4244), True, 'import matplotlib.pyplot as plt\n'), ((4478, 4492), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (4486, 4492), True, 'import matplotlib.pyplot as plt\n'), ((4497, 4511), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (4505, 4511), True, 'import matplotlib.pyplot as plt\n'), ((7199, 7226), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (7209, 7226), True, 'import matplotlib.pyplot as plt\n'), ((7236, 7252), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (7247, 7252), True, 'import matplotlib.pyplot as plt\n'), ((7583, 7633), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': 'bbox_to_anchor', 'loc': 'loc'}), '(bbox_to_anchor=bbox_to_anchor, loc=loc)\n', (7593, 7633), True, 'import matplotlib.pyplot as plt\n'), ((7638, 7665), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'size': '(17)'}), '(xlabel, size=17)\n', (7648, 7665), True, 'import matplotlib.pyplot as plt\n'), ((7670, 7697), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'size': '(17)'}), '(ylabel, size=17)\n', 
(7680, 7697), True, 'import matplotlib.pyplot as plt\n'), ((10064, 10124), 'pandas.DataFrame', 'pd.DataFrame', (['{cat_label: one_dim_data, group_label: labels}'], {}), '({cat_label: one_dim_data, group_label: labels})\n', (10076, 10124), True, 'import pandas as pd\n'), ((10242, 10269), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (10252, 10269), True, 'import matplotlib.pyplot as plt\n'), ((10279, 10295), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (10290, 10295), True, 'import matplotlib.pyplot as plt\n'), ((10355, 10405), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': 'bbox_to_anchor', 'loc': 'loc'}), '(bbox_to_anchor=bbox_to_anchor, loc=loc)\n', (10365, 10405), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4543), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4541, 4543), True, 'import matplotlib.pyplot as plt\n'), ((4565, 4575), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4573, 4575), True, 'import matplotlib.pyplot as plt\n'), ((4618, 4639), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (4629, 4639), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6905), 'pandas.Series', 'pd.Series', (['one_dim_data'], {}), '(one_dim_data)\n', (6891, 6905), True, 'import pandas as pd\n'), ((7719, 7729), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7727, 7729), True, 'import matplotlib.pyplot as plt\n'), ((7751, 7761), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7759, 7761), True, 'import matplotlib.pyplot as plt\n'), ((7804, 7825), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (7815, 7825), True, 'import matplotlib.pyplot as plt\n'), ((10031, 10054), 'pandas.Series', 'pd.Series', (['one_dim_data'], {}), '(one_dim_data)\n', (10040, 10054), True, 'import pandas as pd\n'), ((10427, 10437), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (10435, 10437), 
True, 'import matplotlib.pyplot as plt\n'), ((10459, 10469), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10467, 10469), True, 'import matplotlib.pyplot as plt\n'), ((10512, 10533), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filepath'], {}), '(filepath)\n', (10523, 10533), True, 'import matplotlib.pyplot as plt\n'), ((3156, 3183), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_groups'], {}), '(0, 1, n_groups)\n', (3167, 3183), True, 'import numpy as np\n'), ((7166, 7193), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_groups'], {}), '(0, 1, n_groups)\n', (7177, 7193), True, 'import numpy as np\n')] |
#! /usr/bin/env python
import numpy as np
import pandas as pd
from math import log, exp
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
if __name__ == '__main__':
    # Estimate the exponential growth rate of the (odd-indexed) Laurent
    # coefficient numerators by fitting log|numerator| against the index.
    #
    # BUGFIX: the separator is a regex, so it must be a raw string --
    # ',\s+' in a plain string raises SyntaxWarning on Python 3.12+.
    # engine='python' is stated explicitly because regex separators are
    # only supported by the python parsing engine.
    data = pd.read_csv("laurent_coeffs_2_511.csv", sep=r',\s+', engine='python')
    odd_numerators = data['numerator'][1::2]
    even_numerators = data['numerator'][::2]
    model = LinearRegression()
    # Skip the pre-asymptotic head of the sequence before fitting.
    start_idx = 120
    x = np.reshape(odd_numerators.index[start_idx:], (-1, 1))
    # Coefficients may be huge, so go through int -> abs -> log.
    y = [[log(abs(int(a)))] for a in odd_numerators.values[start_idx:]]
    model.fit(x, y)
    score = model.score(x, y)  # R^2 of the linear fit
    slope = model.coef_[0][0]
    intercept = model.intercept_[0]
    print("Slope: {}\nIntercept: {}".format(slope, intercept))
    print("R^2: {}".format(score))
    print(f"Exp growth rate: {exp(slope)}")
    # plt.loglog(even_numerators)
    # plt.show()
| [
"math.exp",
"numpy.reshape",
"sklearn.linear_model.LinearRegression",
"pandas.read_csv"
] | [((209, 261), 'pandas.read_csv', 'pd.read_csv', (['"""laurent_coeffs_2_511.csv"""'], {'sep': '""",\\\\s+"""'}), "('laurent_coeffs_2_511.csv', sep=',\\\\s+')\n", (220, 261), True, 'import pandas as pd\n'), ((368, 386), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (384, 386), False, 'from sklearn.linear_model import LinearRegression\n'), ((417, 470), 'numpy.reshape', 'np.reshape', (['odd_numerators.index[start_idx:]', '(-1, 1)'], {}), '(odd_numerators.index[start_idx:], (-1, 1))\n', (427, 470), True, 'import numpy as np\n'), ((752, 762), 'math.exp', 'exp', (['slope'], {}), '(slope)\n', (755, 762), False, 'from math import log, exp\n')] |
# -*- coding: utf-8 -*-
# TODO Licence:
#
# TODO: move library intensive functions to vtool
from __future__ import absolute_import, division, print_function, unicode_literals
import operator
import six
from six.moves import zip, range, reduce
from utool import util_type
from utool import util_inject
from utool import util_decor
try:
import numpy as np
HAVE_NUMPY = True
except ImportError:
HAVE_NUMPY = False
# TODO remove numpy
pass
try:
import scipy.spatial.distance as spdist
HAVE_SCIPY = True
except ImportError:
HAVE_SCIPY = False
# utool's injection utility rebinds print/rrr/profile for this module.
print, rrr, profile = util_inject.inject2(__name__)
# Golden ratio and the two lengths obtained by splitting a unit
# interval at 1/PHI (PHI_A + PHI_B == 1).
PHI = 1.61803398875
PHI_A = (1 / PHI)
PHI_B = 1 - PHI_A
def bayesnet():
    """
    Toy three-annotation name-inference demo built with pgmpy.

    Builds uniform name priors (Ni, Nj, Nk), pairwise same/diff match
    nodes (Aij, Ajk, Aki) and score nodes (Sij, Sjk), prints every CPD,
    runs VariableElimination over a grid of evidence assignments, and
    finally renders the graph to ``foo.png`` and opens it.

    NOTE(review): uses a name ``ut`` that is not imported in this
    module's visible scope -- presumably ``utool``; confirm it is
    available before running. Also publishes several locals through
    ``globals()`` for interactive inspection.

    References:
        https://class.coursera.org/pgm-003/lecture/17
        http://www.cs.ubc.ca/~murphyk/Bayes/bnintro.html
        http://www3.cs.stonybrook.edu/~sael/teaching/cse537/Slides/chapter14d_BP.pdf
        http://www.cse.unsw.edu.au/~cs9417ml/Bayes/Pages/PearlPropagation.html
        https://github.com/pgmpy/pgmpy.git
        http://pgmpy.readthedocs.org/en/latest/
        http://nipy.bic.berkeley.edu:5000/download/11
    """
    # import operator as op
    # # Enumerate all possible events
    # varcard_list = list(map(op.attrgetter('variable_card'), cpd_list))
    # _esdat = list(ut.iprod(*map(range, varcard_list)))
    # _escol = list(map(op.attrgetter('variable'), cpd_list))
    # event_space = pd.DataFrame(_esdat, columns=_escol)
    # # Custom compression of event space to inspect a specific graph
    # def compress_space_flags(event_space, var1, var2, var3, cmp12_):
    #     """
    #     var1, var2, cmp_ = 'Lj', 'Lk', op.eq
    #     """
    #     import vtool as vt
    #     data = event_space
    #     other_cols = ut.setdiff_ordered(data.columns.tolist(), [var1, var2, var3])
    #     case_flags12 = cmp12_(data[var1], data[var2]).values
    #     # case_flags23 = cmp23_(data[var2], data[var3]).values
    #     # case_flags = np.logical_and(case_flags12, case_flags23)
    #     case_flags = case_flags12
    #     case_flags = case_flags.astype(np.int64)
    #     subspace = np.hstack((case_flags[:, None], data[other_cols].values))
    #     sel_ = vt.unique_row_indexes(subspace)
    #     flags = np.logical_and(mask, case_flags)
    #     return flags
    # # Build special cases
    # case_same = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.eq)]
    # case_diff = event_space.loc[compress_space_flags(event_space, 'Li', 'Lj', 'Lk', op.ne)]
    # special_cases = [
    #     case_same,
    #     case_diff,
    # ]
    from pgmpy.factors import TabularCPD
    from pgmpy.models import BayesianModel
    import pandas as pd
    from pgmpy.inference import BeliefPropagation  # NOQA
    from pgmpy.inference import VariableElimination  # NOQA
    # --- human-readable state labels, keyed by semantic type ---
    name_nice = ['n1', 'n2', 'n3']
    score_nice = ['low', 'high']
    match_nice = ['diff', 'same']
    num_names = len(name_nice)
    num_scores = len(score_nice)
    nid_basis = list(range(num_names))
    score_basis = list(range(num_scores))
    semtype2_nice = {
        'score': score_nice,
        'name': name_nice,
        'match': match_nice,
    }
    var2_cpd = {
    }
    # exported for interactive inspection after the function runs
    globals()['semtype2_nice'] = semtype2_nice
    globals()['var2_cpd'] = var2_cpd
    # All (name, name) pairs; used to build match-probability tables.
    name_combo = np.array(list(ut.iprod(nid_basis, nid_basis)))
    combo_is_same = name_combo.T[0] == name_combo.T[1]
    def get_expected_scores_prob(level1, level2):
        # level1 weights same-name pairs, (1 - level2) weights diff pairs
        part1 = combo_is_same * level1
        part2 = (1 - combo_is_same) * (1 - (level2))
        expected_scores_level = part1 + part2
        return expected_scores_level
    # def make_cpd():
    def name_cpd(aid):
        # Uniform prior over names for annotation `aid`.
        from pgmpy.factors import TabularCPD
        cpd = TabularCPD(
            variable='N' + aid,
            variable_card=num_names,
            values=[[1.0 / num_names] * num_names])
        cpd.semtype = 'name'
        return cpd
    name_cpds = [name_cpd('i'), name_cpd('j'), name_cpd('k')]
    var2_cpd.update(dict(zip([cpd.variable for cpd in name_cpds], name_cpds)))
    if True:
        # --- A-nodes: deterministic same/diff indicator per name pair ---
        num_same_diff = 2
        samediff_measure = np.array([
            # get_expected_scores_prob(.12, .2),
            # get_expected_scores_prob(.88, .8),
            get_expected_scores_prob(0, 0),
            get_expected_scores_prob(1, 1),
        ])
        samediff_vals = (samediff_measure / samediff_measure.sum(axis=0)).tolist()
        def samediff_cpd(aid1, aid2):
            cpd = TabularCPD(
                variable='A' + aid1 + aid2,
                variable_card=num_same_diff,
                values=samediff_vals,
                evidence=['N' + aid1, 'N' + aid2],  # [::-1],
                evidence_card=[num_names, num_names])  # [::-1])
            cpd.semtype = 'match'
            return cpd
        samediff_cpds = [samediff_cpd('i', 'j'), samediff_cpd('j', 'k'), samediff_cpd('k', 'i')]
        var2_cpd.update(dict(zip([cpd.variable for cpd in samediff_cpds], samediff_cpds)))
        if True:
            # Score conditioned on (A, N1, N2); table built state by state.
            def score_cpd(aid1, aid2):
                semtype = 'score'
                evidence = ['A' + aid1 + aid2, 'N' + aid1, 'N' + aid2]
                evidence_cpds = [var2_cpd[key] for key in evidence]
                evidence_nice = [semtype2_nice[cpd.semtype] for cpd in evidence_cpds]
                evidence_card = list(map(len, evidence_nice))
                evidence_states = list(ut.iprod(*evidence_nice))
                variable_basis = semtype2_nice[semtype]
                variable_values = []
                for mystate in variable_basis:
                    row = []
                    for state in evidence_states:
                        if state[0] == state[1]:
                            if state[2] == 'same':
                                val = .2 if mystate == 'low' else .8
                            else:
                                val = 1
                            # val = .5 if mystate == 'low' else .5
                        elif state[0] != state[1]:
                            if state[2] == 'same':
                                val = .5 if mystate == 'low' else .5
                            else:
                                val = 1
                            # val = .9 if mystate == 'low' else .1
                        row.append(val)
                    variable_values.append(row)
                cpd = TabularCPD(
                    variable='S' + aid1 + aid2,
                    variable_card=len(variable_basis),
                    values=variable_values,
                    evidence=evidence,  # [::-1],
                    evidence_card=evidence_card)  # [::-1])
                cpd.semtype = semtype
                return cpd
        else:
            # Alternative: score conditioned on the A-node only.
            score_values = [
                [.8, .1],
                [.2, .9],
            ]
            def score_cpd(aid1, aid2):
                cpd = TabularCPD(
                    variable='S' + aid1 + aid2,
                    variable_card=num_scores,
                    values=score_values,
                    evidence=['A' + aid1 + aid2],  # [::-1],
                    evidence_card=[num_same_diff])  # [::-1])
                cpd.semtype = 'score'
                return cpd
        score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
        cpd_list = name_cpds + score_cpds + samediff_cpds
    else:
        # Dead branch kept for reference: score conditioned directly on names.
        score_measure = np.array([get_expected_scores_prob(level1, level2)
                                  for level1, level2 in
                                  zip(np.linspace(.1, .9, num_scores),
                                      np.linspace(.2, .8, num_scores))])
        score_values = (score_measure / score_measure.sum(axis=0)).tolist()
        def score_cpd(aid1, aid2):
            cpd = TabularCPD(
                variable='S' + aid1 + aid2,
                variable_card=num_scores,
                values=score_values,
                evidence=['N' + aid1, 'N' + aid2],
                evidence_card=[num_names, num_names])
            cpd.semtype = 'score'
            return cpd
        score_cpds = [score_cpd('i', 'j'), score_cpd('j', 'k')]
        cpd_list = name_cpds + score_cpds
        pass
    # Graph edges run evidence-variable -> variable for every CPD.
    input_graph = []
    for cpd in cpd_list:
        if cpd.evidence is not None:
            for evar in cpd.evidence:
                input_graph.append((evar, cpd.variable))
    name_model = BayesianModel(input_graph)
    name_model.add_cpds(*cpd_list)
    var2_cpd.update(dict(zip([cpd.variable for cpd in cpd_list], cpd_list)))
    globals()['var2_cpd'] = var2_cpd
    varnames = [cpd.variable for cpd in cpd_list]
    # --- PRINT CPDS ---
    cpd = score_cpds[0]
    def print_cpd(cpd):
        # Pretty-print one CPD as a DataFrame with nice state labels.
        print('CPT: %r' % (cpd,))
        index = semtype2_nice[cpd.semtype]
        if cpd.evidence is None:
            columns = ['None']
        else:
            basis_lists = [semtype2_nice[var2_cpd[ename].semtype] for ename in cpd.evidence]
            columns = [','.join(x) for x in ut.iprod(*basis_lists)]
        data = cpd.get_cpd()
        print(pd.DataFrame(data, index=index, columns=columns))
    for cpd in name_model.get_cpds():
        print('----')
        print(cpd._str('phi'))
        print_cpd(cpd)
    # --- INFERENCE ---
    Ni = name_cpds[0]
    event_space_combos = {}
    event_space_combos[Ni.variable] = 0  # Set ni to always be Fred
    for cpd in cpd_list:
        if cpd.semtype == 'score':
            event_space_combos[cpd.variable] = list(range(cpd.variable_card))
    evidence_dict = ut.all_dict_combinations(event_space_combos)
    # Query about name of annotation k given different event space params
    def pretty_evidence(evidence):
        # 'Sij=low'-style strings for a {var: state_index} dict.
        return [key + '=' + str(semtype2_nice[var2_cpd[key].semtype][val])
                for key, val in evidence.items()]
    def print_factor(factor):
        # Print a factor's values labeled by nice state names.
        row_cards = factor.cardinality
        row_vars = factor.variables
        values = factor.values.reshape(np.prod(row_cards), 1).flatten()
        # col_cards = 1
        # col_vars = ['']
        basis_lists = list(zip(*list(ut.iprod(*[range(c) for c in row_cards]))))
        nice_basis_lists = []
        for varname, basis in zip(row_vars, basis_lists):
            cpd = var2_cpd[varname]
            _nice_basis = ut.take(semtype2_nice[cpd.semtype], basis)
            nice_basis = ['%s=%s' % (varname, val) for val in _nice_basis]
            nice_basis_lists.append(nice_basis)
        row_lbls = [', '.join(sorted(x)) for x in zip(*nice_basis_lists)]
        print(ut.repr3(dict(zip(row_lbls, values)), precision=3, align=True, key_order_metric='-val'))
    # name_belief = BeliefPropagation(name_model)
    name_belief = VariableElimination(name_model)
    import pgmpy
    import six  # NOQA
    def try_query(evidence):
        # Query all non-evidence variables and print the result factors.
        print('--------')
        query_vars = ut.setdiff_ordered(varnames, list(evidence.keys()))
        evidence_str = ', '.join(pretty_evidence(evidence))
        probs = name_belief.query(query_vars, evidence)
        factor_list = probs.values()
        joint_factor = pgmpy.factors.factor_product(*factor_list)
        print('P(' + ', '.join(query_vars) + ' | ' + evidence_str + ')')
        # print(six.text_type(joint_factor))
        factor = joint_factor  # NOQA
        # print_factor(factor)
        # import utool as ut
        print(ut.hz_str([(f._str(phi_or_p='phi')) for f in factor_list]))
    for evidence in evidence_dict:
        try_query(evidence)
    # Two hand-picked evidence configurations: all-same and all-diff.
    evidence = {'Aij': 1, 'Ajk': 1, 'Aki': 1, 'Ni': 0}
    try_query(evidence)
    evidence = {'Aij': 0, 'Ajk': 0, 'Aki': 0, 'Ni': 0}
    try_query(evidence)
    globals()['score_nice'] = score_nice
    globals()['name_nice'] = name_nice
    globals()['score_basis'] = score_basis
    globals()['nid_basis'] = nid_basis
    print('Independencies')
    print(name_model.get_independencies())
    print(name_model.local_independencies([Ni.variable]))
    # name_belief = BeliefPropagation(name_model)
    # # name_belief = VariableElimination(name_model)
    # for case in special_cases:
    #     test_data = case.drop('Lk', axis=1)
    #     test_data = test_data.reset_index(drop=True)
    #     print('----')
    #     for i in range(test_data.shape[0]):
    #         evidence = test_data.loc[i].to_dict()
    #         probs = name_belief.query(['Lk'], evidence)
    #         factor = probs['Lk']
    #         probs = factor.values
    #         evidence_ = evidence.copy()
    #         evidence_['Li'] = name_nice[evidence['Li']]
    #         evidence_['Lj'] = name_nice[evidence['Lj']]
    #         evidence_['Sij'] = score_nice[evidence['Sij']]
    #         evidence_['Sjk'] = score_nice[evidence['Sjk']]
    #         nice2_prob = ut.odict(zip(name_nice, probs.tolist()))
    #         ut.print_python_code('P(Lk | {evidence}) = {cpt}'.format(
    #             evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
    #             cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
    #         ))
    # for case in special_cases:
    #     test_data = case.drop('Lk', axis=1)
    #     test_data = test_data.drop('Lj', axis=1)
    #     test_data = test_data.reset_index(drop=True)
    #     print('----')
    #     for i in range(test_data.shape[0]):
    #         evidence = test_data.loc[i].to_dict()
    #         query_vars = ['Lk', 'Lj']
    #         probs = name_belief.query(query_vars, evidence)
    #         for queryvar in query_vars:
    #             factor = probs[queryvar]
    #             print(factor._str('phi'))
    #             probs = factor.values
    #             evidence_ = evidence.copy()
    #             evidence_['Li'] = name_nice[evidence['Li']]
    #             evidence_['Sij'] = score_nice[evidence['Sij']]
    #             evidence_['Sjk'] = score_nice[evidence['Sjk']]
    #             nice2_prob = ut.odict(zip([queryvar + '=' + x for x in name_nice], probs.tolist()))
    #             ut.print_python_code('P({queryvar} | {evidence}) = {cpt}'.format(
    #                 query_var=query_var,
    #                 evidence=(ut.repr2(evidence_, explicit=True, nobraces=True, strvals=True)),
    #                 cpt=ut.repr3(nice2_prob, precision=3, align=True, key_order_metric='-val')
    #             ))
    # _ draw model
    import plottool as pt
    import networkx as netx
    fig = pt.figure()  # NOQA
    fig.clf()
    ax = pt.gca()
    netx_nodes = [(node, {}) for node in name_model.nodes()]
    netx_edges = [(etup[0], etup[1], {}) for etup in name_model.edges()]
    netx_graph = netx.DiGraph()
    netx_graph.add_nodes_from(netx_nodes)
    netx_graph.add_edges_from(netx_edges)
    # pos = netx.graphviz_layout(netx_graph)
    pos = netx.pydot_layout(netx_graph, prog='dot')
    netx.draw(netx_graph, pos=pos, ax=ax, with_labels=True)
    pt.plt.savefig('foo.png')
    ut.startfile('foo.png')
def bayesnet_examples():
    """
    Textbook "student" Bayesian-network example built with pgmpy.

    Fits a BayesianModel on random binary data, predicts on a held-out
    25% split, then constructs the classic grade/difficulty/intelligence
    CPDs and attaches them to the model.
    """
    from pgmpy.factors import TabularCPD
    from pgmpy.models import BayesianModel
    import pandas as pd
    student_model = BayesianModel([('D', 'G'),
                                   ('I', 'G'),
                                   ('G', 'L'),
                                   ('I', 'S')])
    # we can generate some random data.
    raw_data = np.random.randint(low=0, high=2, size=(1000, 5))
    data = pd.DataFrame(raw_data, columns=['D', 'I', 'G', 'L', 'S'])
    # Compute the train/test boundary once (the original computed it twice
    # with the operands reversed).
    split = int(data.shape[0] * 0.75)
    data_train = data[:split]
    student_model.fit(data_train)
    student_model.get_cpds()
    # BUGFIX: the original called drop(..., inplace=True) on a slice of
    # ``data``, which raises SettingWithCopyWarning and may silently fail
    # to drop the column; operate on an explicit copy instead.
    data_test = data[split:].copy()
    data_test.drop('D', axis=1, inplace=True)
    student_model.predict(data_test)
    # Hand-specified CPDs from the standard student-network example.
    grade_cpd = TabularCPD(
        variable='G',
        variable_card=3,
        values=[[0.3, 0.05, 0.9, 0.5],
                [0.4, 0.25, 0.08, 0.3],
                [0.3, 0.7, 0.02, 0.2]],
        evidence=['I', 'D'],
        evidence_card=[2, 2])
    difficulty_cpd = TabularCPD(
        variable='D',
        variable_card=2,
        values=[[0.6, 0.4]])
    intel_cpd = TabularCPD(
        variable='I',
        variable_card=2,
        values=[[0.7, 0.3]])
    letter_cpd = TabularCPD(
        variable='L',
        variable_card=2,
        values=[[0.1, 0.4, 0.99],
                [0.9, 0.6, 0.01]],
        evidence=['G'],
        evidence_card=[3])
    sat_cpd = TabularCPD(
        variable='S',
        variable_card=2,
        values=[[0.95, 0.2],
                [0.05, 0.8]],
        evidence=['I'],
        evidence_card=[2])
    student_model.add_cpds(grade_cpd, difficulty_cpd,
                           intel_cpd, letter_cpd,
                           sat_cpd)
| [
"numpy.prod",
"six.moves.range",
"pgmpy.factors.TabularCPD",
"pgmpy.inference.VariableElimination",
"networkx.pydot_layout",
"six.moves.zip",
"plottool.gca",
"networkx.DiGraph",
"numpy.random.randint",
"numpy.linspace",
"pgmpy.models.BayesianModel",
"pandas.DataFrame",
"utool.util_inject.inj... | [((593, 622), 'utool.util_inject.inject2', 'util_inject.inject2', (['__name__'], {}), '(__name__)\n', (612, 622), False, 'from utool import util_inject\n'), ((8392, 8418), 'pgmpy.models.BayesianModel', 'BayesianModel', (['input_graph'], {}), '(input_graph)\n', (8405, 8418), False, 'from pgmpy.models import BayesianModel\n'), ((10674, 10705), 'pgmpy.inference.VariableElimination', 'VariableElimination', (['name_model'], {}), '(name_model)\n', (10693, 10705), False, 'from pgmpy.inference import VariableElimination\n'), ((14362, 14373), 'plottool.figure', 'pt.figure', ([], {}), '()\n', (14371, 14373), True, 'import plottool as pt\n'), ((14405, 14413), 'plottool.gca', 'pt.gca', ([], {}), '()\n', (14411, 14413), True, 'import plottool as pt\n'), ((14566, 14580), 'networkx.DiGraph', 'netx.DiGraph', ([], {}), '()\n', (14578, 14580), True, 'import networkx as netx\n'), ((14721, 14762), 'networkx.pydot_layout', 'netx.pydot_layout', (['netx_graph'], {'prog': '"""dot"""'}), "(netx_graph, prog='dot')\n", (14738, 14762), True, 'import networkx as netx\n'), ((14767, 14822), 'networkx.draw', 'netx.draw', (['netx_graph'], {'pos': 'pos', 'ax': 'ax', 'with_labels': '(True)'}), '(netx_graph, pos=pos, ax=ax, with_labels=True)\n', (14776, 14822), True, 'import networkx as netx\n'), ((14828, 14853), 'plottool.plt.savefig', 'pt.plt.savefig', (['"""foo.png"""'], {}), "('foo.png')\n", (14842, 14853), True, 'import plottool as pt\n'), ((15038, 15101), 'pgmpy.models.BayesianModel', 'BayesianModel', (["[('D', 'G'), ('I', 'G'), ('G', 'L'), ('I', 'S')]"], {}), "([('D', 'G'), ('I', 'G'), ('G', 'L'), ('I', 'S')])\n", (15051, 15101), False, 'from pgmpy.models import BayesianModel\n'), ((15262, 15310), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': '(1000, 5)'}), '(low=0, high=2, size=(1000, 5))\n', (15279, 15310), True, 'import numpy as np\n'), ((15322, 15379), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {'columns': "['D', 
'I', 'G', 'L', 'S']"}), "(raw_data, columns=['D', 'I', 'G', 'L', 'S'])\n", (15334, 15379), True, 'import pandas as pd\n'), ((15658, 15830), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': '"""G"""', 'variable_card': '(3)', 'values': '[[0.3, 0.05, 0.9, 0.5], [0.4, 0.25, 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]]', 'evidence': "['I', 'D']", 'evidence_card': '[2, 2]'}), "(variable='G', variable_card=3, values=[[0.3, 0.05, 0.9, 0.5], [\n 0.4, 0.25, 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]], evidence=['I', 'D'],\n evidence_card=[2, 2])\n", (15668, 15830), False, 'from pgmpy.factors import TabularCPD\n'), ((15916, 15978), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': '"""D"""', 'variable_card': '(2)', 'values': '[[0.6, 0.4]]'}), "(variable='D', variable_card=2, values=[[0.6, 0.4]])\n", (15926, 15978), False, 'from pgmpy.factors import TabularCPD\n'), ((16020, 16082), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': '"""I"""', 'variable_card': '(2)', 'values': '[[0.7, 0.3]]'}), "(variable='I', variable_card=2, values=[[0.7, 0.3]])\n", (16030, 16082), False, 'from pgmpy.factors import TabularCPD\n'), ((16125, 16251), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': '"""L"""', 'variable_card': '(2)', 'values': '[[0.1, 0.4, 0.99], [0.9, 0.6, 0.01]]', 'evidence': "['G']", 'evidence_card': '[3]'}), "(variable='L', variable_card=2, values=[[0.1, 0.4, 0.99], [0.9, \n 0.6, 0.01]], evidence=['G'], evidence_card=[3])\n", (16135, 16251), False, 'from pgmpy.factors import TabularCPD\n'), ((16318, 16433), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': '"""S"""', 'variable_card': '(2)', 'values': '[[0.95, 0.2], [0.05, 0.8]]', 'evidence': "['I']", 'evidence_card': '[2]'}), "(variable='S', variable_card=2, values=[[0.95, 0.2], [0.05, 0.8]],\n evidence=['I'], evidence_card=[2])\n", (16328, 16433), False, 'from pgmpy.factors import TabularCPD\n'), ((3019, 3035), 'six.moves.range', 'range', (['num_names'], {}), '(num_names)\n', (3024, 3035), 
False, 'from six.moves import zip, range, reduce\n'), ((3060, 3077), 'six.moves.range', 'range', (['num_scores'], {}), '(num_scores)\n', (3065, 3077), False, 'from six.moves import zip, range, reduce\n'), ((3751, 3850), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': "('N' + aid)", 'variable_card': 'num_names', 'values': '[[1.0 / num_names] * num_names]'}), "(variable='N' + aid, variable_card=num_names, values=[[1.0 /\n num_names] * num_names])\n", (3761, 3850), False, 'from pgmpy.factors import TabularCPD\n'), ((10172, 10198), 'six.moves.zip', 'zip', (['row_vars', 'basis_lists'], {}), '(row_vars, basis_lists)\n', (10175, 10198), False, 'from six.moves import zip, range, reduce\n'), ((11051, 11093), 'pgmpy.factors.factor_product', 'pgmpy.factors.factor_product', (['*factor_list'], {}), '(*factor_list)\n', (11079, 11093), False, 'import pgmpy\n'), ((4020, 4071), 'six.moves.zip', 'zip', (['[cpd.variable for cpd in name_cpds]', 'name_cpds'], {}), '([cpd.variable for cpd in name_cpds], name_cpds)\n', (4023, 4071), False, 'from six.moves import zip, range, reduce\n'), ((4487, 4659), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': "('A' + aid1 + aid2)", 'variable_card': 'num_same_diff', 'values': 'samediff_vals', 'evidence': "['N' + aid1, 'N' + aid2]", 'evidence_card': '[num_names, num_names]'}), "(variable='A' + aid1 + aid2, variable_card=num_same_diff, values=\n samediff_vals, evidence=['N' + aid1, 'N' + aid2], evidence_card=[\n num_names, num_names])\n", (4497, 4659), False, 'from pgmpy.factors import TabularCPD\n'), ((7780, 7948), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': "('S' + aid1 + aid2)", 'variable_card': 'num_scores', 'values': 'score_values', 'evidence': "['N' + aid1, 'N' + aid2]", 'evidence_card': '[num_names, num_names]'}), "(variable='S' + aid1 + aid2, variable_card=num_scores, values=\n score_values, evidence=['N' + aid1, 'N' + aid2], evidence_card=[\n num_names, num_names])\n", (7790, 7948), False, 'from 
pgmpy.factors import TabularCPD\n'), ((8480, 8529), 'six.moves.zip', 'zip', (['[cpd.variable for cpd in cpd_list]', 'cpd_list'], {}), '([cpd.variable for cpd in cpd_list], cpd_list)\n', (8483, 8529), False, 'from six.moves import zip, range, reduce\n'), ((9054, 9102), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'index': 'index', 'columns': 'columns'}), '(data, index=index, columns=columns)\n', (9066, 9102), True, 'import pandas as pd\n'), ((4936, 4995), 'six.moves.zip', 'zip', (['[cpd.variable for cpd in samediff_cpds]', 'samediff_cpds'], {}), '([cpd.variable for cpd in samediff_cpds], samediff_cpds)\n', (4939, 4995), False, 'from six.moves import zip, range, reduce\n'), ((6906, 7057), 'pgmpy.factors.TabularCPD', 'TabularCPD', ([], {'variable': "('S' + aid1 + aid2)", 'variable_card': 'num_scores', 'values': 'score_values', 'evidence': "['A' + aid1 + aid2]", 'evidence_card': '[num_same_diff]'}), "(variable='S' + aid1 + aid2, variable_card=num_scores, values=\n score_values, evidence=['A' + aid1 + aid2], evidence_card=[num_same_diff])\n", (6916, 7057), False, 'from pgmpy.factors import TabularCPD\n'), ((9476, 9500), 'six.moves.range', 'range', (['cpd.variable_card'], {}), '(cpd.variable_card)\n', (9481, 9500), False, 'from six.moves import zip, range, reduce\n'), ((10478, 10500), 'six.moves.zip', 'zip', (['*nice_basis_lists'], {}), '(*nice_basis_lists)\n', (10481, 10500), False, 'from six.moves import zip, range, reduce\n'), ((9948, 9966), 'numpy.prod', 'np.prod', (['row_cards'], {}), '(row_cards)\n', (9955, 9966), True, 'import numpy as np\n'), ((10530, 10551), 'six.moves.zip', 'zip', (['row_lbls', 'values'], {}), '(row_lbls, values)\n', (10533, 10551), False, 'from six.moves import zip, range, reduce\n'), ((7543, 7576), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', 'num_scores'], {}), '(0.1, 0.9, num_scores)\n', (7554, 7576), True, 'import numpy as np\n'), ((7614, 7647), 'numpy.linspace', 'np.linspace', (['(0.2)', '(0.8)', 'num_scores'], {}), '(0.2, 0.8, 
num_scores)\n', (7625, 7647), True, 'import numpy as np\n'), ((10079, 10087), 'six.moves.range', 'range', (['c'], {}), '(c)\n', (10084, 10087), False, 'from six.moves import zip, range, reduce\n')] |
from projects.fa.transform import Transform
from config import cfg
import numpy as np
import cv2
from xvision.utils.draw import draw_bbox, draw_points
from transform import *
from xvision.datasets.wflw import WFLW
# Interactive sanity-check script for the WFLW face-alignment pipeline.
# NOTE(review): dataset paths are hard-coded to a local machine.
label = '/Users/jimmy/Documents/data/WFLW/WFLW_annotations/list_98pt_rect_attr_train_test/list_98pt_rect_attr_train.txt'
image = '/Users/jimmy/Documents/data/WFLW/WFLW_images'
data = WFLW(label, image)
t = Transform(cfg.dsize, cfg.padding, cfg.data.meanshape, cfg.data.meanbbox)
data.transform = t
# Step through every transformed sample; each cv2.waitKey() blocks until
# a key is pressed, so the code below only runs after the dataset is
# exhausted interactively.
for item in data:
    image = item['image']
    shape = item['shape']
    draw_points(image, shape)
    cv2.imshow('v', image)
    cv2.waitKey()
# Render the mean shape and mean bbox on a blank 384x384 canvas.
# (The name ``image`` is reused here for the canvas.)
meanbbox = np.array(cfg.data.meanbbox)
meanshape = np.array(cfg.data.meanshape)
image = np.ones((384, 384, 3))
# Map normalized (0..1) mean coordinates into a 224-pixel box centered
# at (192, 192) -- presumably matching cfg.dsize/padding; TODO confirm.
meanshape = (meanshape - 0.5) * 224 + 192
meanbbox = (meanbbox - 0.5) * 224 + 192
draw_points(image, meanshape, color=(1, 0, 0), radius=3)
draw_bbox(image, meanbbox, (0, 0, 1), thickness=2)
# Diagonal guide lines through the canvas center.
cv2.line(image, (0, 0), (384, 384), color=(.5, .5, .5), thickness=1)
cv2.line(image, (384, 0), (0, 384), color=(.5, .5, .5), thickness=1)
cv2.imwrite('projects/fa/images/meanshape-meanbbox.png', (image*255).astype(np.uint8))
cv2.imshow("v", image)
cv2.waitKey() | [
"xvision.utils.draw.draw_points",
"numpy.ones",
"projects.fa.transform.Transform",
"cv2.line",
"xvision.utils.draw.draw_bbox",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"xvision.datasets.wflw.WFLW"
] | [((402, 420), 'xvision.datasets.wflw.WFLW', 'WFLW', (['label', 'image'], {}), '(label, image)\n', (406, 420), False, 'from xvision.datasets.wflw import WFLW\n'), ((426, 498), 'projects.fa.transform.Transform', 'Transform', (['cfg.dsize', 'cfg.padding', 'cfg.data.meanshape', 'cfg.data.meanbbox'], {}), '(cfg.dsize, cfg.padding, cfg.data.meanshape, cfg.data.meanbbox)\n', (435, 498), False, 'from projects.fa.transform import Transform\n'), ((677, 704), 'numpy.array', 'np.array', (['cfg.data.meanbbox'], {}), '(cfg.data.meanbbox)\n', (685, 704), True, 'import numpy as np\n'), ((717, 745), 'numpy.array', 'np.array', (['cfg.data.meanshape'], {}), '(cfg.data.meanshape)\n', (725, 745), True, 'import numpy as np\n'), ((756, 778), 'numpy.ones', 'np.ones', (['(384, 384, 3)'], {}), '((384, 384, 3))\n', (763, 778), True, 'import numpy as np\n'), ((863, 919), 'xvision.utils.draw.draw_points', 'draw_points', (['image', 'meanshape'], {'color': '(1, 0, 0)', 'radius': '(3)'}), '(image, meanshape, color=(1, 0, 0), radius=3)\n', (874, 919), False, 'from xvision.utils.draw import draw_bbox, draw_points\n'), ((920, 970), 'xvision.utils.draw.draw_bbox', 'draw_bbox', (['image', 'meanbbox', '(0, 0, 1)'], {'thickness': '(2)'}), '(image, meanbbox, (0, 0, 1), thickness=2)\n', (929, 970), False, 'from xvision.utils.draw import draw_bbox, draw_points\n'), ((972, 1043), 'cv2.line', 'cv2.line', (['image', '(0, 0)', '(384, 384)'], {'color': '(0.5, 0.5, 0.5)', 'thickness': '(1)'}), '(image, (0, 0), (384, 384), color=(0.5, 0.5, 0.5), thickness=1)\n', (980, 1043), False, 'import cv2\n'), ((1041, 1112), 'cv2.line', 'cv2.line', (['image', '(384, 0)', '(0, 384)'], {'color': '(0.5, 0.5, 0.5)', 'thickness': '(1)'}), '(image, (384, 0), (0, 384), color=(0.5, 0.5, 0.5), thickness=1)\n', (1049, 1112), False, 'import cv2\n'), ((1197, 1219), 'cv2.imshow', 'cv2.imshow', (['"""v"""', 'image'], {}), "('v', image)\n", (1207, 1219), False, 'import cv2\n'), ((1220, 1233), 'cv2.waitKey', 'cv2.waitKey', ([], {}), 
'()\n', (1231, 1233), False, 'import cv2\n'), ((594, 619), 'xvision.utils.draw.draw_points', 'draw_points', (['image', 'shape'], {}), '(image, shape)\n', (605, 619), False, 'from xvision.utils.draw import draw_bbox, draw_points\n'), ((624, 646), 'cv2.imshow', 'cv2.imshow', (['"""v"""', 'image'], {}), "('v', image)\n", (634, 646), False, 'import cv2\n'), ((651, 664), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (662, 664), False, 'import cv2\n')] |
from Node import Node
from Factor import Factor
from FactorGeneric import FactorGeneric
from TraitPrior import TraitPrior
import utils
import numpy as np
import settings
###############
# #
# [F] #
# / \ #
# (A)...(A) #
# #
###############
def pbpdf(probin, c=None):
    """Poisson-binomial PMF for independent Bernoulli trials ``probin``.

    Inverts the characteristic function on the (n+1)-point DFT grid:
    phi(k) = prod_i (1 + p_i * (c_k - 1)), then an FFT recovers the
    probability of each success count 0..n.

    Args:
        probin: sequence of n success probabilities p_i.
        c: optional precomputed complex roots exp(2*pi*1j*k/(n+1)),
           k = 0..n (as produced by ``pbpdf_c(n + 1)``). Defaults to the
           class-level cache ``FactorPriorWordHW.c`` for backward
           compatibility with ``FactorPriorWordHW.f2n``.

    Returns:
        ndarray of length n+1 with P[#successes = k].
    """
    if c is None:
        # Fall back to the roots cached on the class by f2n().
        c = FactorPriorWordHW.c
    # log of the characteristic function, summed over trials for stability
    p = np.outer(probin, c - 1)
    p_log = np.sum(np.log1p(p), axis=0)
    s = np.exp(p_log)
    # Invert via FFT; the result is real and non-negative up to round-off.
    s = np.abs(np.real(np.fft.fft(s)))
    pdf = s / (len(probin) + 1)
    return pdf
def pbpdf_c(l):
    """Return the l complex DFT roots exp(2*pi*1j*k/l) for k = 0..l-1."""
    n = l - 1
    k = np.arange(0, n + 1)
    return np.exp(2 * 1j * np.pi * k / (n + 1))
class FactorPriorWordHW(Factor, TraitPrior):
    """Prior factor tying a word of binary nodes to a Hamming-weight prior.

    ``dist`` assigns a probability to each possible Hamming weight
    0..wordsize of the attached nodes; ``f2n`` turns the other nodes'
    incoming messages into a Poisson-binomial weight distribution and
    projects it through ``dist`` to message each target node.
    """
    # Class-level cache of the DFT roots used by pbpdf(), filled lazily on
    # the first f2n() call. NOTE(review): shared by all instances, so this
    # assumes every factor has the same number of edges -- confirm.
    c = None
    def __init__(self,name,wordsize,dist):
        # One probability per possible Hamming weight (0..wordsize).
        assert(len(dist)==(wordsize+1))
        self.dist = dist  # retained on the instance; also read by f2n()
        self.wordsize = wordsize
        super().__init__(name)
    def f2n(self):
        """Compute and store the factor-to-node message on every edge."""
        l = len(self.edges)
        if(FactorPriorWordHW.c is None):
            FactorPriorWordHW.c = pbpdf_c(l)
        # presumably an array of per-edge [P(0), P(1)] messages -- confirm
        # against Factor.gatherIncoming.
        msgin = self.gatherIncoming()
        # now iterate over all the target nodes
        idxall = np.full(l, True)
        for (targetIdx,edge) in enumerate(self.edges):
            # Mask out the target edge so only the others contribute.
            curridx = idxall.copy()
            curridx[targetIdx] = False
            currmsg = msgin[curridx, 1]
            currpdf = pbpdf(currmsg)
            # Target=0 pairs weight k with dist[k]; target=1 with dist[k+1].
            p0 = np.dot(self.dist[:-1], currpdf)
            p1 = np.dot(self.dist[1:], currpdf)
            edge.m2n = np.array([p0, p1])
| [
"numpy.fft.fft",
"numpy.exp",
"numpy.array",
"numpy.dot",
"numpy.outer",
"numpy.full",
"numpy.log1p",
"numpy.arange"
] | [((339, 362), 'numpy.outer', 'np.outer', (['probin', '(c - 1)'], {}), '(probin, c - 1)\n', (347, 362), True, 'import numpy as np\n'), ((409, 422), 'numpy.exp', 'np.exp', (['p_log'], {}), '(p_log)\n', (415, 422), True, 'import numpy as np\n'), ((380, 391), 'numpy.log1p', 'np.log1p', (['p'], {}), '(p)\n', (388, 391), True, 'import numpy as np\n'), ((1080, 1096), 'numpy.full', 'np.full', (['l', '(True)'], {}), '(l, True)\n', (1087, 1096), True, 'import numpy as np\n'), ((446, 459), 'numpy.fft.fft', 'np.fft.fft', (['s'], {}), '(s)\n', (456, 459), True, 'import numpy as np\n'), ((1323, 1354), 'numpy.dot', 'np.dot', (['self.dist[:-1]', 'currpdf'], {}), '(self.dist[:-1], currpdf)\n', (1329, 1354), True, 'import numpy as np\n'), ((1372, 1402), 'numpy.dot', 'np.dot', (['self.dist[1:]', 'currpdf'], {}), '(self.dist[1:], currpdf)\n', (1378, 1402), True, 'import numpy as np\n'), ((1427, 1445), 'numpy.array', 'np.array', (['[p0, p1]'], {}), '([p0, p1])\n', (1435, 1445), True, 'import numpy as np\n'), ((564, 583), 'numpy.arange', 'np.arange', (['(0)', '(l + 1)'], {}), '(0, l + 1)\n', (573, 583), True, 'import numpy as np\n')] |
from __future__ import print_function
import argparse
import os
import random
import sys
sys.path.append(os.getcwd())
import pdb
import time
import numpy as np
import json
import progressbar
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, \
decode_txt, sample_batch_neg, l2_norm
import misc.dataLoader as dl
import misc.model as model
from misc.encoder_QIH import _netE
from misc.netG import _netG
import datetime
parser = argparse.ArgumentParser()

# --- Dataset / output paths ---
parser.add_argument('--input_img_h5', default='data/vdl_img_vgg.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_ques_h5', default='data/visdial_data.h5', help='path to dataset, now hdf5 file')
parser.add_argument('--input_json', default='data/visdial_params.json', help='path to dataset, now hdf5 file')
parser.add_argument('--outf', default='./save', help='folder to output images and model checkpoints')
parser.add_argument('--encoder', default='G_QIH_VGG', help='what encoder to use.')
parser.add_argument('--model_path', default='', help='folder to output images and model checkpoints')
parser.add_argument('--num_val', default=0, help='number of image split out as validation set.')

# --- Training schedule / sampling ---
parser.add_argument('--niter', type=int, default=50, help='number of epochs to train for')
parser.add_argument('--start_epoch', type=int, default=0, help='start of epochs to train for')
parser.add_argument('--negative_sample', type=int, default=20, help='folder to output images and model checkpoints')
parser.add_argument('--neg_batch_sample', type=int, default=30, help='folder to output images and model checkpoints')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=6)
parser.add_argument('--batchSize', type=int, default=128, help='input batch size')
parser.add_argument('--save_iter', type=int, default=1, help='number of epochs to train for')

# --- Optimizer / hardware ---
parser.add_argument('--adam', action='store_true', help='Whether to use adam (default is rmsprop)')
parser.add_argument('--lr', type=float, default=0.0004, help='learning rate for, default=0.00005')
parser.add_argument('--beta1', type=float, default=0.8, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--verbose' , action='store_true', help='show the sampled caption')

# --- Model architecture ---
parser.add_argument('--conv_feat_size', type=int, default=512, help='input batch size')
parser.add_argument('--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--ninp', type=int, default=300, help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=512, help='humber of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1, help='number of layers')
parser.add_argument('--dropout', type=int, default=0.5, help='number of layers')
parser.add_argument('--clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--margin', type=float, default=2, help='number of epochs to train for')
parser.add_argument('--log_interval', type=int, default=50, help='how many iterations show the log info')

opt = parser.parse_args()
print(opt)

# Seed Python and PyTorch RNGs so a run can be reproduced from the printed seed.
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)

# Let cuDNN auto-tune convolution algorithms (inputs have fixed sizes).
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# Resume from a checkpoint when --model_path is given; otherwise create a
# fresh, time-stamped output folder for this run.
if opt.model_path != '':
    print("=> loading checkpoint '{}'".format(opt.model_path))
    checkpoint = torch.load(opt.model_path)
    # Restore the options the checkpoint was trained with, but keep the
    # path we loaded from and override the epoch budget / batch size.
    model_path = opt.model_path
    opt = checkpoint['opt']
    opt.start_epoch = checkpoint['epoch']
    opt.model_path = model_path
    opt.batchSize = 128
    opt.niter = 100
    # NOTE(review): save_path is only assigned in the else-branch below, so
    # resuming from a checkpoint hits a NameError at the first torch.save /
    # json.dump call -- confirm and set save_path here as well.
else:
    t = datetime.datetime.now()
    cur_time = '%s-%s-%s' %(t.day, t.month, t.hour)
    save_path = os.path.join(opt.outf, opt.encoder + '.' + cur_time)
    try:
        os.makedirs(save_path)
    except OSError:
        # The folder may already exist; that is fine.
        pass
####################################################################################
# Data Loader
####################################################################################
# Train and validation splits; both read the same h5/json files, differing
# only in data_split.
dataset = dl.train(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
                input_json=opt.input_json, negative_sample = opt.negative_sample,
                num_val = opt.num_val, data_split = 'train')

dataset_val = dl.validate(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,
                input_json=opt.input_json, negative_sample = opt.negative_sample,
                num_val = opt.num_val, data_split = 'test')

dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
                                         shuffle=True, num_workers=int(opt.workers))

dataloader_val = torch.utils.data.DataLoader(dataset_val, batch_size=5,
                                         shuffle=False, num_workers=int(opt.workers))

####################################################################################
# Build the Model
####################################################################################
vocab_size = dataset.vocab_size
ques_length = dataset.ques_length
# +1 -- presumably to leave room for an end-of-answer token; TODO confirm
# against misc.dataLoader.
ans_length = dataset.ans_length + 1
his_length = dataset.ques_length + dataset.ans_length
itow = dataset.itow
img_feat_size = opt.conv_feat_size

# Encoder (question+image+history), shared word embedding, and answer generator.
netE = _netE(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)

netW = model._netW(vocab_size, opt.ninp, opt.dropout)
netG = _netG(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)
critG = model.LMCriterion()
sampler = model.gumbel_sampler()

if opt.cuda:
    netW.cuda()
    netE.cuda()
    netG.cuda()
    critG.cuda()
    sampler.cuda()

if opt.model_path != '':
    # Restore network weights when resuming from a checkpoint.
    netW.load_state_dict(checkpoint['netW'])
    netE.load_state_dict(checkpoint['netE'])
    netG.load_state_dict(checkpoint['netG'])
# training function
def train(epoch):
    """Run one training epoch of the answer generator.

    For every batch and each of the 10 dialog rounds, encodes the
    question / history / image with netE, feeds the encoding into the
    generator netG, and minimizes the language-model loss of the
    ground-truth answer. Relies on the module-level networks, dataloader,
    optimizer, and pre-allocated input Variables.

    Parameters:
        epoch (int): current epoch index, used to decay the learning rate.

    Returns:
        tuple: (average per-round loss over the epoch, learning rate used).
    """
    netW.train()
    netE.train()
    netG.train()

    lr = adjust_learning_rate(optimizer, epoch, opt.lr)

    data_iter = iter(dataloader)

    ques_hidden = netE.init_hidden(opt.batchSize)
    hist_hidden = netE.init_hidden(opt.batchSize)

    average_loss = 0
    count = 0
    i = 0
    total_loss = 0
    while i < len(dataloader):
        # FIX: use the builtin next() rather than the Python-2-only
        # .next() iterator method so this also runs under Python 3.
        data = next(data_iter)
        image, history, question, answer, answerT, answerLen, answerIdx, \
                                questionL, negAnswer, negAnswerLen, negAnswerIdx = data

        batch_size = question.size(0)
        image = image.view(-1, img_feat_size)
        img_input.data.resize_(image.size()).copy_(image)

        for rnd in range(10):
            # Round-rnd question/answer; history covers rounds 0..rnd.
            ques = question[:,rnd,:].t()
            his = history[:,:rnd+1,:].clone().view(-1, his_length).t()

            ans, tans = answer[:,rnd,:].t(), answerT[:,rnd,:].t()

            his_input.data.resize_(his.size()).copy_(his)
            ques_input.data.resize_(ques.size()).copy_(ques)
            ans_input.data.resize_(ans.size()).copy_(ans)
            ans_target.data.resize_(tans.size()).copy_(tans)

            ques_emb = netW(ques_input, format = 'index')
            his_emb = netW(his_input, format = 'index')

            ques_hidden = repackage_hidden(ques_hidden, batch_size)
            hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))

            encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
                                                ques_hidden, hist_hidden, rnd+1)

            _, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)

            ans_emb = netW(ans_input)
            logprob, ques_hidden = netG(ans_emb, ques_hidden)
            loss = critG(logprob, ans_target.view(-1, 1))

            # Normalize by the number of non-padding target tokens.
            loss = loss / torch.sum(ans_target.data.gt(0))
            # .data[0] is the legacy (pre-0.4 PyTorch) scalar accessor.
            average_loss += loss.data[0]
            total_loss += loss.data[0]
            # do backward.
            netW.zero_grad()
            netE.zero_grad()
            netG.zero_grad()
            loss.backward()
            optimizer.step()
            count += 1
        i += 1

        if i % opt.log_interval == 0:
            average_loss /= count
            print("step {} / {} (epoch {}), g_loss {:.3f}, lr = {:.6f}"\
                .format(i, len(dataloader), epoch, average_loss, lr))
            average_loss = 0
            count = 0

    return total_loss / (10 * i), lr
def val():
    """Evaluate the generator on the validation split.

    For each image and dialog round, scores all 100 candidate answers by
    summing the generator's token log-probabilities, then records the rank
    of the ground-truth answer among the candidates.

    Returns:
        tuple: (list of ground-truth ranks over all rounds/images,
        average_loss). NOTE(review): average_loss is initialized but never
        accumulated in this function, so it is always 0 -- confirm intent.
    """
    netE.eval()
    netW.eval()
    netG.eval()

    data_iter_val = iter(dataloader_val)
    ques_hidden = netE.init_hidden(opt.batchSize)
    hist_hidden = netE.init_hidden(opt.batchSize)

    i = 0
    average_loss = 0
    rank_all_tmp = []

    while i < len(dataloader_val):
        # FIX: use the builtin next() rather than the Python-2-only
        # .next() iterator method so this also runs under Python 3.
        data = next(data_iter_val)
        image, history, question, answer, answerT, questionL, opt_answer, \
                    opt_answerT, answer_ids, answerLen, opt_answerLen, img_id = data

        batch_size = question.size(0)
        image = image.view(-1, img_feat_size)
        img_input.data.resize_(image.size()).copy_(image)

        for rnd in range(10):
            # get the corresponding round QA and history.
            ques, tans = question[:,rnd,:].t(), opt_answerT[:,rnd,:].clone().view(-1, ans_length).t()
            his = history[:,:rnd+1,:].clone().view(-1, his_length).t()
            ans = opt_answer[:,rnd,:,:].clone().view(-1, ans_length).t()
            gt_id = answer_ids[:,rnd]

            his_input.data.resize_(his.size()).copy_(his)
            ques_input.data.resize_(ques.size()).copy_(ques)
            ans_input.data.resize_(ans.size()).copy_(ans)
            ans_target.data.resize_(tans.size()).copy_(tans)
            gt_index.data.resize_(gt_id.size()).copy_(gt_id)

            ques_emb = netW(ques_input, format = 'index')
            his_emb = netW(his_input, format = 'index')

            ques_hidden = repackage_hidden(ques_hidden, batch_size)
            hist_hidden = repackage_hidden(hist_hidden, his_input.size(1))

            encoder_feat, ques_hidden = netE(ques_emb, his_emb, img_input, \
                                                ques_hidden, hist_hidden, rnd+1)

            _, ques_hidden = netG(encoder_feat.view(1,-1,opt.ninp), ques_hidden)

            # Replicate the hidden state 100x so every candidate answer is
            # scored against the same dialog encoding in a single batch.
            hidden_replicated = []
            for hid in ques_hidden:
                hidden_replicated.append(hid.view(opt.nlayers, batch_size, 1, \
                    opt.nhid).expand(opt.nlayers, batch_size, 100, opt.nhid).clone().view(opt.nlayers, -1, opt.nhid))
            hidden_replicated = tuple(hidden_replicated)

            ans_emb = netW(ans_input, format = 'index')

            output, _ = netG(ans_emb, hidden_replicated)
            logprob = - output
            logprob_select = torch.gather(logprob, 1, ans_target.view(-1,1))

            mask = ans_target.data.eq(0)  # generate the mask
            if isinstance(logprob, Variable):
                mask = Variable(mask, volatile=logprob.volatile)
            # Zero out padding positions so they do not affect the score.
            logprob_select.masked_fill_(mask.view_as(logprob_select), 0)

            prob = logprob_select.view(ans_length, -1, 100).sum(0).view(-1,100)

            # Flatten per-image candidate indices into the batch-wide layout.
            for b in range(batch_size):
                gt_index.data[b] = gt_index.data[b] + b*100

            gt_score = prob.view(-1).index_select(0, gt_index)
            sort_score, sort_idx = torch.sort(prob, 1)

            # Rank = number of candidates scoring strictly better, plus one.
            count = sort_score.lt(gt_score.view(-1,1).expand_as(sort_score))
            rank = count.sum(1) + 1
            rank_all_tmp += list(rank.view(-1).data.cpu().numpy())

        i += 1

    return rank_all_tmp, average_loss
####################################################################################
# Main
####################################################################################
# Pre-allocated input tensors, resized and filled in-place each batch
# (legacy pre-0.4 PyTorch pattern).
img_input = torch.FloatTensor(opt.batchSize, 49, 512)
ques_input = torch.LongTensor(ques_length, opt.batchSize)
his_input = torch.LongTensor(his_length, opt.batchSize)

ans_input = torch.LongTensor(ans_length, opt.batchSize)
ans_target = torch.LongTensor(ans_length, opt.batchSize)
ans_sample = torch.LongTensor(1, opt.batchSize)
noise_input = torch.FloatTensor(opt.batchSize)
gt_index = torch.LongTensor(opt.batchSize)

if opt.cuda:
    img_input, his_input = img_input.cuda(), his_input.cuda()
    ques_input, ans_input = ques_input.cuda(), ans_input.cuda()
    ans_target, ans_sample = ans_target.cuda(), ans_sample.cuda()
    noise_input = noise_input.cuda()
    gt_index = gt_index.cuda()

ques_input = Variable(ques_input)
ans_input = Variable(ans_input)
ans_target = Variable(ans_target)
ans_sample = Variable(ans_sample)
noise_input = Variable(noise_input)
img_input = Variable(img_input)
his_input = Variable(his_input)
gt_index = Variable(gt_index)

# One Adam optimizer over all three networks' parameters.
optimizer = optim.Adam([{'params': netW.parameters()},
                        {'params': netG.parameters()},
                        {'params': netE.parameters()}], lr=opt.lr, betas=(opt.beta1, 0.999))

history = []

for epoch in range(opt.start_epoch+1, opt.niter):

    t = time.time()
    train_loss, lr = train(epoch)
    print ('Epoch: %d learningRate %4f train loss %4f Time: %3f' % (epoch, lr, train_loss, time.time()-t))
    print('Evaluating ... ')
    rank_all, val_loss = val()
    # Retrieval metrics over the 100 candidates: recall@{1,5,10},
    # mean rank, and mean reciprocal rank.
    R1 = np.sum(np.array(rank_all)==1) / float(len(rank_all))
    R5 = np.sum(np.array(rank_all)<=5) / float(len(rank_all))
    R10 = np.sum(np.array(rank_all)<=10) / float(len(rank_all))
    ave = np.sum(np.array(rank_all)) / float(len(rank_all))
    mrr = np.sum(1/(np.array(rank_all, dtype='float'))) / float(len(rank_all))
    print ('%d/%d: mrr: %f R1: %f R5 %f R10 %f Mean %f' %(epoch, len(dataloader_val), mrr, R1, R5, R10, ave))
    train_his = {'loss': train_loss}
    val_his = {'R1': R1, 'R5':R5, 'R10': R10, 'Mean':ave, 'mrr':mrr}

    history.append({'epoch':epoch, 'train': train_his, 'val': val_his})

    # saving the model.
    if epoch % opt.save_iter == 0:
        torch.save({'epoch': epoch,
                    'opt': opt,
                    'netW': netW.state_dict(),
                    'netG': netG.state_dict(),
                    'netE': netE.state_dict()},
                    '%s/epoch_%d.pth' % (save_path, epoch))

        json.dump(history, open('%s/log.json' %(save_path), 'w'))
| [
"misc.utils.adjust_learning_rate",
"torch.LongTensor",
"misc.dataLoader.validate",
"numpy.array",
"torch.cuda.is_available",
"misc.dataLoader.train",
"misc.netG._netG",
"argparse.ArgumentParser",
"misc.utils.repackage_hidden",
"misc.model._netW",
"torch.autograd.Variable",
"random.randint",
... | [((773, 798), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (796, 798), False, 'import argparse\n'), ((3622, 3646), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (3636, 3646), False, 'import random\n'), ((3698, 3725), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (3709, 3725), False, 'import random\n'), ((3726, 3759), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (3743, 3759), False, 'import torch\n'), ((4655, 4841), 'misc.dataLoader.train', 'dl.train', ([], {'input_img_h5': 'opt.input_img_h5', 'input_ques_h5': 'opt.input_ques_h5', 'input_json': 'opt.input_json', 'negative_sample': 'opt.negative_sample', 'num_val': 'opt.num_val', 'data_split': '"""train"""'}), "(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,\n input_json=opt.input_json, negative_sample=opt.negative_sample, num_val\n =opt.num_val, data_split='train')\n", (4663, 4841), True, 'import misc.dataLoader as dl\n'), ((4886, 5074), 'misc.dataLoader.validate', 'dl.validate', ([], {'input_img_h5': 'opt.input_img_h5', 'input_ques_h5': 'opt.input_ques_h5', 'input_json': 'opt.input_json', 'negative_sample': 'opt.negative_sample', 'num_val': 'opt.num_val', 'data_split': '"""test"""'}), "(input_img_h5=opt.input_img_h5, input_ques_h5=opt.input_ques_h5,\n input_json=opt.input_json, negative_sample=opt.negative_sample, num_val\n =opt.num_val, data_split='test')\n", (4897, 5074), True, 'import misc.dataLoader as dl\n'), ((5834, 5911), 'misc.encoder_QIH._netE', '_netE', (['opt.model', 'opt.ninp', 'opt.nhid', 'opt.nlayers', 'opt.dropout', 'img_feat_size'], {}), '(opt.model, opt.ninp, opt.nhid, opt.nlayers, opt.dropout, img_feat_size)\n', (5839, 5911), False, 'from misc.encoder_QIH import _netE\n'), ((5920, 5966), 'misc.model._netW', 'model._netW', (['vocab_size', 'opt.ninp', 'opt.dropout'], {}), '(vocab_size, opt.ninp, opt.dropout)\n', (5931, 5966), True, 'import 
misc.model as model\n'), ((5974, 6048), 'misc.netG._netG', '_netG', (['opt.model', 'vocab_size', 'opt.ninp', 'opt.nhid', 'opt.nlayers', 'opt.dropout'], {}), '(opt.model, vocab_size, opt.ninp, opt.nhid, opt.nlayers, opt.dropout)\n', (5979, 6048), False, 'from misc.netG import _netG\n'), ((6057, 6076), 'misc.model.LMCriterion', 'model.LMCriterion', ([], {}), '()\n', (6074, 6076), True, 'import misc.model as model\n'), ((6087, 6109), 'misc.model.gumbel_sampler', 'model.gumbel_sampler', ([], {}), '()\n', (6107, 6109), True, 'import misc.model as model\n'), ((12154, 12195), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize', '(49)', '(512)'], {}), '(opt.batchSize, 49, 512)\n', (12171, 12195), False, 'import torch\n'), ((12209, 12253), 'torch.LongTensor', 'torch.LongTensor', (['ques_length', 'opt.batchSize'], {}), '(ques_length, opt.batchSize)\n', (12225, 12253), False, 'import torch\n'), ((12266, 12309), 'torch.LongTensor', 'torch.LongTensor', (['his_length', 'opt.batchSize'], {}), '(his_length, opt.batchSize)\n', (12282, 12309), False, 'import torch\n'), ((12323, 12366), 'torch.LongTensor', 'torch.LongTensor', (['ans_length', 'opt.batchSize'], {}), '(ans_length, opt.batchSize)\n', (12339, 12366), False, 'import torch\n'), ((12380, 12423), 'torch.LongTensor', 'torch.LongTensor', (['ans_length', 'opt.batchSize'], {}), '(ans_length, opt.batchSize)\n', (12396, 12423), False, 'import torch\n'), ((12438, 12472), 'torch.LongTensor', 'torch.LongTensor', (['(1)', 'opt.batchSize'], {}), '(1, opt.batchSize)\n', (12454, 12472), False, 'import torch\n'), ((12487, 12519), 'torch.FloatTensor', 'torch.FloatTensor', (['opt.batchSize'], {}), '(opt.batchSize)\n', (12504, 12519), False, 'import torch\n'), ((12531, 12562), 'torch.LongTensor', 'torch.LongTensor', (['opt.batchSize'], {}), '(opt.batchSize)\n', (12547, 12562), False, 'import torch\n'), ((12851, 12871), 'torch.autograd.Variable', 'Variable', (['ques_input'], {}), '(ques_input)\n', (12859, 12871), False, 'from 
torch.autograd import Variable\n'), ((12884, 12903), 'torch.autograd.Variable', 'Variable', (['ans_input'], {}), '(ans_input)\n', (12892, 12903), False, 'from torch.autograd import Variable\n'), ((12917, 12937), 'torch.autograd.Variable', 'Variable', (['ans_target'], {}), '(ans_target)\n', (12925, 12937), False, 'from torch.autograd import Variable\n'), ((12951, 12971), 'torch.autograd.Variable', 'Variable', (['ans_sample'], {}), '(ans_sample)\n', (12959, 12971), False, 'from torch.autograd import Variable\n'), ((12986, 13007), 'torch.autograd.Variable', 'Variable', (['noise_input'], {}), '(noise_input)\n', (12994, 13007), False, 'from torch.autograd import Variable\n'), ((13020, 13039), 'torch.autograd.Variable', 'Variable', (['img_input'], {}), '(img_input)\n', (13028, 13039), False, 'from torch.autograd import Variable\n'), ((13052, 13071), 'torch.autograd.Variable', 'Variable', (['his_input'], {}), '(his_input)\n', (13060, 13071), False, 'from torch.autograd import Variable\n'), ((13083, 13101), 'torch.autograd.Variable', 'Variable', (['gt_index'], {}), '(gt_index)\n', (13091, 13101), False, 'from torch.autograd import Variable\n'), ((105, 116), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (114, 116), False, 'import os\n'), ((3788, 3813), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3811, 3813), False, 'import torch\n'), ((4023, 4049), 'torch.load', 'torch.load', (['opt.model_path'], {}), '(opt.model_path)\n', (4033, 4049), False, 'import torch\n'), ((4242, 4265), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4263, 4265), False, 'import datetime\n'), ((4334, 4386), 'os.path.join', 'os.path.join', (['opt.outf', "(opt.encoder + '.' + cur_time)"], {}), "(opt.outf, opt.encoder + '.' 
+ cur_time)\n", (4346, 4386), False, 'import os\n'), ((6469, 6515), 'misc.utils.adjust_learning_rate', 'adjust_learning_rate', (['optimizer', 'epoch', 'opt.lr'], {}), '(optimizer, epoch, opt.lr)\n', (6489, 6515), False, 'from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt, sample_batch_neg, l2_norm\n'), ((13379, 13390), 'time.time', 'time.time', ([], {}), '()\n', (13388, 13390), False, 'import time\n'), ((4404, 4426), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (4415, 4426), False, 'import os\n'), ((7650, 7691), 'misc.utils.repackage_hidden', 'repackage_hidden', (['ques_hidden', 'batch_size'], {}), '(ques_hidden, batch_size)\n', (7666, 7691), False, 'from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt, sample_batch_neg, l2_norm\n'), ((10270, 10311), 'misc.utils.repackage_hidden', 'repackage_hidden', (['ques_hidden', 'batch_size'], {}), '(ques_hidden, batch_size)\n', (10286, 10311), False, 'from misc.utils import repackage_hidden, clip_gradient, adjust_learning_rate, decode_txt, sample_batch_neg, l2_norm\n'), ((11706, 11725), 'torch.sort', 'torch.sort', (['prob', '(1)'], {}), '(prob, 1)\n', (11716, 11725), False, 'import torch\n'), ((13800, 13818), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (13808, 13818), True, 'import numpy as np\n'), ((11310, 11351), 'torch.autograd.Variable', 'Variable', (['mask'], {'volatile': 'logprob.volatile'}), '(mask, volatile=logprob.volatile)\n', (11318, 11351), False, 'from torch.autograd import Variable\n'), ((13610, 13628), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (13618, 13628), True, 'import numpy as np\n'), ((13673, 13691), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (13681, 13691), True, 'import numpy as np\n'), ((13736, 13754), 'numpy.array', 'np.array', (['rank_all'], {}), '(rank_all)\n', (13744, 13754), True, 'import numpy as np\n'), ((13863, 13896), 'numpy.array', 
'np.array', (['rank_all'], {'dtype': '"""float"""'}), "(rank_all, dtype='float')\n", (13871, 13896), True, 'import numpy as np\n'), ((13517, 13528), 'time.time', 'time.time', ([], {}), '()\n', (13526, 13528), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Package: mesxr.calibration
Module: utilities
Author: <NAME>, <NAME>
Affiliation: Department of Physics, University of Wisconsin-Madison
Last Updated: November 2018
Description:
This module contains a number of auxilary functions for the main timscan.py module to
make use of. These are either functions which might be useful elsewhere (such are coordinate
conversions) or functions which are more likely to change in future implementations (like
loading in the trimbit scan data from a specific file naming scheme).
Usage:
TBD
"""
from os import path
import numpy as np
import tifffile
# Detector geometry constants (in pixels). The full image is
# M_SIZE_X x M_SIZE_Y, tiled by M_NUM_CHIPS_X x M_NUM_CHIPS_Y readout chips
# of M_CHIP_SIZE_X x M_CHIP_SIZE_Y pixels each (with one-pixel gaps between
# chips -- see get_chip_coords); M_NUM_TRIM is the number of trimbit
# settings scanned per pixel.
M_SIZE_Y = 195
M_SIZE_X = 487
M_NUM_TRIM = 64
M_NUM_CHIPS_Y = 2
M_NUM_CHIPS_X = 8
M_NUM_CHIPS = M_NUM_CHIPS_X * M_NUM_CHIPS_Y
M_CHIP_SIZE_Y = 97
M_CHIP_SIZE_X = 60
def load_calibration_data(calib_path):
    """
    Description
        Reads in the calibration .tiff files and returns an image indexed by pixel location
        and trimbit setting. These files are photon counts for each pixel with the detector
        exposed to a given emission line, with this measurement repeated for each trimbit
        setting.

        The calibrate_detector routine stores the output of this code under the key "trimscan"
        in the calibration dictionary. This terminology is used throughout the code.

    Parameters:
        - calib_path = (string) Path to the folder containing the calibration images.

    Returns:
        - images = (int[x_index, y_index, trimbit]) Array containing one calibration
                   image for each of the 64 possible trimbit settings.

    Credit:
        This function was originally written by <NAME> and/or <NAME>, then
        modified by <NAME>.
    """
    # Read in the params_b01_m01.txt file - left in for future expansion
    with open(path.join(calib_path, 'config.txt'), 'r') as f:
        params = f.readlines()

    # Container for the calibration data
    images = np.empty([M_SIZE_X, M_SIZE_Y, M_NUM_TRIM])

    # Load in the data one trimbit at a time; note the trimbit axis is
    # stored reversed (index 63 - i).
    for i in range(0, M_NUM_TRIM):
        try:
            filename = path.join(calib_path, 'scan_image_{:03d}.tif'.format(i))
            images[:, :, 63 - i] = tifffile.imread(filename).transpose()
        except (IOError, OSError, ValueError):
            # FIX: catch only file-read errors instead of a bare except,
            # which also swallowed KeyboardInterrupt/SystemExit.
            # Also try 5-digit labeling, the camserver default
            filename = path.join(calib_path, 'scan_image_{:05d}.tif'.format(i))
            images[:, :, 63 - i] = tifffile.imread(filename).transpose()
    return images
def get_chip_coords(image_x, image_y):
    """
    Description
        This function takes coordinates in the broader "image" (detector) reference frame and
        determines what chip this point falls on as well as its local x,y coordinates in the
        chip reference frame.

    Parameters:
        - image_x = (int) X-coordinate of a point on the overall detector image
        - image_y = (int) Y-coordinate of a point on the overall detector image

    Returns:
        - chip_num = (int) The chip number on which the point (image_x, image_y) lies,
                     or -1 (with chip_x = chip_y = -1) if the point falls in a gap
        - chip_x   = (int) The x-coordinate of the point in the frame of chip_num
        - chip_y   = (int) The y-coordinate of the point in the frame of chip_num

    Credit:
        This function was originally written by <NAME>. Original note:
            This is copied from pix_add in utils.c for the p2det detector. Given an x and y
            location on the detector, this will return the appropriate chip number and the x
            and y location on that given chip.
    """
    # FIX: use floor division (//) throughout. The original "/" was written
    # for Python 2 integer division; on Python 3 it produces floats and the
    # gap-row test (image_y == M_SIZE_Y/2) could never match an int.
    if image_y < M_SIZE_Y // 2:
        # Top bank of chips; x runs right-to-left within each chip.
        chip_num = image_x // (M_CHIP_SIZE_X + 1)
        chip_x = (M_CHIP_SIZE_X+1)*(chip_num+1) - image_x - 2
        chip_y = image_y
        if chip_x < 0:
            chip_num = -1
    elif image_y == M_SIZE_Y // 2:
        # The middle row is the gap between the two banks of chips.
        chip_num = -1
    else:
        # Bottom bank of chips; y is flipped relative to the image frame.
        chip_num = M_NUM_CHIPS // 2 + image_x // (M_CHIP_SIZE_X + 1)
        chip_x = image_x % (M_CHIP_SIZE_X+1)
        chip_y = M_SIZE_Y - image_y - 1
        if chip_x >= M_CHIP_SIZE_X:
            chip_num = -1

    # Check if this is a valid chip.
    if chip_num < 0:
        chip_y = -1
        chip_x = -1

    return chip_num, chip_x, chip_y
# Characteristic emission-line energies (keyed by element / line label).
energies = {'Zr': 2.04, 'Mo': 2.29, 'Ag': 2.98, 'In': 3.29, 'Ti': 4.51, 'V': 4.95,
            'Cr': 5.41, 'Fe': 6.40, 'Cu': 8.05, 'Ge': 9.89, 'Br': 11.92, 'Y': 14.95,
            'MoK': 17.48, 'AgK': 22.16, 'Sn': 25.27}

def get_line_energy(elements):
    """
    Return the appropriate energies for the supplied elements.
    """
    looked_up = [energies[name] for name in elements]
    return np.array(looked_up)
"os.path.join",
"numpy.array",
"numpy.empty",
"tifffile.imread"
] | [((1970, 2012), 'numpy.empty', 'np.empty', (['[M_SIZE_X, M_SIZE_Y, M_NUM_TRIM]'], {}), '([M_SIZE_X, M_SIZE_Y, M_NUM_TRIM])\n', (1978, 2012), True, 'import numpy as np\n'), ((4560, 4607), 'numpy.array', 'np.array', (['[energies[elem] for elem in elements]'], {}), '([energies[elem] for elem in elements])\n', (4568, 4607), True, 'import numpy as np\n'), ((1836, 1871), 'os.path.join', 'path.join', (['calib_path', '"""config.txt"""'], {}), "(calib_path, 'config.txt')\n", (1845, 1871), False, 'from os import path\n'), ((2222, 2247), 'tifffile.imread', 'tifffile.imread', (['filename'], {}), '(filename)\n', (2237, 2247), False, 'import tifffile\n'), ((2454, 2479), 'tifffile.imread', 'tifffile.imread', (['filename'], {}), '(filename)\n', (2469, 2479), False, 'import tifffile\n')] |
#!/usr/bin/env python
import numpy as np
from tqdm import tqdm
from astropy.constants import G as Ggrav
from .low_level_utils import fast_dist
G = Ggrav.to('kpc Msun**-1 km**2 s**-2').value
def all_profiles(bins, positions, velocities, masses, two_dimensional=False, zcut=None,
                 ages=None, pbar_msg='Making profiles', nexpr=False):
    """
    Build mass, density, circular-velocity, and angular-momentum profiles.

    assumes all positions and velocities are rotated in the same way, such
    that the angular momentum axis aligns with the z axis

    if two_dimensional == False, then compute:
        M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r), mag J(r), mag J(<r), J_z(r), J_z(<r)

    if two_dimensional == True, then compute:
        M(<R), M(R), rho = M(R)/dA, Vcirc = mean(vx**2 + vy**2), mag J(R), mag J(<R), J_z(R), J_z(<R)

    :bins : array-like : sorted (from small to large) bin edges to use
    :positions : array-like : particle positions, rotated such that z aligns with angular momentum axis
    :velocities : array-like : particle velocities, rotated in the same way as the positions
    :masses : array-like : particle masses, in the same order as positions and velocities
    :two_dimensional : bool : whether or not to do 2D profiles
    :zcut : float : half-height of the slab kept around the disk plane; required when two_dimensional is True
    :ages : array-like : optional particle ages; adds a mass-weighted 'age.of.<r|R>' profile to the output
    :pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
    :nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation
    """
    if nexpr:
        from numexpr import evaluate
        print("Using numexpr for the masking and summing masses")

    # work from outside in, throwing away particles as I no longer need them
    assert positions.shape[0] == velocities.shape[0] == masses.shape[0]

    # Per-bin accumulators; filled from the outermost bin inwards, then
    # reversed at the end.
    m_of_r = np.empty(bins.size)
    J_of_r = np.empty(bins.size)
    Jz_of_r = np.empty(bins.size)
    Jz_inside_r = np.empty(bins.size)
    JinsideR = np.empty(bins.size)

    specJinsideR = np.zeros(bins.size)
    specJ_of_r = np.zeros(bins.size)
    specJz_of_r = np.zeros(bins.size)
    specJz_insideR = np.zeros(bins.size)
    if ages is not None:
        age_of_r = np.zeros(bins.size)

    density = np.empty_like(m_of_r)
    if two_dimensional:
        vcirc = np.zeros(bins.size)

    if two_dimensional:
        x, y, z = positions.T
        # distances are in the plane of the galaxy
        distances = np.sqrt(x**2 + y**2)
    else:
        distances = fast_dist(positions)  # center assumed to be at (0,0,0)

    # throw away any particles beyond my last bin edge
    msk = distances <= bins.max()
    if two_dimensional:
        msk = msk & (np.abs(z) <= zcut)

    positions = positions[msk]
    velocities = velocities[msk]
    masses = masses[msk]
    distances = distances[msk]
    if ages is not None:
        ages = ages[msk]

    if two_dimensional:
        x = x[msk]
        y = y[msk]

    # compute (angular) momenta for the particles:
    # velocities should already have the halo at
    pvec = (velocities.T*masses).T

    # J = r cross p, and pos is assumed to have the halo at 0,0,0
    Jvec = np.cross(positions, pvec)
    del pvec

    Jz = Jvec[:, 2]

    if two_dimensional:
        # calculate circular velocities:
        # velocities in the plane of the disk
        vx, vy = velocities[:, 0], velocities[:, 1]

        V = np.vstack((vx, vy)).T  # velocity vector in the plane of the disk
        R = np.vstack((x, y)).T  # distance vector in the plane of the disk

        # use the definition of the dot product to find the angle between R and V, theta
        # a dot b == mag(a) * mag(b) * cos(theta)
        # => cos(theta) == a dot b / (mag(a) * mag(b))
        R_dot_V = np.sum(R*V, axis=1)
        mag_V = np.linalg.norm(V, axis=1)
        # mag_R equals distances by construction (both are sqrt(x**2+y**2)).
        # FIX: the original guarded an assert with "if careful:", but
        # "careful" was never defined, so every 2D call raised a NameError.
        mag_R = np.linalg.norm(R, axis=1)

        theta = np.arccos(R_dot_V / (mag_R * mag_V))

        # now that I know the angle, the circular velocity of each particle is going to be
        # the magnitude of each velocity in the plane of the disk times the sin of angle between R and V
        # -- if the angle is 0, then all the velocity is radial; if it's pi/2, then all the velocity is tangential (circular)
        circular_velocities = mag_V*np.sin(theta)

        # handle any nan (i.e. either R or V == 0) by replacing with a 0
        print("Replacing {} NaNs with 0".format(
            np.count_nonzero(np.isnan(circular_velocities))))
        circular_velocities[np.isnan(circular_velocities)] = 0

        # clean up to save memory
        del R, V, theta

    # make sure this is true because otherwise return will be nonsense since I use cumsum at the end
    assert (np.sort(bins) == bins).all()
    rev_bins = bins[::-1]

    if two_dimensional:
        pbar_msg += '; Mtot(R < {:.0f} kpc, Z < {:.1f} kpc)'.format(bins.max(), zcut)
    else:
        pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
    # FIX: dropped the stray trailing ")" that left the message unbalanced.
    pbar_msg += ' = {:.2g} Msun, {:,} particles'.format(
        np.sum(masses), masses.size)

    for ii in tqdm(range(len(rev_bins)), pbar_msg):
        rhigh = rev_bins[ii]
        if ii == len(rev_bins)-1:
            rlow = 0
        else:
            rlow = rev_bins[ii+1]
        assert rlow < rhigh

        if two_dimensional:
            shell_vol = 4.*np.pi*(rhigh**2 - rlow**2)
        else:
            shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)

        if nexpr:
            # Every remaining particle is within rhigh (outer ones are
            # dropped at the end of each iteration), so only the inner
            # edge needs testing.
            minsider = evaluate("sum(masses)")
            inbin = evaluate("(distances > rlow)")
            # sum up the masses where inbin, 0 otherwise
            thism = evaluate("sum(where(inbin,masses,0))")
            Jz_of_r[ii] = evaluate("sum(where(inbin,Jz,0))")
            Jz_inside_r[ii] = evaluate("sum(Jz)")
        else:
            inbin = (distances > rlow)
            minsider = np.sum(masses)
            thism = np.sum(masses[inbin])
            # just the z angular momentum for the particles in the bin, allowed to cancel
            Jz_of_r[ii] = np.sum(Jz[inbin])
            # Jz of all the particles inside R. should be smoother.
            Jz_inside_r[ii] = np.sum(Jz)

        m_of_r[ii] = thism
        density[ii] = thism/shell_vol

        # norm of the vector sum (sum(Jx), sum(Jy), sum(Jz)) of the angular momentum in the bin -- no need to mass weight because J is mass weighted
        J_of_r[ii] = np.linalg.norm(np.sum(Jvec[inbin], axis=0))
        # Do the same for all the particles inside the max of this bin; different because these can cancel differently
        JinsideR[ii] = np.linalg.norm(np.sum(Jvec, axis=0))

        # normalize all those to the appropriate specific value if m > 0.
        if thism > 0:
            specJ_of_r[ii] = J_of_r[ii]/thism
            specJz_of_r[ii] = Jz_of_r[ii]/thism
            if two_dimensional:
                vcirc[ii] = np.average(
                    circular_velocities[inbin], weights=masses[inbin])
            if ages is not None:
                age_of_r[ii] = np.average(ages[inbin], weights=masses[inbin])
        if minsider > 0:
            specJinsideR[ii] = JinsideR[ii]/minsider
            specJz_insideR[ii] = Jz_inside_r[ii]/minsider

        # Drop this bin's particles before moving inwards.
        distances = distances[~inbin]
        masses = masses[~inbin]
        positions = positions[~inbin]
        velocities = velocities[~inbin]
        Jvec = Jvec[~inbin]
        Jz = Jz[~inbin]
        if two_dimensional:
            circular_velocities = circular_velocities[~inbin]
        if ages is not None:
            ages = ages[~inbin]

    # swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
    density = density[::-1]
    m_of_r = m_of_r[::-1]
    J_of_r = J_of_r[::-1]
    Jz_of_r = Jz_of_r[::-1]
    JinsideR = JinsideR[::-1]
    Jz_inside_r = Jz_inside_r[::-1]
    specJ_of_r = specJ_of_r[::-1]
    specJz_of_r = specJz_of_r[::-1]
    specJinsideR = specJinsideR[::-1]
    specJz_insideR = specJz_insideR[::-1]
    if ages is not None:
        age_of_r = age_of_r[::-1]

    mltr = np.cumsum(m_of_r)
    Jltr = np.cumsum(J_of_r)
    Jzltr = np.cumsum(Jz_of_r)
    specJltr = np.cumsum(specJ_of_r)
    specJzltr = np.cumsum(specJz_of_r)
    # don't cumsum the "inside R" lines -- doesn't make much sense

    if two_dimensional == False:
        # calculate keplerian circular velocity
        vcirc = np.sqrt(G*mltr/bins)  # remember that bins didn't get reversed
    else:
        vcirc = vcirc[::-1]

    # remember this gets saved directly, so be good about naming!
    end = 'R' if two_dimensional else 'r'
    toreturn = {
        'density': density,
        'M.of.'+end: m_of_r,
        'J.of.'+end: J_of_r,
        'Jz.of.'+end: Jz_of_r,
        'J.inside'+end: JinsideR,
        'Jz.inside'+end: Jz_inside_r,
        'spec.J.of.'+end: specJ_of_r,
        'spec.Jz.of.'+end: specJz_of_r,
        'spec.Jinside'+end: specJinsideR,
        'spec.Jz.insideR'+end: specJz_insideR,
        'M.lt.'+end: mltr,
        'J.lt.'+end: Jltr,
        'Jz.lt.'+end: Jzltr,
        'spec.J.lt.'+end: specJltr,
        'spec.Jz.lt.'+end: specJzltr,
        'vcirc': vcirc,
    }
    if ages is not None:
        toreturn['age.of.'+end] = age_of_r
    return toreturn
def particle_mass_profiles(part, species='all', bins=None, center_position=None, **kwargs):
    '''
    Gather positions/masses from a particle dictionary and call mass_profiles.

    bins can be:
    * None -- defaults to np.logspace(-2, 0.5, 150)
    * a single integer -- np.logspace(-2, 0.5, bins)
    * a 3-sequence -- interpreted as np.logspace(bins[0], bins[1], bins[2])
    * raw bin edges (any other length) -- passed through unchanged
    Remaining keyword arguments are forwarded to mass_profiles.
    '''
    import utilities as ut
    species = ut.particle.parse_species(part, species)
    center_position = ut.particle.parse_property(
        part, 'center_position', center_position)
    # concatenate positions and masses of every requested species
    total = np.sum([part[spec]['mass'].size for spec in species])
    positions = np.empty((total, 3))
    masses = np.empty(total)
    offset = 0
    for spec in species:
        count = part[spec]['mass'].size
        positions[offset:offset + count] = part[spec]['position']
        masses[offset:offset + count] = part[spec]['mass']
        offset += count
    # shift so that the center is at [0, 0, 0]:
    positions -= center_position
    # normalize the bins argument (see docstring)
    if bins is None:
        bins = np.logspace(-2, 0.5, 150)
    elif isinstance(bins, int):
        bins = np.logspace(-2, 0.5, bins)
    elif len(bins) == 3:
        bins = np.logspace(bins[0], bins[1], bins[2])
    assert not np.isscalar(bins)
    return mass_profiles(bins, positions, masses, **kwargs)
def mass_profiles(bins, positions, masses, pbar_msg='Making mass profiles', nexpr=False):
    """
    Compute spherical mass-profile quantities for a particle distribution.

    computes:
    M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r)

    Works from the outermost bin inward, discarding particles once they have
    been counted so each particle is touched only once.

    :bins : array-like : sorted (from small to large) bin edges to use
    :positions : array-like : particle positions, with the center at 0,0,0
    :masses : array-like : particle masses, in the same order as positions
    :pbar_msg: str : what to print for the progress bar (total mass and number of particles is appended)
    :nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation

    :returns: dict with keys 'density', 'M.of.r', 'M.lt.r', 'vcirc', 'bins'
    """
    if nexpr:
        from numexpr import evaluate
        print("Using numexpr for the masking and summing masses")
    assert positions.shape[0] == masses.shape[0]
    m_of_r = np.empty(bins.size)
    density = np.empty_like(m_of_r)
    distances = fast_dist(positions)  # center assumed to be at (0,0,0)
    # throw away any particles beyond my last bin edge
    msk = distances <= bins.max()
    positions = positions[msk]
    masses = masses[msk]
    distances = distances[msk]
    # bins must be sorted, otherwise the final cumsum would be nonsense
    assert (np.sort(bins) == bins).all()
    rev_bins = bins[::-1]
    pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
    pbar_msg += ' = {:.2g} Msun, {:,} particles)'.format(
        np.sum(masses), masses.size)
    for ii in tqdm(range(len(rev_bins)), pbar_msg):
        rhigh = rev_bins[ii]
        if ii == len(rev_bins)-1:
            rlow = 0
        else:
            rlow = rev_bins[ii+1]
        assert rlow <= rhigh
        shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
        if nexpr:
            # numexpr resolves `distances`, `rlow`, `masses` by name from the
            # local frame, so these names must match the locals above.
            inbin = evaluate("(distances > rlow)")
            # sum up the masses where inbin, 0 otherwise
            thism = evaluate("sum(where(inbin,masses,0))")
        else:
            # particles remaining are all within rhigh (trimmed each pass),
            # so `distances > rlow` alone selects the current shell
            inbin = (distances > rlow)
            thism = np.sum(masses[inbin])
        m_of_r[ii] = thism
        density[ii] = thism/shell_vol
        # drop the particles already binned; everything left lies inside rlow
        distances = distances[~inbin]
        masses = masses[~inbin]
        positions = positions[~inbin]
        # NOTE: removed stale `pbar.update(ii)` / `pbar.finish()` calls left
        # over from a progressbar-based implementation -- `pbar` was never
        # defined in this function (tqdm already reports progress, and the
        # sibling mass_profiles_nopair has no such calls).
    # swap everything back around so that I go from the inside out so that I
    # can cumsum. remember bins is already sorted; I created rev_bins instead.
    density = density[::-1]
    m_of_r = m_of_r[::-1]
    mltr = np.cumsum(m_of_r)
    # calculate keplerian circular velocity
    vcirc = np.sqrt(G*mltr/bins)  # remember that bins didn't get reversed
    # remember this gets saved directly, so be good about naming!
    end = 'r'
    toreturn = {
        'density': density,
        'M.of.'+end: m_of_r,
        'M.lt.'+end: mltr,
        'vcirc': vcirc,
        'bins': bins,
    }
    return toreturn
def mass_profiles_nopair(bins, positions, masses, pair_distance, pbar_msg='Making mass profiles', nexpr=False):
    """
    computes:
    M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r)

    assumes that particles closer to the second host (which is pair_distance from
    the main host) are removed already.  removes the volume in that region from
    the density calculations (spherical-cap correction below).

    :bins : array-like : sorted (from small to large) bin edges to use
    :positions : array-like : particle positions, with the center at 0,0,0
    :masses : array-like : particle masses, in the same order as positions and velocities
    :pair_distance : float : distance to the companion host; volume beyond the
        midpoint plane (pair_distance / 2) is excluded from the shell volumes
    :pbar_msg: str : what to print for the pbar (total mass and number of particles is appended)
    :nexpr : bool : whether or not to try to use numexpr to try to speed up the calculation
    """
    if nexpr:
        from numexpr import evaluate
        print("Using numexpr for the masking and summing masses")
    # particles beyond this distance belong to the companion and are assumed removed
    pair_midpoint_distance = pair_distance / 2.0
    # work from outside in, throwing away particles as I no longer need them
    assert positions.shape[0] == masses.shape[0]
    m_of_r = np.empty(bins.size)
    density = np.empty_like(m_of_r)
    distances = fast_dist(positions)  # center assumed to be at (0,0,0)
    # throw away any particles beyond my last bin edge
    msk = distances <= bins.max()
    positions = positions[msk]
    masses = masses[msk]
    distances = distances[msk]
    # make sure this is true because otherwise return will be nonsense since I use cumsum at the end
    assert (np.sort(bins) == bins).all()
    rev_bins = bins[::-1]
    pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
    pbar_msg += ' = {:.2g} Msun, {:,} particles)'.format(
        np.sum(masses), masses.size)
    for ii in tqdm(range(len(rev_bins)), pbar_msg):
        rhigh = rev_bins[ii]
        if ii == len(rev_bins)-1:
            rlow = 0
        else:
            rlow = rev_bins[ii+1]
        # NOTE(review): strict inequality here (the sibling mass_profiles uses
        # <=), so duplicate bin edges would trip this assert -- confirm intent.
        assert rlow < rhigh
        if rhigh <= pair_midpoint_distance:
            # whole shell is on our side of the midpoint plane: plain spherical shell
            shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
        else:
            # more complicated: subtract the volume beyond the midpoint plane,
            # where the particles have been trimmed.
            # from wikipedia's article on spherical caps:
            # if the radius of the sphere is r and the height of the cap is h,
            # then the volume of the spherical cap is:
            # V = pi/3 * h^2 * (3r - h)
            def cap_vol(r, h): return (np.pi/3.) * (h**2) * (3*r - h)
            if rlow <= pair_midpoint_distance:
                # then rhigh is over the border, but rlow is under it:
                # the full inner sphere counts
                vol_low = 4./3. * np.pi * rlow**3
            else:
                # both radii cross the plane: clip the inner sphere too
                height_of_low_cap = rlow - pair_midpoint_distance
                vol_of_low_cap = cap_vol(rlow, height_of_low_cap)
                low_vol_total = 4./3. * np.pi * rlow**3
                vol_low = low_vol_total - vol_of_low_cap
            # outer sphere is always clipped in this branch (rhigh > midpoint)
            height_of_high_cap = rhigh - pair_midpoint_distance
            vol_of_high_cap = cap_vol(rhigh, height_of_high_cap)
            vol_high_total = (4./3.) * np.pi * rhigh**3
            vol_high = vol_high_total - vol_of_high_cap
            shell_vol = vol_high - vol_low
        if nexpr:
            # numexpr resolves `masses`, `distances`, `rlow` by name from the
            # local frame, so these variable names must not change.
            minsider = evaluate("sum(masses)")
            inbin = evaluate("(distances > rlow)")
            # sum up the masses where inbin, 0 otherwise
            thism = evaluate("sum(where(inbin,masses,0))")
            # particles that are within rhigh but not in the bin. equivalent to (within_rhigh) & (logical_not( (distances>rlow) & (within_rhigh) )
            # equivalent to False if not within_rhigh, so throws away outer particles
            # equivalent to True & logical_not(True & True) = True & not(True) = True & False = False if distances > rlow and distances < rhigh
            # equivalent to True & not(False & True) = True & not(False) = True if distances <= rlow
            # keep = evaluate("~inbin") #but since I trim the particles so within_rhigh is trivially true (see above), this just reduces to not inbin, so no reason to calculate/store that
        else:
            # within_rhigh = distances <= rhigh
            # &(within_rhigh) #works for both 2D and 3D
            inbin = (distances > rlow)
            minsider = np.sum(masses)
            thism = np.sum(masses[inbin])
            # keep = within_rhigh & (~inbin) #save logic as above
        m_of_r[ii] = thism
        density[ii] = thism/shell_vol
        # drop the binned particles; what remains is inside rlow
        distances = distances[~inbin]
        masses = masses[~inbin]
        positions = positions[~inbin]
    # swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
    density = density[::-1]
    m_of_r = m_of_r[::-1]
    mltr = np.cumsum(m_of_r)
    # calculate keplerian circular velocity
    vcirc = np.sqrt(G*mltr/bins)  # remember that bins didn't get reversed
    # remember this gets saved directly, so be good about naming!
    end = 'r'
    toreturn = {
        'density': density,
        'M.of.'+end: m_of_r,
        'M.lt.'+end: mltr,
        'vcirc': vcirc,
        'bins': bins,
    }
    return toreturn
| [
"numpy.sqrt",
"numpy.arccos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.cross",
"numpy.isscalar",
"numpy.sort",
"numpy.empty",
"utilities.particle.parse_species",
"numpy.vstack",
"numpy.logspace",
"numpy.abs",
"astropy.constants.G.to",
"numpy.average",
"utilities.particle.parse_property",... | [((150, 186), 'astropy.constants.G.to', 'Ggrav.to', (['"""kpc Msun**-1 km**2 s**-2"""'], {}), "('kpc Msun**-1 km**2 s**-2')\n", (158, 186), True, 'from astropy.constants import G as Ggrav\n'), ((1697, 1716), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (1705, 1716), True, 'import numpy as np\n'), ((1730, 1749), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (1738, 1749), True, 'import numpy as np\n'), ((1764, 1783), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (1772, 1783), True, 'import numpy as np\n'), ((1802, 1821), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (1810, 1821), True, 'import numpy as np\n'), ((1837, 1856), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (1845, 1856), True, 'import numpy as np\n'), ((1877, 1896), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (1885, 1896), True, 'import numpy as np\n'), ((1914, 1933), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (1922, 1933), True, 'import numpy as np\n'), ((1952, 1971), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (1960, 1971), True, 'import numpy as np\n'), ((1993, 2012), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (2001, 2012), True, 'import numpy as np\n'), ((2093, 2114), 'numpy.empty_like', 'np.empty_like', (['m_of_r'], {}), '(m_of_r)\n', (2106, 2114), True, 'import numpy as np\n'), ((3008, 3033), 'numpy.cross', 'np.cross', (['positions', 'pvec'], {}), '(positions, pvec)\n', (3016, 3033), True, 'import numpy as np\n'), ((9253, 9270), 'numpy.cumsum', 'np.cumsum', (['m_of_r'], {}), '(m_of_r)\n', (9262, 9270), True, 'import numpy as np\n'), ((9282, 9299), 'numpy.cumsum', 'np.cumsum', (['J_of_r'], {}), '(J_of_r)\n', (9291, 9299), True, 'import numpy as np\n'), ((9312, 9330), 'numpy.cumsum', 'np.cumsum', (['Jz_of_r'], {}), '(Jz_of_r)\n', (9321, 9330), True, 'import numpy as np\n'), 
((9346, 9367), 'numpy.cumsum', 'np.cumsum', (['specJ_of_r'], {}), '(specJ_of_r)\n', (9355, 9367), True, 'import numpy as np\n'), ((9384, 9406), 'numpy.cumsum', 'np.cumsum', (['specJz_of_r'], {}), '(specJz_of_r)\n', (9393, 9406), True, 'import numpy as np\n'), ((10846, 10886), 'utilities.particle.parse_species', 'ut.particle.parse_species', (['part', 'species'], {}), '(part, species)\n', (10871, 10886), True, 'import utilities as ut\n'), ((10909, 10977), 'utilities.particle.parse_property', 'ut.particle.parse_property', (['part', '"""center_position"""', 'center_position'], {}), "(part, 'center_position', center_position)\n", (10935, 10977), True, 'import utilities as ut\n'), ((11000, 11053), 'numpy.sum', 'np.sum', (["[part[spec]['mass'].size for spec in species]"], {}), "([part[spec]['mass'].size for spec in species])\n", (11006, 11053), True, 'import numpy as np\n'), ((11071, 11091), 'numpy.empty', 'np.empty', (['(npart, 3)'], {}), '((npart, 3))\n', (11079, 11091), True, 'import numpy as np\n'), ((11105, 11120), 'numpy.empty', 'np.empty', (['npart'], {}), '(npart)\n', (11113, 11120), True, 'import numpy as np\n'), ((12620, 12639), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (12628, 12639), True, 'import numpy as np\n'), ((12654, 12675), 'numpy.empty_like', 'np.empty_like', (['m_of_r'], {}), '(m_of_r)\n', (12667, 12675), True, 'import numpy as np\n'), ((15431, 15448), 'numpy.cumsum', 'np.cumsum', (['m_of_r'], {}), '(m_of_r)\n', (15440, 15448), True, 'import numpy as np\n'), ((15506, 15530), 'numpy.sqrt', 'np.sqrt', (['(G * mltr / bins)'], {}), '(G * mltr / bins)\n', (15513, 15530), True, 'import numpy as np\n'), ((16954, 16973), 'numpy.empty', 'np.empty', (['bins.size'], {}), '(bins.size)\n', (16962, 16973), True, 'import numpy as np\n'), ((16988, 17009), 'numpy.empty_like', 'np.empty_like', (['m_of_r'], {}), '(m_of_r)\n', (17001, 17009), True, 'import numpy as np\n'), ((20833, 20850), 'numpy.cumsum', 'np.cumsum', (['m_of_r'], {}), 
'(m_of_r)\n', (20842, 20850), True, 'import numpy as np\n'), ((20908, 20932), 'numpy.sqrt', 'np.sqrt', (['(G * mltr / bins)'], {}), '(G * mltr / bins)\n', (20915, 20932), True, 'import numpy as np\n'), ((2058, 2077), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (2066, 2077), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.zeros', 'np.zeros', (['bins.size'], {}), '(bins.size)\n', (2163, 2174), True, 'import numpy as np\n'), ((2301, 2325), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (2308, 2325), True, 'import numpy as np\n'), ((3666, 3687), 'numpy.sum', 'np.sum', (['(R * V)'], {'axis': '(1)'}), '(R * V, axis=1)\n', (3672, 3687), True, 'import numpy as np\n'), ((3702, 3727), 'numpy.linalg.norm', 'np.linalg.norm', (['V'], {'axis': '(1)'}), '(V, axis=1)\n', (3716, 3727), True, 'import numpy as np\n'), ((3802, 3827), 'numpy.linalg.norm', 'np.linalg.norm', (['R'], {'axis': '(1)'}), '(R, axis=1)\n', (3816, 3827), True, 'import numpy as np\n'), ((3940, 3976), 'numpy.arccos', 'np.arccos', (['(R_dot_V / (mag_R * mag_V))'], {}), '(R_dot_V / (mag_R * mag_V))\n', (3949, 3976), True, 'import numpy as np\n'), ((5077, 5091), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (5083, 5091), True, 'import numpy as np\n'), ((9573, 9597), 'numpy.sqrt', 'np.sqrt', (['(G * mltr / bins)'], {}), '(G * mltr / bins)\n', (9580, 9597), True, 'import numpy as np\n'), ((11479, 11504), 'numpy.logspace', 'np.logspace', (['(-2)', '(0.5)', '(150)'], {}), '(-2, 0.5, 150)\n', (11490, 11504), True, 'import numpy as np\n'), ((11674, 11691), 'numpy.isscalar', 'np.isscalar', (['bins'], {}), '(bins)\n', (11685, 11691), True, 'import numpy as np\n'), ((13221, 13235), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (13227, 13235), True, 'import numpy as np\n'), ((17555, 17569), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (17561, 17569), True, 'import numpy as np\n'), ((3243, 3262), 'numpy.vstack', 'np.vstack', (['(vx, vy)'], {}), 
'((vx, vy))\n', (3252, 3262), True, 'import numpy as np\n'), ((3321, 3338), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (3330, 3338), True, 'import numpy as np\n'), ((4336, 4349), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4342, 4349), True, 'import numpy as np\n'), ((4563, 4592), 'numpy.isnan', 'np.isnan', (['circular_velocities'], {}), '(circular_velocities)\n', (4571, 4592), True, 'import numpy as np\n'), ((5700, 5723), 'numexpr.evaluate', 'evaluate', (['"""sum(masses)"""'], {}), "('sum(masses)')\n", (5708, 5723), False, 'from numexpr import evaluate\n'), ((5744, 5774), 'numexpr.evaluate', 'evaluate', (['"""(distances > rlow)"""'], {}), "('(distances > rlow)')\n", (5752, 5774), False, 'from numexpr import evaluate\n'), ((5852, 5890), 'numexpr.evaluate', 'evaluate', (['"""sum(where(inbin,masses,0))"""'], {}), "('sum(where(inbin,masses,0))')\n", (5860, 5890), False, 'from numexpr import evaluate\n'), ((5917, 5951), 'numexpr.evaluate', 'evaluate', (['"""sum(where(inbin,Jz,0))"""'], {}), "('sum(where(inbin,Jz,0))')\n", (5925, 5951), False, 'from numexpr import evaluate\n'), ((5982, 6001), 'numexpr.evaluate', 'evaluate', (['"""sum(Jz)"""'], {}), "('sum(Jz)')\n", (5990, 6001), False, 'from numexpr import evaluate\n'), ((6860, 6874), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (6866, 6874), True, 'import numpy as np\n'), ((6895, 6916), 'numpy.sum', 'np.sum', (['masses[inbin]'], {}), '(masses[inbin])\n', (6901, 6916), True, 'import numpy as np\n'), ((7100, 7117), 'numpy.sum', 'np.sum', (['Jz[inbin]'], {}), '(Jz[inbin])\n', (7106, 7117), True, 'import numpy as np\n'), ((7217, 7227), 'numpy.sum', 'np.sum', (['Jz'], {}), '(Jz)\n', (7223, 7227), True, 'import numpy as np\n'), ((7480, 7507), 'numpy.sum', 'np.sum', (['Jvec[inbin]'], {'axis': '(0)'}), '(Jvec[inbin], axis=0)\n', (7486, 7507), True, 'import numpy as np\n'), ((7732, 7752), 'numpy.sum', 'np.sum', (['Jvec'], {'axis': '(0)'}), '(Jvec, axis=0)\n', (7738, 7752), True, 'import 
numpy as np\n'), ((11552, 11578), 'numpy.logspace', 'np.logspace', (['(-2)', '(0.5)', 'bins'], {}), '(-2, 0.5, bins)\n', (11563, 11578), True, 'import numpy as np\n'), ((13744, 13767), 'numexpr.evaluate', 'evaluate', (['"""sum(masses)"""'], {}), "('sum(masses)')\n", (13752, 13767), False, 'from numexpr import evaluate\n'), ((13788, 13818), 'numexpr.evaluate', 'evaluate', (['"""(distances > rlow)"""'], {}), "('(distances > rlow)')\n", (13796, 13818), False, 'from numexpr import evaluate\n'), ((13896, 13934), 'numexpr.evaluate', 'evaluate', (['"""sum(where(inbin,masses,0))"""'], {}), "('sum(where(inbin,masses,0))')\n", (13904, 13934), False, 'from numexpr import evaluate\n'), ((14793, 14807), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (14799, 14807), True, 'import numpy as np\n'), ((14828, 14849), 'numpy.sum', 'np.sum', (['masses[inbin]'], {}), '(masses[inbin])\n', (14834, 14849), True, 'import numpy as np\n'), ((19251, 19274), 'numexpr.evaluate', 'evaluate', (['"""sum(masses)"""'], {}), "('sum(masses)')\n", (19259, 19274), False, 'from numexpr import evaluate\n'), ((19295, 19325), 'numexpr.evaluate', 'evaluate', (['"""(distances > rlow)"""'], {}), "('(distances > rlow)')\n", (19303, 19325), False, 'from numexpr import evaluate\n'), ((19403, 19441), 'numexpr.evaluate', 'evaluate', (['"""sum(where(inbin,masses,0))"""'], {}), "('sum(where(inbin,masses,0))')\n", (19411, 19441), False, 'from numexpr import evaluate\n'), ((20300, 20314), 'numpy.sum', 'np.sum', (['masses'], {}), '(masses)\n', (20306, 20314), True, 'import numpy as np\n'), ((20335, 20356), 'numpy.sum', 'np.sum', (['masses[inbin]'], {}), '(masses[inbin])\n', (20341, 20356), True, 'import numpy as np\n'), ((2543, 2552), 'numpy.abs', 'np.abs', (['z'], {}), '(z)\n', (2549, 2552), True, 'import numpy as np\n'), ((4771, 4784), 'numpy.sort', 'np.sort', (['bins'], {}), '(bins)\n', (4778, 4784), True, 'import numpy as np\n'), ((8005, 8066), 'numpy.average', 'np.average', (['circular_velocities[inbin]'], 
{'weights': 'masses[inbin]'}), '(circular_velocities[inbin], weights=masses[inbin])\n', (8015, 8066), True, 'import numpy as np\n'), ((8152, 8198), 'numpy.average', 'np.average', (['ages[inbin]'], {'weights': 'masses[inbin]'}), '(ages[inbin], weights=masses[inbin])\n', (8162, 8198), True, 'import numpy as np\n'), ((11619, 11657), 'numpy.logspace', 'np.logspace', (['bins[0]', 'bins[1]', 'bins[2]'], {}), '(bins[0], bins[1], bins[2])\n', (11630, 11657), True, 'import numpy as np\n'), ((13039, 13052), 'numpy.sort', 'np.sort', (['bins'], {}), '(bins)\n', (13046, 13052), True, 'import numpy as np\n'), ((17373, 17386), 'numpy.sort', 'np.sort', (['bins'], {}), '(bins)\n', (17380, 17386), True, 'import numpy as np\n'), ((4502, 4531), 'numpy.isnan', 'np.isnan', (['circular_velocities'], {}), '(circular_velocities)\n', (4510, 4531), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import matplotlib.tri as mtri
import numpy as np
import seaborn as sns
from msax.msax import paa
from mpl_toolkits.mplot3d import Axes3D
def ts_with_hist(x, fig=None, bins=50):
    """
    Plot the time series x next to a histogram of its values.

    :param x: Input array
    :param fig: Optional, matplotlib figure to draw on (a new one is created if None)
    :param bins: Number of histogram bins
    :return: (figure, series axes, histogram axes)
    """
    if fig is None:
        fig = plt.figure(figsize=(18, 6))
    layout = (1, 3)
    # series takes the left two thirds, histogram the right third
    ax_series = plt.subplot2grid(layout, (0, 0), colspan=2, rowspan=1, fig=fig)
    ax_hist = plt.subplot2grid(layout, (0, 2), fig=fig)
    ax_series.plot(x)
    ax_hist.hist(x, bins=bins)
    return fig, ax_series, ax_hist
def plot_paa(x, w, fig=None):
    """
    Plot the time series x together with its PAA approximation.

    :param x: Input array
    :param w: window size parameter for the PAA transformation
    :param fig: Optional, matplotlib figure to draw on (a new one is created if None)
    :return: (figure, axes)
    """
    if fig is None:
        fig = plt.figure(figsize=(18, 6))
    approximation = paa(x, w)
    axes = fig.subplots(nrows=1, ncols=1)
    axes.plot(x, c='blue', label='Original')
    # stretch each PAA segment back to w samples so the curves align
    axes.plot(np.repeat(approximation, w), c='orange', label='PAA')
    axes.legend()
    return fig, axes
def plot_2d_error_surface(err_surface, fig=None):
    """
    Render the input ErrorSurface as a 2D heatmap.

    :param err_surface: Input error surface. Must be an instance of ErrorSurface (msax.error.ErrorSurface)
    :type err_surface: msax.error.ErrorSurface
    :param fig: Optional, matplotlib figure to draw on (a new one is created if None)
    :return: (figure, axes)
    """
    if fig is None:
        fig = plt.figure(figsize=(18, 12))
    axes = fig.add_subplot(111)
    sns.heatmap(err_surface.values,
                xticklabels=err_surface.alphabets,
                yticklabels=err_surface.windows,
                ax=axes)
    axes.set_ylabel('window size')
    axes.set_xlabel('alphabet size')
    axes.set_title('Cost of SAX')
    return fig, axes
def plot_3d_error_surface(err_surface, ax=None, title=None):
    """
    Visualizes the input ErrorSurface in 3D as a triangulated surface.

    :param err_surface: Input error surface
    :type err_surface: msax.error.ErrorSurface
    :param title: Optional. The title of the figure
    :param ax: Optional. matplotlib axes (a new 3D axes is created if None)
    :return: the axes the surface was drawn on
    """
    if ax is None:
        fig = plt.figure(figsize=(18, 12))
        ax = fig.add_subplot(1, 1, 1, projection='3d')
    from msax.error import ErrorSurface
    # NOTE(review): .windows/.alphabets are read before the isinstance check,
    # so a non-ErrorSurface input would already have failed here -- confirm intent.
    window_sizes = err_surface.windows
    alphabet_sizes = err_surface.alphabets
    if isinstance(err_surface, ErrorSurface):
        err_surface = err_surface.values
    # build one (alphabet, window) pair per surface cell
    x = np.repeat(window_sizes, len(alphabet_sizes)).astype(float)
    y = np.tile(alphabet_sizes, len(window_sizes)).astype(float)
    z = np.ravel(err_surface)
    # Temporarily enlarge fonts for the 3D labels; restore the caller's value
    # afterwards (the previous code clobbered the global font size to a
    # hard-coded 12 regardless of what it was before).
    prev_font_size = plt.rcParams['font.size']
    plt.rcParams.update({'font.size': 20})
    triang = mtri.Triangulation(y, x)
    ax.plot_trisurf(triang, z, cmap='viridis', vmin=np.nanmin(z), vmax=np.nanmax(z))
    ax.set_xlabel('alphabet size')
    ax.set_ylabel('window size')
    ax.set_zlabel('error')
    ax.xaxis.labelpad = 18
    ax.yaxis.labelpad = 18
    ax.zaxis.labelpad = 18
    if title is not None:  # idiom fix: was `if not title is None`
        ax.set_title(title)
    plt.rcParams.update({'font.size': prev_font_size})
    return ax
| [
"numpy.repeat",
"matplotlib.tri.Triangulation",
"seaborn.heatmap",
"msax.msax.paa",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.figure",
"numpy.nanmin",
"numpy.nanmax",
"numpy.ravel",
"matplotlib.pyplot.subplot2grid"
] | [((511, 576), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['gridsize', '(0, 0)'], {'colspan': '(2)', 'rowspan': '(1)', 'fig': 'fig'}), '(gridsize, (0, 0), colspan=2, rowspan=1, fig=fig)\n', (527, 576), True, 'import matplotlib.pyplot as plt\n'), ((587, 630), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['gridsize', '(0, 2)'], {'fig': 'fig'}), '(gridsize, (0, 2), fig=fig)\n', (603, 630), True, 'import matplotlib.pyplot as plt\n'), ((1038, 1047), 'msax.msax.paa', 'paa', (['x', 'w'], {}), '(x, w)\n', (1041, 1047), False, 'from msax.msax import paa\n'), ((1692, 1800), 'seaborn.heatmap', 'sns.heatmap', (['err_surface_vals'], {'xticklabels': 'err_surface.alphabets', 'yticklabels': 'err_surface.windows', 'ax': 'ax'}), '(err_surface_vals, xticklabels=err_surface.alphabets,\n yticklabels=err_surface.windows, ax=ax)\n', (1703, 1800), True, 'import seaborn as sns\n'), ((2707, 2728), 'numpy.ravel', 'np.ravel', (['err_surface'], {}), '(err_surface)\n', (2715, 2728), True, 'import numpy as np\n'), ((2734, 2772), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 20}"], {}), "({'font.size': 20})\n", (2753, 2772), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2810), 'matplotlib.tri.Triangulation', 'mtri.Triangulation', (['y', 'x'], {}), '(y, x)\n', (2804, 2810), True, 'import matplotlib.tri as mtri\n'), ((3133, 3171), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 12}"], {}), "({'font.size': 12})\n", (3152, 3171), True, 'import matplotlib.pyplot as plt\n'), ((451, 478), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (461, 478), True, 'import matplotlib.pyplot as plt\n'), ((998, 1025), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (1008, 1025), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1163), 'numpy.repeat', 'np.repeat', (['x_paa', 'w'], {}), '(x_paa, w)\n', (1153, 1163), True, 'import numpy 
as np\n'), ((1585, 1613), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 12)'}), '(figsize=(18, 12))\n', (1595, 1613), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2300), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 12)'}), '(figsize=(18, 12))\n', (2282, 2300), True, 'import matplotlib.pyplot as plt\n'), ((2863, 2875), 'numpy.nanmin', 'np.nanmin', (['z'], {}), '(z)\n', (2872, 2875), True, 'import numpy as np\n'), ((2882, 2894), 'numpy.nanmax', 'np.nanmax', (['z'], {}), '(z)\n', (2891, 2894), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
r"""
One-dimensional advection
=========================
Solve the linear advection equation on a nonuniform grid:
.. math::
q_t + u q_x = 0.
Here q is the density of some conserved quantity and u is the velocity.
Here we have a nonuniform grid, given by the transformation x**2 on grid [-0.5,0.5] to [-0.25,0.25].
The initial condition is a Gaussian centered at 0 and the boundary conditions are periodic.
The final solution is identical to the initial data because the wave has
crossed the domain exactly once, which takes computational time 0.5, because the speed is 1 and grid length 0.5.
"""
from __future__ import absolute_import
import numpy as np
from clawpack import riemann
def mapc2p_nonunif(xc):
    """Map computational coordinates on [-xL, xR] to physical coordinates
    [-xL**2, xR**2] by squaring the magnitude while preserving the sign
    (negative computational coordinates stay negative).
    """
    # sign(xc) * xc**2 == (-1 if xc<0 else 1 if xc>0 else 0) * xc**2
    return np.sign(xc) * xc**2
def setup(nx=100, kernel_language='Fortran', use_petsc=False, solver_type='classic', weno_order=5,
          time_integrator='SSP104', outdir='./_output'):
    """Build the PyClaw controller for 1D advection on a nonuniform grid.

    :param nx: number of grid cells
    :param kernel_language: 'Fortran' or 'Python' Riemann solver
    :param use_petsc: if True, use the PETSc-backed petclaw instead of pyclaw
    :param solver_type: only 'classic' is supported (sharpclaw does not
        accommodate nonuniform grids)
    :param weno_order: accepted for interface compatibility; unused by the
        classic solver
    :param time_integrator: accepted for interface compatibility; unused by
        the classic solver
    :param outdir: output directory, or None to disable file output
    :return: a configured pyclaw.Controller
    """
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        # BUG FIX: this import used to be unconditional, so it always
        # overwrote the petclaw import above and made use_petsc=True a no-op.
        from clawpack import pyclaw
    import clawpack.pyclaw.geometry

    if kernel_language == 'Fortran':
        riemann_solver = riemann.advection_1D
    elif kernel_language == 'Python':
        riemann_solver = riemann.advection_1D_py.advection_1D
    else:
        # previously fell through and raised a NameError on riemann_solver later
        raise Exception('Unrecognized value of kernel_language.')

    # sharpclaw does not accomodate nonuniform grids
    if solver_type == 'classic':
        solver = pyclaw.ClawSolver1D(riemann_solver)
    else:
        raise Exception('Unrecognized value of solver_type.')

    solver.kernel_language = kernel_language
    solver.order = 1
    solver.limiters = pyclaw.tvd.minmod
    solver.num_eqn = 1
    solver.num_waves = 1
    solver.bc_lower[0] = pyclaw.BC.periodic
    solver.bc_upper[0] = pyclaw.BC.periodic
    solver.aux_bc_lower[0] = pyclaw.BC.periodic
    solver.aux_bc_upper[0] = pyclaw.BC.periodic

    x = pyclaw.Dimension(-0.5, 0.5, nx, name='x')
    domain = pyclaw.Domain(x)
    state = pyclaw.State(domain, 1, num_aux=1)
    state.problem_data['u'] = 1.  # Advection velocity
    state.index_capa = 0  # aux[0] holds the capacity function

    grid1d = state.grid
    # mapping to nonunif grid
    grid1d.mapc2p = mapc2p_nonunif
    state.aux = np.zeros((1, nx))  # capacity array dx_p/dx_c
    state.aux[0, :] = np.diff(grid1d.p_nodes)/np.diff(state.grid.x.nodes)

    # Initial data: Gaussian centered at x0 (gamma=0 means no cosine modulation)
    beta = 100
    gamma = 0
    x0 = 0.0
    state.q[0, :] = np.exp(-beta * (grid1d.p_centers-x0)**2) * np.cos(gamma * (grid1d.p_centers - x0))

    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.solution = pyclaw.Solution(state, domain)
    claw.solver = solver
    claw.tfinal = 0.5  # one cycle across the domain
    claw.outdir = outdir
    claw.num_output_times = 10
    claw.nstepout = 1
    if outdir is None:
        claw.output_format = None
    claw.setplot = setplot
    return claw
def setplot(plotdata):
    """
    Configure VisClaw plotting for the advection solution.
    """
    plotdata.clearfigures()  # clear any old figures, axes, items data
    plotdata.mapc2p = mapc2p_nonunif

    fig_q = plotdata.new_plotfigure(name='q', figno=1)

    # axes setup for this figure
    axes = fig_q.new_plotaxes()
    axes.xlimits = [-0.25, 0.25]
    axes.ylimits = [-.2, 1.0]
    axes.title = 'q'

    # single line-plot item on these axes
    item = axes.new_plotitem(plot_type='1d_plot')
    item.plot_var = 0
    item.plotstyle = '-o'
    item.color = 'b'
    item.kwargs = {'linewidth': 2, 'markersize': 5}
    return plotdata
if __name__=="__main__":
    # Script entry point: run_app_from_main parses command-line keyword
    # arguments, forwards them to setup(), and uses setplot for plotting.
    from clawpack.pyclaw.util import run_app_from_main
    output = run_app_from_main(setup,setplot)
| [
"clawpack.petclaw.Dimension",
"clawpack.petclaw.ClawSolver1D",
"clawpack.pyclaw.util.run_app_from_main",
"clawpack.petclaw.State",
"numpy.diff",
"clawpack.petclaw.Domain",
"numpy.exp",
"numpy.zeros",
"clawpack.petclaw.Solution",
"numpy.cos",
"clawpack.petclaw.Controller"
] | [((2094, 2135), 'clawpack.petclaw.Dimension', 'pyclaw.Dimension', (['(-0.5)', '(0.5)', 'nx'], {'name': '"""x"""'}), "(-0.5, 0.5, nx, name='x')\n", (2110, 2135), True, 'import clawpack.petclaw as pyclaw\n'), ((2146, 2162), 'clawpack.petclaw.Domain', 'pyclaw.Domain', (['x'], {}), '(x)\n', (2159, 2162), True, 'import clawpack.petclaw as pyclaw\n'), ((2175, 2209), 'clawpack.petclaw.State', 'pyclaw.State', (['domain', '(1)'], {'num_aux': '(1)'}), '(domain, 1, num_aux=1)\n', (2187, 2209), True, 'import clawpack.petclaw as pyclaw\n'), ((2425, 2442), 'numpy.zeros', 'np.zeros', (['(1, nx)'], {}), '((1, nx))\n', (2433, 2442), True, 'import numpy as np\n'), ((2712, 2731), 'clawpack.petclaw.Controller', 'pyclaw.Controller', ([], {}), '()\n', (2729, 2731), True, 'import clawpack.petclaw as pyclaw\n'), ((2778, 2808), 'clawpack.petclaw.Solution', 'pyclaw.Solution', (['state', 'domain'], {}), '(state, domain)\n', (2793, 2808), True, 'import clawpack.petclaw as pyclaw\n'), ((3805, 3838), 'clawpack.pyclaw.util.run_app_from_main', 'run_app_from_main', (['setup', 'setplot'], {}), '(setup, setplot)\n', (3822, 3838), False, 'from clawpack.pyclaw.util import run_app_from_main\n'), ((1649, 1684), 'clawpack.petclaw.ClawSolver1D', 'pyclaw.ClawSolver1D', (['riemann_solver'], {}), '(riemann_solver)\n', (1668, 1684), True, 'import clawpack.petclaw as pyclaw\n'), ((2490, 2513), 'numpy.diff', 'np.diff', (['grid1d.p_nodes'], {}), '(grid1d.p_nodes)\n', (2497, 2513), True, 'import numpy as np\n'), ((2514, 2541), 'numpy.diff', 'np.diff', (['state.grid.x.nodes'], {}), '(state.grid.x.nodes)\n', (2521, 2541), True, 'import numpy as np\n'), ((2617, 2661), 'numpy.exp', 'np.exp', (['(-beta * (grid1d.p_centers - x0) ** 2)'], {}), '(-beta * (grid1d.p_centers - x0) ** 2)\n', (2623, 2661), True, 'import numpy as np\n'), ((2660, 2699), 'numpy.cos', 'np.cos', (['(gamma * (grid1d.p_centers - x0))'], {}), '(gamma * (grid1d.p_centers - x0))\n', (2666, 2699), True, 'import numpy as np\n')] |
import math
import array
import random
import numpy as np
from deap import base
from deap import creator
from deap import tools
from deap.benchmarks.tools import diversity, convergence, hypervolume
class PSO():
def __init__(self, dim, boundary, population=5, gen=1000, minimization=True, func=None):
# POTREI AGGIUNGERE SPEED
self.DIM = dim
self.BOUNDARY = boundary
self.POPULATION = population
self.GEN = gen
self.SPEED = self.initialize_speed(boundary)
self.func = func
self.minimization = minimization
self.FIRST = True
def generate(self, size, pmin, pmax, smin, smax):
# pmin,pmax=self.dispatch_boundaries()
part = creator.Particle(np.random.uniform(pmin, pmax, size))
part.speed = np.random.uniform(smin, smax, size)
part.smin = smin
part.smax = smax
return part
def updateParticle(self, part, best, phi1, phi2):
u1 = np.random.uniform(0, phi1, len(part))
u2 = np.random.uniform(0, phi2, len(part))
v_u1 = u1 * (part.best - part)
v_u2 = u2 * (best - part)
part.speed += v_u1 + v_u2
for i, speed in enumerate(part.speed):
if abs(speed) < part.smin:
part.speed[i] = math.copysign(part.smin, speed)
elif abs(speed) > part.smax:
part.speed[i] = math.copysign(part.smax, speed)
part += part.speed
def initialize_speed(self, boundaries):
s_b = np.sort(boundaries.sum(axis=1) / 2)
return [-s_b[math.floor(len(s_b) / 2)], s_b[math.floor(len(s_b) / 2)]]
def run(self):
# Setup with dummy variables
n = self.GEN
dim = self.DIM
population = self.POPULATION
# pmin,pmax=self.dispatch_boundaries()
smin, smax = self.dispatch_speed()
if self.FIRST:
if self.minimization is True:
creator.create("FitnessMax", base.Fitness, weights=(-1.0,))
else:
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Particle", np.ndarray, fitness=creator.FitnessMax, speed=list,
smin=None, smax=None, best=None)
self.FIRST = False
toolbox = base.Toolbox()
toolbox.register("particle", self.generate, size=dim, pmin=self.BOUNDARY[:, 0],
pmax=self.BOUNDARY[:, 1], smin=smin, smax=smax)
toolbox.register("population", tools.initRepeat, list, toolbox.particle)
toolbox.register("update", self.updateParticle, phi1=2.0, phi2=2.0)
toolbox.register("evaluate", self.func)
pop = toolbox.population(n=population)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
logbook = tools.Logbook()
logbook.header = ["gen", "evals"] + stats.fields
GEN = n
best = None
for g in range(GEN):
for part in pop:
part.fitness.values = toolbox.evaluate(part)
if part.best is None or part.best.fitness < part.fitness:
part.best = creator.Particle(part)
part.best.fitness.values = part.fitness.values
if best is None or best.fitness < part.fitness:
best = creator.Particle(part)
best.fitness.values = part.fitness.values
for part in pop:
toolbox.update(part, best)
# Gather all the fitnesses in one list and print the stats
#logbook.record(gen=g, evals=len(pop), **stats.compile(pop))
# print(logbook.stream)
self.PSO = {
"pop": pop,
"logbook": logbook,
"best": best
}
return best
def dispatch_speed(self):
return self.SPEED[0], self.SPEED[1]
def dispatch_boundaries(self):
return self.BOUNDARY[0], self.BOUNDARY[1]
def pop_sort(self):
return self.PSO["pop"].sort(key=lambda x: x.fitness.values)
class NSGAII():
def __init__(self, dim, nobj, boundary, population=100, gen=200, minimization=True, CXPB=0.9, func=None):
self.DIM = dim
self.NOBJ= nobj
self.BOUNDARIES = boundary
self.POPULATION = population
self.GEN = gen
self.func = func
self.minimization = minimization
self.CXPB = CXPB
self.FIRST = True
def uniform(self, b):
return np.random.uniform(b[:, 0], b[:, 1], self.DIM)
def set_func(self, func):
self.func=func
def run(self):
n = self.GEN
dim = self.DIM
population = self.POPULATION
if self.FIRST:
if self.minimization is True:
creator.create("FitnessMin", base.Fitness, weights=tuple([-1.0]* self.NOBJ))
creator.create("Individual", np.ndarray, typecode='d',
fitness=creator.FitnessMin)
else:
creator.create("FitnessMax", base.Fitness, weights=tuple([1.0] * self.NOBJ))
creator.create("Individual", np.ndarray, typecode='d',
fitness=creator.FitnessMax)
#array.array
self.FIRST = False
toolbox = base.Toolbox()
toolbox.register("attr_float", self.uniform, self.BOUNDARIES)
toolbox.register("individual",
tools.initIterate,
creator.Individual,
toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", self.func)
toolbox.register("mate",
tools.cxSimulatedBinaryBounded,
low=self.BOUNDARIES[:, 0].tolist(),
up=self.BOUNDARIES[:, 1].tolist(),
eta=20.0)
toolbox.register("mutate",
tools.mutPolynomialBounded,
low=self.BOUNDARIES[:, 0].tolist(),
up=self.BOUNDARIES[:, 1].tolist(),
eta=20.0,
indpb=1.0 / dim)
toolbox.register("select", tools.selNSGA2)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min, axis=0)
stats.register("max", np.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=population)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
# Begin the generational process
for gen in range(1, n):
# Vary the population
offspring = tools.selTournamentDCD(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= self.CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, population)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
# print(logbook.stream)
front = np.array([ind.fitness.values for ind in pop])
pop_array=np.array([ind for ind in pop])
hyper_v=hypervolume(pop)
self.NSGAII = {
"pop": pop,
"logbook": logbook,
"pareto": front,
"npop" : pop_array,
"hypervolume" : hyper_v
}
return pop, logbook, front
def pareto_sorted(self):
if hasattr(self, 'NSGAII'):
tmp_array=self.NSGAII["pareto"]
return tmp_array[tmp_array[:, 0].argsort()]
else:
print("NO PARETO SOLUTION IN MEMORY")
| [
"deap.creator.Particle",
"deap.benchmarks.tools.hypervolume",
"deap.creator.create",
"deap.tools.Logbook",
"math.copysign",
"numpy.array",
"numpy.random.uniform",
"random.random",
"deap.tools.Statistics",
"deap.base.Toolbox"
] | [((794, 829), 'numpy.random.uniform', 'np.random.uniform', (['smin', 'smax', 'size'], {}), '(smin, smax, size)\n', (811, 829), True, 'import numpy as np\n'), ((2284, 2298), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (2296, 2298), False, 'from deap import base\n'), ((2729, 2777), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (2745, 2777), False, 'from deap import tools\n'), ((2950, 2965), 'deap.tools.Logbook', 'tools.Logbook', ([], {}), '()\n', (2963, 2965), False, 'from deap import tools\n'), ((4625, 4670), 'numpy.random.uniform', 'np.random.uniform', (['b[:, 0]', 'b[:, 1]', 'self.DIM'], {}), '(b[:, 0], b[:, 1], self.DIM)\n', (4642, 4670), True, 'import numpy as np\n'), ((5436, 5450), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (5448, 5450), False, 'from deap import base\n'), ((6425, 6473), 'deap.tools.Statistics', 'tools.Statistics', (['(lambda ind: ind.fitness.values)'], {}), '(lambda ind: ind.fitness.values)\n', (6441, 6473), False, 'from deap import tools\n'), ((6585, 6600), 'deap.tools.Logbook', 'tools.Logbook', ([], {}), '()\n', (6598, 6600), False, 'from deap import tools\n'), ((8377, 8422), 'numpy.array', 'np.array', (['[ind.fitness.values for ind in pop]'], {}), '([ind.fitness.values for ind in pop])\n', (8385, 8422), True, 'import numpy as np\n'), ((8441, 8471), 'numpy.array', 'np.array', (['[ind for ind in pop]'], {}), '([ind for ind in pop])\n', (8449, 8471), True, 'import numpy as np\n'), ((8488, 8504), 'deap.benchmarks.tools.hypervolume', 'hypervolume', (['pop'], {}), '(pop)\n', (8499, 8504), False, 'from deap.benchmarks.tools import diversity, convergence, hypervolume\n'), ((736, 771), 'numpy.random.uniform', 'np.random.uniform', (['pmin', 'pmax', 'size'], {}), '(pmin, pmax, size)\n', (753, 771), True, 'import numpy as np\n'), ((2095, 2211), 'deap.creator.create', 'creator.create', (['"""Particle"""', 'np.ndarray'], {'fitness': 
'creator.FitnessMax', 'speed': 'list', 'smin': 'None', 'smax': 'None', 'best': 'None'}), "('Particle', np.ndarray, fitness=creator.FitnessMax, speed=\n list, smin=None, smax=None, best=None)\n", (2109, 2211), False, 'from deap import creator\n'), ((1282, 1313), 'math.copysign', 'math.copysign', (['part.smin', 'speed'], {}), '(part.smin, speed)\n', (1295, 1313), False, 'import math\n'), ((1930, 1989), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(-1.0,)'}), "('FitnessMax', base.Fitness, weights=(-1.0,))\n", (1944, 1989), False, 'from deap import creator\n'), ((2024, 2082), 'deap.creator.create', 'creator.create', (['"""FitnessMax"""', 'base.Fitness'], {'weights': '(1.0,)'}), "('FitnessMax', base.Fitness, weights=(1.0,))\n", (2038, 2082), False, 'from deap import creator\n'), ((5002, 5089), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'np.ndarray'], {'typecode': '"""d"""', 'fitness': 'creator.FitnessMin'}), "('Individual', np.ndarray, typecode='d', fitness=creator.\n FitnessMin)\n", (5016, 5089), False, 'from deap import creator\n'), ((5243, 5330), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'np.ndarray'], {'typecode': '"""d"""', 'fitness': 'creator.FitnessMax'}), "('Individual', np.ndarray, typecode='d', fitness=creator.\n FitnessMax)\n", (5257, 5330), False, 'from deap import creator\n'), ((1387, 1418), 'math.copysign', 'math.copysign', (['part.smax', 'speed'], {}), '(part.smax, speed)\n', (1400, 1418), False, 'import math\n'), ((3286, 3308), 'deap.creator.Particle', 'creator.Particle', (['part'], {}), '(part)\n', (3302, 3308), False, 'from deap import creator\n'), ((3467, 3489), 'deap.creator.Particle', 'creator.Particle', (['part'], {}), '(part)\n', (3483, 3489), False, 'from deap import creator\n'), ((7581, 7596), 'random.random', 'random.random', ([], {}), '()\n', (7594, 7596), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Justin'
__mtime__ = '2018-06-02'
"""
import numpy as np
from skimage import color, morphology
from skimage.morphology import square
import os
import pandas as pd
def get_seeds(MaskLow, lowScale, highScale, patch_size_high, spacingHigh, margin = -8):
'''
得到Mask图像中为True位置在高分辨率下的坐标值
:param MaskLow: 低分辨率Mask图像
:param lowScale: 低分辨率值
:param highScale: 种子点坐标所在的高分辨率值
:param patch_size_high: 在高分辨率图块的大小
:param spacingHigh: 种子点在高分辨率图像之间的间隔spacingHigh
:param margin: 边界参数
:return: 在高分辨率中的种子点坐标
'''
amp = highScale / lowScale
patch_size = int(patch_size_high / amp) # patch size low
if margin < 0:
# 灰度图像腐蚀,图像中物体会收缩/细化:https://wenku.baidu.com/view/c600c8d1360cba1aa811da73.html
seed_img = morphology.binary_erosion(MaskLow, square(patch_size))
seed_img = morphology.binary_erosion(seed_img, square(abs(margin))) # 收缩边界
elif margin > 0:
seed_img = morphology.binary_dilation(MaskLow, square(patch_size))
seed_img = morphology.binary_dilation(seed_img, square(margin)) # 扩展边界
else:
seed_img = MaskLow
space_patch = spacingHigh / amp
pos = seed_img.nonzero()
y = (np.rint(pos[0] / space_patch) * spacingHigh).astype(np.int32) # row
x = (np.rint(pos[1] / space_patch) * spacingHigh).astype(np.int32) # col
resultHigh = set()
for xx, yy in zip(x, y):
resultHigh.add((xx, yy))
return list(resultHigh)
def read_csv_file(root_path, csv_path):
filenames_list = []
labels_list = []
f = open(csv_path, "r")
lines = f.readlines()
for line in lines:
items = line.split(" ")
if len(items) == 2: # 单标签
tag = int(items[1])
else: # 多标签
tag = tuple(int(sub_tag) for sub_tag in items[1:])
labels_list.append(tag)
patch_file = "{}/{}".format(root_path, items[0])
filenames_list.append(patch_file)
return filenames_list, labels_list
def read_DSC_csv_file(root_path, csv_path,):
filenames_list = []
labels_list = []
f = open(csv_path, "r")
lines = f.readlines()
for line in lines:
items = line.split(" ")
if len(items) >= 5: # 双图片输入,三个标签
tag = tuple(int(sub_tag) for sub_tag in items[2:])
labels_list.append(tag)
filenames = ("{}/{}".format(root_path, items[0]), "{}/{}".format(root_path, items[1]))
filenames_list.append(filenames)
return filenames_list, labels_list
def latest_checkpoint(search_dir):
filename = []
epoch = []
loss = []
accuracy = []
for ckpt_name in os.listdir(search_dir):
file_name = os.path.splitext(ckpt_name)[0]
value = file_name.split("-")
if len(value) >= 4:
filename.append(ckpt_name)
epoch.append(int(value[1]))
loss.append(float(value[2]))
accuracy.append(float(value[3]))
if len(filename) == 0: return None
data = {'filename':filename, 'epoch':epoch, 'loss':loss, 'accuracy':accuracy}
df = pd.DataFrame(data, columns=['filename','epoch','loss','accuracy'])
result = df.sort_values(['loss', 'accuracy', 'epoch'], ascending=[True, False, False])
path = "{}/{}".format(search_dir, result.iloc[0,0])
return path
def clean_checkpoint(search_dir, best_number = 10):
filename = []
epoch = []
loss = []
accuracy = []
for ckpt_name in os.listdir(search_dir):
file_name = os.path.splitext(ckpt_name)[0]
value = file_name.split("-")
if len(value) == 4:
filename.append(ckpt_name)
epoch.append(int(value[1]))
loss.append(float(value[2]))
accuracy.append(float(value[3]))
if len(filename) <= best_number: return None
data = {'filename':filename, 'epoch':epoch, 'loss':loss, 'accuracy':accuracy}
df = pd.DataFrame(data, columns=['filename','epoch','loss','accuracy'])
result = df.sort_values(['loss', 'accuracy', 'epoch'], ascending=[True, False, False])
for ckpt_name in result.iloc[best_number:, 0]:
path = "{}/{}".format(search_dir, ckpt_name)
print("deleted", path)
os.remove(path)
# print(path)
#
# for ckpt_name in result.iloc[:best_number, 0]:
# path = "{}/{}".format(search_dir, ckpt_name)
# # os.remove(path
# print("best -> ",path)
def transform_coordinate(x1, y1, coordinate_scale, seeds_scale, target_scale, seeds):
'''
将图块中心坐标变换到新的坐标系中。 新坐标系的原点为检测区域的左上角,所处的倍镜为target_scale
:param x1: 左上角x坐标
:param y1: 左上角y坐标
:param coordinate_scale: 以上坐标的倍镜数
:param seeds_scale: 图块中心点(种子点)的倍镜
:param target_scale: 目标坐标系所对应的倍镜
:param seeds: 图块中心点集
:return:新坐标系下的中心点
'''
xx1 = (x1 * target_scale / coordinate_scale)
yy1 = (y1 * target_scale / coordinate_scale)
results = []
for x, y in seeds:
xx = int(x * target_scale / seeds_scale - xx1)
yy = int(y * target_scale / seeds_scale - yy1)
# xx = max(0, xx)
# yy = max(0, yy)
results.append((xx, yy))
# print(results)
return results
def get_project_root():
path = os.getcwd()
#约定 项目的代码在src文件夹,其它的目录与它平级
pos = path.find("src")
return path[:pos - 1]
def is_tumor_by_code(code):
'''
:param code:slide id
:return: 是否是Tumor case
'''
tag = code[0:-4]
if tag == "Tumor":
return True
elif tag == "Normal":
return False
else: # "Test_001"
id = int(code[-3:])
if id in [1, 2, 4, 8, 10, 11, 13, 16, 21, 26, 27, 29, 30, 33, 38, 40, 46, 48, 51, 52,
61, 64, 65, 66, 68, 69, 71, 73, 74, 75, 79,
82, 84, 90, 94, 97, 99, 102, 104, 105, 108, 110, 113, 116, 117, 121, 122]:
return True
else:
return False | [
"os.listdir",
"skimage.morphology.square",
"os.path.splitext",
"os.getcwd",
"numpy.rint",
"pandas.DataFrame",
"os.remove"
] | [((2675, 2697), 'os.listdir', 'os.listdir', (['search_dir'], {}), '(search_dir)\n', (2685, 2697), False, 'import os\n'), ((3113, 3182), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['filename', 'epoch', 'loss', 'accuracy']"}), "(data, columns=['filename', 'epoch', 'loss', 'accuracy'])\n", (3125, 3182), True, 'import pandas as pd\n'), ((3484, 3506), 'os.listdir', 'os.listdir', (['search_dir'], {}), '(search_dir)\n', (3494, 3506), False, 'import os\n'), ((3932, 4001), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['filename', 'epoch', 'loss', 'accuracy']"}), "(data, columns=['filename', 'epoch', 'loss', 'accuracy'])\n", (3944, 4001), True, 'import pandas as pd\n'), ((5221, 5232), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5230, 5232), False, 'import os\n'), ((4234, 4249), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (4243, 4249), False, 'import os\n'), ((853, 871), 'skimage.morphology.square', 'square', (['patch_size'], {}), '(patch_size)\n', (859, 871), False, 'from skimage.morphology import square\n'), ((2719, 2746), 'os.path.splitext', 'os.path.splitext', (['ckpt_name'], {}), '(ckpt_name)\n', (2735, 2746), False, 'import os\n'), ((3528, 3555), 'os.path.splitext', 'os.path.splitext', (['ckpt_name'], {}), '(ckpt_name)\n', (3544, 3555), False, 'import os\n'), ((1033, 1051), 'skimage.morphology.square', 'square', (['patch_size'], {}), '(patch_size)\n', (1039, 1051), False, 'from skimage.morphology import square\n'), ((1109, 1123), 'skimage.morphology.square', 'square', (['margin'], {}), '(margin)\n', (1115, 1123), False, 'from skimage.morphology import square\n'), ((1245, 1274), 'numpy.rint', 'np.rint', (['(pos[0] / space_patch)'], {}), '(pos[0] / space_patch)\n', (1252, 1274), True, 'import numpy as np\n'), ((1323, 1352), 'numpy.rint', 'np.rint', (['(pos[1] / space_patch)'], {}), '(pos[1] / space_patch)\n', (1330, 1352), True, 'import numpy as np\n')] |
import os
import gym
from brl_gym.envs.mujoco.model_updater import MujocoUpdater
from gym import error
from gym.utils import seeding
import numpy as np
from os import path
import gym
import six
import time as timer
from gym import spaces
try:
import mujoco_py
from mujoco_py import load_model_from_path, load_model_from_xml, MjSim, MjViewer
except ImportError as e:
raise error.DependencyNotInstalled("{}. (HINT: you need to install mujoco_py, and also perform the setup instructions here: https://github.com/openai/mujoco-py/.)".format(e))
positive_params = ["size", "damping", "stiffness"]
DEFAULT_SIZE = 500
class MujocoEnv(gym.Env):
"""Superclass for all MuJoCo environments.
"""
def __init__(self, model_path, frame_skip=1, xml_string=""):
"""
@param model_path path of the default model
@param xml_string if given, the model will be reset using these values
"""
fullpath = os.path.join(os.path.dirname(__file__), "assets", model_path)
if not path.exists(fullpath):
raise IOError("File %s does not exist" % fullpath)
self.model = load_model_from_path(fullpath)
with open(fullpath, 'r') as f:
self.model_xml = f.read()
self.default_xml = self.model_xml
if xml_string != "":
self.model = load_model_from_xml(xml_string)
self.model_xml = xml_string
self.frame_skip = frame_skip
self.sim = MjSim(self.model)
self.data = self.sim.data
self.viewer = None
self._viewers = {}
self.metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': int(np.round(1.0 / self.dt))
}
self.mujoco_render_frames = False
self.init_qpos = self.data.qpos.ravel().copy()
self.init_qvel = self.data.qvel.ravel().copy()
observation, _reward, done, _info = self.step(np.zeros(self.model.nu))
assert not done
self.obs_dim = np.sum([o.size for o in observation]) if type(observation) is tuple else observation.size
high = np.inf*np.ones(self.obs_dim)
low = -high
self.observation_space = spaces.Box(low, high)
self._seed()
self.set_param_space()
def get_params(self):
"""
Returns a dict of (param_name, param_value)
"""
return MujocoUpdater(self.model_xml).get_params()
def set_params(self, params):
"""
@param params: dict(param_name, param_value)
param_name should be a string of bodyname__type__paramname
where type is either geom or joint,
e.g. thigh__joint__friction,
and param_value is a numpy array
"""
# invalidate cached properties
self.__dict__.pop('action_space', None)
self.__dict__.pop('observation_space', None)
new_xml = MujocoUpdater.set_params(self.model_xml, params)
self.__init__(xml_string=new_xml)
self.reset()
return self
def set_param_space(self, param_space=None, eps_scale=0.5, replace=True):
"""
Set param_space
@param param_space: dict(string, gym.space.base.Space)
@param eps_scale: scale of variation applied to all params
@param replace: if true, param_space overwrites default param_space.
Default behavior is to merge.
"""
if param_space is not None:
if replace:
self._param_space = param_space
else:
self._param_space = {**self._param_space, **param_space}
else:
params = MujocoUpdater(self.model_xml).get_params()
self._param_space = dict()
for param, value in params.items():
eps = np.abs(value) * eps_scale
ub = value + eps
lb = value - eps
for name in positive_params:
if name in param:
lb = np.clip(lb, 1e-3, ub)
break
space = spaces.Box(lb, ub)
self._param_space[param] = space
def get_geom_params(self, body_name):
geom = MujocoUpdater(self.model_xml).get_geom(body_name)
return {
k: v for k, v in geom.attrib.items()
if k not in MujocoUpdater.ignore_params
}
def get_joint_params(self, body_name):
joint = MujocoUpdater(self.model_xml).get_joint(body_name)
return {
k: v for k, v in joint.attrib.items()
if k not in MujocoUpdater.ignore_params
}
def get_body_names(self):
return MujocoUpdater(self.model_xml).get_body_names()
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def get_current_obs(self):
return self._get_full_obs()
def _get_full_obs(self):
data = self.sim.data
cdists = np.copy(self.model.geom_margin).flat
for c in self.sim.data.contact:
cdists[c.geom2] = min(cdists[c.geom2], c.dist)
obs = np.concatenate([
data.qpos.flat,
data.qvel.flat,
# data.cdof.flat,
data.cinert.flat,
data.cvel.flat,
# data.cacc.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
data.qfrc_constraint.flat,
cdists,
# data.qfrc_bias.flat,
# data.qfrc_passive.flat,
self.dcom.flat,
])
return obs
@property
def _state(self):
return np.concatenate([
self.sim.data.qpos.flat,
self.sim.data.qvel.flat
])
@property
def _full_state(self):
return np.concatenate([
self.sim.data.qpos,
self.sim.data.qvel,
self.sim.data.qacc,
self.sim.data.ctrl,
]).ravel()
# methods to override:
# ----------------------------
def reset_model(self):
"""
Reset the robot degrees of freedom (qpos and qvel).
Implement this in each subclass.
"""
raise NotImplementedError
def mj_viewer_setup(self):
"""
Due to specifics of new mujoco rendering, the standard viewer cannot be used
with this set-up. Instead we use this mujoco specific function.
"""
pass
def viewer_setup(self):
"""
Does not work. Use mj_viewer_setup() instead
"""
pass
# -----------------------------
def reset(self, randomize=True):
self.sim.reset()
self.sim.forward()
ob = self.reset_model()
return ob
# Added for bayesian_rl
def get_sim_state(self):
return self.sim.get_state()
# Added for bayesian_rl
def set_sim_state(self, state):
self.sim.set_state(state)
# Added for bayesian_rl
def set_state_vector(self, state_vector):
qpos = state_vector[:self.model.nq]
qvel = state_vector[self.model.nq:]
self.set_state(qpos, qvel)
# Added for bayesian_rl
def get_state_vector(self):
return self.state_vector()
def set_state(self, qpos, qvel):
assert qpos.shape == (self.model.nq,) and qvel.shape == (self.model.nv,)
state = self.sim.get_state()
for i in range(self.model.nq):
state.qpos[i] = qpos[i]
for i in range(self.model.nv):
state.qvel[i] = qvel[i]
self.sim.set_state(state)
self.sim.forward()
@property
def dt(self):
return self.model.opt.timestep * self.frame_skip
def do_simulation(self, ctrl, n_frames):
for i in range(self.model.nu):
self.sim.data.ctrl[i] = ctrl[i]
for _ in range(n_frames):
self.sim.step()
if self.mujoco_render_frames is True:
self.mj_render()
def mj_render(self):
try:
self.viewer.render()
except:
self.mj_viewer_setup()
self.viewer._run_speed = 1.0
#self.viewer._run_speed /= self.frame_skip
self.viewer.render()
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == 'human':
self.viewer = mujoco_py.MjViewer(self.sim)
elif mode == 'rgb_array' or mode == 'depth_array':
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def close(self):
if self.viewer is not None:
# self.viewer.finish()
self.viewer = None
self._viewers = {}
# def step(self, a):
# return self._step(a)
# Added for bayesian_rl
def take_action(self, a):
self.step(a)
return self.get_sim_state()
def state_vector(self):
state = self.sim.get_state()
return np.concatenate([
state.qpos.flat, state.qvel.flat])
# -----------------------------
def visualize_policy(self, policy, horizon=1000, num_episodes=1, mode='exploration'):
self.mujoco_render_frames = True
for ep in range(num_episodes):
o = self.reset()
d = False
t = 0
while t < horizon and d is False:
a = policy.get_action(o)[0] if mode == 'exploration' else policy.get_action(o)[1]['evaluation']
o, r, d, _ = self.step(a)
t = t+1
self.mujoco_render_frames = False
def visualize_policy_offscreen(self, policy, horizon=1000,
num_episodes=1,
mode='exploration',
save_loc='/tmp/',
filename='newvid',
camera_name=None):
import skvideo.io
for ep in range(num_episodes):
print("Episode %d: rendering offline " % ep, end='', flush=True)
o = self.reset()
d = False
t = 0
arrs = []
t0 = timer.time()
while t < horizon and d is False:
a = policy.get_action(o)[0] if mode == 'exploration' else policy.get_action(o)[1]['mean']
o, r, d, _ = self.step(a)
t = t+1
curr_frame = self.sim.render(width=640, height=480, mode='offscreen',
camera_name=camera_name, device_id=0)
arrs.append(curr_frame[::-1,:,:])
print(t, end=', ', flush=True)
file_name = save_loc + filename + '_' + str(ep) + ".mp4"
skvideo.io.vwrite( file_name, np.asarray(arrs))
print("saved", file_name)
t1 = timer.time()
print("time taken = %f"% (t1-t0))
def render(self, mode='human', width=DEFAULT_SIZE, height=DEFAULT_SIZE):
if mode == 'rgb_array':
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
data = self._get_viewer(mode).read_pixels(width, height, depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif mode == 'depth_array':
self._get_viewer(mode).render(width, height)
# window size used for old mujoco-py:
# Extract depth part of the read_pixels() tuple
data = self._get_viewer(mode).read_pixels(width, height, depth=True)[1]
# original image is upside-down, so flip it
return data[::-1, :]
elif mode == 'human':
self._get_viewer(mode).render()
| [
"numpy.clip",
"mujoco_py.MjViewer",
"mujoco_py.MjRenderContextOffscreen",
"gym.utils.seeding.np_random",
"mujoco_py.MjSim",
"os.path.exists",
"mujoco_py.load_model_from_path",
"numpy.asarray",
"brl_gym.envs.mujoco.model_updater.MujocoUpdater.set_params",
"numpy.concatenate",
"mujoco_py.load_mode... | [((1135, 1165), 'mujoco_py.load_model_from_path', 'load_model_from_path', (['fullpath'], {}), '(fullpath)\n', (1155, 1165), False, 'from mujoco_py import load_model_from_path, load_model_from_xml, MjSim, MjViewer\n'), ((1474, 1491), 'mujoco_py.MjSim', 'MjSim', (['self.model'], {}), '(self.model)\n', (1479, 1491), False, 'from mujoco_py import load_model_from_path, load_model_from_xml, MjSim, MjViewer\n'), ((2205, 2226), 'gym.spaces.Box', 'spaces.Box', (['low', 'high'], {}), '(low, high)\n', (2215, 2226), False, 'from gym import spaces\n'), ((2901, 2949), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater.set_params', 'MujocoUpdater.set_params', (['self.model_xml', 'params'], {}), '(self.model_xml, params)\n', (2925, 2949), False, 'from brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((4787, 4810), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (4804, 4810), False, 'from gym.utils import seeding\n'), ((5127, 5315), 'numpy.concatenate', 'np.concatenate', (['[data.qpos.flat, data.qvel.flat, data.cinert.flat, data.cvel.flat, data.\n qfrc_actuator.flat, data.cfrc_ext.flat, data.qfrc_constraint.flat,\n cdists, self.dcom.flat]'], {}), '([data.qpos.flat, data.qvel.flat, data.cinert.flat, data.cvel\n .flat, data.qfrc_actuator.flat, data.cfrc_ext.flat, data.\n qfrc_constraint.flat, cdists, self.dcom.flat])\n', (5141, 5315), True, 'import numpy as np\n'), ((5629, 5695), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos.flat, self.sim.data.qvel.flat]'], {}), '([self.sim.data.qpos.flat, self.sim.data.qvel.flat])\n', (5643, 5695), True, 'import numpy as np\n'), ((9053, 9103), 'numpy.concatenate', 'np.concatenate', (['[state.qpos.flat, state.qvel.flat]'], {}), '([state.qpos.flat, state.qvel.flat])\n', (9067, 9103), True, 'import numpy as np\n'), ((964, 989), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (979, 989), False, 'import os\n'), ((1028, 1049), 'os.path.exists', 
'path.exists', (['fullpath'], {}), '(fullpath)\n', (1039, 1049), False, 'from os import path\n'), ((1344, 1375), 'mujoco_py.load_model_from_xml', 'load_model_from_xml', (['xml_string'], {}), '(xml_string)\n', (1363, 1375), False, 'from mujoco_py import load_model_from_path, load_model_from_xml, MjSim, MjViewer\n'), ((1945, 1968), 'numpy.zeros', 'np.zeros', (['self.model.nu'], {}), '(self.model.nu)\n', (1953, 1968), True, 'import numpy as np\n'), ((2017, 2054), 'numpy.sum', 'np.sum', (['[o.size for o in observation]'], {}), '([o.size for o in observation])\n', (2023, 2054), True, 'import numpy as np\n'), ((2130, 2151), 'numpy.ones', 'np.ones', (['self.obs_dim'], {}), '(self.obs_dim)\n', (2137, 2151), True, 'import numpy as np\n'), ((4977, 5008), 'numpy.copy', 'np.copy', (['self.model.geom_margin'], {}), '(self.model.geom_margin)\n', (4984, 5008), True, 'import numpy as np\n'), ((10241, 10253), 'time.time', 'timer.time', ([], {}), '()\n', (10251, 10253), True, 'import time as timer\n'), ((10922, 10934), 'time.time', 'timer.time', ([], {}), '()\n', (10932, 10934), True, 'import time as timer\n'), ((1703, 1726), 'numpy.round', 'np.round', (['(1.0 / self.dt)'], {}), '(1.0 / self.dt)\n', (1711, 1726), True, 'import numpy as np\n'), ((2398, 2427), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater', 'MujocoUpdater', (['self.model_xml'], {}), '(self.model_xml)\n', (2411, 2427), False, 'from brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((4086, 4104), 'gym.spaces.Box', 'spaces.Box', (['lb', 'ub'], {}), '(lb, ub)\n', (4096, 4104), False, 'from gym import spaces\n'), ((4212, 4241), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater', 'MujocoUpdater', (['self.model_xml'], {}), '(self.model_xml)\n', (4225, 4241), False, 'from brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((4450, 4479), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater', 'MujocoUpdater', (['self.model_xml'], {}), '(self.model_xml)\n', (4463, 4479), False, 'from 
brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((4676, 4705), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater', 'MujocoUpdater', (['self.model_xml'], {}), '(self.model_xml)\n', (4689, 4705), False, 'from brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((5787, 5887), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos, self.sim.data.qvel, self.sim.data.qacc, self.sim.data.ctrl\n ]'], {}), '([self.sim.data.qpos, self.sim.data.qvel, self.sim.data.qacc,\n self.sim.data.ctrl])\n', (5801, 5887), True, 'import numpy as np\n'), ((8367, 8395), 'mujoco_py.MjViewer', 'mujoco_py.MjViewer', (['self.sim'], {}), '(self.sim)\n', (8385, 8395), False, 'import mujoco_py\n'), ((10849, 10865), 'numpy.asarray', 'np.asarray', (['arrs'], {}), '(arrs)\n', (10859, 10865), True, 'import numpy as np\n'), ((3656, 3685), 'brl_gym.envs.mujoco.model_updater.MujocoUpdater', 'MujocoUpdater', (['self.model_xml'], {}), '(self.model_xml)\n', (3669, 3685), False, 'from brl_gym.envs.mujoco.model_updater import MujocoUpdater\n'), ((3808, 3821), 'numpy.abs', 'np.abs', (['value'], {}), '(value)\n', (3814, 3821), True, 'import numpy as np\n'), ((8489, 8537), 'mujoco_py.MjRenderContextOffscreen', 'mujoco_py.MjRenderContextOffscreen', (['self.sim', '(-1)'], {}), '(self.sim, -1)\n', (8523, 8537), False, 'import mujoco_py\n'), ((4011, 4033), 'numpy.clip', 'np.clip', (['lb', '(0.001)', 'ub'], {}), '(lb, 0.001, ub)\n', (4018, 4033), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import argparse
import os
import os.path as path
import numpy as np
import tensorflow as tf
from mobilenet.dataset import imagenet
def main(args):
tf.random.set_seed(0)
if not path.isdir(args.examples_dir):
os.makedirs(args.examples_dir)
data, _ = imagenet(args.split, tuple(args.size), augment=args.augment)
for i, item in enumerate(data.unbatch()):
if i >= args.n_examples:
break
class_id = np.argmax(item[1])
filename = path.join(args.examples_dir, '{}_{}.jpeg'.format(i, class_id))
image_uint8 = tf.image.convert_image_dtype(item[0], tf.uint8)
tf.io.write_file(filename, tf.io.encode_jpeg(image_uint8))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
add_help=False)
parser.add_argument(
'-h', '--help', action='help',
help='Display this help message and exit.')
parser.add_argument(
'-E', '--examples-dir', default='examples',
help='Examples are saved to a subdirectory of this directory '
'corresponding to their split ("train", "val", or '
'"test").')
parser.add_argument(
'-a', '--augment', action='store_true',
help='Apply data augmentation. During actual training this '
'should only be done with training-set images. Here it '
'can be done with any split.')
parser.add_argument(
'-n', '--n-examples', default=10, type=int,
help='The number of examples to save.')
parser.add_argument(
'-s', '--size', nargs=2, default=[320, 320], type=int,
help='The height and width (in that order) to which images '
'should be resized.')
parser.add_argument(
'-S', '--split',
default='train', choices=['train', 'val', 'test'],
help='The dataset split from which examples should be pulled.')
main(parser.parse_args())
| [
"tensorflow.image.convert_image_dtype",
"tensorflow.random.set_seed",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.isdir",
"tensorflow.io.encode_jpeg"
] | [((179, 200), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (197, 200), True, 'import tensorflow as tf\n'), ((754, 854), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'add_help': '(False)'}), '(formatter_class=argparse.\n ArgumentDefaultsHelpFormatter, add_help=False)\n', (777, 854), False, 'import argparse\n'), ((212, 241), 'os.path.isdir', 'path.isdir', (['args.examples_dir'], {}), '(args.examples_dir)\n', (222, 241), True, 'import os.path as path\n'), ((251, 281), 'os.makedirs', 'os.makedirs', (['args.examples_dir'], {}), '(args.examples_dir)\n', (262, 281), False, 'import os\n'), ((474, 492), 'numpy.argmax', 'np.argmax', (['item[1]'], {}), '(item[1])\n', (483, 492), True, 'import numpy as np\n'), ((597, 644), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['item[0]', 'tf.uint8'], {}), '(item[0], tf.uint8)\n', (625, 644), True, 'import tensorflow as tf\n'), ((680, 710), 'tensorflow.io.encode_jpeg', 'tf.io.encode_jpeg', (['image_uint8'], {}), '(image_uint8)\n', (697, 710), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
'''
Standard Dynamic Energy Budget model
'''
import numpy as np
import pandas as pd
import scipy.integrate as sid
import lmfit
import matplotlib.pyplot as plt
import seaborn as sns
import corner
from tqdm import tqdm
from collections import namedtuple
from lossfunc import symmetric_loss_function
from deb_aux import physical_volume
EPS = np.finfo(np.double).eps
def get_deb_params_pandas():
'''Set up primary parameters with standard animal value.
Also include temperature params here for now.
Use standard animal values from AMP:
Code Value Unit Description
v 0.02 cm/d energy conductance
kap 0.8 / allocation fraction to soma
kap_R 0.95 / reproduction efficiency
p_M 18 J/d.cm^3 volume-specific somatic maintenance costs
k_J 0.002 1/d maturity maintenance rate coefficient
kap_G 0.8 / growth efficiency
'''
df = pd.DataFrame(columns=['Min', 'Max', 'Value', 'Dimension', 'Units', 'Description'])
df.loc['Fm'] = (EPS, np.nan, 6.5, 'l L**-2 t**-1', '', 'Specific searching rate')
df.loc['kappaX'] = (EPS, 1.0, 0.8, '-', '', 'Assimilation efficiency')
df.loc['pAm'] = (EPS, np.nan, 530.0, 'e L**-2 t**-1', '', 'max specific assimilation rate')
df.loc['v'] = (EPS, np.nan, 0.02, 'L t**-1', 'cm/d', 'Energy conductance')
df.loc['kappa'] = (EPS, 1.0, 0.8, '-', '', 'Allocation fraction to soma')
df.loc['kappaR'] = (EPS, 1.0, 0.95, '-', '', 'Reproduction efficiency')
df.loc['pM'] = (EPS, np.nan, 18.0, 'e L**-3 t**-1', 'J/d/cm**3', 'Volume-specific somatic maintenance cost')
df.loc['pT'] = (0.0, np.nan, 0.0, 'e L**-1 t**-1', '', 'Surface-specific somatic maintenance cost')
df.loc['kJ'] = (EPS, np.nan, 0.002, 't**-1', '', 'Maturity maintenance rate coefficient')
df.loc['EG'] = (EPS, np.nan, 4184., 'e L**-3', '', 'Specific cost for structure')
df.loc['EbH'] = (EPS, np.nan, 1e-6, 'e', '', 'Maturity at birth')
df.loc['EpH'] = (EPS, np.nan, 843.6, 'e', '', 'Maturity at puberty')
df.loc['TA'] = (EPS, np.nan, 6000.0, 'T', 'K', 'Arrhenius temperature')
df.loc['Ts'] = (EPS, np.nan, 273.1+20.0, 'T', 'K', 'Reference temperature')
return df
def get_deb_params(df=None):
if df is None:
df = get_deb_params_pandas()
v = lmfit.Parameters()
for name, (mi, ma, va, dim, unit, desc) in df.iterrows():
v.add(name, value=va, min=mi, max=ma, vary=False)
return v
def params_to_dataframe(params):
'''Convert lmfit Parameters to Pandas DataFrame'''
df = get_deb_params_pandas()
for name, p in params.items():
df.loc[name, 'Min'] = p.min
df.loc[name, 'Max'] = p.max
df.loc[name, 'Value'] = p.value
return df
def arrhenius_scaling(T, TA, Ts):
'''Arrhenius temperature scaling relationship'''
return np.exp(TA/Ts - TA/T)
class Fluxes:
'''Fluxes in the standard DEB model'''
def __init__(self):
# Rates scale with temperature (per time times)
self.temp_scale_params = ['pAm', 'v', 'pM', 'pT', 'kJ']
@staticmethod
def pA(f, V, pAm):
return f * pAm * np.cbrt(V**2)
@staticmethod
def pC(E, V, EG, v, kappa, pS):
return E * (EG * v * np.cbrt(V**2) + pS) / (kappa * E + EG * V)
@staticmethod
def pS(V, pM, pT):
return pM*V + pT*V**(2/3.)
@staticmethod
def pG(kappa, pC, pS):
return kappa*pC - pS
@staticmethod
def pJ(EH, kJ):
return kJ * EH
@staticmethod
def pR(kappa, pC, pJ):
return (1 - kappa) * pC - pJ
def __call__(self, t, forcings, state, params):
'''Evaluate fluxes for given time, state and environment'''
#Unpack state
E, V, EH, ER = state
#Food level
f = forcings['f']
#Temperature
if 'T' in forcings.keys():
T = forcings['T'](t)
else:
T = params['Ts']
#Temperature scaling of rates
TA = params['TA']
Ts = params['Ts']
ats = arrhenius_scaling(T, TA, Ts)
pAm = ats * params['pAm']
v = ats * params['v']
pM = ats * params['pM']
pT = ats * params['pT']
kJ = ats * params['kJ']
#s = pd.Series(name='Fluxes')
s = dict()
s['pA'] = Fluxes.pA(f(t), V, pAm)
s['pS'] = Fluxes.pS(V, pM, pT)
s['pC'] = Fluxes.pC(E, V, params['EG'], v, params['kappa'], s['pS'])
s['pG'] = Fluxes.pG(params['kappa'], s['pC'], s['pS'])
s['pJ'] = Fluxes.pJ(EH, kJ)
s['pR'] = Fluxes.pR(params['kappa'], s['pC'], s['pJ'])
return s
class DEBStandard(Fluxes):
'''Standard DEB model for reserve energy, structural volume, maturity
energy and reproduction buffer (E, V, EH, ER)
'''
def __init__(self, forcings, pripars=None):
self.params = get_deb_params(pripars)
self.fluxes = Fluxes()
self.forcings = forcings
if 'T' in forcings.keys():
if callable(forcings['T']):
print('Applying dynamic temperature adjustment of rates')
else:
print('Applying static temperature adjustment of rates.')
# Parameters for ODE solver and data fit
self._ode_nsteps = 6000
self.dy = np.zeros(4)
def _rhs(self, t, y, params, forcings):
'''Standard DEB model equation set
dE/dt = pA - pC Reserve energy
dV/dt = pG / [EG] Structural volume
dEH/dt = pR(EH < EHp) Maturity energy
dER/dt = pR(EH = EHp) Reproduction buffer energy
State vector indexing:
y[0] = E, y[1] = V, y[2] = EH, y[3] = ER
'''
v = params
dy = self.dy
# Current state vector values
E = y[0]
V = y[1]
EH = y[2]
ER = y[3]
#Calculate fluxes
flux = self.fluxes(t, forcings, [E, V, EH, ER], params)
# Reserve energy equation
dE = flux['pA'] - flux['pC']
# Structural volume equation
dV = flux['pG'] / v['EG']
#Maturity and reproductive buffer energy equations
if(EH < v['EpH']):
dEH = flux['pR']
dER = 0.0
else:
dEH = 0.0
dER = flux['pR']
dy[:] = [dE, dV, dEH, dER]
return dy
def _solve(self, params, y0, times):
'''Solver for model ODE.
Returns solution to model ODE at times <times>, given model parameters
<params> and the time-dependent exposure profile function <cd>, for
given initial conditions <y0>.
'''
# Trying explicit bdf for stiff equations, since lsoda complains
#r = sid.ode(cls.currhs).set_integrator('vode', nsteps=1000, method='bdf')
r = sid.ode(self._rhs).set_integrator('lsoda', nsteps=self._ode_nsteps,
rtol=1e-6)
r.set_initial_value(y0, times[0])
r.set_f_params(params.valuesdict(), self.forcings)
sols = [y0]
for t in times[1:]:
r.integrate(t)
sols.append(r.y)
y = np.array(sols)
return y, r
def predict(self, y0, times):
y, r = self._solve(self.params, y0, times)
self.solver_result = r
self.times = times
self.E = y[:, 0]
self.V = y[:, 1]
self.EH = y[:, 2]
self.ER = y[:, 3]
def plot_state(self, fig=None):
'''Plot state variables, run predict() first'''
#Maximum structural length
Lm = self.params['kappa']*self.params['pAm']/self.params['pM']
if not fig:
fig, ax = plt.subplots(2, 2, figsize=(10, 10))
ax = ax.flatten()
else:
ax = fig.axes
t = self.times
f = self.forcings['f']
ax[0].plot(t, self.E)
ax[0].set_title('Reserve (E)')
ax[0].set_ylabel('E [J]')
ax[1].plot(t, self.V)
ax[1].set_title('Structure (V)')
ax[1].set_ylabel('V [cm**3]')
ax[2].plot(t, self.EH)
ax[2].set_title('Maturity (EH)')
ax[2].set_xlabel('Time [days]')
ax[2].set_ylabel('EH [J]')
ax[3].plot(t, self.ER)
ax[3].set_title('Reproduction buffer (ER)')
ax[3].set_xlabel('Time [days]')
ax[3].set_ylabel('ER [J]')
ax[1].axhline(Lm**3, ls='--')
return fig
class DEBFit:
def __init__(self, debmodel, initial_state, auxpars):
self.debmodel = debmodel
self.initial_state = initial_state
self.auxpars = auxpars
#This maps the DEB state variables to observables (data variables)
#Hard-coded default is physical volume auxillary variable
self.auxmap = self.get_physical_volume
#Max iterations for Nelder-Mead optimization
self._fit_maxiter = 3000
def get_physical_volume(self, S, p):
E = S[:, 0]
V = S[:, 1]
ER = S[:, 3]
ap = self.auxpars
Vw, V, VE, VER = physical_volume(V, E, ER, ap.wE, ap.dE, ap.muE)
return Vw
def objective(self, params, datasets):
'''Objective function to minimize for parameter estimation'''
modpreds = []
weights = []
for data in datasets:
times = data.index.values
y0 = self.initial_state
#Solve DEB model equations
y, _ = self.debmodel._solve(params, y0, times)
#Here we need to map the DEB state onto data variables via
#observable auxillary function
o = self.auxmap(y, params)
modpreds.append(o)
#Use weights=1 for now
weights.append(np.ones(times.size))
#Calculate loss function
loss = symmetric_loss_function(datasets, modpreds, weights)
return loss
def fit(self, data, progressbar=True):
if progressbar:
pbar = tqdm(total=self._fit_maxiter)
def objective_wrapper(*args, **kwargs):
pbar.update(1)
return self.objective(*args, **kwargs)
else:
objective_wrapper = self.objective
# Run the minimizer with the simplex method, simultanous fit to all data
result = lmfit.minimize(objective_wrapper, self.debmodel.params,
args=(data,), method='nelder', tol=1e-9,
options=dict(maxfev=self._fit_maxiter))
self.params = result.params
if progressbar:
pbar.close()
return result
if __name__ == '__main__':
forcings = dict(f=lambda t: t)
state = [1, 1, 1, 1]
pars = get_deb_params()
flux = Fluxes()
print(flux(0.0, forcings, state, pars))
debmod = DEBStandard({'f': lambda t: 0.1*t})
y0 = [1, 1, 0, 0]
print(debmod._rhs(0.5, y0, debmod.params, debmod.forcings))
y, res = debmod._solve(debmod.params, y0, [0, 1, 2])
print('Integration successful: ', res.successful())
print('Stiff ODE: ', res.stiff)
print(y)
| [
"numpy.ones",
"pandas.DataFrame",
"tqdm.tqdm",
"lossfunc.symmetric_loss_function",
"deb_aux.physical_volume",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.finfo",
"numpy.cbrt",
"scipy.integrate.ode",
"lmfit.Parameters",
"matplotlib.pyplot.subplots"
] | [((365, 384), 'numpy.finfo', 'np.finfo', (['np.double'], {}), '(np.double)\n', (373, 384), True, 'import numpy as np\n'), ((903, 989), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Min', 'Max', 'Value', 'Dimension', 'Units', 'Description']"}), "(columns=['Min', 'Max', 'Value', 'Dimension', 'Units',\n 'Description'])\n", (915, 989), True, 'import pandas as pd\n'), ((2284, 2302), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (2300, 2302), False, 'import lmfit\n'), ((2821, 2845), 'numpy.exp', 'np.exp', (['(TA / Ts - TA / T)'], {}), '(TA / Ts - TA / T)\n', (2827, 2845), True, 'import numpy as np\n'), ((5266, 5277), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (5274, 5277), True, 'import numpy as np\n'), ((7122, 7136), 'numpy.array', 'np.array', (['sols'], {}), '(sols)\n', (7130, 7136), True, 'import numpy as np\n'), ((9001, 9048), 'deb_aux.physical_volume', 'physical_volume', (['V', 'E', 'ER', 'ap.wE', 'ap.dE', 'ap.muE'], {}), '(V, E, ER, ap.wE, ap.dE, ap.muE)\n', (9016, 9048), False, 'from deb_aux import physical_volume\n'), ((9758, 9810), 'lossfunc.symmetric_loss_function', 'symmetric_loss_function', (['datasets', 'modpreds', 'weights'], {}), '(datasets, modpreds, weights)\n', (9781, 9810), False, 'from lossfunc import symmetric_loss_function\n'), ((3113, 3128), 'numpy.cbrt', 'np.cbrt', (['(V ** 2)'], {}), '(V ** 2)\n', (3120, 3128), True, 'import numpy as np\n'), ((7647, 7683), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 10)'}), '(2, 2, figsize=(10, 10))\n', (7659, 7683), True, 'import matplotlib.pyplot as plt\n'), ((9920, 9949), 'tqdm.tqdm', 'tqdm', ([], {'total': 'self._fit_maxiter'}), '(total=self._fit_maxiter)\n', (9924, 9949), False, 'from tqdm import tqdm\n'), ((6779, 6797), 'scipy.integrate.ode', 'sid.ode', (['self._rhs'], {}), '(self._rhs)\n', (6786, 6797), True, 'import scipy.integrate as sid\n'), ((9676, 9695), 'numpy.ones', 'np.ones', (['times.size'], {}), '(times.size)\n', (9683, 9695), 
True, 'import numpy as np\n'), ((3211, 3226), 'numpy.cbrt', 'np.cbrt', (['(V ** 2)'], {}), '(V ** 2)\n', (3218, 3226), True, 'import numpy as np\n')] |
import numpy as np
from itertools import chain, islice, product, repeat, cycle, izip
from fos.actor.surf import CommonSurfaceGroup, IlluminatedSurfaceGroup
from fos.core.world import World
from fos.lib.pyglet.gl import *
from fos.lib.pyglet.graphics import Batch
from fos.core.actor import Actor
from fos.geometry.vec3 import Vec3
from fos.geometry.math3d import *
type_to_enum = {
gl.GLubyte: gl.GL_UNSIGNED_BYTE,
gl.GLushort: gl.GL_UNSIGNED_SHORT,
gl.GLuint: gl.GL_UNSIGNED_INT,
}
def glarray(gltype, seq, length):
'''
Convert a list of lists into a flattened ctypes array, eg:
[ (1, 2, 3), (4, 5, 6) ] -> (GLfloat*6)(1, 2, 3, 4, 5, 6)
'''
arraytype = gltype * length
return arraytype(*seq)
def tessellate(face):
'''
Return the given face broken into a list of triangles, wound in the
same direction as the original poly. Does not work for concave faces.
e.g. [0, 1, 2, 3, 4] -> [[0, 1, 2], [0, 2, 3], [0, 3, 4]]
'''
return (
[face[0], face[index], face[index + 1]]
for index in xrange(1, len(face) - 1)
)
def face_normal(vertices, face):
'''
Return the unit normal vector (at right angles to) this face.
Note that the direction of the normal will be reversed if the
face's winding is reversed.
'''
v0 = vertices[face[0]]
v1 = vertices[face[1]]
v2 = vertices[face[2]]
a = v0 - v1
b = v2 - v1
return b.cross(a).normalized()
class GLPrimitive(object):
def __init__(self):
self.num_glvertices = None
self.glvertices = None
self.glindex_type = None
self.glindices = None
self.glcolors = None
self.glnormals = None
def get_num_glvertices(_, faces):
return len(list(chain(*faces)))
def get_glvertices(self, vertices, faces):
glverts = chain.from_iterable(
vertices[index]
for face in faces
for index in face
)
self.num_glvertices = self.get_num_glvertices(faces)
return glarray(gl.GLfloat, glverts, self.num_glvertices * 3)
def get_glindex_type(self):
'''
The type of the glindices array depends on how many vertices there are
'''
if self.num_glvertices < 256:
index_type = gl.GLubyte
elif self.num_glvertices < 65536:
index_type = gl.GLushort
else:
index_type = gl.GLuint
return index_type
def get_glindices(self, faces):
glindices = []
face_offset = 0
for face in faces:
indices = xrange(face_offset, face_offset + len(face))
glindices.extend(chain(*tessellate(indices)))
face_offset += len(face)
self.glindex_type = self.get_glindex_type()
return glarray(self.glindex_type, glindices, len(glindices))
def get_glcolors(self, faces, face_colors):
glcolors = chain.from_iterable(
repeat(color, len(face))
for face, color in izip(faces, face_colors)
)
return glarray(gl.GLubyte, chain(*glcolors), self.num_glvertices * 4)
def get_glnormals(self, vertices, faces):
normals = (
face_normal(vertices, face)
for face in faces
)
glnormals = chain.from_iterable(
repeat(normal, len(face))
for face, normal in izip(faces, normals)
)
return glarray(
gl.GLfloat, chain(*glnormals), self.num_glvertices * 3)
def from_shape(self, vertices, faces, face_colors, affine):
self.glvertices = self.get_glvertices(vertices, faces)
self.glindices = self.get_glindices(faces)
self.glcolors = self.get_glcolors(faces, face_colors)
self.glnormals = self.get_glnormals(vertices, faces)
self.affine = self.get_affine(affine)
def get_affine(self, affine):
ident = M3DMatrix44f()
ident = m3dLoadIdentity44(ident)
translate = m3dTranslateMatrix44(ident,
affine[0,3],
affine[1,3],
affine[2.3])
# do the rotation
return translate
class Polyhedron(object):
'''
Defines a polyhedron, a 3D shape with flat faces and straight edges.
Each vertex defines a point in 3d space. Each face is a list of indices
into the vertex array, forming a coplanar convex ring defining the face's
edges. Each face has its own color.
'''
def __init__(self, vertices, faces, face_colors=None, affine=None):
if len(vertices) > 0 and not isinstance(vertices[0], Vec3):
vertices = [Vec3(*v) for v in vertices]
self.vertices = vertices
for face in faces:
assert len(face) >= 3
for index in face:
assert 0 <= index < len(vertices)
self.faces = faces
if face_colors is None:
face_colors = repeat((255, 0, 0, 255))
# if face_colors is None:
# face_colors = white
# if isinstance(face_colors, Color):
# face_colors = repeat(face_colors)
# TODO: colors of koch_cube/tetra break if we remove this 'list'
# and set face_colors to the return of 'islice'. Don't know why.
# returns a list of tuples of len self.faces of the given facecolors
self.face_colors = list(islice(cycle(face_colors), len(self.faces)))
self.affine = affine
def get_glprimitive(self):
glprimitive = GLPrimitive()
glprimitive.from_shape(self.vertices, self.faces, self.face_colors, self.affine)
return glprimitive
class Cubes(Actor):
def __init__(self, location):
'''
# just one cube
e2 = 1. / 2
verts = list(product(*repeat([-e2, +e2], 3)))
faces = [
[0, 1, 3, 2], # left
[4, 6, 7, 5], # right
[7, 3, 1, 5], # front
[0, 2, 6, 4], # back
[3, 7, 6, 2], # top
[1, 0, 4, 5], # bottom
]
oc = Polyhedron(verts, faces)
glprim = oc.get_glprimitive()
group = IlluminatedSurfaceGroup()
'''
self.primitives = []
nr_cubes = location.shape[0]
for i in xrange(nr_cubes):
self.primitives.append(self.create_cubes(location[:,i],1.0))
def create_cubes(self, location, edge_size, color=None):
e2 = edge_size / 2.0
verts = list(product(*repeat([-e2, +e2], 3)))
faces = [
[0, 1, 3, 2], # left
[4, 6, 7, 5], # right
[7, 3, 1, 5], # front
[0, 2, 6, 4], # back
[3, 7, 6, 2], # top
[1, 0, 4, 5], # bottom
]
aff= np.eye(4)
aff[3,:3] = location
oc = Polyhedron(verts, faces, affine = aff)
return oc
'''
self.vertex_list = []
self.batch=Batch()
self.vertex_list.append(self.batch.add_indexed(len(glprim.glvertices) / 3,\
GL_TRIANGLES,\
None,\
list(glprim.glindices),\
('v3f/static',np.array(glprim.glvertices)),\
('n3f/static',list(glprim.glnormals)),\
('c4B/static',list(glprim.glcolors))
) )
ver = np.array(glprim.glvertices)
for i in range(1,300):
self.vertex_list.append(self.batch.add_indexed(len(glprim.glvertices) / 3,\
GL_TRIANGLES,\
None,\
list(glprim.glindices),\
('v3f/static',ver+(i*1)),\
('n3f/static',list(glprim.glnormals)),\
('c4B/static',list(glprim.glcolors))
))
self.vertex_list.append(self.batch.add_indexed(len(glprim.glvertices) / 3,\
GL_TRIANGLES,\
None,\
list(glprim.glindices),\
('v3f/static',ver-(i*1)),\
('n3f/static',list(glprim.glnormals)),\
('c4B/static',list(glprim.glcolors))
))
'''
def draw(self):
gl.glEnableClientState(gl.GL_NORMAL_ARRAY)
for item in self.primitives:
gl.glPushMatrix()
# gl.glMultMatrixf(glyph.affine)
glyph = item.get_glprimitive()
gl.glVertexPointer(
3, gl.GL_FLOAT, 0, glyph.glvertices)
gl.glColorPointer(
4, gl.GL_UNSIGNED_BYTE, 0, glyph.glcolors)
gl.glNormalPointer(gl.GL_FLOAT, 0, glyph.glnormals)
gl.glDrawElements(
gl.GL_TRIANGLES,
len(glyph.glindices),
type_to_enum[glyph.glindex_type],
glyph.glindices)
gl.glPopMatrix()
# self.batch.draw()
def delete(self):
self.vertex_list.delete()
if __name__ == '__main__':
mycubes = Cubes( np.array([0.0,0.0,0.0]) )
| [
"itertools.chain",
"numpy.eye",
"itertools.cycle",
"numpy.array",
"itertools.chain.from_iterable",
"fos.geometry.vec3.Vec3",
"itertools.izip",
"itertools.repeat"
] | [((1850, 1922), 'itertools.chain.from_iterable', 'chain.from_iterable', (['(vertices[index] for face in faces for index in face)'], {}), '(vertices[index] for face in faces for index in face)\n', (1869, 1922), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((6900, 6909), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6906, 6909), True, 'import numpy as np\n'), ((9856, 9881), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (9864, 9881), True, 'import numpy as np\n'), ((3087, 3103), 'itertools.chain', 'chain', (['*glcolors'], {}), '(*glcolors)\n', (3092, 3103), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((3477, 3494), 'itertools.chain', 'chain', (['*glnormals'], {}), '(*glnormals)\n', (3482, 3494), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((5036, 5060), 'itertools.repeat', 'repeat', (['(255, 0, 0, 255)'], {}), '((255, 0, 0, 255))\n', (5042, 5060), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((1767, 1780), 'itertools.chain', 'chain', (['*faces'], {}), '(*faces)\n', (1772, 1780), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((4746, 4754), 'fos.geometry.vec3.Vec3', 'Vec3', (['*v'], {}), '(*v)\n', (4750, 4754), False, 'from fos.geometry.vec3 import Vec3\n'), ((5499, 5517), 'itertools.cycle', 'cycle', (['face_colors'], {}), '(face_colors)\n', (5504, 5517), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((3017, 3041), 'itertools.izip', 'izip', (['faces', 'face_colors'], {}), '(faces, face_colors)\n', (3021, 3041), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((3398, 3418), 'itertools.izip', 'izip', (['faces', 'normals'], {}), '(faces, normals)\n', (3402, 3418), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n'), ((6634, 6655), 'itertools.repeat', 'repeat', (['[-e2, +e2]', 
'(3)'], {}), '([-e2, +e2], 3)\n', (6640, 6655), False, 'from itertools import chain, islice, product, repeat, cycle, izip\n')] |
from pyAudioAnalysis import audioFeatureExtraction
from keras.preprocessing import sequence
from scipy import stats
import numpy as np
import cPickle
import sys
import globalvars
def feature_extract(data, nb_samples, dataset, save=True):
f_global = []
i = 0
for (x, Fs) in data:
# 34D short-term feature
f = audioFeatureExtraction.stFeatureExtraction(x, Fs, globalvars.frame_size * Fs, globalvars.step * Fs)
# Harmonic ratio and pitch, 2D
hr_pitch = audioFeatureExtraction.stFeatureSpeed(x, Fs, globalvars.frame_size * Fs, globalvars.step * Fs)
f = np.append(f, hr_pitch.transpose(), axis=0)
# Z-normalized
f = stats.zscore(f, axis=0)
f = f.transpose()
f_global.append(f)
sys.stdout.write("\033[F")
i = i + 1
print("Extracting features " + str(i) + '/' + str(nb_samples) + " from data set...")
f_global = sequence.pad_sequences(f_global, maxlen=globalvars.max_len, dtype='float64', padding='post',
value=-100.0)
if save:
print("Saving features to file...")
cPickle.dump(f_global, open(dataset + '_features.p', 'wb'))
return f_global
def get_confusion_matrix_one_hot(model_results, truth):
'''
model_results and truth should be for one-hot format, i.e, have >= 2 columns,
where truth is 0/1, and max along each row of model_results is model result
'''
assert model_results.shape == truth.shape
num_outputs = truth.shape[1]
confusion_matrix = np.zeros((num_outputs, num_outputs), dtype=np.int32)
predictions = np.argmax(model_results, axis=1)
assert len(predictions) == truth.shape[0]
for actual_class in range(num_outputs):
idx_examples_this_class = truth[:, actual_class] == 1
prediction_for_this_class = predictions[idx_examples_this_class]
for predicted_class in range(num_outputs):
count = np.sum(prediction_for_this_class == predicted_class)
confusion_matrix[actual_class, predicted_class] = count
assert np.sum(confusion_matrix) == len(truth)
assert np.sum(confusion_matrix) == np.sum(truth)
return confusion_matrix
| [
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"scipy.stats.zscore",
"pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction",
"pyAudioAnalysis.audioFeatureExtraction.stFeatureSpeed",
"keras.preprocessing.sequence.pad_sequences",
"sys.stdout.write"
] | [((927, 1037), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['f_global'], {'maxlen': 'globalvars.max_len', 'dtype': '"""float64"""', 'padding': '"""post"""', 'value': '(-100.0)'}), "(f_global, maxlen=globalvars.max_len, dtype='float64',\n padding='post', value=-100.0)\n", (949, 1037), False, 'from keras.preprocessing import sequence\n'), ((1557, 1609), 'numpy.zeros', 'np.zeros', (['(num_outputs, num_outputs)'], {'dtype': 'np.int32'}), '((num_outputs, num_outputs), dtype=np.int32)\n', (1565, 1609), True, 'import numpy as np\n'), ((1628, 1660), 'numpy.argmax', 'np.argmax', (['model_results'], {'axis': '(1)'}), '(model_results, axis=1)\n', (1637, 1660), True, 'import numpy as np\n'), ((340, 443), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['x', 'Fs', '(globalvars.frame_size * Fs)', '(globalvars.step * Fs)'], {}), '(x, Fs, globalvars.frame_size *\n Fs, globalvars.step * Fs)\n', (382, 443), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((499, 598), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureSpeed', 'audioFeatureExtraction.stFeatureSpeed', (['x', 'Fs', '(globalvars.frame_size * Fs)', '(globalvars.step * Fs)'], {}), '(x, Fs, globalvars.frame_size * Fs, \n globalvars.step * Fs)\n', (536, 598), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((685, 708), 'scipy.stats.zscore', 'stats.zscore', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (697, 708), False, 'from scipy import stats\n'), ((773, 799), 'sys.stdout.write', 'sys.stdout.write', (['"""\x1b[F"""'], {}), "('\\x1b[F')\n", (789, 799), False, 'import sys\n'), ((2090, 2114), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2096, 2114), True, 'import numpy as np\n'), ((2140, 2164), 'numpy.sum', 'np.sum', (['confusion_matrix'], {}), '(confusion_matrix)\n', (2146, 2164), True, 'import numpy as np\n'), ((2168, 2181), 'numpy.sum', 'np.sum', (['truth'], {}), 
'(truth)\n', (2174, 2181), True, 'import numpy as np\n'), ((1958, 2010), 'numpy.sum', 'np.sum', (['(prediction_for_this_class == predicted_class)'], {}), '(prediction_for_this_class == predicted_class)\n', (1964, 2010), True, 'import numpy as np\n')] |
import numpy as np
import math
from os import path
import imageio
from datetime import date
def convertCSVtoImage(Filepath, FileFormat):
supportedFileFormats = ["png","jpeg","jpg","bmp"]
if not FileFormat in supportedFileFormats:
raise ValueError("Outputformat {} is not supported! The following are allowed: {}.".format(FileFormat, "".join(supportedFileFormats)))
if not path.isfile(Filepath):
raise ValueError("Path: '{}' is no file!".format(Filepath))
if not path.splitext(Filepath)[1] in [".txt",".csv"]:
raise ValueError("Selected fileformat {} is not valid. Only .csv or .txt are allowed!".format(path.splitext(Filepath)[1]))
with open(Filepath, "r") as f:
#read all the lines from the file
lines = f.readlines()
#first line only has comments, we ignore it
#second line has the image dimensions so we need to read it as integer
try:
x,y = [(int(x)) for x in lines[1].split()]
except ValueError:
raise ValueError("Error reading the image dimensions. Check that there are two integers seperated by whitespace!")
#create an empty numpy array to store the image data.
imageArray = np.zeros((x,y,3), dtype=np.uint8)
#drop the first two rows of data
lines = lines[2:]
for index, line in enumerate(lines):
#each line contains the r,g,b information separated by blank spaces
try:
r,g,b = [(int(x)) for x in line.split()]
except ValueError:
raise ValueError("Error in line {}. Check that there are three integers seperated by whitespace!".format(index+2))
imageArray[math.floor(index / y),index % y] = np.array((r,g,b))
outputPath = path.splitext(Filepath)[0]+"."+FileFormat
imageio.imwrite(outputPath,imageArray)
def convertImageToCsv(Filepath, FileFormat):
supporteImageFormats = [".png",".jpeg",".jpg",".bmp"]
if not FileFormat in ["csv","txt"]:
raise ValueError("Outputformat {} is not supported! The following are allowed: {}.".format(FileFormat, "".join(["csv","txt"])))
if not path.isfile(Filepath):
raise ValueError("Path: '{}' is no file!".format(Filepath))
if not path.splitext(Filepath)[1] in supporteImageFormats:
raise ValueError("Selected fileformat {} is not valid. The following are allowed: {}.".format(path.splitext(Filepath)[1], " ".join(supporteImageFormats)))
outputPath = path.splitext(Filepath)[0]+"."+FileFormat
image = imageio.imread(Filepath)
with open(outputPath, "w") as f:
f.write("Created on {}\n".format(date.today()))
f.write("{} {}\n".format(image.shape[0],image.shape[1]))
if len(image.shape) == 2:
for ix, iy in np.ndindex(image.shape):
f.write("{} {} {}\n".format(image[ix,iy],image[ix,iy],image[ix,iy]))
elif len(image.shape) == 3:
for ix, iy in np.ndindex(image.shape[0:2]):
f.write("{} {} {}\n".format(image[ix,iy,0],image[ix,iy,1],image[ix,iy,2]))
def main():
#convertImageToCsv(r"C:\Users\Unknown\Documents\Master-Convolution\Python\Faces\test.jpg","txt")
convertCSVtoImage(r"C:\Users\Unknown\Documents\Master-Convolution\VHDL\Code\Testbench\input.txt","jpg")
if __name__ == "__main__":
main() | [
"imageio.imwrite",
"math.floor",
"os.path.splitext",
"numpy.ndindex",
"os.path.isfile",
"numpy.array",
"numpy.zeros",
"imageio.imread",
"datetime.date.today"
] | [((1834, 1873), 'imageio.imwrite', 'imageio.imwrite', (['outputPath', 'imageArray'], {}), '(outputPath, imageArray)\n', (1849, 1873), False, 'import imageio\n'), ((2558, 2582), 'imageio.imread', 'imageio.imread', (['Filepath'], {}), '(Filepath)\n', (2572, 2582), False, 'import imageio\n'), ((395, 416), 'os.path.isfile', 'path.isfile', (['Filepath'], {}), '(Filepath)\n', (406, 416), False, 'from os import path\n'), ((1227, 1262), 'numpy.zeros', 'np.zeros', (['(x, y, 3)'], {'dtype': 'np.uint8'}), '((x, y, 3), dtype=np.uint8)\n', (1235, 1262), True, 'import numpy as np\n'), ((2167, 2188), 'os.path.isfile', 'path.isfile', (['Filepath'], {}), '(Filepath)\n', (2178, 2188), False, 'from os import path\n'), ((1752, 1771), 'numpy.array', 'np.array', (['(r, g, b)'], {}), '((r, g, b))\n', (1760, 1771), True, 'import numpy as np\n'), ((2803, 2826), 'numpy.ndindex', 'np.ndindex', (['image.shape'], {}), '(image.shape)\n', (2813, 2826), True, 'import numpy as np\n'), ((498, 521), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (511, 521), False, 'from os import path\n'), ((1788, 1811), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (1801, 1811), False, 'from os import path\n'), ((2270, 2293), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (2283, 2293), False, 'from os import path\n'), ((2503, 2526), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (2516, 2526), False, 'from os import path\n'), ((2662, 2674), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2672, 2674), False, 'from datetime import date\n'), ((2975, 3003), 'numpy.ndindex', 'np.ndindex', (['image.shape[0:2]'], {}), '(image.shape[0:2])\n', (2985, 3003), True, 'import numpy as np\n'), ((647, 670), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (660, 670), False, 'from os import path\n'), ((1717, 1738), 'math.floor', 'math.floor', (['(index / y)'], {}), '(index / y)\n', (1727, 
1738), False, 'import math\n'), ((2424, 2447), 'os.path.splitext', 'path.splitext', (['Filepath'], {}), '(Filepath)\n', (2437, 2447), False, 'from os import path\n')] |
# Notebook-style cell: plot one sine wave over [0, 20].
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
# Run Cell
x = np.linspace(0, 20, 100)  # 100 evenly spaced sample points on [0, 20]
plt.plot(x, np.sin(x))  # y = sin(x)
plt.show() | [
"numpy.sin",
"numpy.linspace",
"matplotlib.pyplot.show"
] | [((91, 114), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(100)'], {}), '(0, 20, 100)\n', (102, 114), True, 'import numpy as np\n'), ((138, 148), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (146, 148), True, 'import matplotlib.pyplot as plt\n'), ((127, 136), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (133, 136), True, 'import numpy as np\n')] |
import math
import numpy as np
import tvm
from tvm.tir import IterVar
from .hw_abs_dag import construct_dag
from itertools import permutations, product
from functools import reduce
from . import _ffi_api
####################################################
# schedule parameter functions
####################################################
def get_factor_lst(value):
    """Return all positive factors of *value* (a positive int).

    Factors are emitted in complementary pairs (i, value // i) for each
    divisor i up to the square root, so the list is not sorted.

    Uses ``math.isqrt`` for an exact integer square root: the original
    float-based ``math.sqrt`` plus a 1e-10 epsilon test can misclassify the
    boundary for very large integers (> 2**52), silently dropping or
    duplicating the root factor.
    """
    assert isinstance(value, int)
    ret = []
    root = math.isqrt(value)  # exact, no float rounding at the boundary
    for i in range(1, root + 1):
        if value % i == 0:
            ret.append(i)
            pair = value // i
            if pair != i:  # perfect-square root must be added only once
                ret.append(pair)
    return ret
def powerx_lst(x, left, right):
    """Return all powers of ``x`` (starting from x**0 == 1) in [left, right).

    The first loop skips powers below ``left``; the second collects powers
    strictly below ``right``.

    Raises:
        ValueError: if ``x < 2`` — with such a base the original loops could
        never terminate (the running power would stay stuck at or below 1).
    """
    if x < 2:
        raise ValueError("powerx_lst requires x >= 2, got {}".format(x))
    ret = []
    power = 1
    while power < left:  # advance to the first power >= left
        power *= x
    while power < right:
        ret.append(power)
        power *= x
    return ret
def any_factor_split(value, number, allow_non_divisible="off"):
    """Enumerate all ways to split *value* into *number* multiplicative parts.

    ``allow_non_divisible`` selects the candidate policy handed down to the
    recursive helper: "off" (exact divisors only), "power2" (divisors plus
    powers of two), or "continuous" (every integer in range).
    """
    assert isinstance(number, int)
    assert allow_non_divisible in ("off", "power2", "continuous")
    results = []
    recursive_factor_split(value, [], number, results, allow_non_divisible)
    return results
def recursive_factor_split(left, cur, number, ret, policy):
    """Recursively build *number*-way splits of *left*, appending them to *ret*.

    Each recursion level picks one factor ``f`` from the candidate set chosen
    by ``policy`` and continues with ``left // f``; when only one slot remains
    the leftover value closes the split.
    """
    if number == 1:
        ret.append(cur + [left])
        return
    if policy == "power2":
        candidates = set(get_factor_lst(left))
        candidates.update(powerx_lst(2, 1, left))
        candidates = list(candidates)
    elif policy == "continuous":
        candidates = list(range(1, left + 1))
    else:
        candidates = get_factor_lst(left)
    for f in sorted(candidates):
        recursive_factor_split(left // f, cur + [f], number - 1, ret, policy)
def remap_factors(factor_lst):
    """Remap raw factor values onto contiguous indices 0..num_factors-1.

    Every position of every factor tuple must draw from a set of distinct
    values of the same size (checked by assertion). Returns a tuple
    ``(remapped_lst, index_to_value, dim, max_index)`` where ``dim`` is
    ``len(sample) - 1`` and ``max_index`` is ``num_factors - 1``.
    """
    assert isinstance(factor_lst, (list, tuple))
    assert len(factor_lst) > 0
    sample = factor_lst[0]
    assert isinstance(sample, (list, tuple))
    assert len(sample) > 0
    dim = len(sample) - 1
    # collect the distinct values observed at each position
    seen = {pos: set() for pos in range(dim + 1)}
    for factors in factor_lst:
        assert isinstance(factors, (list, tuple))
        assert len(factors) == dim + 1, dim
        for pos, val in enumerate(factors):
            seen[pos].add(val)
    num_factors = len(seen[0])
    for vals in seen.values():
        assert len(vals) == num_factors
    # build the value <-> index maps from the sorted distinct values
    ordered = sorted(seen[0])
    value_to_index = {val: idx for idx, val in enumerate(ordered)}
    index_to_value = dict(enumerate(ordered))
    remapped = [[value_to_index[v] for v in factors] for factors in factor_lst]
    return remapped, index_to_value, dim, num_factors - 1
def get_directions(dim):
    """Return every direction vector in {-1, 0, 1}^dim as a list of tuples."""
    directions = product((-1, 0, 1), repeat=dim)
    return list(directions)
def get_partial_directions(dim):
    """Return the 2*dim axis-aligned unit directions in dim dimensions.

    First all +1 steps (one per axis), then all -1 steps, each as a list of
    ``dim`` entries with a single non-zero coordinate.
    """
    ret = []
    for step in (1, -1):
        for axis in range(dim):
            direction = [0] * dim
            direction[axis] = step
            ret.append(direction)
    return ret
def bi_product(repeat):
    """Return all binary tuples of length *repeat*, i.e. the set {0, 1}^repeat."""
    return list(product((0, 1), repeat=repeat))
def softmax(x):
    """Softmax of *x* with max-subtraction for numerical stability.

    A small constant (1e-5) is added to the normalizer, so the entries sum
    to slightly less than 1. Returns a plain Python list.
    """
    shifted = np.exp(x - np.max(x))
    normalizer = shifted.sum() + 1e-5
    return (shifted / normalizer).tolist()
####################################################
# schedule helper tools
####################################################
def substitute_inputs(org_dag, op_map):
    """Rewrite a compute DAG by substituting operations per *op_map*.

    Parameters
    ----------
    org_dag: ComputeDAG
        The DAG to rewrite.
    op_map: dict of {Operation: Operation}
        Mapping from original operations to their replacements.

    Returns
    -------
    ComputeDAG
        The rewritten DAG produced by the FFI call.
    """
    return _ffi_api.SubstituteInputs(org_dag, op_map)
def reconstruct_dag_as_intrin(target_dag, main_op, hw_abs_dag, compute_key, shape_key):
    """Rebuild *target_dag* with *main_op* replaced by an intrinsic sub-DAG.

    Constructs the hardware-abstraction DAG for ``main_op``'s inputs/outputs
    and substitutes the resulting output op back into the target DAG.
    Returns ``(new_dag, (input_names, output_names, nodes, read_graph,
    feed_graph))``.
    """
    op_inputs = list(main_op.input_tensors)
    op_outputs = [main_op.output(0)]
    # TODO: consider elem op in dag construction
    dag_info = construct_dag(
        hw_abs_dag, compute_key, shape_key, op_inputs, op_outputs, [], op_outputs
    )
    input_names, output_names, nodes, read_graph, feed_graph = dag_info
    # concatenate the tensor lists of all output nodes; the first tensor's
    # op replaces main_op in the target DAG
    first_output = reduce(lambda acc, cur: acc + cur, [nodes[name] for name in output_names], [])[0]
    new_dag = substitute_inputs(target_dag, {main_op: first_output.op})
    return (new_dag, (input_names, output_names, nodes, read_graph, feed_graph))
def can_inline(op, dag):
    """Return True when *op* can be inlined into the DAG.

    An op is inlinable only if it feeds other ops in the DAG, is a
    ``tvm.te.ComputeOp``, and performs no reduction.

    op: tvm.te.Operation
    dag: ComputeDAG
    """
    return (
        op in dag.feed_graph
        and isinstance(op, tvm.te.ComputeOp)
        and len(op.reduce_axis) == 0
    )
def is_reduce_axis(iv: IterVar):
    """Return True if *iv* is a reduction (CommReduce) iteration axis."""
    return int(iv.iter_type) == IterVar.CommReduce
def is_heavy_reduce_op(op):
    """Return True when the op's total reduction extent is at least 64.

    Ops without a ``reduce_axis`` attribute count as extent 1 (not heavy).
    """
    extents = [int(axis.dom.extent) for axis in getattr(op, "reduce_axis", [])]
    return math.prod(extents) >= 64
def is_vectorized(iv: IterVar):
    """Return True if *iv* is a vectorized iteration axis."""
    return int(iv.iter_type) == IterVar.Vectorized
| [
"math.ceil",
"functools.reduce",
"itertools.product",
"math.sqrt",
"numpy.max"
] | [((427, 443), 'math.sqrt', 'math.sqrt', (['value'], {}), '(value)\n', (436, 443), False, 'import math\n'), ((3913, 3977), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', '[nodes[x] for x in output_names]', '[]'], {}), '(lambda x, y: x + y, [nodes[x] for x in output_names], [])\n', (3919, 3977), False, 'from functools import reduce\n'), ((4675, 4713), 'functools.reduce', 'reduce', (['(lambda a, b: a * b)', 're_exts', '(1)'], {}), '(lambda a, b: a * b, re_exts, 1)\n', (4681, 4713), False, 'from functools import reduce\n'), ((466, 480), 'math.ceil', 'math.ceil', (['end'], {}), '(end)\n', (475, 480), False, 'import math\n'), ((2630, 2661), 'itertools.product', 'product', (['[-1, 0, 1]'], {'repeat': 'dim'}), '([-1, 0, 1], repeat=dim)\n', (2637, 2661), False, 'from itertools import permutations, product\n'), ((2974, 3004), 'itertools.product', 'product', (['[0, 1]'], {'repeat': 'repeat'}), '([0, 1], repeat=repeat)\n', (2981, 3004), False, 'from itertools import permutations, product\n'), ((3045, 3054), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (3051, 3054), True, 'import numpy as np\n')] |
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
import sys
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
def plot(lats, lons, vals):
    """Contour *vals* on a global PlateCarree map and save to sys.argv[2].

    All three arrays must share the same shape.
    """
    assert lats.shape == lons.shape
    assert lats.shape == vals.shape
    print(lats.shape)
    crs = ccrs.PlateCarree()
    axes = plt.axes(projection=crs)
    plt.contourf(lons, lats, vals, 60, transform=crs, cmap="nipy_spectral")
    axes.coastlines()
    axes.set_global()
    plt.savefig(sys.argv[2])
def main():
    """Load the npz file named on the command line and plot its grid."""
    data = np.load(sys.argv[1])
    plot(data["lats"], data["lons"], data["values"])


if __name__ == "__main__":
    sys.exit(main())
| [
"numpy.load",
"matplotlib.pyplot.savefig",
"cartopy.crs.PlateCarree"
] | [((794, 818), 'matplotlib.pyplot.savefig', 'plt.savefig', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (805, 818), True, 'import matplotlib.pyplot as plt\n'), ((860, 880), 'numpy.load', 'np.load', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (867, 880), True, 'import numpy as np\n'), ((625, 643), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (641, 643), True, 'import cartopy.crs as ccrs\n'), ((703, 721), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (719, 721), True, 'import cartopy.crs as ccrs\n')] |
import time
import numpy
from ..Instruments import EG_G_7265
#from ..Instruments import SRS_SR830
from ..UserInterfaces.Loggers import NullLogger
class VSMController2(object):
    """Controller and sensor for the VSM (vibrating-sample magnetometer).

    Wraps an EG&G 7265 lock-in amplifier: configures its oscillator and
    input stage, and reads magnetization / amplitude values from its X and
    Magnitude outputs. Most methods block on ``time.sleep`` while the
    instrument settles.
    """
    # Controller and sensor of the VSM
    def __init__(self, Logger = None):
        # Lock-in amplifier used both as drive source and as detector.
        self.LockIn = EG_G_7265(RemoteOnly = False)
        #self.LockIn = SRS_SR830(GPIB_Address = 22, RemoteOnly = False)
        self.LockIn.InputMode('0')
        self.LockIn.VoltageInputMode('1')
        self.LockIn.FilterSlope('3')
        self.LockIn.setRefPhase(85.0)
        self.confDriver()
        self.confInput()
        # Calibration factor converting lock-in volts to emu.
        self.emu_per_V = 1
        #self.emu_per_V = 3.2867
        #self.emu_per_V = 1
        if Logger == None:
            self._logger = NullLogger()
        else:
            self._logger = Logger
        self.log = self._logger.log
    def confDriver(self, OscFrec = 200, OscAmp = 0.2):
        """Configure the lock-in oscillator (frequency in Hz, amplitude in V)."""
        self.LockIn.setOscilatorAmp(OscAmp)
        self.LockIn.setOscilatorFreq(OscFrec)
    def confInput(self, Sen = 0.1, TC = 0.1, AcGain = '0'):
        """Configure the lock-in input: sensitivity, time constant, AC gain."""
        self.LockIn.TC = TC
        self.LockIn.SEN = Sen
        self.LockIn.ConfigureInput(AcGain = AcGain)
    def ZeroPhase(self):
        """Auto-zero the reference phase by averaging 10 phase readings.

        Temporarily raises the time constant to 1 s for stable readings,
        then restores it. Blocks for roughly 30 s in total.
        """
        TCtemp = self.LockIn.TC
        self.LockIn.TC = 1
        time.sleep(15)
        ph = 0
        for i in range(10):
            time.sleep(1)
            ph = self.LockIn.Phase + ph
        ph = ph / 10.0
        self.LockIn.setRefPhase(self.LockIn.getRefPhase() + ph)
        self.LockIn.TC = TCtemp
        time.sleep(3)
    def getRefPhase(self):
        """Return the lock-in reference phase."""
        return self.LockIn.getRefPhase()
    def getMagnetization(self, n = 20, iniDelay = 1, measDelay = 0, stat = False, tol = 0.05, maxIts = 50):
        """Measure magnetization; returns numpy.array([mean, std]) * emu_per_V.

        Takes ``n`` readings of the lock-in X output. With ``stat=True``,
        readings whose squared error exceeds the current variance are
        dropped and re-measured until the spread falls below
        ``tol * SEN`` or ``maxIts`` refinement iterations have run.
        """
        self.log('Measuring Magnetization ... ', EOL = '')
        vsIn = numpy.zeros(n)
        time.sleep(iniDelay)
        for i in range(n):
            time.sleep(measDelay)
            vsIn[i] = self.LockIn.X
        vIn = vsIn.mean()
        sigma = vsIn.std()
        # Acceptable spread: tol fraction of the current sensitivity range.
        maxSigma = numpy.abs(self.LockIn.SEN * tol)
        if stat:
            its = 0
            while (sigma > maxSigma) and (its < maxIts):
                its = its + 1
                err = (vsIn - vIn)**2
                # Keep readings within one standard deviation of the mean,
                # then top the sample back up to n with fresh readings.
                vsIn = vsIn[err < sigma**2]
                while len(vsIn) < n:
                    time.sleep(measDelay)
                    vsIn = numpy.append(vsIn, self.LockIn.X)
                vIn = vsIn.mean()
                sigma = vsIn.std()
        self.log('Done.', [125,125,125])
        self.log('M = %.3E | ' % (vIn * self.emu_per_V), [100,100,100], EOL = '')
        self.log('s = %.3E ' % (sigma * self.emu_per_V), [190,190,190])
        return numpy.array([vIn, sigma])* self.emu_per_V
    def getAmplitude(self, n = 20, iniDelay = 1, measDelay = 0):
        """Return the mean of n lock-in Magnitude readings."""
        vsIn = numpy.zeros(n)
        time.sleep(iniDelay)
        for i in range(n):
            time.sleep(measDelay)
            vsIn[i] = self.LockIn.Magnitude
        vIn = vsIn.mean()
        return vIn
| [
"numpy.abs",
"time.sleep",
"numpy.append",
"numpy.array",
"numpy.zeros"
] | [((1316, 1330), 'time.sleep', 'time.sleep', (['(15)'], {}), '(15)\n', (1326, 1330), False, 'import time\n'), ((1575, 1588), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1585, 1588), False, 'import time\n'), ((1864, 1878), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (1875, 1878), False, 'import numpy\n'), ((1888, 1908), 'time.sleep', 'time.sleep', (['iniDelay'], {}), '(iniDelay)\n', (1898, 1908), False, 'import time\n'), ((2084, 2116), 'numpy.abs', 'numpy.abs', (['(self.LockIn.SEN * tol)'], {}), '(self.LockIn.SEN * tol)\n', (2093, 2116), False, 'import numpy\n'), ((2898, 2912), 'numpy.zeros', 'numpy.zeros', (['n'], {}), '(n)\n', (2909, 2912), False, 'import numpy\n'), ((2922, 2942), 'time.sleep', 'time.sleep', (['iniDelay'], {}), '(iniDelay)\n', (2932, 2942), False, 'import time\n'), ((1389, 1402), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1399, 1402), False, 'import time\n'), ((1950, 1971), 'time.sleep', 'time.sleep', (['measDelay'], {}), '(measDelay)\n', (1960, 1971), False, 'import time\n'), ((2764, 2789), 'numpy.array', 'numpy.array', (['[vIn, sigma]'], {}), '([vIn, sigma])\n', (2775, 2789), False, 'import numpy\n'), ((2984, 3005), 'time.sleep', 'time.sleep', (['measDelay'], {}), '(measDelay)\n', (2994, 3005), False, 'import time\n'), ((2391, 2412), 'time.sleep', 'time.sleep', (['measDelay'], {}), '(measDelay)\n', (2401, 2412), False, 'import time\n'), ((2441, 2474), 'numpy.append', 'numpy.append', (['vsIn', 'self.LockIn.X'], {}), '(vsIn, self.LockIn.X)\n', (2453, 2474), False, 'import numpy\n')] |
from collections import defaultdict, deque
class Graph:
    """Max-flow solver (Edmonds-Karp) over an adjacency-matrix residual graph."""

    def __init__(self, graph):
        """graph: square capacity matrix; mutated in place as the residual
        graph while FordFulkerson runs."""
        self.graph = graph  # residual graph
        self.ROW = len(graph)

    def BFS(self, s, t, parent):
        """Breadth-first search for an augmenting path from s to t.

        Fills ``parent`` with BFS-tree predecessors for every vertex reached
        through edges of residual capacity > 0, and returns True when t is
        reachable.
        """
        visited = [False] * self.ROW
        # deque gives O(1) popleft; list.pop(0) is O(n) per dequeue
        queue = deque([s])
        visited[s] = True
        while queue:
            u = queue.popleft()
            for ind, val in enumerate(self.graph[u]):
                if not visited[ind] and val > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return True if visited[t] else False

    def FordFulkerson(self, source, sink):
        """Return (max_flow, parent) for the maximum flow from source to sink.

        ``parent`` is the predecessor array left by the final BFS (the one
        that failed to reach the sink).
        """
        parent = [-1] * self.ROW
        max_flow = 0  # there is no flow initially
        while self.BFS(source, sink, parent):
            # bottleneck capacity along the augmenting path
            path_flow = float("Inf")
            s = sink
            while s != source:
                path_flow = min(path_flow, self.graph[parent[s]][s])
                s = parent[s]
            max_flow += path_flow
            # update residual capacities of forward and reverse edges
            v = sink
            while v != source:
                u = parent[v]
                self.graph[u][v] -= path_flow
                self.graph[v][u] += path_flow
                v = parent[v]
        return max_flow, parent
import timeit
import numpy as np
# Load a capacity matrix from disk and time a single max-flow computation.
content = np.loadtxt('file1.txt').tolist()
graph = content
g = Graph(graph)
source = 0; sink = 5
start = timeit.default_timer()
flow, parent = g.FordFulkerson(source, sink)
stop = timeit.default_timer()
print('Time: ', stop - start) | [
"timeit.default_timer",
"numpy.loadtxt"
] | [((1588, 1610), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1608, 1610), False, 'import timeit\n'), ((1664, 1686), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (1684, 1686), False, 'import timeit\n'), ((1490, 1513), 'numpy.loadtxt', 'np.loadtxt', (['"""file1.txt"""'], {}), "('file1.txt')\n", (1500, 1513), True, 'import numpy as np\n')] |
import os
from timemachines.skaters.localskaters import local_skater_from_name
from timemachines.skating import prior
import numpy as np
if __name__=='__main__':
    # Smoke test: run the skater named after this file on random data.
    from timemachines.skaters.sk.skinclusion import using_sktime
    assert using_sktime
    # Derive the skater name from the file name,
    # e.g. test_skater_foo.py -> "foo".
    skater_name = __file__.split(os.path.sep)[-1].replace('test_skater_', '').replace('.py', '')
    print(skater_name)
    f = local_skater_from_name(skater_name)
    assert f is not None
    y = np.random.randn(100)  # synthetic univariate series
    prior(f=f, y=y, k=1)  # run the skater over the series, one step ahead
| [
"timemachines.skating.prior",
"numpy.random.randn",
"timemachines.skaters.localskaters.local_skater_from_name"
] | [((380, 415), 'timemachines.skaters.localskaters.local_skater_from_name', 'local_skater_from_name', (['skater_name'], {}), '(skater_name)\n', (402, 415), False, 'from timemachines.skaters.localskaters import local_skater_from_name\n'), ((449, 469), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (464, 469), True, 'import numpy as np\n'), ((474, 494), 'timemachines.skating.prior', 'prior', ([], {'f': 'f', 'y': 'y', 'k': '(1)'}), '(f=f, y=y, k=1)\n', (479, 494), False, 'from timemachines.skating import prior\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 14:29:29 2021
@author: surajitrana
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_piechart():
    """Draw an example pie chart of car-brand shares with a legend."""
    shares = np.array([20, 25, 10, 15, 30])
    labels = np.array(["Audi", "Mercedez", "BMW", "Tesla", "Volvo"])
    explode = [0.2, 0, 0, 0, 0]  # pull the first wedge out slightly
    colors = ["black", "hotpink", "c", "#4CAF50", "yellow"]
    plt.pie(shares, labels=labels, startangle=90,
            explode=explode, shadow=True, colors=colors)
    plt.legend(title="Best cars of 2020-21", loc="lower right")
    plt.show()


if __name__ == '__main__':
    plot_piechart()
| [
"numpy.array",
"matplotlib.pyplot.pie",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((202, 232), 'numpy.array', 'np.array', (['[20, 25, 10, 15, 30]'], {}), '([20, 25, 10, 15, 30])\n', (210, 232), True, 'import numpy as np\n'), ((252, 307), 'numpy.array', 'np.array', (["['Audi', 'Mercedez', 'BMW', 'Tesla', 'Volvo']"], {}), "(['Audi', 'Mercedez', 'BMW', 'Tesla', 'Volvo'])\n", (260, 307), True, 'import numpy as np\n'), ((416, 529), 'matplotlib.pyplot.pie', 'plt.pie', (['dataset'], {'labels': 'chart_lables', 'startangle': '(90)', 'explode': 'chart_explode', 'shadow': '(True)', 'colors': 'chart_colors'}), '(dataset, labels=chart_lables, startangle=90, explode=chart_explode,\n shadow=True, colors=chart_colors)\n', (423, 529), True, 'import matplotlib.pyplot as plt\n'), ((562, 621), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'title': '"""Best cars of 2020-21"""', 'loc': '"""lower right"""'}), "(title='Best cars of 2020-21', loc='lower right')\n", (572, 621), True, 'import matplotlib.pyplot as plt\n'), ((626, 636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (634, 636), True, 'import matplotlib.pyplot as plt\n')] |
'''
Thermodynamic helper functions.
'''
from __future__ import division, print_function, absolute_import
import numpy as np
def get_satvps(T,T0,e0,Rv,Lv):
    """Saturation vapor pressure from the Clausius-Clapeyron relation.

    Assumes the latent heat Lv is constant with temperature. (T0, e0) is a
    reference point on the saturation curve; Rv is the vapor gas constant.
    """
    exponent = -(Lv / Rv) * (1. / T - 1. / T0)
    return e0 * np.exp(exponent)
# ---
def get_qsat(T,p_tot,params):
    """Saturation specific humidity at temperature T and *total* pressure.

    The dry partial pressure is inferred as p_tot - esat(T). ``params``
    must provide gas constants R (dry) and Rv (vapor) and a saturation
    pressure function esat(T).
    """
    eps = params.R/params.Rv
    e_sat = params.esat(T)
    p_dry = p_tot - e_sat
    return eps * e_sat / (eps * e_sat + p_dry)
# ---
def get_q(T,p_tot,params,RH=1.):
    """Specific humidity at relative humidity RH and *total* pressure.

    Valid for any RH; with RH=1 this reduces to get_qsat. The dry partial
    pressure is inferred from the total pressure.
    """
    eps = params.R/params.Rv
    p_h2o = RH * params.esat(T)
    p_dry = p_tot - p_h2o
    return eps * p_h2o / (p_dry + eps * p_h2o)
# ---
def get_rsat(T,p_tot,params):
    """Saturation mass mixing ratio r := rho_H2O / rho_dry.

    NOTE: this is NOT the volume/number mixing ratio; for that simply use
    n_H2O/n_dry = p_H2O/p_dry.
    """
    eps = params.R/params.Rv
    e_sat = params.esat(T)
    return eps * e_sat / (p_tot - e_sat)
# ---
def convert_molar_to_mass_ratio(molar_i,R_i,R_air):
    """Convert a molar concentration into a mass mixing ratio.

    Input:  molar concentration eta_i := n_i / (n_i + n_rest)  (by volume /
            number of molecules).
    Output: mass mixing ratio   q_i  := rho_i / (rho_i + rho_rest).

    Uses q_i = eta_i * <R>/R_i with the mass-weighted mean gas constant
    computed from molar fractions: <R> = 1/(eta_i/R_i + eta_air/R_air)
    (see, e.g., PoPC p.86-87).

    Example use: convert 300 ppmv of CO2 into a mass mixing ratio.
    """
    molar_air = 1. - molar_i
    R_mean = 1. / (molar_i / R_i + molar_air / R_air)
    return molar_i * R_mean / R_i
| [
"numpy.exp"
] | [((283, 324), 'numpy.exp', 'np.exp', (['(-(Lv / Rv) * (1.0 / T - 1.0 / T0))'], {}), '(-(Lv / Rv) * (1.0 / T - 1.0 / T0))\n', (289, 324), True, 'import numpy as np\n')] |
import json
import os
import sys
import albumentations as A
import numpy as np
import pandas as pd
import timm
import torch
import ttach as tta
from albumentations.augmentations.geometric.resize import Resize
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from tqdm import tqdm
import missed_planes.engine as engine
import missed_planes.metrics as metrics
from missed_planes.dataset import PlanesDataset
# Inference script: run k-fold model ensembling with test-time augmentation
# and write the averaged predictions as a submission CSV (plain and gzipped).
with open(sys.argv[1], "r") as f:
    config = json.load(f)

# Only resizing at test time; no training-style augmentation.
transforms = A.Compose(
    [
        A.Resize(height=config["image_size"], width=config["image_size"], p=1),
    ],
    p=1,
)

test_data = pd.read_csv(config["test_csv"])
test_dataset = PlanesDataset(
    test_data, path=config["test_path"], is_test=True, augmentation=transforms
)

test_loader = DataLoader(
    test_dataset,
    batch_size=config["batch_size"],
    shuffle=False,
    num_workers=config["num_workers"],
    drop_last=False,
)

with torch.no_grad():
    final = []
    # One pass over the test set per fold checkpoint; predictions are
    # averaged across folds below.
    for ind in range(config["folds"]):
        model = torch.load(f"{config['checkpoint']}/fold{ind}_{config['model']}.pt")
        model.eval()
        # D4 (flips/rotations) test-time augmentation, mean-merged.
        tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')
        result = []
        for i in tqdm(test_loader, total=len(test_loader)):
            i = i.to(config["device"])
            output = tta_model(i)
            output = output.view(-1).detach().cpu().numpy()
            result.extend(output)
        final.append(result)

# Ensemble: mean prediction over all folds.
result = np.array(final).mean(axis=0)

submission = pd.read_csv("data/sample_submission_extended.csv")
submission["sign"] = result

# import IPython; IPython.embed(); exit(1)
# submission["sign"] = (result > 0.5).astype(int)
# print((result > 0.5).sum())

submission.to_csv(
    os.path.join(config["submission"], config["model"]) + ".csv",
    index=None,
)
submission.to_csv(
    os.path.join(config["submission"], config["model"]) + ".csv.gz",
    compression="gzip",
    index=None,
)
| [
"missed_planes.dataset.PlanesDataset",
"pandas.read_csv",
"torch.load",
"os.path.join",
"numpy.array",
"albumentations.Resize",
"torch.utils.data.DataLoader",
"json.load",
"torch.no_grad",
"ttach.aliases.d4_transform"
] | [((655, 686), 'pandas.read_csv', 'pd.read_csv', (["config['test_csv']"], {}), "(config['test_csv'])\n", (666, 686), True, 'import pandas as pd\n'), ((703, 796), 'missed_planes.dataset.PlanesDataset', 'PlanesDataset', (['test_data'], {'path': "config['test_path']", 'is_test': '(True)', 'augmentation': 'transforms'}), "(test_data, path=config['test_path'], is_test=True,\n augmentation=transforms)\n", (716, 796), False, 'from missed_planes.dataset import PlanesDataset\n'), ((814, 942), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': "config['batch_size']", 'shuffle': '(False)', 'num_workers': "config['num_workers']", 'drop_last': '(False)'}), "(test_dataset, batch_size=config['batch_size'], shuffle=False,\n num_workers=config['num_workers'], drop_last=False)\n", (824, 942), False, 'from torch.utils.data import DataLoader\n'), ((1585, 1635), 'pandas.read_csv', 'pd.read_csv', (['"""data/sample_submission_extended.csv"""'], {}), "('data/sample_submission_extended.csv')\n", (1596, 1635), True, 'import pandas as pd\n'), ((500, 512), 'json.load', 'json.load', (['f'], {}), '(f)\n', (509, 512), False, 'import json\n'), ((968, 983), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (981, 983), False, 'import torch\n'), ((552, 622), 'albumentations.Resize', 'A.Resize', ([], {'height': "config['image_size']", 'width': "config['image_size']", 'p': '(1)'}), "(height=config['image_size'], width=config['image_size'], p=1)\n", (560, 622), True, 'import albumentations as A\n'), ((1055, 1123), 'torch.load', 'torch.load', (['f"""{config[\'checkpoint\']}/fold{ind}_{config[\'model\']}.pt"""'], {}), '(f"{config[\'checkpoint\']}/fold{ind}_{config[\'model\']}.pt")\n', (1065, 1123), False, 'import torch\n'), ((1542, 1557), 'numpy.array', 'np.array', (['final'], {}), '(final)\n', (1550, 1557), True, 'import numpy as np\n'), ((1811, 1862), 'os.path.join', 'os.path.join', (["config['submission']", "config['model']"], {}), "(config['submission'], 
config['model'])\n", (1823, 1862), False, 'import os\n'), ((1915, 1966), 'os.path.join', 'os.path.join', (["config['submission']", "config['model']"], {}), "(config['submission'], config['model'])\n", (1927, 1966), False, 'import os\n'), ((1201, 1227), 'ttach.aliases.d4_transform', 'tta.aliases.d4_transform', ([], {}), '()\n', (1225, 1227), True, 'import ttach as tta\n')] |
import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np
from .. import util
class LinearBlock(nn.Module):
    """Stack of 8 fully connected layers, each followed by LeakyReLU(0.2).

    The first layer maps ``linear_dim -> output_dim``; the remaining seven
    map ``output_dim -> output_dim``. ``init_type`` is accepted for
    interface compatibility but is currently unused.
    """

    def __init__(self, linear_dim, output_dim, init_type='std'):
        super().__init__()
        modules = []
        for i in range(8):
            in_features = linear_dim if i == 0 else output_dim
            modules.append((f"dense_{i}", nn.Linear(in_features, output_dim)))
            modules.append((f"leaky_relu_{i}", nn.LeakyReLU(negative_slope=0.2, inplace=True)))
        self._lin_block = nn.Sequential(OrderedDict(modules))
        self._output_dim = output_dim

    def forward(self, x):
        # (removed an unused batch_size local — the stack only constrains
        # the final feature dimension)
        return self._lin_block(x)
class NormAdaIn(nn.Module):
    """Adaptive instance normalization (AdaIN) driven by a latent vector.

    A single linear layer maps the latent vector to per-channel scale and
    bias ("style") parameters — hence an output of ``channels * 2`` — and
    the instance-normalized input is modulated channel-wise with them.
    ``init_type`` is accepted for interface compatibility but unused.
    """

    def __init__(self, latent_dim, channels, init_type='std'):
        super().__init__()
        # one scale and one bias per channel
        self.lin = nn.Linear(latent_dim, channels*2)
        self._channels = channels

    def forward(self, x, latents):
        c = self._channels
        styles = self.lin(latents)
        # broadcast per-channel styles across all trailing (spatial) dims
        trailing = [1] * (x.dim() - 2)
        scale = styles[:, :c].view(x.size(0), c, *trailing)
        bias = styles[:, c:].view(x.size(0), c, *trailing)
        # see https://pytorch.org/docs/stable/nn.html#torch.nn.InstanceNorm2d
        normalized = nn.InstanceNorm2d(c)(x)
        return normalized * scale + bias
class InputBlockWithInput(nn.Module):
    """First synthesis block whose initial feature map depends on an input.

    A linear layer projects the conditioning vector ``x`` into a
    ``(channels, h, w)`` feature map, which is then modulated by two AdaIN
    stages around a 3x3 (unpadded) convolution.
    """

    def __init__(self, linear_dim, channels, latent_dim, h, w, init_type='std'):
        super().__init__()
        self.ada1 = NormAdaIn(latent_dim, channels, init_type=init_type)
        self.conv2d = nn.Conv2d(channels, channels, kernel_size=(3,3), padding=0)
        self.ada2 = NormAdaIn(latent_dim, channels, init_type=init_type)
        # UNCOMMENT FOR MAKING INIT BLOCK DEPEND ON AN INPUT
        self.lin = nn.Linear(linear_dim, channels*h*w)
        self._h = h
        self._w = w
        self._channels = channels

    def forward(self, x, latents):
        n = latents.size(0)
        # project the conditioning vector into the initial feature map
        feat = self.lin(x).view(n, self._channels, self._h, self._w)
        feat = self.ada1(feat, latents)
        feat = self.conv2d(feat)
        return self.ada2(feat, latents)
class InputBlockConst(nn.Module):
    """First synthesis block fed from a learned constant tensor (StyleGAN-style)."""
    def __init__(self, linear_dim, channels, latent_dim, h, w):
        super().__init__()
        # UNCOMMENT FOR MAKING CONSTANT INIT BLOCK
        # NOTE(review): calling .to(device) on an nn.Parameter returns a plain
        # tensor, so `const`/`bias` may not be registered as module parameters
        # (they would be invisible to .parameters()/optimizers) — verify intended.
        self.const = nn.Parameter(torch.zeros(channels, h, w).normal_()).to(device=util._device)
        self.bias = nn.Parameter(torch.zeros(channels).normal_()).view(channels,1,1).to(device=util._device)
        self.ada1 = NormAdaIn(latent_dim, channels)
        self.conv2d = nn.Conv2d(channels, channels, kernel_size=(3,3), padding=0)
        self.ada2 = NormAdaIn(latent_dim, channels)
        self._h = h
        self._w = w
        self._channels = channels
    def forward(self, x, latents):
        # `x` is ignored here; the block starts from the learned constant.
        batch_size = latents.size(0)
        x = self.const.expand(torch.Size([batch_size]) + self.const.shape) + self.bias # broadcast the bias
        x = self.ada1(x, latents)
        x = self.conv2d(x)
        x = self.ada2(x, latents)
        return x
class UpscaleBlock(nn.Module):
    """Upsample to (h, w) through two AdaIN-modulated 3x3 convolutions.

    Nearest-neighbour upsampling targets ``(h+4, w+4)`` so that the two
    unpadded 3x3 convolutions (each trimming 2 pixels per side in total)
    bring the map back down to exactly ``(h, w)``.
    """

    def __init__(self, in_channels, out_channels, latent_dim, h, w, init_type='std'):
        super().__init__()
        self.upsample = nn.Upsample(size=(h+4, w+4), mode='nearest')
        self.conv2d1 = nn.Conv2d(in_channels, out_channels, kernel_size=(3,3), padding=0)
        self.ada1 = NormAdaIn(latent_dim, out_channels, init_type=init_type)
        self.conv2d2 = nn.Conv2d(out_channels, out_channels, kernel_size=(3,3), padding=0)
        self.ada2 = NormAdaIn(latent_dim, out_channels, init_type=init_type)

    def forward(self, x, latents):
        out = self.upsample(x)
        out = self.ada1(self.conv2d1(out), latents)
        out = self.ada2(self.conv2d2(out), latents)
        return out
class ConvTranspose2d(nn.Module):
    """StyleGan: Two dimensional deconvolution based on upsampling
    Here we implement the network presented in "A Style-Based Generator
    Architecture for Generative Adversarial Networks"
    WE DO NOT ADD NOISE AS WE SEEK A DETERMINISTIC FUNCTION (NOT USED IN A
    GAN-like setting)
    ===================================================================
    CITE:
    <NAME>; <NAME>; <NAME>. A style-based generator
    architecture for generative adversarial networks. In: Proceedings of the
    IEEE Conference on Computer Vision and Pattern Recognition. 2019. p.
    4401-4410.
    ===================================================================
    Implementation is mainly inspired by:
    - https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
    """
    def __init__(self, linear_dim, H, W, init_type='std', final_channels=1):
        # linear_dim: size of the conditioning input vector.
        # (H, W): target output resolution; must be at least 4 x 4.
        # final_channels: channels of the produced image (1 = grayscale).
        super().__init__()
        self.linear_dim = linear_dim
        self._latent_dim = 512 # as done in the paper
        max_resolution = max(H,W)
        # number of doublings needed to reach the larger target dimension
        resolution_log2 = int(np.ceil(np.log2(max_resolution)))
        h = 4 # the paper uses a value of 4
        w = 4 # the paper uses a value of 4
        assert H >= h and W >= w
        # channel schedule: wide early stages, exponentially narrowing,
        # clamped to [1, fmap_max]
        fmap_max = 512
        fmap_decay = 1.1
        fmap_base = max_resolution * 8
        def channels_at_stage(stage):
            return max(min(int(fmap_base / (2.0 ** (stage * fmap_decay))), fmap_max),1)
        num_upsampling_blocks = resolution_log2 - 2 # -2 as we start from a 4 x 4 image (and not a 1 x 1)
        channels = channels_at_stage(2)
        # maps the input vector to the 512-d latent that drives every AdaIN
        self._deconv_styles = LinearBlock(linear_dim, self._latent_dim, init_type=init_type)
        self._input_block = InputBlockWithInput(linear_dim, channels, self._latent_dim, h, w, init_type=init_type)
        upscale_module = []
        in_channels = channels
        for stage in range(num_upsampling_blocks):
            out_channels = channels_at_stage(stage + 3)
            # double the resolution each stage, capped at the target size
            h = h*2 if 2*h < H else H
            w = w*2 if 2*w < W else W
            upscale_module.append(UpscaleBlock(in_channels, out_channels,
                                 self._latent_dim, h, w, init_type=init_type))
            in_channels = out_channels
        self._upscaling = nn.ModuleList(upscale_module)
        # 1x1 convolution projecting features to the final image channels
        self._to_rgb = nn.Conv2d(in_channels, final_channels,kernel_size=(1,1))
    def forward(self, x):
        """Map input vector x to an image of shape (batch, final_channels, H, W)."""
        latents = self._deconv_styles(x)
        # go through the constant block
        img = self._input_block(x, latents)
        # pass through all upscalings using the latents in the AdaIn modules
        for i, m in enumerate(self._upscaling):
            img = m(img, latents)
        return self._to_rgb(img)
    def visualize_deconv(self, aspect=1):
        """Render the output for a random input as a matplotlib image (debug aid)."""
        import matplotlib.pyplot as plt
        fig = plt.figure(figsize=(10,5))
        random_input = torch.randn(self.linear_dim).view(1,-1)
        viz = self.forward(random_input).detach().squeeze().numpy()
        m = plt.imshow(viz, aspect=aspect)
        plt.colorbar(m)
        plt.show()
| [
"matplotlib.pyplot.imshow",
"collections.OrderedDict",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"matplotlib.pyplot.colorbar",
"torch.nn.Conv2d",
"torch.nn.InstanceNorm2d",
"matplotlib.pyplot.figure",
"torch.nn.Upsample",
"torch.nn.Linear",
"torch.zeros",
"numpy.log2",
"torch.Size",
"to... | [((1197, 1232), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', '(channels * 2)'], {}), '(latent_dim, channels * 2)\n', (1206, 1232), True, 'import torch.nn as nn\n'), ((2153, 2213), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3, 3)', 'padding': '(0)'}), '(channels, channels, kernel_size=(3, 3), padding=0)\n', (2162, 2213), True, 'import torch.nn as nn\n'), ((2367, 2406), 'torch.nn.Linear', 'nn.Linear', (['linear_dim', '(channels * h * w)'], {}), '(linear_dim, channels * h * w)\n', (2376, 2406), True, 'import torch.nn as nn\n'), ((3262, 3322), 'torch.nn.Conv2d', 'nn.Conv2d', (['channels', 'channels'], {'kernel_size': '(3, 3)', 'padding': '(0)'}), '(channels, channels, kernel_size=(3, 3), padding=0)\n', (3271, 3322), True, 'import torch.nn as nn\n'), ((3915, 3963), 'torch.nn.Upsample', 'nn.Upsample', ([], {'size': '(h + 4, w + 4)', 'mode': '"""nearest"""'}), "(size=(h + 4, w + 4), mode='nearest')\n", (3926, 3963), True, 'import torch.nn as nn\n'), ((3983, 4050), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': '(3, 3)', 'padding': '(0)'}), '(in_channels, out_channels, kernel_size=(3, 3), padding=0)\n', (3992, 4050), True, 'import torch.nn as nn\n'), ((4150, 4218), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_channels', 'out_channels'], {'kernel_size': '(3, 3)', 'padding': '(0)'}), '(out_channels, out_channels, kernel_size=(3, 3), padding=0)\n', (4159, 4218), True, 'import torch.nn as nn\n'), ((6865, 6894), 'torch.nn.ModuleList', 'nn.ModuleList', (['upscale_module'], {}), '(upscale_module)\n', (6878, 6894), True, 'import torch.nn as nn\n'), ((6918, 6976), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'final_channels'], {'kernel_size': '(1, 1)'}), '(in_channels, final_channels, kernel_size=(1, 1))\n', (6927, 6976), True, 'import torch.nn as nn\n'), ((7419, 7446), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (7429, 7446), True, 'import 
matplotlib.pyplot as plt\n'), ((7591, 7621), 'matplotlib.pyplot.imshow', 'plt.imshow', (['viz'], {'aspect': 'aspect'}), '(viz, aspect=aspect)\n', (7601, 7621), True, 'import matplotlib.pyplot as plt\n'), ((7630, 7645), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['m'], {}), '(m)\n', (7642, 7645), True, 'import matplotlib.pyplot as plt\n'), ((7654, 7664), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7662, 7664), True, 'import matplotlib.pyplot as plt\n'), ((627, 647), 'collections.OrderedDict', 'OrderedDict', (['modules'], {}), '(modules)\n', (638, 647), False, 'from collections import OrderedDict\n'), ((1746, 1779), 'torch.nn.InstanceNorm2d', 'nn.InstanceNorm2d', (['self._channels'], {}), '(self._channels)\n', (1763, 1779), True, 'import torch.nn as nn\n'), ((5667, 5690), 'numpy.log2', 'np.log2', (['max_resolution'], {}), '(max_resolution)\n', (5674, 5690), True, 'import numpy as np\n'), ((7470, 7498), 'torch.randn', 'torch.randn', (['self.linear_dim'], {}), '(self.linear_dim)\n', (7481, 7498), False, 'import torch\n'), ((537, 583), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)', 'inplace': '(True)'}), '(negative_slope=0.2, inplace=True)\n', (549, 583), True, 'import torch.nn as nn\n'), ((3552, 3576), 'torch.Size', 'torch.Size', (['[batch_size]'], {}), '([batch_size])\n', (3562, 3576), False, 'import torch\n'), ((354, 387), 'torch.nn.Linear', 'nn.Linear', (['linear_dim', 'output_dim'], {}), '(linear_dim, output_dim)\n', (363, 387), True, 'import torch.nn as nn\n'), ((454, 487), 'torch.nn.Linear', 'nn.Linear', (['output_dim', 'output_dim'], {}), '(output_dim, output_dim)\n', (463, 487), True, 'import torch.nn as nn\n'), ((3015, 3042), 'torch.zeros', 'torch.zeros', (['channels', 'h', 'w'], {}), '(channels, h, w)\n', (3026, 3042), False, 'import torch\n'), ((3111, 3132), 'torch.zeros', 'torch.zeros', (['channels'], {}), '(channels)\n', (3122, 3132), False, 'import torch\n')] |
from os import listdir
import json
import numpy as np
DATA_DIR="quickdraw_data_reduced"
def parse_line(ndjson_line):
    """Parse an ndjson line and return (ink, classname).

    Returns (np.ndarray, str) on success, where the array has one row per
    point with columns (dx, dy, stroke_end), or (None, None) when the
    sample is unusable (empty class name, unrecognized drawing, empty or
    inconsistent stroke data).
    """
    sample = json.loads(ndjson_line)
    class_name = sample["word"]
    if not class_name:
        print("Empty classname")
        return None, None
    inkarray = sample["drawing"]
    # Reject unusable samples BEFORE allocating any buffers (the original
    # code performed these checks after the np.zeros allocation).
    if not sample["recognized"]:
        return None, None
    if not inkarray:
        return None, None
    stroke_lengths = [len(stroke[0]) for stroke in inkarray]
    total_points = sum(stroke_lengths)
    np_ink = np.zeros((total_points, 3), dtype=np.float32)
    current_t = 0
    for stroke in inkarray:
        if len(stroke[0]) != len(stroke[1]):
            print("Inconsistent number of x and y coordinates.")
            return None, None
        for i in [0, 1]:
            np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
        current_t += len(stroke[0])
        np_ink[current_t - 1, 2] = 1  # mark the last point of each stroke
    # Preprocessing.
    # 1. Size normalization into the unit bounding box.
    lower = np.min(np_ink[:, 0:2], axis=0)
    upper = np.max(np_ink[:, 0:2], axis=0)
    scale = upper - lower
    scale[scale == 0] = 1  # avoid division by zero for degenerate strokes
    np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
    # 2. Represent positions as deltas between consecutive points.
    np_ink[1:, 0:2] -= np_ink[0:-1, 0:2]
    np_ink = np_ink[1:, :]
    return np_ink, class_name
if __name__ == "__main__":
    # Collect every parsable sample from each ndjson file in DATA_DIR and
    # store inks and labels as numpy archives.
    X = []
    Y = []
    i = 0
    files = listdir(DATA_DIR)
    for file in files:
        with open(DATA_DIR + "/" + file) as f:
            content = f.readlines()
        for line in content:
            x, y = parse_line(line)
            # Keep a sample only when BOTH ink and label were parsed.
            # The original `or` could append a None half-pair if only one
            # of the two values was missing.
            if x is not None and y is not None:
                X.append(x)
                Y.append(y)
                i = i + 1
    np.save("X.npy", X)
    np.save("Y.npy", Y)
| [
"json.loads",
"os.listdir",
"numpy.max",
"numpy.zeros",
"numpy.min",
"numpy.save"
] | [((1620, 1639), 'numpy.save', 'np.save', (['"""X.npy"""', 'X'], {}), "('X.npy', X)\n", (1627, 1639), True, 'import numpy as np\n'), ((1640, 1659), 'numpy.save', 'np.save', (['"""Y.npy"""', 'Y'], {}), "('Y.npy', Y)\n", (1647, 1659), True, 'import numpy as np\n'), ((203, 226), 'json.loads', 'json.loads', (['ndjson_line'], {}), '(ndjson_line)\n', (213, 226), False, 'import json\n'), ((468, 513), 'numpy.zeros', 'np.zeros', (['(total_points, 3)'], {'dtype': 'np.float32'}), '((total_points, 3), dtype=np.float32)\n', (476, 513), True, 'import numpy as np\n'), ((998, 1028), 'numpy.min', 'np.min', (['np_ink[:, 0:2]'], {'axis': '(0)'}), '(np_ink[:, 0:2], axis=0)\n', (1004, 1028), True, 'import numpy as np\n'), ((1039, 1069), 'numpy.max', 'np.max', (['np_ink[:, 0:2]'], {'axis': '(0)'}), '(np_ink[:, 0:2], axis=0)\n', (1045, 1069), True, 'import numpy as np\n'), ((1348, 1365), 'os.listdir', 'listdir', (['DATA_DIR'], {}), '(DATA_DIR)\n', (1355, 1365), False, 'from os import listdir\n')] |
# Collection of preprocessing functions
from nltk.tokenize import word_tokenize
from transformers import CamembertTokenizer
from transformers import BertTokenizer
from tqdm import tqdm
import numpy as np
import pandas as pd
import re
import string
import unicodedata
import tensorflow as tf
import glob
import os
MAX_LEN = 500  # maximum token sequence length for transformer encoding
IMG_SHAPE = 224  # images are resized to IMG_SHAPE x IMG_SHAPE
AUTO = tf.data.experimental.AUTOTUNE  # let tf.data choose parallelism/prefetch sizes
model_bert = 'bert-base-multilingual-cased'
model_camembert = 'camembert-base'
# Case-sensitive tokenizers, loaded once at import time
# (downloads the vocab files on first use).
tokenizer_bert = BertTokenizer.from_pretrained(model_bert, do_lowercase=False)
tokenizer_cam = CamembertTokenizer.from_pretrained(model_camembert, do_lowercase=False)
def preprocessing_csv(file):
    """Load a product CSV and build `filename` and `text` columns.

    Parameters
    ----------
    file : str or file-like
        Anything accepted by ``pd.read_csv``; the first column is the index.

    Returns
    -------
    pd.DataFrame
        Frame with the original index as an ``index`` column plus
        ``filename`` (path derived from imageid/productid) and ``text``
        (designation + ' ' + description, with missing parts as empty
        strings).
    """
    df = pd.read_csv(file, index_col=0)
    df = df.reset_index()
    df['filename'] = df.apply(
        lambda x: "datas/images/image_test/image_" + str(x['imageid']) + "_product_" + str(x['productid']) + ".jpg",
        axis=1)
    # Fill missing values BEFORE concatenating: the previous approach
    # stringified NaN and then removed every 'nan' substring, which also
    # corrupted legitimate words containing "nan" (e.g. "banane").
    df['text'] = df['designation'].fillna('').astype(str) + ' ' + df['description'].fillna('').astype(str)
    df = df.drop(['designation', 'description', 'imageid', 'productid'], axis=1)
    return df
# Converts a unicode string to plain ascii
def unicode_to_ascii(s):
    """Strip accents: NFD-decompose and drop combining marks (category Mn)."""
    decomposed = unicodedata.normalize('NFD', s)
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
# preprocess sentences
def preprocess_sentence(w):
    """Normalize a sentence: lowercase, strip accents, remove URLs and
    punctuation, then re-tokenize and join tokens with single spaces."""
    text = unicode_to_ascii(w.lower().strip())
    # Drop URLs, then punctuation, then literal newlines.
    text = re.sub('https?://\S+|www\.\S+', '', text)
    text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
    text = re.sub('\n', '', text)
    # Pad sentence marks with spaces, collapse everything non-alphabetic.
    text = re.sub(r"([?.!,¿])", r" \1 ", text)
    text = re.sub(r"[^a-zA-Z?.!]+", " ", text)
    tokens = word_tokenize(text.strip())
    return ' '.join(tokens).strip()
# encode sentences with a transformers tokenizer
def encode(sentences, tokenizer, maxlen=500):
    """Tokenize sentences with a HuggingFace-style tokenizer.

    Returns (input_ids, attention_masks) as int32 numpy arrays with the
    extra batch dimension squeezed out.
    """
    ids = []
    masks = []
    # Encode each sentence individually (tqdm shows progress).
    for sentence in tqdm(sentences):
        encoded = tokenizer.encode_plus(
            sentence,
            add_special_tokens=True,   # add '[CLS]' and '[SEP]'
            max_length=maxlen,         # pad & truncate all sentences
            pad_to_max_length=True,
            return_attention_mask=True,
            return_tensors='np',       # return numpy arrays
        )
        ids.append(encoded['input_ids'])
        masks.append(encoded['attention_mask'])
    input_ids = np.squeeze(np.asarray(ids, dtype='int32'))
    attention_masks = np.squeeze(np.asarray(masks, dtype='int32'))
    return input_ids, attention_masks
@tf.function
# Image preprocessing pipeline for inference
def preprocessing_test(img):
    """Read an image file path and return a float32 tensor of shape
    (IMG_SHAPE, IMG_SHAPE, 3) with values scaled into [0, 1]."""
    raw = tf.io.read_file(img)
    decoded = tf.io.decode_jpeg(raw, channels=3)
    resized = tf.image.resize(tf.cast(decoded, dtype=tf.float32), [IMG_SHAPE, IMG_SHAPE])
    return resized / 255
def make_test(x1, x2, x3, x4, x5):
    """Build a batched tf.data pipeline: decode the image path in x1 and
    pass the remaining feature tensors through unchanged."""
    ds = tf.data.Dataset.from_tensor_slices((x1, x2, x3, x4, x5))
    ds = ds.map(lambda r, s, t, u, w: [(preprocessing_test(r), s, t, u, w)],
                num_parallel_calls=AUTO)
    return ds.batch(1).prefetch(AUTO)
def fusion_features():
    """Pair uploaded image paths with the temporary test CSV, persist the
    combined frame to results/generated.csv and return it."""
    image_paths = glob.glob('datas/images/upload_images/*')
    images = pd.DataFrame(image_paths, columns=['filename'])
    texts = pd.read_csv('datas/temp_test.csv').reset_index(drop=True)
    combined = pd.concat([images, texts], axis=1)
    combined.to_csv('results/generated.csv', encoding="utf-8", header='True')
    return combined
def remove_tempfile():
    """Delete every file left in the upload_images staging directory."""
    for path in glob.glob('datas/images/upload_images/*'):
        os.remove(path)
| [
"re.escape",
"pandas.read_csv",
"tensorflow.io.read_file",
"tensorflow.cast",
"os.remove",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.asarray",
"unicodedata.normalize",
"pandas.DataFrame",
"glob.glob",
"numpy.squeeze",
"tensorflow.io.decode_jpeg",
"re.sub",
"tensorflow.image.resi... | [((502, 563), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['model_bert'], {'do_lowercase': '(False)'}), '(model_bert, do_lowercase=False)\n', (531, 563), False, 'from transformers import BertTokenizer\n'), ((581, 652), 'transformers.CamembertTokenizer.from_pretrained', 'CamembertTokenizer.from_pretrained', (['model_camembert'], {'do_lowercase': '(False)'}), '(model_camembert, do_lowercase=False)\n', (615, 652), False, 'from transformers import CamembertTokenizer\n'), ((699, 729), 'pandas.read_csv', 'pd.read_csv', (['file'], {'index_col': '(0)'}), '(file, index_col=0)\n', (710, 729), True, 'import pandas as pd\n'), ((1493, 1534), 're.sub', 're.sub', (['"""https?://\\\\S+|www\\\\.\\\\S+"""', '""""""', 'w'], {}), "('https?://\\\\S+|www\\\\.\\\\S+', '', w)\n", (1499, 1534), False, 'import re\n'), ((1604, 1623), 're.sub', 're.sub', (['"""\n"""', '""""""', 'w'], {}), "('\\n', '', w)\n", (1610, 1623), False, 'import re\n'), ((1633, 1664), 're.sub', 're.sub', (['"""([?.!,¿])"""', '""" \\\\1 """', 'w'], {}), "('([?.!,¿])', ' \\\\1 ', w)\n", (1639, 1664), False, 'import re\n'), ((1675, 1706), 're.sub', 're.sub', (['"""[^a-zA-Z?.!]+"""', '""" """', 'w'], {}), "('[^a-zA-Z?.!]+', ' ', w)\n", (1681, 1706), False, 'import re\n'), ((1965, 1980), 'tqdm.tqdm', 'tqdm', (['sentences'], {}), '(sentences)\n', (1969, 1980), False, 'from tqdm import tqdm\n'), ((2708, 2744), 'numpy.asarray', 'np.asarray', (['input_ids'], {'dtype': '"""int32"""'}), "(input_ids, dtype='int32')\n", (2718, 2744), True, 'import numpy as np\n'), ((2768, 2810), 'numpy.asarray', 'np.asarray', (['attention_masks'], {'dtype': '"""int32"""'}), "(attention_masks, dtype='int32')\n", (2778, 2810), True, 'import numpy as np\n'), ((2830, 2851), 'numpy.squeeze', 'np.squeeze', (['input_ids'], {}), '(input_ids)\n', (2840, 2851), True, 'import numpy as np\n'), ((2875, 2902), 'numpy.squeeze', 'np.squeeze', (['attention_masks'], {}), '(attention_masks)\n', (2885, 
2902), True, 'import numpy as np\n'), ((3084, 3104), 'tensorflow.io.read_file', 'tf.io.read_file', (['img'], {}), '(img)\n', (3099, 3104), True, 'import tensorflow as tf\n'), ((3116, 3150), 'tensorflow.io.decode_jpeg', 'tf.io.decode_jpeg', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (3133, 3150), True, 'import tensorflow as tf\n'), ((3176, 3206), 'tensorflow.cast', 'tf.cast', (['img'], {'dtype': 'tf.float32'}), '(img, dtype=tf.float32)\n', (3183, 3206), True, 'import tensorflow as tf\n'), ((3218, 3262), 'tensorflow.image.resize', 'tf.image.resize', (['img', '[IMG_SHAPE, IMG_SHAPE]'], {}), '(img, [IMG_SHAPE, IMG_SHAPE])\n', (3233, 3262), True, 'import tensorflow as tf\n'), ((3634, 3675), 'glob.glob', 'glob.glob', (['"""datas/images/upload_images/*"""'], {}), "('datas/images/upload_images/*')\n", (3643, 3675), False, 'import glob\n'), ((3692, 3738), 'pandas.DataFrame', 'pd.DataFrame', (['file_names'], {'columns': "['filename']"}), "(file_names, columns=['filename'])\n", (3704, 3738), True, 'import pandas as pd\n'), ((3754, 3788), 'pandas.read_csv', 'pd.read_csv', (['"""datas/temp_test.csv"""'], {}), "('datas/temp_test.csv')\n", (3765, 3788), True, 'import pandas as pd\n'), ((3845, 3883), 'pandas.concat', 'pd.concat', (['[df_image, df_text]'], {'axis': '(1)'}), '([df_image, df_text], axis=1)\n', (3854, 3883), True, 'import pandas as pd\n'), ((4020, 4061), 'glob.glob', 'glob.glob', (['"""datas/images/upload_images/*"""'], {}), "('datas/images/upload_images/*')\n", (4029, 4061), False, 'import glob\n'), ((4097, 4109), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (4106, 4109), False, 'import os\n'), ((1557, 1586), 're.escape', 're.escape', (['string.punctuation'], {}), '(string.punctuation)\n', (1566, 1586), False, 'import re\n'), ((1294, 1325), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFD"""', 's'], {}), "('NFD', s)\n", (1315, 1325), False, 'import unicodedata\n'), ((1349, 1372), 'unicodedata.category', 'unicodedata.category', (['c'], {}), 
'(c)\n', (1369, 1372), False, 'import unicodedata\n'), ((3359, 3415), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(x1, x2, x3, x4, x5)'], {}), '((x1, x2, x3, x4, x5))\n', (3393, 3415), True, 'import tensorflow as tf\n')] |
import argparse
import asyncio
import logging
import math
import os
import cv2
import numpy
from aiortc import RTCPeerConnection
from aiortc.mediastreams import VideoFrame, VideoStreamTrack
from signaling import CopyAndPasteSignaling
BLUE = (255, 0, 0)
GREEN = (0, 255, 0)
RED = (0, 0, 255)
OUTPUT_PATH = os.path.join(os.path.dirname(__file__), 'output.png')
def frame_from_bgr(data_bgr):
    """Convert a BGR numpy image into an aiortc VideoFrame (YV12 layout)."""
    height, width = data_bgr.shape[0], data_bgr.shape[1]
    yuv = cv2.cvtColor(data_bgr, cv2.COLOR_BGR2YUV_YV12)
    return VideoFrame(width=width, height=height, data=yuv.tobytes())
def frame_to_bgr(frame):
    """Convert an aiortc VideoFrame (YV12 layout) back into a BGR numpy image."""
    flat = numpy.frombuffer(frame.data, numpy.uint8)
    # YV12 packs 12 bits per pixel, hence height * 12 / 8 byte rows.
    rows = math.ceil(frame.height * 12 / 8)
    return cv2.cvtColor(flat.reshape((rows, frame.width)), cv2.COLOR_YUV2BGR_YV12)
class ColorVideoStreamTrack(VideoStreamTrack):
    """A video track that repeatedly yields a single solid-color frame."""

    def __init__(self, width, height, color):
        # Initialize the MediaStreamTrack base class (track id / state);
        # the original code skipped this call.
        super().__init__()
        data_bgr = numpy.zeros((height, width, 3), numpy.uint8)
        data_bgr[:, :] = color
        self.frame = frame_from_bgr(data_bgr=data_bgr)

    async def recv(self):
        return self.frame
class CombinedVideoStreamTrack(VideoStreamTrack):
    """A video track that horizontally tiles the frames of several tracks."""

    def __init__(self, tracks):
        # Initialize the MediaStreamTrack base class (track id / state);
        # the original code skipped this call.
        super().__init__()
        self.tracks = tracks

    async def recv(self):
        # Pull one frame from every source track concurrently.
        frames = await asyncio.gather(*(track.recv() for track in self.tracks))
        data_bgr = numpy.hstack([frame_to_bgr(frame) for frame in frames])
        return frame_from_bgr(data_bgr)
async def run_answer(pc, signaling):
    """Answerer side: accept the offer, send an answer, then dump every
    received frame to OUTPUT_PATH until interrupted."""
    remote_track = None

    @pc.on('track')
    def on_track(track):
        nonlocal remote_track
        assert track.kind == 'video'
        remote_track = track

    # Offer/answer handshake.
    offer = await signaling.receive()
    await pc.setRemoteDescription(offer)
    await pc.setLocalDescription(await pc.createAnswer())
    await signaling.send(pc.localDescription)

    print('Receiving video, press CTRL-C to stop')
    while True:
        frame = await remote_track.recv()
        cv2.imwrite(OUTPUT_PATH, frame_to_bgr(frame))
async def run_offer(pc, signaling):
    """Offerer side: stream a tiled blue/green/red test video for 10 seconds."""
    width, height = 320, 240
    local_video = CombinedVideoStreamTrack(tracks=[
        ColorVideoStreamTrack(width=width, height=height, color=c)
        for c in (BLUE, GREEN, RED)
    ])
    pc.addTrack(local_video)

    # Offer/answer handshake.
    await pc.setLocalDescription(await pc.createOffer())
    await signaling.send(pc.localDescription)
    answer = await signaling.receive()
    await pc.setRemoteDescription(answer)

    print('Sending video for 10s')
    await asyncio.sleep(10)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Video stream with copy-and-paste signaling')
    parser.add_argument('role', choices=['offer', 'answer'])
    parser.add_argument('--verbose', '-v', action='count')
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    pc = RTCPeerConnection()
    signaling = CopyAndPasteSignaling()
    coro = run_offer(pc, signaling) if args.role == 'offer' else run_answer(pc, signaling)

    # Drive the selected coroutine until completion or CTRL-C, always closing
    # the peer connection on the way out.
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(coro)
    except KeyboardInterrupt:
        pass
    finally:
        loop.run_until_complete(pc.close())
| [
"logging.basicConfig",
"cv2.imwrite",
"math.ceil",
"signaling.CopyAndPasteSignaling",
"argparse.ArgumentParser",
"numpy.hstack",
"asyncio.gather",
"os.path.dirname",
"numpy.zeros",
"cv2.cvtColor",
"asyncio.sleep",
"numpy.frombuffer",
"asyncio.get_event_loop",
"aiortc.RTCPeerConnection"
] | [((322, 347), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (337, 347), False, 'import os\n'), ((410, 456), 'cv2.cvtColor', 'cv2.cvtColor', (['data_bgr', 'cv2.COLOR_BGR2YUV_YV12'], {}), '(data_bgr, cv2.COLOR_BGR2YUV_YV12)\n', (422, 456), False, 'import cv2\n'), ((598, 639), 'numpy.frombuffer', 'numpy.frombuffer', (['frame.data', 'numpy.uint8'], {}), '(frame.data, numpy.uint8)\n', (614, 639), False, 'import numpy\n'), ((733, 779), 'cv2.cvtColor', 'cv2.cvtColor', (['data_yuv', 'cv2.COLOR_YUV2BGR_YV12'], {}), '(data_yuv, cv2.COLOR_YUV2BGR_YV12)\n', (745, 779), False, 'import cv2\n'), ((2809, 2895), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Video stream with copy-and-paste signaling"""'}), "(description=\n 'Video stream with copy-and-paste signaling')\n", (2832, 2895), False, 'import argparse\n'), ((3123, 3142), 'aiortc.RTCPeerConnection', 'RTCPeerConnection', ([], {}), '()\n', (3140, 3142), False, 'from aiortc import RTCPeerConnection\n'), ((3159, 3182), 'signaling.CopyAndPasteSignaling', 'CopyAndPasteSignaling', ([], {}), '()\n', (3180, 3182), False, 'from signaling import CopyAndPasteSignaling\n'), ((3336, 3360), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3358, 3360), False, 'import asyncio\n'), ((894, 938), 'numpy.zeros', 'numpy.zeros', (['(height, width, 3)', 'numpy.uint8'], {}), '((height, width, 3), numpy.uint8)\n', (905, 938), False, 'import numpy\n'), ((1401, 1424), 'numpy.hstack', 'numpy.hstack', (['data_bgrs'], {}), '(data_bgrs)\n', (1413, 1424), False, 'import numpy\n'), ((2050, 2084), 'cv2.imwrite', 'cv2.imwrite', (['OUTPUT_PATH', 'data_bgr'], {}), '(OUTPUT_PATH, data_bgr)\n', (2061, 2084), False, 'import cv2\n'), ((2749, 2766), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (2762, 2766), False, 'import asyncio\n'), ((3072, 3112), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (3091, 
3112), False, 'import logging\n'), ((674, 706), 'math.ceil', 'math.ceil', (['(frame.height * 12 / 8)'], {}), '(frame.height * 12 / 8)\n', (683, 706), False, 'import math\n'), ((1297, 1319), 'asyncio.gather', 'asyncio.gather', (['*coros'], {}), '(*coros)\n', (1311, 1319), False, 'import asyncio\n')] |
import pymongo
import datetime
import os
import numpy as np
import struct
from array import array
from pymongo import MongoClient
from mspasspy.ccore.seismic import (
Seismogram,
TimeReferenceType,
TimeSeries,
DoubleVector,
)
def find_channel(collection):
    """Print every channel document whose [st, et] interval lies within
    1990-01-01 06:00 .. 1990-01-04 06:00."""
    window_start = datetime.datetime(1990, 1, 1, 6)
    window_end = datetime.datetime(1990, 1, 4, 6)
    query = {"st": {"$gte": window_start}, "et": {"$lte": window_end}}
    for document in collection.find(query):
        print(document)
# net channel station scheme
def save_data(d):
    """Append d.data (doubles) to the file dir/dfile and record dir, dfile,
    foff and nofbytes in the object's metadata so read_data can recover
    the samples later."""
    target = os.path.join(d.get_string("dir"), d.get_string("dfile"))
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, mode="a+b") as fh:
        foff = fh.seek(0, 2)  # append position = current end of file
        samples = array("d", d.data)
        d.put("nofbytes", samples.itemsize * samples.buffer_info()[1])
        samples.tofile(fh)
    resolved = os.path.realpath(target)
    d.put("dir", os.path.dirname(resolved))
    d.put("dfile", os.path.basename(resolved))
    d.put("foff", foff)
def read_data(d):
    """Load the samples previously written by save_data back into d.data,
    using the dir/dfile/foff/nofbytes metadata stored on the object."""
    path = os.path.join(d.get_string("dir"), d.get_string("dfile"))
    with open(path, mode="rb") as fh:
        fh.seek(d.get("foff"))
        samples = array("d")
        samples.frombytes(fh.read(d.get("nofbytes")))
    d.data = DoubleVector(samples)
if __name__ == "__main__":
    # Round-trip smoke test: write 255 random samples to disk, read them back
    # into a fresh TimeSeries, and verify both sample vectors match exactly.
    s = TimeSeries()
    s.data = DoubleVector(np.random.rand(255))
    s["dir"] = "./"
    s["dfile"] = "test_op"
    save_data(s)
    s2 = TimeSeries()
    for k in s:
        s2[k] = s[k]  # copy metadata (dir, dfile, foff, nofbytes, ...)
    s2.data = DoubleVector([])
    print(len(s2.data))
    read_data(s2)
    print(len(s2.data))
    assert all(a == b for a, b in zip(s.data, s2.data))
    # client = MongoClient('localhost', 27017)
    # db = client.mspass
    # channels = db.channels
    # find_channel(channels)
| [
"datetime.datetime",
"mspasspy.ccore.seismic.TimeSeries",
"array.array",
"numpy.random.rand",
"os.path.join",
"os.path.realpath",
"os.path.dirname",
"mspasspy.ccore.seismic.DoubleVector"
] | [((283, 315), 'datetime.datetime', 'datetime.datetime', (['(1990)', '(1)', '(1)', '(6)'], {}), '(1990, 1, 1, 6)\n', (300, 315), False, 'import datetime\n'), ((325, 357), 'datetime.datetime', 'datetime.datetime', (['(1990)', '(1)', '(4)', '(6)'], {}), '(1990, 1, 4, 6)\n', (342, 357), False, 'import datetime\n'), ((577, 600), 'os.path.join', 'os.path.join', (['di', 'dfile'], {}), '(di, dfile)\n', (589, 600), False, 'import os\n'), ((1171, 1194), 'os.path.join', 'os.path.join', (['di', 'dfile'], {}), '(di, dfile)\n', (1183, 1194), False, 'import os\n'), ((1427, 1439), 'mspasspy.ccore.seismic.TimeSeries', 'TimeSeries', ([], {}), '()\n', (1437, 1439), False, 'from mspasspy.ccore.seismic import Seismogram, TimeReferenceType, TimeSeries, DoubleVector\n'), ((1561, 1573), 'mspasspy.ccore.seismic.TimeSeries', 'TimeSeries', ([], {}), '()\n', (1571, 1573), False, 'from mspasspy.ccore.seismic import Seismogram, TimeReferenceType, TimeSeries, DoubleVector\n'), ((1625, 1641), 'mspasspy.ccore.seismic.DoubleVector', 'DoubleVector', (['[]'], {}), '([])\n', (1637, 1641), False, 'from mspasspy.ccore.seismic import Seismogram, TimeReferenceType, TimeSeries, DoubleVector\n'), ((617, 639), 'os.path.dirname', 'os.path.dirname', (['fname'], {}), '(fname)\n', (632, 639), False, 'import os\n'), ((747, 765), 'array.array', 'array', (['"""d"""', 'd.data'], {}), "('d', d.data)\n", (752, 765), False, 'from array import array\n'), ((901, 924), 'os.path.realpath', 'os.path.realpath', (['fname'], {}), '(fname)\n', (917, 924), False, 'import os\n'), ((955, 978), 'os.path.realpath', 'os.path.realpath', (['fname'], {}), '(fname)\n', (971, 978), False, 'import os\n'), ((1278, 1288), 'array.array', 'array', (['"""d"""'], {}), "('d')\n", (1283, 1288), False, 'from array import array\n'), ((1364, 1389), 'mspasspy.ccore.seismic.DoubleVector', 'DoubleVector', (['float_array'], {}), '(float_array)\n', (1376, 1389), False, 'from mspasspy.ccore.seismic import Seismogram, TimeReferenceType, TimeSeries, 
DoubleVector\n'), ((1466, 1485), 'numpy.random.rand', 'np.random.rand', (['(255)'], {}), '(255)\n', (1480, 1485), True, 'import numpy as np\n')] |
# Copyright 2018 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import multiprocessing as mp
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = str(mp.cpu_count())
import SimpleITK as sitk
import numpy as np
import torch as th
from .image import Image
def auto_crop_image_filter(image, boundary_value=0):
    """
    Performs an auto cropping of values on boundary.

    Crops away the border region where the image equals boundary_value and
    shifts the image origin accordingly.

    image (Image): image which has to be cropped; image.image is presumed to
        carry leading (batch, channel) dimensions -- consistent with the
        squeeze here and the unsqueeze_ calls before returning (TODO confirm)
    boundary_value (float|int): specifies the boundary value which will be cropped

    return (Image): a new image with cropped boundary
    """
    # Binary mask: 1 where the image differs from the boundary value.
    msk = 1 - (image.image.squeeze() == boundary_value)
    # For each dimension, record the min/max extent of the non-boundary region
    # over the remaining axes (argmax of a 0/1 mask finds the first non-zero).
    rminmax = []
    for d in range(len(msk.shape)):
        region = msk.argmax(dim=d).nonzero()
        rminmax.append((region.min(dim=0)[0], region.max(dim=0)[0]))
        #print(rminmax[-1])
    if image.ndim == 2:
        cropped = image.image.squeeze()[rminmax[1][0]:rminmax[1][1], rminmax[0][0]:rminmax[0][1]]
        # Shift the origin by the number of cropped voxels times the spacing.
        origin = image.origin + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0], rminmax[0][0]])
    elif image.ndim == 3:
        cropped = image.image.squeeze()[rminmax[1][0][0]:rminmax[1][1][0], \
                                        rminmax[0][0][0]:rminmax[0][1][0], \
                                        rminmax[0][0][1]:rminmax[0][1][1]]
        #print(cropped.shape)
        # Shift the origin by the number of cropped voxels times the spacing.
        origin = th.Tensor(image.origin) + th.Tensor(image.spacing) * th.Tensor([rminmax[1][0][0], rminmax[0][0][0],rminmax[0][0][1]])
    else:
        raise Exception("Only 2 and 3 space dimensions supported")
    size = tuple(cropped.shape)
    # Restore the (batch, channel) leading dimensions expected by Image.
    cropped.unsqueeze_(0).unsqueeze_(0)
    return Image(cropped, size, image.spacing, origin.tolist())
def normalize_images(fixed_image, moving_image):
    """
    Normalize image intensities by subtracting the joint minimum and then
    dividing by the joint maximum of the shifted images.
    Note: the function modifies both images in place.

    fixed_image (Image): fixed image
    moving_image (Image): moving image

    return (Image, Image): normalized images
    """
    joint_min = min(fixed_image.image.min(), moving_image.image.min())
    fixed_image.image -= joint_min
    moving_image.image -= joint_min

    # The maximum is taken AFTER the shift, so values end up in [0, 1].
    joint_max = max(fixed_image.image.max(), moving_image.image.max())
    fixed_image.image /= joint_max
    moving_image.image /= joint_max

    return (fixed_image, moving_image)
def remove_bed_filter(image, cropping=True):
    """
    Removes fine structures from the image using morphological operators. It can be used to remove the bed structure
    usually present in CT images. The resulting image and the respective body mask can be cropped with the cropping
    option.

    Note: the morphological operations are performed on a downsampled version of the image

    image (Image): image of interest
    cropping (bool): specifies if the image should be cropped after bed removal

    return (Image, Image): bed-free image and a body mask
    """
    # define parameters (Hounsfield window for body tissue, air as fill value)
    houndsfield_min = -300
    houndsfield_max = 3071
    houndsfield_default = -1024

    radius_opening = 3
    radius_closing = 40

    image_itk = image.itk()

    # resample image to a coarser in-plane grid (1/3 resolution) so the
    # morphological filters run fast
    workingSize = np.array(image.size)
    workingSize[0] /= 3
    workingSize[1] /= 3
    workingSpacing = np.array(image.spacing, dtype=float) * np.array(image.size, dtype=float) / np.array(workingSize, dtype=float)

    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputOrigin(image.origin)
    resampler.SetSize(workingSize.tolist())
    resampler.SetOutputSpacing(workingSpacing.tolist())
    resampler.SetInterpolator(2)  # linear interpolation
    resampler.SetNumberOfThreads(mp.cpu_count())

    image_tmp = resampler.Execute(image_itk)

    # threshold image: voxels within the Hounsfield window become foreground (1)
    thresholder = sitk.BinaryThresholdImageFilter()
    thresholder.SetOutsideValue(0)
    thresholder.SetInsideValue(1)
    thresholder.SetLowerThreshold(houndsfield_min)
    thresholder.SetUpperThreshold(houndsfield_max)
    thresholder.SetNumberOfThreads(mp.cpu_count())

    image_tmp = thresholder.Execute(image_tmp)

    # morphological opening with ball as structuring element
    # removes thin structures as the bed
    opening = sitk.BinaryMorphologicalOpeningImageFilter()
    opening.SetKernelType(sitk.sitkBall)
    opening.SetKernelRadius(radius_opening)
    opening.SetForegroundValue(1)
    opening.SetNumberOfThreads(mp.cpu_count())

    image_tmp = opening.Execute(image_tmp)

    # crop zero values from mask boundary
    if cropping:
        image_tmp = auto_crop_image_filter(Image(image_tmp).to(device=image.device)).itk()

    # morphological closing with ball as structuring element
    # fills up the lungs
    closing = sitk.BinaryMorphologicalClosingImageFilter()
    # BUGFIX: the kernel shape must be set with SetKernelType; the original
    # code called SetKernelRadius(sitk.sitkBall), which left the kernel type
    # at its default and set a bogus radius that was immediately overwritten.
    closing.SetKernelType(sitk.sitkBall)
    closing.SetKernelRadius(radius_closing)
    closing.SetForegroundValue(1)
    closing.SetNumberOfThreads(mp.cpu_count())

    image_tmp = closing.Execute(image_tmp)

    # resample mask back to the original spacing
    mask_size = np.array(np.array(image_tmp.GetSpacing(), dtype=float)*np.array(image_tmp.GetSize(),dtype=float)/np.array(image.spacing, dtype=float), dtype=int).tolist()

    resampler = sitk.ResampleImageFilter()
    resampler.SetOutputOrigin(image_tmp.GetOrigin())
    resampler.SetSize(mask_size)
    resampler.SetOutputSpacing(image.spacing)
    resampler.SetInterpolator(1)  # nearest neighbor interpolation
    resampler.SetNumberOfThreads(mp.cpu_count())

    bodyMask = resampler.Execute(image_tmp)

    # resample also the original image onto the mask grid
    resampler.SetInterpolator(2)
    image_itk = resampler.Execute(image_itk)

    # mask the image: everything outside the body mask is set to air (-1024 HU)
    masking = sitk.MaskImageFilter()
    masking.SetMaskingValue(0)
    masking.SetOutsideValue(houndsfield_default)
    masking.SetNumberOfThreads(mp.cpu_count())

    outImage = masking.Execute(image_itk, bodyMask)

    return (Image(outImage).to(device=image.device), Image(bodyMask).to(device=image.device))
| [
"SimpleITK.BinaryThresholdImageFilter",
"torch.Tensor",
"SimpleITK.ResampleImageFilter",
"multiprocessing.cpu_count",
"SimpleITK.BinaryMorphologicalClosingImageFilter",
"numpy.array",
"SimpleITK.MaskImageFilter",
"SimpleITK.BinaryMorphologicalOpeningImageFilter"
] | [((730, 744), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (742, 744), True, 'import multiprocessing as mp\n'), ((3801, 3821), 'numpy.array', 'np.array', (['image.size'], {}), '(image.size)\n', (3809, 3821), True, 'import numpy as np\n'), ((4018, 4044), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (4042, 4044), True, 'import SimpleITK as sitk\n'), ((4382, 4415), 'SimpleITK.BinaryThresholdImageFilter', 'sitk.BinaryThresholdImageFilter', ([], {}), '()\n', (4413, 4415), True, 'import SimpleITK as sitk\n'), ((4804, 4848), 'SimpleITK.BinaryMorphologicalOpeningImageFilter', 'sitk.BinaryMorphologicalOpeningImageFilter', ([], {}), '()\n', (4846, 4848), True, 'import SimpleITK as sitk\n'), ((5313, 5357), 'SimpleITK.BinaryMorphologicalClosingImageFilter', 'sitk.BinaryMorphologicalClosingImageFilter', ([], {}), '()\n', (5355, 5357), True, 'import SimpleITK as sitk\n'), ((5799, 5825), 'SimpleITK.ResampleImageFilter', 'sitk.ResampleImageFilter', ([], {}), '()\n', (5823, 5825), True, 'import SimpleITK as sitk\n'), ((6286, 6308), 'SimpleITK.MaskImageFilter', 'sitk.MaskImageFilter', ([], {}), '()\n', (6306, 6308), True, 'import SimpleITK as sitk\n'), ((3966, 4000), 'numpy.array', 'np.array', (['workingSize'], {'dtype': 'float'}), '(workingSize, dtype=float)\n', (3974, 4000), True, 'import numpy as np\n'), ((4278, 4292), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4290, 4292), True, 'import multiprocessing as mp\n'), ((4622, 4636), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (4634, 4636), True, 'import multiprocessing as mp\n'), ((4999, 5013), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5011, 5013), True, 'import multiprocessing as mp\n'), ((5510, 5524), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (5522, 5524), True, 'import multiprocessing as mp\n'), ((6057, 6071), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (6069, 6071), True, 
'import multiprocessing as mp\n'), ((6420, 6434), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (6432, 6434), True, 'import multiprocessing as mp\n'), ((3891, 3927), 'numpy.array', 'np.array', (['image.spacing'], {'dtype': 'float'}), '(image.spacing, dtype=float)\n', (3899, 3927), True, 'import numpy as np\n'), ((3930, 3963), 'numpy.array', 'np.array', (['image.size'], {'dtype': 'float'}), '(image.size, dtype=float)\n', (3938, 3963), True, 'import numpy as np\n'), ((1553, 1577), 'torch.Tensor', 'th.Tensor', (['image.spacing'], {}), '(image.spacing)\n', (1562, 1577), True, 'import torch as th\n'), ((1580, 1621), 'torch.Tensor', 'th.Tensor', (['[rminmax[1][0], rminmax[0][0]]'], {}), '([rminmax[1][0], rminmax[0][0]])\n', (1589, 1621), True, 'import torch as th\n'), ((1924, 1947), 'torch.Tensor', 'th.Tensor', (['image.origin'], {}), '(image.origin)\n', (1933, 1947), True, 'import torch as th\n'), ((1950, 1974), 'torch.Tensor', 'th.Tensor', (['image.spacing'], {}), '(image.spacing)\n', (1959, 1974), True, 'import torch as th\n'), ((1977, 2042), 'torch.Tensor', 'th.Tensor', (['[rminmax[1][0][0], rminmax[0][0][0], rminmax[0][0][1]]'], {}), '([rminmax[1][0][0], rminmax[0][0][0], rminmax[0][0][1]])\n', (1986, 2042), True, 'import torch as th\n'), ((5725, 5761), 'numpy.array', 'np.array', (['image.spacing'], {'dtype': 'float'}), '(image.spacing, dtype=float)\n', (5733, 5761), True, 'import numpy as np\n')] |
from threading import Lock
from flask import Flask, render_template, session, request, \
copy_current_request_context
from flask_socketio import SocketIO, emit, join_room, leave_room, \
close_room, rooms, disconnect
from keras.models import load_model
import tensorflow as tf
import numpy as np
from vggish_input import waveform_to_examples
import homesounds
from pathlib import Path
import time
import argparse
import wget
from helpers import dbFS
# Set this variable to "threading", "eventlet" or "gevent" to test the
# different async modes, or leave it set to None for the application to choose
# the best option based on installed packages.
async_mode = None
# Flask app with Socket.IO on top; async_mode is chosen automatically.
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, async_mode=async_mode)
thread = None
thread_lock = Lock()
# contexts
context = homesounds.everything
# use this to change context -- see homesounds.py
active_context = homesounds.everything
# thresholds
PREDICTION_THRES = 0.5  # confidence
DBLEVEL_THRES = -30  # dB
# 16 kHz mono audio, processed in 1-second (RATE samples) chunks.
CHANNELS = 1
RATE = 16000
CHUNK = RATE
# NOTE(review): name looks like a truncated "MICROPHONES_DESCRIPTION".
MICROPHONES_DEION = []
FPS = 60.0
###########################
# Download model, if it doesn't exist
###########################
MODEL_URL = "https://www.dropbox.com/s/cq1d7uqg0l28211/example_model.hdf5?dl=1"
MODEL_PATH = "models/example_model.hdf5"
print("=====")
print("2 / 2: Checking model... ")
print("=====")
model_filename = "models/example_model.hdf5"
homesounds_model = Path(model_filename)
if (not homesounds_model.is_file()):
    print("Downloading example_model.hdf5 [867MB]: ")
    wget.download(MODEL_URL, MODEL_PATH)
##############################
# Load Deep Learning Model
##############################
print("Using deep learning model: %s" % (model_filename))
# Keep the Keras model and its TF graph as module globals; the graph is
# re-entered in the handlers so prediction works from worker threads.
model = load_model(model_filename)
graph = tf.get_default_graph()
# ##############################
# # Setup Audio Callback
# ##############################
def audio_samples(in_data, frame_count, time_info, status_flags):
global graph
np_wav = np.fromstring(in_data, dtype=np.int16) / \
32768.0 # Convert to [-1.0, +1.0]
# Compute RMS and convert to dB
rms = np.sqrt(np.mean(np_wav**2))
db = dbFS(rms)
# Make predictions
x = waveform_to_examples(np_wav, RATE)
predictions = []
with graph.as_default():
if x.shape[0] != 0:
x = x.reshape(len(x), 96, 64, 1)
print('Reshape x successful', x.shape)
pred = model.predict(x)
predictions.append(pred)
print('Prediction succeeded')
for prediction in predictions:
context_prediction = np.take(
prediction[0], [homesounds.labels[x] for x in active_context])
m = np.argmax(context_prediction)
if (context_prediction[m] > PREDICTION_THRES and db > DBLEVEL_THRES):
print("Prediction: %s (%0.2f)" % (
homesounds.to_human_labels[active_context[m]], context_prediction[m]))
return (in_data, pyaudio.paContinue)
@socketio.on('invalid_audio_feature_data')
def handle_source(json_data):
db = str(json_data['db'])
time = str(json_data['time'])
record_time = str(json_data['record_time'])
socketio.emit(
'audio_label',
{
'label': 'Unidentified Sound',
'accuracy': '1.0',
'db': str(db),
'time': str(time),
'record_time': str(record_time)
},
room=request.sid)
@socketio.on('audio_feature_data')
def handle_source(json_data):
data = str(json_data['data'])
db = str(json_data['db'])
time = str(json_data['time'])
data = data[1:-1]
global graph
x = np.fromstring(data, dtype=np.float16, sep=',')
x = x.reshape(1, 96, 64, 1)
with graph.as_default():
start_time = time.time()
pred = model.predict(x)
elapsed_time = time.time() - start_time
# Write prediction time to file
with open('model_prediction_time.csv', 'a') as file:
file.write(str(elapsed_time) + '\n')
context_prediction = np.take(
pred, [homesounds.labels[x] for x in active_context])
m = np.argmax(context_prediction)
print('Max prediction', str(
homesounds.to_human_labels[active_context[m]]), str(context_prediction[m]))
if (context_prediction[m] > PREDICTION_THRES):
print("Prediction: %s (%0.2f)" % (
homesounds.to_human_labels[active_context[m]], context_prediction[m]))
socketio.emit(
'audio_label',
{
'label': str(homesounds.to_human_labels[active_context[m]]),
'accuracy': str(context_prediction[m]),
'db': str(db),
'time': str(time)
},
room=request.sid)
else:
socketio.emit(
'audio_label',
{
'label': 'Unidentified Sound',
'accuracy': '1.0',
'db': str(db),
'time': str(time)
},
room=request.sid)
@socketio.on('audio_data')
def handle_source(json_data):
data = str(json_data['data'])
data = data[1:-1]
global graph
np_wav = np.fromstring(data, dtype=np.int16, sep=',') / \
32768.0 # Convert to [-1.0, +1.0]
# Compute RMS and convert to dB
rms = np.sqrt(np.mean(np_wav**2))
db = dbFS(rms)
# Make predictions
x = waveform_to_examples(np_wav, RATE)
predictions = []
with graph.as_default():
if x.shape[0] != 0:
x = x.reshape(len(x), 96, 64, 1)
pred = model.predict(x)
predictions.append(pred)
for prediction in predictions:
context_prediction = np.take(
prediction[0], [homesounds.labels[x] for x in active_context])
m = np.argmax(context_prediction)
print('Max prediction', str(
homesounds.to_human_labels[active_context[m]]), str(context_prediction[m]))
if (context_prediction[m] > PREDICTION_THRES and db > DBLEVEL_THRES):
socketio.emit('audio_label',
{
'label': str(homesounds.to_human_labels[active_context[m]]),
'accuracy': str(context_prediction[m]),
'db': str(db)
},
room=request.sid)
print("Prediction: %s (%0.2f)" % (
homesounds.to_human_labels[active_context[m]], context_prediction[m]))
@app.route('/')
def index():
return render_template('index.html',)
@socketio.on('send_message')
def handle_source(json_data):
print('Receive message...' + str(json_data['message']))
text = json_data['message'].encode('ascii', 'ignore')
# socketio.emit('echo', {'echo': 'Server Says: ' + str(text) + " from requestid: " + str(request.sid)})
socketio.emit('echo', {'echo': 'Server Says specifically reply : ' +
str(text) + " to requestid: " + str(request.sid)}, room=request.sid)
print('Sending message back..')
if __name__ == '__main__':
socketio.run(app, host='192.168.3.11', port='8788', debug=True) | [
"flask.render_template",
"wget.download",
"numpy.mean",
"vggish_input.waveform_to_examples",
"keras.models.load_model",
"pathlib.Path",
"flask.Flask",
"threading.Lock",
"numpy.argmax",
"flask_socketio.SocketIO",
"numpy.take",
"helpers.dbFS",
"numpy.fromstring",
"time.time",
"tensorflow.g... | [((680, 695), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (685, 695), False, 'from flask import Flask, render_template, session, request, copy_current_request_context\n'), ((744, 780), 'flask_socketio.SocketIO', 'SocketIO', (['app'], {'async_mode': 'async_mode'}), '(app, async_mode=async_mode)\n', (752, 780), False, 'from flask_socketio import SocketIO, emit, join_room, leave_room, close_room, rooms, disconnect\n'), ((809, 815), 'threading.Lock', 'Lock', ([], {}), '()\n', (813, 815), False, 'from threading import Lock\n'), ((1445, 1465), 'pathlib.Path', 'Path', (['model_filename'], {}), '(model_filename)\n', (1449, 1465), False, 'from pathlib import Path\n'), ((1754, 1780), 'keras.models.load_model', 'load_model', (['model_filename'], {}), '(model_filename)\n', (1764, 1780), False, 'from keras.models import load_model\n'), ((1789, 1811), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1809, 1811), True, 'import tensorflow as tf\n'), ((1561, 1597), 'wget.download', 'wget.download', (['MODEL_URL', 'MODEL_PATH'], {}), '(MODEL_URL, MODEL_PATH)\n', (1574, 1597), False, 'import wget\n'), ((2171, 2180), 'helpers.dbFS', 'dbFS', (['rms'], {}), '(rms)\n', (2175, 2180), False, 'from helpers import dbFS\n'), ((2213, 2247), 'vggish_input.waveform_to_examples', 'waveform_to_examples', (['np_wav', 'RATE'], {}), '(np_wav, RATE)\n', (2233, 2247), False, 'from vggish_input import waveform_to_examples\n'), ((3667, 3713), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': 'np.float16', 'sep': '""","""'}), "(data, dtype=np.float16, sep=',')\n", (3680, 3713), True, 'import numpy as np\n'), ((5467, 5476), 'helpers.dbFS', 'dbFS', (['rms'], {}), '(rms)\n', (5471, 5476), False, 'from helpers import dbFS\n'), ((5508, 5542), 'vggish_input.waveform_to_examples', 'waveform_to_examples', (['np_wav', 'RATE'], {}), '(np_wav, RATE)\n', (5528, 5542), False, 'from vggish_input import waveform_to_examples\n'), ((6721, 6750), 
'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (6736, 6750), False, 'from flask import Flask, render_template, session, request, copy_current_request_context\n'), ((2002, 2040), 'numpy.fromstring', 'np.fromstring', (['in_data'], {'dtype': 'np.int16'}), '(in_data, dtype=np.int16)\n', (2015, 2040), True, 'import numpy as np\n'), ((2142, 2162), 'numpy.mean', 'np.mean', (['(np_wav ** 2)'], {}), '(np_wav ** 2)\n', (2149, 2162), True, 'import numpy as np\n'), ((3796, 3807), 'time.time', 'time.time', ([], {}), '()\n', (3805, 3807), False, 'import time\n'), ((4067, 4128), 'numpy.take', 'np.take', (['pred', '[homesounds.labels[x] for x in active_context]'], {}), '(pred, [homesounds.labels[x] for x in active_context])\n', (4074, 4128), True, 'import numpy as np\n'), ((4154, 4183), 'numpy.argmax', 'np.argmax', (['context_prediction'], {}), '(context_prediction)\n', (4163, 4183), True, 'import numpy as np\n'), ((5292, 5336), 'numpy.fromstring', 'np.fromstring', (['data'], {'dtype': 'np.int16', 'sep': '""","""'}), "(data, dtype=np.int16, sep=',')\n", (5305, 5336), True, 'import numpy as np\n'), ((5438, 5458), 'numpy.mean', 'np.mean', (['(np_wav ** 2)'], {}), '(np_wav ** 2)\n', (5445, 5458), True, 'import numpy as np\n'), ((2605, 2675), 'numpy.take', 'np.take', (['prediction[0]', '[homesounds.labels[x] for x in active_context]'], {}), '(prediction[0], [homesounds.labels[x] for x in active_context])\n', (2612, 2675), True, 'import numpy as np\n'), ((2709, 2738), 'numpy.argmax', 'np.argmax', (['context_prediction'], {}), '(context_prediction)\n', (2718, 2738), True, 'import numpy as np\n'), ((3863, 3874), 'time.time', 'time.time', ([], {}), '()\n', (3872, 3874), False, 'import time\n'), ((5813, 5883), 'numpy.take', 'np.take', (['prediction[0]', '[homesounds.labels[x] for x in active_context]'], {}), '(prediction[0], [homesounds.labels[x] for x in active_context])\n', (5820, 5883), True, 'import numpy as np\n'), ((5917, 5946), 
'numpy.argmax', 'np.argmax', (['context_prediction'], {}), '(context_prediction)\n', (5926, 5946), True, 'import numpy as np\n')] |
from utils import *
import numpy as np
import h5py
import os
import pandas as pd
from PIL import Image
from tqdm import tqdm
def resize_images(image_list, im_size):
"""Resize a list of images to a given size.
Parameters
----------
image_list : list
A list of images to resize, in any format supported by PIL
im_size : int
The side length of the resized images
"""
return_list = []
for im in image_list:
img = Image.open(im)
img = img.resize((im_size, im_size), Image.ANTIALIAS)
np_img = np.array(img)
return_list.append(np_img)
return return_list
def create_image_label_list(img_path, group, im_size, skip, all_labels):
"""
"""
label = all_labels['label'].loc[int(group)]
image_list = os.listdir(os.path.join(img_path, group))
if len(image_list) < 24:
return [], []
image_list = sorted(image_list[:24:skip])
images = resize_images([os.path.join(img_path, group, i) for i in image_list], im_size)
return images, label
def make_hdf5(img_path, im_size, skip, all_labels, desired_labels, fname='data_hdf5.h5'):
"""Make an HDF5 file from a directory of images.
Parameters
----------
img_path : str
The path of the folder containing the images
im_size : int
The side length to give the output images
skip : int
The number of images to skip over before adding a new image
all_labels : DataFrame
???
desired_labels : list
The labels to be considered
fname : str
The name of the HDF5 file to output
"""
indices = list(all_labels[all_labels['label'].isin(desired_labels)].index)
with h5py.File(fname, 'w') as hf:
for group in tqdm(indices):
group = str(group)
images, label = create_image_label_list(img_path, group, im_size, skip, all_labels)
if not images:
print('{} excluded, because of the short length'.format(group))
continue
label_id = desired_labels.index(label)
hfgroup = hf.create_group(group)
hfgroup.create_dataset('images', data=images)
hfgroup.create_dataset('label', data=label)
hfgroup.create_dataset('label_id', data=label_id)
if __name__ == "__main__":
# read config.ini and use the settings
param = get_configs()
data_path = param['data_path']
img_path = param['img_path']
train_labels = pd.read_csv(param['csv_train'], names=['label'], sep=';')
val_labels = pd.read_csv(param['csv_val'], names=['label'], sep=';')
all_labels = pd.read_csv(param['csv_labels'], sep=';')
labels = param['labels']
fn_postfix = str(len(labels))
print('labels are {}, length of {}'.format(labels, fn_postfix))
train_fn = data_path + os.sep + 'train_hdf5' + fn_postfix + '.h5'
val_fn = data_path + os.sep + 'val_hdf5' + fn_postfix + '.h5'
maker_params = {'img_path': img_path, 'im_size': param['im_size'], 'skip': param['skip'], 'desired_labels': labels}
make_hdf5(all_labels=train_labels, fname=train_fn, **maker_params)
make_hdf5(all_labels=val_labels, fname=val_fn, **maker_params) | [
"PIL.Image.open",
"pandas.read_csv",
"tqdm.tqdm",
"os.path.join",
"h5py.File",
"numpy.array"
] | [((2489, 2546), 'pandas.read_csv', 'pd.read_csv', (["param['csv_train']"], {'names': "['label']", 'sep': '""";"""'}), "(param['csv_train'], names=['label'], sep=';')\n", (2500, 2546), True, 'import pandas as pd\n'), ((2566, 2621), 'pandas.read_csv', 'pd.read_csv', (["param['csv_val']"], {'names': "['label']", 'sep': '""";"""'}), "(param['csv_val'], names=['label'], sep=';')\n", (2577, 2621), True, 'import pandas as pd\n'), ((2641, 2682), 'pandas.read_csv', 'pd.read_csv', (["param['csv_labels']"], {'sep': '""";"""'}), "(param['csv_labels'], sep=';')\n", (2652, 2682), True, 'import pandas as pd\n'), ((469, 483), 'PIL.Image.open', 'Image.open', (['im'], {}), '(im)\n', (479, 483), False, 'from PIL import Image\n'), ((563, 576), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (571, 576), True, 'import numpy as np\n'), ((803, 832), 'os.path.join', 'os.path.join', (['img_path', 'group'], {}), '(img_path, group)\n', (815, 832), False, 'import os\n'), ((1696, 1717), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (1705, 1717), False, 'import h5py\n'), ((1746, 1759), 'tqdm.tqdm', 'tqdm', (['indices'], {}), '(indices)\n', (1750, 1759), False, 'from tqdm import tqdm\n'), ((959, 991), 'os.path.join', 'os.path.join', (['img_path', 'group', 'i'], {}), '(img_path, group, i)\n', (971, 991), False, 'import os\n')] |
# # Chapter 5: Image Enhancement
# Author: <NAME>
###########################################
# ## Problems
# ### 1.1 BLUR Filter to remove Salt & Pepper Noise
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pylab as plt
from PIL import Image, ImageFilter
from copy import deepcopy
def plot_image(image, title=None, sz=20):
plt.imshow(image)
plt.title(title, size=sz)
plt.axis('off')
def add_noise(im, prop_noise, salt=True, pepper=True):
im = deepcopy(im)
n = int(im.width * im.height * prop_noise)
x, y = np.random.randint(0, im.width, n), np.random.randint(0, im.height, n)
for (x,y) in zip(x,y):
im.putpixel((x, y), # generate salt-and-pepper noise
((0,0,0) if np.random.rand() < 0.5 else (255,255,255)) if salt and pepper \
else (255,255,255) if salt \
else (0, 0, 0)) # if pepper
return im
orig = Image.open('images/Img_05_01.jpg')
i = 1
plt.figure(figsize=(12,35))
for prop_noise in np.linspace(0.05,0.3,6):
# choose random locations inside image
im = add_noise(orig, prop_noise)
plt.subplot(6,2,i), plot_image(im, 'Original Image with ' + str(int(100*prop_noise)) + '% added noise')
im1 = im.filter(ImageFilter.BLUR)
plt.subplot(6,2,i+1), plot_image(im1, 'Blurred Image')
i += 2
plt.show()
# ### 1.2 Gaussian BLUR Filter to remove Salt & Pepper Noise
im = Image.open('images/Img_05_01.jpg')
im = add_noise(im, prop_noise = 0.2)
plt.figure(figsize=(20,15))
i = 1
for radius in np.linspace(1, 3, 12):
im1 = im.filter(ImageFilter.GaussianBlur(radius))
plt.subplot(3,4,i)
plot_image(im1, 'radius = ' + str(round(radius,2)))
i += 1
plt.suptitle('PIL Gaussian Blur with different Radius', size=30)
plt.show()
# ### 1.3 Median Filter to remove Salt & Pepper Noise
im = Image.open('images/Img_05_02.jpg')
im = add_noise(im, prop_noise = 0.1)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MedianFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')', 20)
i += 1
plt.tight_layout()
plt.show()
# ### 1.4 Max, Min and Mode filters to remove outliers from image
# #### Min filter
orig = Image.open('images/Img_05_11.jpg')
im = add_noise(orig, prop_noise = 0.2, pepper=False)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MinFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')')
i += 1
plt.tight_layout()
plt.show()
# #### Max filter
im = add_noise(orig, prop_noise = 0.3, salt=False)
plt.figure(figsize=(20,10))
plt.subplot(1,4,1)
plot_image(im, 'Input noisy image')
i = 2
for sz in [3,7,11]:
im1 = im.filter(ImageFilter.MaxFilter(size=sz))
plt.subplot(1,4,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')')
i += 1
plt.show()
# #### Mode filter
orig = Image.open('images/Img_05_20.jpg')
im = add_noise(orig, prop_noise = 0.1)
plt.figure(figsize=(20,20))
plt.subplot(1,3,1)
plot_image(im, 'Input noisy image', 25)
i = 2
for sz in [3,5]:
im1 = im.filter(ImageFilter.ModeFilter(size=sz))
plt.subplot(1,3,i), plot_image(im1, 'Output (Filter size=' + str(sz) + ')', 25)
i += 1
plt.tight_layout()
plt.show()
# ### 1.5 Progressive Application of Gaussian Blur, Median, Mode and Max Filters on an image
im = Image.open('images/Img_05_02.jpg')
plt.figure(figsize=(10,15))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
im1 = im.copy()
sz = 5
for i in range(8):
im1 = im1.filter(ImageFilter.GaussianBlur(radius=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+1), plot_image(im1, 'Gaussian Blur' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.MedianFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+2), plot_image(im1, 'Median' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.ModeFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+3), plot_image(im1, 'Mode' if i == 0 else None, 25)
im1 = im.copy()
for i in range(8):
im1 = im1.filter(ImageFilter.MaxFilter(size=sz))
if i % 2 == 0:
plt.subplot(4,4,4*i//2+4), plot_image(im1, 'Max' if i == 0 else None, 25)
plt.show()
# ## 2. Unsharp masking to Sharpen an Image
# ### 2.1 With scikit-image filters module
#! pip install --upgrade scikit-image
#import skimage
#skimage.filters.__all__
import numpy as np
import matplotlib.pylab as plt
from skimage.io import imread
from skimage.filters import unsharp_mask
im = imread('images/Img_05_04.jpg')
im1 = unsharp_mask(im, radius=1, amount=1)
im2 = unsharp_mask(im, radius=5, amount=2)
im3 = unsharp_mask(im, radius=20, amount=3)
fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(20, 12))
axes = axes.ravel()
axes[0].set_title('Original image', size=20), axes[0].imshow(im)
axes[1].set_title('Enhanced image, radius=1, amount=1.0', size=20), axes[1].imshow(im1)
axes[2].set_title('Enhanced image, radius=5, amount=2.0', size=20), axes[2].imshow(im2)
axes[3].set_title('Enhanced image, radius=20, amount=3.0', size=20), axes[3].imshow(im3)
for ax in axes:
ax.axis('off')
fig.tight_layout()
plt.show()
# ### 2.2 With PIL ImageFilter module
from PIL import Image, ImageFilter
im = Image.open('images/Img_05_05.jpg')
plt.figure(figsize=(15,16))
plt.subplot(221), plot_image(im, 'original')
im1 = im.filter(ImageFilter.UnsharpMask(radius=2, percent=150))
plt.subplot(222), plot_image(im1, 'unsharp masking, radius=2, percent=150')
im1 = im.filter(ImageFilter.UnsharpMask(radius=5, percent=200))
plt.subplot(223), plot_image(im1, 'unsharp masking, radius=5, percent=200')
im1 = im.filter(ImageFilter.UnsharpMask(radius=10, percent=250))
plt.subplot(224), plot_image(im1, 'unsharp masking, radius=10, percent=250')
plt.tight_layout()
plt.show()
# ### 2.3 Laplacian Sharpening with SimpleITK
import SimpleITK as sitk
import numpy as np
import matplotlib.pylab as plt
image = sitk.ReadImage('images/Img_05_20.jpg', sitk.sitkFloat32)
filt = sitk.UnsharpMaskImageFilter()
filt.SetAmount(1.5) # typically set between 1 and 2
filt.SetSigmas(0.15)
sharpened = filt.Execute(image)
np_image = sitk.GetArrayFromImage(image)
np_image = np_image / np_image.max()
np_sharpened = sitk.GetArrayFromImage(sharpened)
np_sharpened = np_sharpened / np_sharpened.max()
plt.figure(figsize=(20,10))
plt.gray()
plt.subplots_adjust(0,0,1,1,0.05,0.05)
plt.subplot(121), plot_image(np_image, 'Original Image')
plt.subplot(122), plot_image(np_sharpened, 'Sharpened Image (with UnsharpMask)')
plt.show()
# ### 2.4 Implementing Unsharp Mask with opencv-python
import cv2
im = cv2.imread("images/Img_05_13.png")
im_smoothed = cv2.GaussianBlur(im, (11,11), 10, 10)
im1 = cv2.addWeighted(im, 1.0 + 3.0, im_smoothed, -3.0, 0) # im1 = im + 3.0*(im - im_smoothed)
plt.figure(figsize=(20,25))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
plt.subplot(211), plot_image(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), 'Original Image')
plt.subplot(212), plot_image(cv2.cvtColor(im1, cv2.COLOR_BGR2RGB), 'Sharpened Image')
plt.show()
# ## 3. Averaging of Images to remove Random Noise
from skimage import img_as_float
from skimage.util import random_noise
from skimage.metrics import peak_signal_noise_ratio
from skimage.io import imread
import matplotlib.pylab as plt
import numpy as np
im = img_as_float(imread('images/Img_05_06.jpg')) # original image
n = 100
images = np.zeros((n, im.shape[0], im.shape[1], im.shape[2]))
sigma = 0.2
for i in range(n):
images[i,...] = random_noise(im, var=sigma**2)
im_mean = images.mean(axis=0)
im_median = np.median(images, axis=0)
plt.figure(figsize=(10,10))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05, hspace=.01)
plt.subplot(221), plot_image(im, 'Original image')
plt.subplot(222), plot_image(images[0], 'Noisy PSNR: ' + str(round(peak_signal_noise_ratio(im, images[0]),3)))
plt.subplot(223), plot_image(im_mean, 'Mean PSNR: ' + str(round(peak_signal_noise_ratio(im, im_mean),3)))
plt.subplot(224), plot_image(im_median, 'Median PSNR: ' + str(round(peak_signal_noise_ratio(im, im_median),3)))
plt.show()
plt.figure(figsize=(10,5))
plt.hist(images[:,100,100,0], color='red', alpha=0.2, label='red')
plt.hist(images[:,100,100,1], color='green', alpha=0.2, label='green')
plt.hist(images[:,100,100,2], color='blue', alpha=0.2, label='blue')
plt.vlines(im[100,100,0], 0, 20, color='red', label='original')
plt.vlines(im[100,100,1], 0, 20, color='green', label='original')
plt.vlines(im[100,100,2], 0, 20, color='blue', label='original')
plt.vlines(im_mean[100,100,0], 0, 20, color='red', linestyles='dashed', label='estimated')
plt.vlines(im_mean[100,100,1], 0, 20, color='green', linestyles='dashed', label='estimated')
plt.vlines(im_mean[100,100,2], 0, 20, color='blue', linestyles='dashed', label='estimated')
plt.legend()
plt.grid()
plt.show()
# ## 4. Image Denoising with Curvature-Driven Algorithms
import SimpleITK as sitk
import matplotlib.pylab as plt
img = sitk.ReadImage('images/Img_05_11.png', sitk.sitkFloat64)
normfilter = sitk.NormalizeImageFilter()
caster = sitk.CastImageFilter()
caster.SetOutputPixelType(sitk.sitkFloat64)
tkfilter = sitk.ShotNoiseImageFilter()
tkfilter.SetScale(0.2)
img_noisy = tkfilter.Execute (img)
img_noisy = sitk.RescaleIntensity(img_noisy)
tkfilter = sitk.CurvatureFlowImageFilter()
tkfilter.SetNumberOfIterations(50)
tkfilter.SetTimeStep(0.1)
img_res_TK = tkfilter.Execute(img_noisy)
tkfilter = sitk.MinMaxCurvatureFlowImageFilter()
tkfilter.SetNumberOfIterations(50)
tkfilter.SetTimeStep(0.1)
tkfilter.SetStencilRadius(4)
img_res_TK1 = tkfilter.Execute(img_noisy)
img_res_TK1 = sitk.RescaleIntensity(img_res_TK1)
# #### Anisotropic Diffusion
tkfilter = sitk.CurvatureAnisotropicDiffusionImageFilter()
tkfilter.SetNumberOfIterations(100);
tkfilter.SetTimeStep(0.05);
tkfilter.SetConductanceParameter(3);
img_res_TK2 = tkfilter.Execute(img_noisy)
#img_res_TK1 = sitk.RescaleIntensity(img_res_TK1)
tkfilter = sitk.GradientAnisotropicDiffusionImageFilter()
tkfilter.SetNumberOfIterations(100);
tkfilter.SetTimeStep(0.05);
tkfilter.SetConductanceParameter(3);
img_res_TK3 = tkfilter.Execute(img_noisy)
plt.figure(figsize=(16,20))
plt.gray()
plt.subplots_adjust(0,0,1,1,0.01,0.05)
plt.subplot(321), plt.imshow(sitk.GetArrayFromImage(img)), plt.axis('off'), plt.title('Original', size=20)
plt.subplot(322), plt.imshow(sitk.GetArrayFromImage(img_noisy)), plt.axis('off'), plt.title('Noisy (with added Shot Noise)', size=20)
plt.subplot(323), plt.imshow(sitk.GetArrayFromImage(img_res_TK)), plt.axis('off'), plt.title('Denoised (with CurvatureFlowImageFilter)', size=20)
plt.subplot(324), plt.imshow(sitk.GetArrayFromImage(img_res_TK1)), plt.axis('off'), plt.title('Denoised (with MinMaxCurvatureFlowImageFilter)', size=20)
plt.subplot(325), plt.imshow(sitk.GetArrayFromImage(img_res_TK2)), plt.axis('off'), plt.title('Denoised (with CurvatureAnisotropicDiffusionImageFilter)', size=20)
plt.subplot(326), plt.imshow(sitk.GetArrayFromImage(img_res_TK3)), plt.axis('off'), plt.title('Denoised (with GradientAnisotropicDiffusionImageFilter)', size=20)
plt.show()
# ## 5. Contrast Strectching / Histogram Equalization with opencv-python
import numpy as np
import matplotlib.pylab as plt
import cv2
def plot_hist(img, col='r'):
hist,bins = np.histogram(img.flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = col)
plt.hist(img.flatten(),256,[0,256], color = col, alpha = 0.1)
plt.xlim([0,256])
plt.title('CDF and histogram of the color channels', size=20)
#plt.legend(('cdf','histogram'), loc = 'upper left')
return bins, cdf
def plot_img_hist(img, title):
plt.figure(figsize=(20,10))
plt.subplot(121), plot_image(img, title)
plt.subplot(122), plot_hist(img[...,0], 'r'), plot_hist(img[...,1], 'g'), plot_hist(img[...,2], 'b')
plt.show()
img = cv2.imread('images/Img_05_07.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img2 = img.copy()
for i in range(3):
hist,bins = np.histogram(img[...,i].flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_m = np.ma.masked_equal(cdf,0)
cdf_m = (cdf_m - cdf_m.min())*255/(cdf_m.max()-cdf_m.min())
#cdf_m = 255 * cdf / cdf[-1] # normalize
cdf = np.ma.filled(cdf_m,0).astype('uint8')
img2[...,i] = cdf[img[...,i]]
# use linear interpolation of cdf to find new pixel values
#img2[...,i] = np.reshape(np.interp(img[...,i].flatten(),bins[:-1],cdf), img[...,i].shape)
img_lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
equ = img_lab.copy()
equ[...,0] = cv2.equalizeHist(equ[...,0])
equ = np.clip(cv2.cvtColor(equ, cv2.COLOR_LAB2RGB), 0, 255)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl = img_lab.copy()
cl[...,0] = clahe.apply(cl[...,0])
cl = np.clip(cv2.cvtColor(cl, cv2.COLOR_LAB2RGB), 0, 255)
plot_img_hist(img, 'Original Image')
plot_img_hist(img2, 'Hist. Equalized')
plot_img_hist(equ, 'Hist. Equalized (LAB space)')
plot_img_hist(cl, 'Adaptive Hist. Equalized (LAB space)')
# ## 6. Fingerprint Cleaning and Minutiaes extraction
# ### 6.1 Fingerprint Cleaning with Morphological operations
from skimage.io import imread
from skimage.color import rgb2gray
import numpy as np
import matplotlib.pylab as plt
from skimage.morphology import binary_opening, binary_closing, skeletonize, square
from scipy.ndimage import morphological_gradient
from skimage.filters import threshold_otsu
im = rgb2gray(imread('images/Img_05_09.jpg'))
im[im <= 0.5] = 0 # binarize
im[im > 0.5] = 1
im_o = binary_opening(im, square(2))
im_c = binary_closing(im, square(2))
im_oc = binary_closing(binary_opening(im, square(2)), square(3))
im_s = skeletonize(im_oc)
im_g = morphological_gradient(im_oc.astype(np.uint8), size=(2,2))
plt.figure(figsize=(20,12))
plt.gray()
plt.subplot(231), plot_image(im, 'original')
plt.subplot(232), plot_image(im_o, 'opening')
plt.subplot(233), plot_image(im_c, 'closing')
plt.subplot(234), plot_image(im_oc, 'opening + closing')
plt.subplot(235), plot_image(im_s, 'skeletonizing')
plt.subplot(236), plot_image(im_g, 'morphological gradient')
plt.show()
# ### 6.2 Feature (Minutiaes) extraction from an enhanced fingerprint
from PIL import Image, ImageDraw
cells = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)]
def minutiae_at(pixels, i, j):
values = [pixels[i + k][j + l] for k, l in cells]
crossings = 0
for k in range(0, 8):
crossings += abs(values[k] - values[k + 1])
crossings /= 2
if pixels[i][j] == 1:
if crossings == 1:
return "ending"
if crossings == 3:
return "bifurcation"
return "none"
def calculate_minutiaes(im):
pixels = 255 - np.array(im).T
pixels = 1.0*(pixels > 10)
(x, y) = im.size
result = im.convert("RGB")
draw = ImageDraw.Draw(result)
colors = {"ending" : (150, 0, 0), "bifurcation" : (0, 150, 0)}
ellipse_size = 2
for i in range(1, x - 1):
for j in range(1, y - 1):
minutiae = minutiae_at(pixels, i, j)
if minutiae != "none":
draw.ellipse([(i - ellipse_size, j - ellipse_size), (i + ellipse_size, j + ellipse_size)], outline = colors[minutiae])
del draw
return result
im = Image.open('images/Img_05_10.jpg').convert("L") # covert to grayscale
out = calculate_minutiaes(im)
plt.figure(figsize=(15,12))
plt.gray()
plt.subplot(121), plot_image(im, 'input thinned')
plt.subplot(122), plot_image(out, 'with minutiaes extracted')
plt.show()
# ## 7. Edge Detection with LOG / Zero-Crossing, Canny vs. Holistically-Nested
# ### 7.0 Computing the Image Derivatives
from scipy.signal import convolve
from skimage.io import imread
from skimage.color import rgb2gray
img = rgb2gray(imread('images/Img_05_38.png'))
h, w = img.shape
kd1 = [[1, -1]]
kd2 = [[1, -2, 1]]
imgd1 = convolve(img, kd1, mode='same')
imgd2 = convolve(img, kd2, mode='same')
plt.figure(figsize=(20,10))
plt.gray()
plt.subplot(231), plt.imshow(img), plt.title('image', size=15)
plt.subplot(232), plt.imshow(imgd1), plt.title('1st derivative', size=15)
plt.subplot(233), plt.imshow(imgd2), plt.title('2nd derivative', size=15)
plt.subplot(234), plt.plot(range(w), img[0,:]), plt.title('image function', size=15)
plt.subplot(235), plt.plot(range(w), imgd1[0,:]), plt.title('1st derivative function', size=15)
plt.subplot(236), plt.plot(range(w), imgd2[0,:]), plt.title('2nd derivative function', size=15)
plt.show()
# ### 7.1 With LoG / Zero-Crossing
import numpy as np
from scipy import ndimage
from skimage.io import imread
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
def any_neighbor_neg(img, i, j):
for k in range(-1,2):
for l in range(-1,2):
if img[i+k, j+k] < 0:
return True, img[i, j] - img[i+k, j+k]
return False, None
def zero_crossing(img, th):
out_img = np.zeros(img.shape)
for i in range(1,img.shape[0]-1):
for j in range(1,img.shape[1]-1):
found, slope = any_neighbor_neg(img, i, j)
if img[i,j] > 0 and found and slope > th:
out_img[i,j] = 255
return out_img
img = rgb2gray(imread('images/Img_05_18.jpg'))
#img = misc.imread('../new images/tagore.png')[...,3]
print(np.max(img))
fig = plt.figure(figsize=(10,16))
plt.subplots_adjust(0,0,1,0.95,0.05,0.05)
plt.gray() # show the filtered result in grayscale
for sigma, thres in zip(range(3,10,2), [1e-3, 1e-4, 1e-5, 1e-6]):
plt.subplot(3,2,sigma//2)
result = ndimage.gaussian_laplace(img, sigma=sigma)
result = zero_crossing(result, thres)
plt.imshow(result)
plt.axis('off')
plt.title('LoG with zero-crossing, sigma=' + str(sigma), size=20)
plt.tight_layout()
plt.show()
# ### 7.2 With Canny and Holistically-nested (deep learning model based)
import cv2
import numpy as np
import matplotlib.pylab as plt
image = cv2.imread('images/Img_05_18.jpg')
(h, w) = image.shape[:2]
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
canny = cv2.Canny(blurred, 80, 150)
class CropLayer(object):
    """Custom OpenCV DNN layer that centre-crops its first input blob to the
    spatial size (height, width) of its second input blob.

    Registered under the name 'Crop' so the HED Caffe model's crop layers
    can be executed by cv2.dnn.
    """

    def __init__(self, params, blobs):
        # Crop window bounds; filled in by getMemoryShapes before forward().
        self.xstart = 0
        self.xend = 0
        self.ystart = 0
        self.yend = 0

    def getMemoryShapes(self, inputs):
        """Compute the output blob shape and cache the centred crop window.

        inputs[0] is the blob to crop, inputs[1] the reference blob whose
        spatial size is the target; both are NCHW shape lists.
        """
        src_shape, ref_shape = inputs[0], inputs[1]
        batch, channels = src_shape[0], src_shape[1]
        out_h, out_w = ref_shape[2], ref_shape[3]
        # Centre the crop window inside the source blob.
        self.ystart = (src_shape[2] - ref_shape[2]) // 2
        self.xstart = (src_shape[3] - ref_shape[3]) // 2
        self.yend = self.ystart + out_h
        self.xend = self.xstart + out_w
        return [[batch, channels, out_h, out_w]]

    def forward(self, inputs):
        """Return the cropped view of the first input blob."""
        return [inputs[0][:, :, self.ystart:self.yend, self.xstart:self.xend]]
# Run the pretrained Holistically-nested Edge Detection (HED) Caffe model.
prototxt_path = "models/deploy.prototxt"
model_path = "models/hed_pretrained_bsds.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
# The HED model uses Caffe 'Crop' layers; register our implementation.
cv2.dnn_registerLayer('Crop', CropLayer)
# Mean values are the BGR channel means the model was trained with.
blob = cv2.dnn.blobFromImage(image, scalefactor=1.0, size=(w, h), mean=(104.00698793, 116.66876762, 122.67891434), swapRB=False, crop=False)
net.setInput(blob)
hed = net.forward()
# BUG FIX: the original referenced undefined names `outs` and `i`
# (NameError). net.forward() returns a single NCHW blob in `hed`;
# extract its 2-D edge map and resize to the input dimensions.
hed = cv2.resize(hed[0, 0, :, :], (w, h))
hed = (255 * hed).astype("uint8")
# Show the input next to the Canny and HED edge maps for visual comparison.
plt.figure(figsize=(20, 8))
plt.gray()
plt.subplots_adjust(0, 0, 1, 0.975, 0.05, 0.05)
plt.subplot(131)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.title('input', size=20)
plt.subplot(132)
plt.imshow(canny)
plt.axis('off')
plt.title('canny', size=20)
plt.subplot(133)
plt.imshow(hed)
plt.axis('off')
plt.title('holistically-nested', size=20)
plt.show()
| [
"matplotlib.pylab.xlim",
"matplotlib.pylab.subplots",
"scipy.signal.convolve",
"numpy.ma.masked_equal",
"numpy.random.rand",
"scipy.ndimage.gaussian_laplace",
"matplotlib.pylab.hist",
"PIL.ImageDraw.Draw",
"matplotlib.pylab.imshow",
"numpy.array",
"matplotlib.pylab.show",
"copy.deepcopy",
"s... | [((927, 961), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_01.jpg"""'], {}), "('images/Img_05_01.jpg')\n", (937, 961), False, 'from PIL import Image, ImageDraw\n'), ((968, 996), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(12, 35)'}), '(figsize=(12, 35))\n', (978, 996), True, 'import matplotlib.pylab as plt\n'), ((1014, 1039), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.3)', '(6)'], {}), '(0.05, 0.3, 6)\n', (1025, 1039), True, 'import numpy as np\n'), ((1318, 1328), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1326, 1328), True, 'import matplotlib.pylab as plt\n'), ((1397, 1431), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_01.jpg"""'], {}), "('images/Img_05_01.jpg')\n", (1407, 1431), False, 'from PIL import Image, ImageDraw\n'), ((1470, 1498), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 15)'}), '(figsize=(20, 15))\n', (1480, 1498), True, 'import matplotlib.pylab as plt\n'), ((1518, 1539), 'numpy.linspace', 'np.linspace', (['(1)', '(3)', '(12)'], {}), '(1, 3, 12)\n', (1529, 1539), True, 'import numpy as np\n'), ((1686, 1750), 'matplotlib.pylab.suptitle', 'plt.suptitle', (['"""PIL Gaussian Blur with different Radius"""'], {'size': '(30)'}), "('PIL Gaussian Blur with different Radius', size=30)\n", (1698, 1750), True, 'import matplotlib.pylab as plt\n'), ((1751, 1761), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1759, 1761), True, 'import matplotlib.pylab as plt\n'), ((1823, 1857), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_02.jpg"""'], {}), "('images/Img_05_02.jpg')\n", (1833, 1857), False, 'from PIL import Image, ImageDraw\n'), ((1896, 1924), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (1906, 1924), True, 'import matplotlib.pylab as plt\n'), ((1924, 1944), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (1935, 1944), True, 'import matplotlib.pylab as plt\n'), ((2147, 2165), 
'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2163, 2165), True, 'import matplotlib.pylab as plt\n'), ((2166, 2176), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2174, 2176), True, 'import matplotlib.pylab as plt\n'), ((2271, 2305), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_11.jpg"""'], {}), "('images/Img_05_11.jpg')\n", (2281, 2305), False, 'from PIL import Image, ImageDraw\n'), ((2360, 2388), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2370, 2388), True, 'import matplotlib.pylab as plt\n'), ((2388, 2408), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (2399, 2408), True, 'import matplotlib.pylab as plt\n'), ((2604, 2622), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2620, 2622), True, 'import matplotlib.pylab as plt\n'), ((2623, 2633), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2631, 2633), True, 'import matplotlib.pylab as plt\n'), ((2706, 2734), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2716, 2734), True, 'import matplotlib.pylab as plt\n'), ((2734, 2754), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', '(1)'], {}), '(1, 4, 1)\n', (2745, 2754), True, 'import matplotlib.pylab as plt\n'), ((2950, 2960), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (2958, 2960), True, 'import matplotlib.pylab as plt\n'), ((2989, 3023), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_20.jpg"""'], {}), "('images/Img_05_20.jpg')\n", (2999, 3023), False, 'from PIL import Image, ImageDraw\n'), ((3064, 3092), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (3074, 3092), True, 'import matplotlib.pylab as plt\n'), ((3092, 3112), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (3103, 3112), True, 'import matplotlib.pylab as plt\n'), ((3314, 3332), 
'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3330, 3332), True, 'import matplotlib.pylab as plt\n'), ((3333, 3343), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (3341, 3343), True, 'import matplotlib.pylab as plt\n'), ((3445, 3479), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_02.jpg"""'], {}), "('images/Img_05_02.jpg')\n", (3455, 3479), False, 'from PIL import Image, ImageDraw\n'), ((3480, 3508), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 15)'}), '(figsize=(10, 15))\n', (3490, 3508), True, 'import matplotlib.pylab as plt\n'), ((3508, 3554), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.95)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.95, 0.05, 0.05)\n', (3527, 3554), True, 'import matplotlib.pylab as plt\n'), ((4300, 4310), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4308, 4310), True, 'import matplotlib.pylab as plt\n'), ((4607, 4637), 'skimage.io.imread', 'imread', (['"""images/Img_05_04.jpg"""'], {}), "('images/Img_05_04.jpg')\n", (4613, 4637), False, 'from skimage.io import imread\n'), ((4644, 4680), 'skimage.filters.unsharp_mask', 'unsharp_mask', (['im'], {'radius': '(1)', 'amount': '(1)'}), '(im, radius=1, amount=1)\n', (4656, 4680), False, 'from skimage.filters import unsharp_mask\n'), ((4687, 4723), 'skimage.filters.unsharp_mask', 'unsharp_mask', (['im'], {'radius': '(5)', 'amount': '(2)'}), '(im, radius=5, amount=2)\n', (4699, 4723), False, 'from skimage.filters import unsharp_mask\n'), ((4730, 4767), 'skimage.filters.unsharp_mask', 'unsharp_mask', (['im'], {'radius': '(20)', 'amount': '(3)'}), '(im, radius=20, amount=3)\n', (4742, 4767), False, 'from skimage.filters import unsharp_mask\n'), ((4781, 4855), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(20, 12)'}), '(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(20, 12))\n', (4793, 4855), True, 
'import matplotlib.pylab as plt\n'), ((5257, 5267), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5265, 5267), True, 'import matplotlib.pylab as plt\n'), ((5348, 5382), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_05.jpg"""'], {}), "('images/Img_05_05.jpg')\n", (5358, 5382), False, 'from PIL import Image, ImageDraw\n'), ((5384, 5412), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 16)'}), '(figsize=(15, 16))\n', (5394, 5412), True, 'import matplotlib.pylab as plt\n'), ((5879, 5897), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5895, 5897), True, 'import matplotlib.pylab as plt\n'), ((5898, 5908), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (5906, 5908), True, 'import matplotlib.pylab as plt\n'), ((6041, 6097), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""images/Img_05_20.jpg"""', 'sitk.sitkFloat32'], {}), "('images/Img_05_20.jpg', sitk.sitkFloat32)\n", (6055, 6097), True, 'import SimpleITK as sitk\n'), ((6106, 6135), 'SimpleITK.UnsharpMaskImageFilter', 'sitk.UnsharpMaskImageFilter', ([], {}), '()\n', (6133, 6135), True, 'import SimpleITK as sitk\n'), ((6254, 6283), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (6276, 6283), True, 'import SimpleITK as sitk\n'), ((6336, 6369), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['sharpened'], {}), '(sharpened)\n', (6358, 6369), True, 'import SimpleITK as sitk\n'), ((6420, 6448), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (6430, 6448), True, 'import matplotlib.pylab as plt\n'), ((6448, 6458), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (6456, 6458), True, 'import matplotlib.pylab as plt\n'), ((6459, 6502), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(1)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 1, 0.05, 0.05)\n', (6478, 6502), True, 'import matplotlib.pylab as plt\n'), ((6636, 6646), 
'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6644, 6646), True, 'import matplotlib.pylab as plt\n'), ((6721, 6755), 'cv2.imread', 'cv2.imread', (['"""images/Img_05_13.png"""'], {}), "('images/Img_05_13.png')\n", (6731, 6755), False, 'import cv2\n'), ((6770, 6808), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['im', '(11, 11)', '(10)', '(10)'], {}), '(im, (11, 11), 10, 10)\n', (6786, 6808), False, 'import cv2\n'), ((6815, 6867), 'cv2.addWeighted', 'cv2.addWeighted', (['im', '(1.0 + 3.0)', 'im_smoothed', '(-3.0)', '(0)'], {}), '(im, 1.0 + 3.0, im_smoothed, -3.0, 0)\n', (6830, 6867), False, 'import cv2\n'), ((6905, 6933), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 25)'}), '(figsize=(20, 25))\n', (6915, 6933), True, 'import matplotlib.pylab as plt\n'), ((6933, 6979), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.95)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.95, 0.05, 0.05)\n', (6952, 6979), True, 'import matplotlib.pylab as plt\n'), ((7145, 7155), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (7153, 7155), True, 'import matplotlib.pylab as plt\n'), ((7497, 7549), 'numpy.zeros', 'np.zeros', (['(n, im.shape[0], im.shape[1], im.shape[2])'], {}), '((n, im.shape[0], im.shape[1], im.shape[2]))\n', (7505, 7549), True, 'import numpy as np\n'), ((7675, 7700), 'numpy.median', 'np.median', (['images'], {'axis': '(0)'}), '(images, axis=0)\n', (7684, 7700), True, 'import numpy as np\n'), ((7702, 7730), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7712, 7730), True, 'import matplotlib.pylab as plt\n'), ((7730, 7827), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.02)', 'right': '(0.98)', 'bottom': '(0.001)', 'top': '(0.96)', 'wspace': '(0.05)', 'hspace': '(0.01)'}), '(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=\n 0.05, hspace=0.01)\n', (7749, 7827), True, 'import matplotlib.pylab as plt\n'), ((8197, 8207), 
'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (8205, 8207), True, 'import matplotlib.pylab as plt\n'), ((8209, 8236), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (8219, 8236), True, 'import matplotlib.pylab as plt\n'), ((8236, 8305), 'matplotlib.pylab.hist', 'plt.hist', (['images[:, 100, 100, 0]'], {'color': '"""red"""', 'alpha': '(0.2)', 'label': '"""red"""'}), "(images[:, 100, 100, 0], color='red', alpha=0.2, label='red')\n", (8244, 8305), True, 'import matplotlib.pylab as plt\n'), ((8303, 8376), 'matplotlib.pylab.hist', 'plt.hist', (['images[:, 100, 100, 1]'], {'color': '"""green"""', 'alpha': '(0.2)', 'label': '"""green"""'}), "(images[:, 100, 100, 1], color='green', alpha=0.2, label='green')\n", (8311, 8376), True, 'import matplotlib.pylab as plt\n'), ((8374, 8445), 'matplotlib.pylab.hist', 'plt.hist', (['images[:, 100, 100, 2]'], {'color': '"""blue"""', 'alpha': '(0.2)', 'label': '"""blue"""'}), "(images[:, 100, 100, 2], color='blue', alpha=0.2, label='blue')\n", (8382, 8445), True, 'import matplotlib.pylab as plt\n'), ((8443, 8508), 'matplotlib.pylab.vlines', 'plt.vlines', (['im[100, 100, 0]', '(0)', '(20)'], {'color': '"""red"""', 'label': '"""original"""'}), "(im[100, 100, 0], 0, 20, color='red', label='original')\n", (8453, 8508), True, 'import matplotlib.pylab as plt\n'), ((8507, 8574), 'matplotlib.pylab.vlines', 'plt.vlines', (['im[100, 100, 1]', '(0)', '(20)'], {'color': '"""green"""', 'label': '"""original"""'}), "(im[100, 100, 1], 0, 20, color='green', label='original')\n", (8517, 8574), True, 'import matplotlib.pylab as plt\n'), ((8573, 8639), 'matplotlib.pylab.vlines', 'plt.vlines', (['im[100, 100, 2]', '(0)', '(20)'], {'color': '"""blue"""', 'label': '"""original"""'}), "(im[100, 100, 2], 0, 20, color='blue', label='original')\n", (8583, 8639), True, 'import matplotlib.pylab as plt\n'), ((8638, 8734), 'matplotlib.pylab.vlines', 'plt.vlines', (['im_mean[100, 100, 0]', '(0)', '(20)'], 
{'color': '"""red"""', 'linestyles': '"""dashed"""', 'label': '"""estimated"""'}), "(im_mean[100, 100, 0], 0, 20, color='red', linestyles='dashed',\n label='estimated')\n", (8648, 8734), True, 'import matplotlib.pylab as plt\n'), ((8729, 8827), 'matplotlib.pylab.vlines', 'plt.vlines', (['im_mean[100, 100, 1]', '(0)', '(20)'], {'color': '"""green"""', 'linestyles': '"""dashed"""', 'label': '"""estimated"""'}), "(im_mean[100, 100, 1], 0, 20, color='green', linestyles='dashed',\n label='estimated')\n", (8739, 8827), True, 'import matplotlib.pylab as plt\n'), ((8822, 8919), 'matplotlib.pylab.vlines', 'plt.vlines', (['im_mean[100, 100, 2]', '(0)', '(20)'], {'color': '"""blue"""', 'linestyles': '"""dashed"""', 'label': '"""estimated"""'}), "(im_mean[100, 100, 2], 0, 20, color='blue', linestyles='dashed',\n label='estimated')\n", (8832, 8919), True, 'import matplotlib.pylab as plt\n'), ((8914, 8926), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (8924, 8926), True, 'import matplotlib.pylab as plt\n'), ((8927, 8937), 'matplotlib.pylab.grid', 'plt.grid', ([], {}), '()\n', (8935, 8937), True, 'import matplotlib.pylab as plt\n'), ((8938, 8948), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (8946, 8948), True, 'import matplotlib.pylab as plt\n'), ((9071, 9127), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""images/Img_05_11.png"""', 'sitk.sitkFloat64'], {}), "('images/Img_05_11.png', sitk.sitkFloat64)\n", (9085, 9127), True, 'import SimpleITK as sitk\n'), ((9142, 9169), 'SimpleITK.NormalizeImageFilter', 'sitk.NormalizeImageFilter', ([], {}), '()\n', (9167, 9169), True, 'import SimpleITK as sitk\n'), ((9179, 9201), 'SimpleITK.CastImageFilter', 'sitk.CastImageFilter', ([], {}), '()\n', (9199, 9201), True, 'import SimpleITK as sitk\n'), ((9266, 9293), 'SimpleITK.ShotNoiseImageFilter', 'sitk.ShotNoiseImageFilter', ([], {}), '()\n', (9291, 9293), True, 'import SimpleITK as sitk\n'), ((9364, 9396), 'SimpleITK.RescaleIntensity', 'sitk.RescaleIntensity', 
(['img_noisy'], {}), '(img_noisy)\n', (9385, 9396), True, 'import SimpleITK as sitk\n'), ((9409, 9440), 'SimpleITK.CurvatureFlowImageFilter', 'sitk.CurvatureFlowImageFilter', ([], {}), '()\n', (9438, 9440), True, 'import SimpleITK as sitk\n'), ((9556, 9593), 'SimpleITK.MinMaxCurvatureFlowImageFilter', 'sitk.MinMaxCurvatureFlowImageFilter', ([], {}), '()\n', (9591, 9593), True, 'import SimpleITK as sitk\n'), ((9741, 9775), 'SimpleITK.RescaleIntensity', 'sitk.RescaleIntensity', (['img_res_TK1'], {}), '(img_res_TK1)\n', (9762, 9775), True, 'import SimpleITK as sitk\n'), ((9818, 9865), 'SimpleITK.CurvatureAnisotropicDiffusionImageFilter', 'sitk.CurvatureAnisotropicDiffusionImageFilter', ([], {}), '()\n', (9863, 9865), True, 'import SimpleITK as sitk\n'), ((10073, 10119), 'SimpleITK.GradientAnisotropicDiffusionImageFilter', 'sitk.GradientAnisotropicDiffusionImageFilter', ([], {}), '()\n', (10117, 10119), True, 'import SimpleITK as sitk\n'), ((10265, 10293), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(16, 20)'}), '(figsize=(16, 20))\n', (10275, 10293), True, 'import matplotlib.pylab as plt\n'), ((10293, 10303), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (10301, 10303), True, 'import matplotlib.pylab as plt\n'), ((10304, 10347), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(1)', '(0.01)', '(0.05)'], {}), '(0, 0, 1, 1, 0.01, 0.05)\n', (10323, 10347), True, 'import matplotlib.pylab as plt\n'), ((11208, 11218), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (11216, 11218), True, 'import matplotlib.pylab as plt\n'), ((12024, 12058), 'cv2.imread', 'cv2.imread', (['"""images/Img_05_07.jpg"""'], {}), "('images/Img_05_07.jpg')\n", (12034, 12058), False, 'import cv2\n'), ((12065, 12101), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (12077, 12101), False, 'import cv2\n'), ((12625, 12661), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2LAB'], {}), 
'(img, cv2.COLOR_RGB2LAB)\n', (12637, 12661), False, 'import cv2\n'), ((12696, 12725), 'cv2.equalizeHist', 'cv2.equalizeHist', (['equ[..., 0]'], {}), '(equ[..., 0])\n', (12712, 12725), False, 'import cv2\n'), ((12794, 12845), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (12809, 12845), False, 'import cv2\n'), ((13793, 13811), 'skimage.morphology.skeletonize', 'skeletonize', (['im_oc'], {}), '(im_oc)\n', (13804, 13811), False, 'from skimage.morphology import binary_opening, binary_closing, skeletonize, square\n'), ((13879, 13907), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 12)'}), '(figsize=(20, 12))\n', (13889, 13907), True, 'import matplotlib.pylab as plt\n'), ((13907, 13917), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (13915, 13917), True, 'import matplotlib.pylab as plt\n'), ((14225, 14235), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (14233, 14235), True, 'import matplotlib.pylab as plt\n'), ((15493, 15521), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(15, 12)'}), '(figsize=(15, 12))\n', (15503, 15521), True, 'import matplotlib.pylab as plt\n'), ((15521, 15531), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (15529, 15531), True, 'import matplotlib.pylab as plt\n'), ((15644, 15654), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (15652, 15654), True, 'import matplotlib.pylab as plt\n'), ((15987, 16018), 'scipy.signal.convolve', 'convolve', (['img', 'kd1'], {'mode': '"""same"""'}), "(img, kd1, mode='same')\n", (15995, 16018), False, 'from scipy.signal import convolve\n'), ((16027, 16058), 'scipy.signal.convolve', 'convolve', (['img', 'kd2'], {'mode': '"""same"""'}), "(img, kd2, mode='same')\n", (16035, 16058), False, 'from scipy.signal import convolve\n'), ((16060, 16088), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (16070, 16088), True, 'import 
matplotlib.pylab as plt\n'), ((16088, 16098), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (16096, 16098), True, 'import matplotlib.pylab as plt\n'), ((16587, 16597), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (16595, 16597), True, 'import matplotlib.pylab as plt\n'), ((17373, 17401), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(10, 16)'}), '(figsize=(10, 16))\n', (17383, 17401), True, 'import matplotlib.pylab as plt\n'), ((17401, 17447), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.95)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.95, 0.05, 0.05)\n', (17420, 17447), True, 'import matplotlib.pylab as plt\n'), ((17443, 17453), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (17451, 17453), True, 'import matplotlib.pylab as plt\n'), ((17802, 17820), 'matplotlib.pylab.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17818, 17820), True, 'import matplotlib.pylab as plt\n'), ((17821, 17831), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (17829, 17831), True, 'import matplotlib.pylab as plt\n'), ((17977, 18011), 'cv2.imread', 'cv2.imread', (['"""images/Img_05_18.jpg"""'], {}), "('images/Img_05_18.jpg')\n", (17987, 18011), False, 'import cv2\n'), ((18045, 18084), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (18057, 18084), False, 'import cv2\n'), ((18095, 18128), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (18111, 18128), False, 'import cv2\n'), ((18137, 18164), 'cv2.Canny', 'cv2.Canny', (['blurred', '(80)', '(150)'], {}), '(blurred, 80, 150)\n', (18146, 18164), False, 'import cv2\n'), ((19014, 19065), 'cv2.dnn.readNetFromCaffe', 'cv2.dnn.readNetFromCaffe', (['prototxt_path', 'model_path'], {}), '(prototxt_path, model_path)\n', (19038, 19065), False, 'import cv2\n'), ((19067, 19107), 'cv2.dnn_registerLayer', 'cv2.dnn_registerLayer', (['"""Crop"""', 'CropLayer'], {}), 
"('Crop', CropLayer)\n", (19088, 19107), False, 'import cv2\n'), ((19116, 19254), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['image'], {'scalefactor': '(1.0)', 'size': '(w, h)', 'mean': '(104.00698793, 116.66876762, 122.67891434)', 'swapRB': '(False)', 'crop': '(False)'}), '(image, scalefactor=1.0, size=(w, h), mean=(\n 104.00698793, 116.66876762, 122.67891434), swapRB=False, crop=False)\n', (19137, 19254), False, 'import cv2\n'), ((19297, 19336), 'cv2.resize', 'cv2.resize', (['outs[i][0][0, :, :]', '(w, h)'], {}), '(outs[i][0][0, :, :], (w, h))\n', (19307, 19336), False, 'import cv2\n'), ((19370, 19397), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (19380, 19397), True, 'import matplotlib.pylab as plt\n'), ((19398, 19408), 'matplotlib.pylab.gray', 'plt.gray', ([], {}), '()\n', (19406, 19408), True, 'import matplotlib.pylab as plt\n'), ((19409, 19456), 'matplotlib.pylab.subplots_adjust', 'plt.subplots_adjust', (['(0)', '(0)', '(1)', '(0.975)', '(0.05)', '(0.05)'], {}), '(0, 0, 1, 0.975, 0.05, 0.05)\n', (19428, 19456), True, 'import matplotlib.pylab as plt\n'), ((19743, 19753), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (19751, 19753), True, 'import matplotlib.pylab as plt\n'), ((378, 395), 'matplotlib.pylab.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (388, 395), True, 'import matplotlib.pylab as plt\n'), ((400, 425), 'matplotlib.pylab.title', 'plt.title', (['title'], {'size': 'sz'}), '(title, size=sz)\n', (409, 425), True, 'import matplotlib.pylab as plt\n'), ((430, 445), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (438, 445), True, 'import matplotlib.pylab as plt\n'), ((511, 523), 'copy.deepcopy', 'deepcopy', (['im'], {}), '(im)\n', (519, 523), False, 'from copy import deepcopy\n'), ((1600, 1620), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(4)', 'i'], {}), '(3, 4, i)\n', (1611, 1620), True, 'import matplotlib.pylab as plt\n'), ((5412, 5428), 
'matplotlib.pylab.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (5423, 5428), True, 'import matplotlib.pylab as plt\n'), ((5473, 5519), 'PIL.ImageFilter.UnsharpMask', 'ImageFilter.UnsharpMask', ([], {'radius': '(2)', 'percent': '(150)'}), '(radius=2, percent=150)\n', (5496, 5519), False, 'from PIL import Image, ImageFilter\n'), ((5521, 5537), 'matplotlib.pylab.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (5532, 5537), True, 'import matplotlib.pylab as plt\n'), ((5613, 5659), 'PIL.ImageFilter.UnsharpMask', 'ImageFilter.UnsharpMask', ([], {'radius': '(5)', 'percent': '(200)'}), '(radius=5, percent=200)\n', (5636, 5659), False, 'from PIL import Image, ImageFilter\n'), ((5661, 5677), 'matplotlib.pylab.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (5672, 5677), True, 'import matplotlib.pylab as plt\n'), ((5753, 5800), 'PIL.ImageFilter.UnsharpMask', 'ImageFilter.UnsharpMask', ([], {'radius': '(10)', 'percent': '(250)'}), '(radius=10, percent=250)\n', (5776, 5800), False, 'from PIL import Image, ImageFilter\n'), ((5802, 5818), 'matplotlib.pylab.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (5813, 5818), True, 'import matplotlib.pylab as plt\n'), ((6498, 6514), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (6509, 6514), True, 'import matplotlib.pylab as plt\n'), ((6555, 6571), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (6566, 6571), True, 'import matplotlib.pylab as plt\n'), ((6975, 6991), 'matplotlib.pylab.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (6986, 6991), True, 'import matplotlib.pylab as plt\n'), ((7059, 7075), 'matplotlib.pylab.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (7070, 7075), True, 'import matplotlib.pylab as plt\n'), ((7431, 7461), 'skimage.io.imread', 'imread', (['"""images/Img_05_06.jpg"""'], {}), "('images/Img_05_06.jpg')\n", (7437, 7461), False, 'from skimage.io import imread\n'), ((7601, 7633), 'skimage.util.random_noise', 'random_noise', (['im'], 
{'var': '(sigma ** 2)'}), '(im, var=sigma ** 2)\n', (7613, 7633), False, 'from skimage.util import random_noise\n'), ((7817, 7833), 'matplotlib.pylab.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (7828, 7833), True, 'import matplotlib.pylab as plt\n'), ((7868, 7884), 'matplotlib.pylab.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (7879, 7884), True, 'import matplotlib.pylab as plt\n'), ((7979, 7995), 'matplotlib.pylab.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (7990, 7995), True, 'import matplotlib.pylab as plt\n'), ((8085, 8101), 'matplotlib.pylab.subplot', 'plt.subplot', (['(224)'], {}), '(224)\n', (8096, 8101), True, 'import matplotlib.pylab as plt\n'), ((10343, 10359), 'matplotlib.pylab.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (10354, 10359), True, 'import matplotlib.pylab as plt\n'), ((10402, 10417), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10410, 10417), True, 'import matplotlib.pylab as plt\n'), ((10419, 10449), 'matplotlib.pylab.title', 'plt.title', (['"""Original"""'], {'size': '(20)'}), "('Original', size=20)\n", (10428, 10449), True, 'import matplotlib.pylab as plt\n'), ((10450, 10466), 'matplotlib.pylab.subplot', 'plt.subplot', (['(322)'], {}), '(322)\n', (10461, 10466), True, 'import matplotlib.pylab as plt\n'), ((10515, 10530), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10523, 10530), True, 'import matplotlib.pylab as plt\n'), ((10532, 10583), 'matplotlib.pylab.title', 'plt.title', (['"""Noisy (with added Shot Noise)"""'], {'size': '(20)'}), "('Noisy (with added Shot Noise)', size=20)\n", (10541, 10583), True, 'import matplotlib.pylab as plt\n'), ((10584, 10600), 'matplotlib.pylab.subplot', 'plt.subplot', (['(323)'], {}), '(323)\n', (10595, 10600), True, 'import matplotlib.pylab as plt\n'), ((10650, 10665), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10658, 10665), True, 'import matplotlib.pylab as plt\n'), ((10667, 10729), 
'matplotlib.pylab.title', 'plt.title', (['"""Denoised (with CurvatureFlowImageFilter)"""'], {'size': '(20)'}), "('Denoised (with CurvatureFlowImageFilter)', size=20)\n", (10676, 10729), True, 'import matplotlib.pylab as plt\n'), ((10730, 10746), 'matplotlib.pylab.subplot', 'plt.subplot', (['(324)'], {}), '(324)\n', (10741, 10746), True, 'import matplotlib.pylab as plt\n'), ((10797, 10812), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10805, 10812), True, 'import matplotlib.pylab as plt\n'), ((10814, 10882), 'matplotlib.pylab.title', 'plt.title', (['"""Denoised (with MinMaxCurvatureFlowImageFilter)"""'], {'size': '(20)'}), "('Denoised (with MinMaxCurvatureFlowImageFilter)', size=20)\n", (10823, 10882), True, 'import matplotlib.pylab as plt\n'), ((10883, 10899), 'matplotlib.pylab.subplot', 'plt.subplot', (['(325)'], {}), '(325)\n', (10894, 10899), True, 'import matplotlib.pylab as plt\n'), ((10950, 10965), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (10958, 10965), True, 'import matplotlib.pylab as plt\n'), ((10967, 11045), 'matplotlib.pylab.title', 'plt.title', (['"""Denoised (with CurvatureAnisotropicDiffusionImageFilter)"""'], {'size': '(20)'}), "('Denoised (with CurvatureAnisotropicDiffusionImageFilter)', size=20)\n", (10976, 11045), True, 'import matplotlib.pylab as plt\n'), ((11046, 11062), 'matplotlib.pylab.subplot', 'plt.subplot', (['(326)'], {}), '(326)\n', (11057, 11062), True, 'import matplotlib.pylab as plt\n'), ((11113, 11128), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (11121, 11128), True, 'import matplotlib.pylab as plt\n'), ((11130, 11207), 'matplotlib.pylab.title', 'plt.title', (['"""Denoised (with GradientAnisotropicDiffusionImageFilter)"""'], {'size': '(20)'}), "('Denoised (with GradientAnisotropicDiffusionImageFilter)', size=20)\n", (11139, 11207), True, 'import matplotlib.pylab as plt\n'), ((11518, 11553), 'matplotlib.pylab.plot', 'plt.plot', (['cdf_normalized'], 
{'color': 'col'}), '(cdf_normalized, color=col)\n', (11526, 11553), True, 'import matplotlib.pylab as plt\n'), ((11626, 11644), 'matplotlib.pylab.xlim', 'plt.xlim', (['[0, 256]'], {}), '([0, 256])\n', (11634, 11644), True, 'import matplotlib.pylab as plt\n'), ((11648, 11709), 'matplotlib.pylab.title', 'plt.title', (['"""CDF and histogram of the color channels"""'], {'size': '(20)'}), "('CDF and histogram of the color channels', size=20)\n", (11657, 11709), True, 'import matplotlib.pylab as plt\n'), ((11824, 11852), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (11834, 11852), True, 'import matplotlib.pylab as plt\n'), ((12006, 12016), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (12014, 12016), True, 'import matplotlib.pylab as plt\n'), ((12239, 12265), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['cdf', '(0)'], {}), '(cdf, 0)\n', (12257, 12265), True, 'import numpy as np\n'), ((12739, 12775), 'cv2.cvtColor', 'cv2.cvtColor', (['equ', 'cv2.COLOR_LAB2RGB'], {}), '(equ, cv2.COLOR_LAB2RGB)\n', (12751, 12775), False, 'import cv2\n'), ((12913, 12948), 'cv2.cvtColor', 'cv2.cvtColor', (['cl', 'cv2.COLOR_LAB2RGB'], {}), '(cl, cv2.COLOR_LAB2RGB)\n', (12925, 12948), False, 'import cv2\n'), ((13569, 13599), 'skimage.io.imread', 'imread', (['"""images/Img_05_09.jpg"""'], {}), "('images/Img_05_09.jpg')\n", (13575, 13599), False, 'from skimage.io import imread\n'), ((13673, 13682), 'skimage.morphology.square', 'square', (['(2)'], {}), '(2)\n', (13679, 13682), False, 'from skimage.morphology import binary_opening, binary_closing, skeletonize, square\n'), ((13710, 13719), 'skimage.morphology.square', 'square', (['(2)'], {}), '(2)\n', (13716, 13719), False, 'from skimage.morphology import binary_opening, binary_closing, skeletonize, square\n'), ((13775, 13784), 'skimage.morphology.square', 'square', (['(3)'], {}), '(3)\n', (13781, 13784), False, 'from skimage.morphology import binary_opening, binary_closing, 
skeletonize, square\n'), ((13918, 13934), 'matplotlib.pylab.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (13929, 13934), True, 'import matplotlib.pylab as plt\n'), ((13963, 13979), 'matplotlib.pylab.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (13974, 13979), True, 'import matplotlib.pylab as plt\n'), ((14009, 14025), 'matplotlib.pylab.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (14020, 14025), True, 'import matplotlib.pylab as plt\n'), ((14055, 14071), 'matplotlib.pylab.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (14066, 14071), True, 'import matplotlib.pylab as plt\n'), ((14112, 14128), 'matplotlib.pylab.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (14123, 14128), True, 'import matplotlib.pylab as plt\n'), ((14164, 14180), 'matplotlib.pylab.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (14175, 14180), True, 'import matplotlib.pylab as plt\n'), ((14962, 14984), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['result'], {}), '(result)\n', (14976, 14984), False, 'from PIL import Image, ImageDraw\n'), ((15532, 15548), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (15543, 15548), True, 'import matplotlib.pylab as plt\n'), ((15582, 15598), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (15593, 15598), True, 'import matplotlib.pylab as plt\n'), ((15894, 15924), 'skimage.io.imread', 'imread', (['"""images/Img_05_38.png"""'], {}), "('images/Img_05_38.png')\n", (15900, 15924), False, 'from skimage.io import imread\n'), ((16099, 16115), 'matplotlib.pylab.subplot', 'plt.subplot', (['(231)'], {}), '(231)\n', (16110, 16115), True, 'import matplotlib.pylab as plt\n'), ((16117, 16132), 'matplotlib.pylab.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (16127, 16132), True, 'import matplotlib.pylab as plt\n'), ((16134, 16161), 'matplotlib.pylab.title', 'plt.title', (['"""image"""'], {'size': '(15)'}), "('image', size=15)\n", (16143, 16161), True, 'import matplotlib.pylab as plt\n'), 
((16162, 16178), 'matplotlib.pylab.subplot', 'plt.subplot', (['(232)'], {}), '(232)\n', (16173, 16178), True, 'import matplotlib.pylab as plt\n'), ((16180, 16197), 'matplotlib.pylab.imshow', 'plt.imshow', (['imgd1'], {}), '(imgd1)\n', (16190, 16197), True, 'import matplotlib.pylab as plt\n'), ((16199, 16235), 'matplotlib.pylab.title', 'plt.title', (['"""1st derivative"""'], {'size': '(15)'}), "('1st derivative', size=15)\n", (16208, 16235), True, 'import matplotlib.pylab as plt\n'), ((16236, 16252), 'matplotlib.pylab.subplot', 'plt.subplot', (['(233)'], {}), '(233)\n', (16247, 16252), True, 'import matplotlib.pylab as plt\n'), ((16254, 16271), 'matplotlib.pylab.imshow', 'plt.imshow', (['imgd2'], {}), '(imgd2)\n', (16264, 16271), True, 'import matplotlib.pylab as plt\n'), ((16273, 16309), 'matplotlib.pylab.title', 'plt.title', (['"""2nd derivative"""'], {'size': '(15)'}), "('2nd derivative', size=15)\n", (16282, 16309), True, 'import matplotlib.pylab as plt\n'), ((16310, 16326), 'matplotlib.pylab.subplot', 'plt.subplot', (['(234)'], {}), '(234)\n', (16321, 16326), True, 'import matplotlib.pylab as plt\n'), ((16358, 16394), 'matplotlib.pylab.title', 'plt.title', (['"""image function"""'], {'size': '(15)'}), "('image function', size=15)\n", (16367, 16394), True, 'import matplotlib.pylab as plt\n'), ((16395, 16411), 'matplotlib.pylab.subplot', 'plt.subplot', (['(235)'], {}), '(235)\n', (16406, 16411), True, 'import matplotlib.pylab as plt\n'), ((16445, 16490), 'matplotlib.pylab.title', 'plt.title', (['"""1st derivative function"""'], {'size': '(15)'}), "('1st derivative function', size=15)\n", (16454, 16490), True, 'import matplotlib.pylab as plt\n'), ((16491, 16507), 'matplotlib.pylab.subplot', 'plt.subplot', (['(236)'], {}), '(236)\n', (16502, 16507), True, 'import matplotlib.pylab as plt\n'), ((16541, 16586), 'matplotlib.pylab.title', 'plt.title', (['"""2nd derivative function"""'], {'size': '(15)'}), "('2nd derivative function', size=15)\n", (16550, 16586), True, 
'import matplotlib.pylab as plt\n'), ((17011, 17030), 'numpy.zeros', 'np.zeros', (['img.shape'], {}), '(img.shape)\n', (17019, 17030), True, 'import numpy as np\n'), ((17262, 17292), 'skimage.io.imread', 'imread', (['"""images/Img_05_18.jpg"""'], {}), "('images/Img_05_18.jpg')\n", (17268, 17292), False, 'from skimage.io import imread\n'), ((17354, 17365), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (17360, 17365), True, 'import numpy as np\n'), ((17564, 17593), 'matplotlib.pylab.subplot', 'plt.subplot', (['(3)', '(2)', '(sigma // 2)'], {}), '(3, 2, sigma // 2)\n', (17575, 17593), True, 'import matplotlib.pylab as plt\n'), ((17603, 17645), 'scipy.ndimage.gaussian_laplace', 'ndimage.gaussian_laplace', (['img'], {'sigma': 'sigma'}), '(img, sigma=sigma)\n', (17627, 17645), False, 'from scipy import ndimage\n'), ((17692, 17710), 'matplotlib.pylab.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (17702, 17710), True, 'import matplotlib.pylab as plt\n'), ((17715, 17730), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (17723, 17730), True, 'import matplotlib.pylab as plt\n'), ((19452, 19468), 'matplotlib.pylab.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (19463, 19468), True, 'import matplotlib.pylab as plt\n'), ((19522, 19537), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19530, 19537), True, 'import matplotlib.pylab as plt\n'), ((19539, 19566), 'matplotlib.pylab.title', 'plt.title', (['"""input"""'], {'size': '(20)'}), "('input', size=20)\n", (19548, 19566), True, 'import matplotlib.pylab as plt\n'), ((19567, 19583), 'matplotlib.pylab.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (19578, 19583), True, 'import matplotlib.pylab as plt\n'), ((19585, 19602), 'matplotlib.pylab.imshow', 'plt.imshow', (['canny'], {}), '(canny)\n', (19595, 19602), True, 'import matplotlib.pylab as plt\n'), ((19604, 19619), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19612, 19619), True, 
'import matplotlib.pylab as plt\n'), ((19621, 19648), 'matplotlib.pylab.title', 'plt.title', (['"""canny"""'], {'size': '(20)'}), "('canny', size=20)\n", (19630, 19648), True, 'import matplotlib.pylab as plt\n'), ((19649, 19665), 'matplotlib.pylab.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (19660, 19665), True, 'import matplotlib.pylab as plt\n'), ((19667, 19682), 'matplotlib.pylab.imshow', 'plt.imshow', (['hed'], {}), '(hed)\n', (19677, 19682), True, 'import matplotlib.pylab as plt\n'), ((19684, 19699), 'matplotlib.pylab.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19692, 19699), True, 'import matplotlib.pylab as plt\n'), ((19701, 19742), 'matplotlib.pylab.title', 'plt.title', (['"""holistically-nested"""'], {'size': '(20)'}), "('holistically-nested', size=20)\n", (19710, 19742), True, 'import matplotlib.pylab as plt\n'), ((582, 615), 'numpy.random.randint', 'np.random.randint', (['(0)', 'im.width', 'n'], {}), '(0, im.width, n)\n', (599, 615), True, 'import numpy as np\n'), ((617, 651), 'numpy.random.randint', 'np.random.randint', (['(0)', 'im.height', 'n'], {}), '(0, im.height, n)\n', (634, 651), True, 'import numpy as np\n'), ((1114, 1134), 'matplotlib.pylab.subplot', 'plt.subplot', (['(6)', '(2)', 'i'], {}), '(6, 2, i)\n', (1125, 1134), True, 'import matplotlib.pylab as plt\n'), ((1254, 1278), 'matplotlib.pylab.subplot', 'plt.subplot', (['(6)', '(2)', '(i + 1)'], {}), '(6, 2, i + 1)\n', (1265, 1278), True, 'import matplotlib.pylab as plt\n'), ((1562, 1594), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', (['radius'], {}), '(radius)\n', (1586, 1594), False, 'from PIL import Image, ImageFilter\n'), ((2022, 2055), 'PIL.ImageFilter.MedianFilter', 'ImageFilter.MedianFilter', ([], {'size': 'sz'}), '(size=sz)\n', (2046, 2055), False, 'from PIL import Image, ImageFilter\n'), ((2059, 2079), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', 'i'], {}), '(1, 4, i)\n', (2070, 2079), True, 'import matplotlib.pylab as plt\n'), ((2486, 
2516), 'PIL.ImageFilter.MinFilter', 'ImageFilter.MinFilter', ([], {'size': 'sz'}), '(size=sz)\n', (2507, 2516), False, 'from PIL import Image, ImageFilter\n'), ((2520, 2540), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', 'i'], {}), '(1, 4, i)\n', (2531, 2540), True, 'import matplotlib.pylab as plt\n'), ((2832, 2862), 'PIL.ImageFilter.MaxFilter', 'ImageFilter.MaxFilter', ([], {'size': 'sz'}), '(size=sz)\n', (2853, 2862), False, 'from PIL import Image, ImageFilter\n'), ((2866, 2886), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(4)', 'i'], {}), '(1, 4, i)\n', (2877, 2886), True, 'import matplotlib.pylab as plt\n'), ((3191, 3222), 'PIL.ImageFilter.ModeFilter', 'ImageFilter.ModeFilter', ([], {'size': 'sz'}), '(size=sz)\n', (3213, 3222), False, 'from PIL import Image, ImageFilter\n'), ((3226, 3246), 'matplotlib.pylab.subplot', 'plt.subplot', (['(1)', '(3)', 'i'], {}), '(1, 3, i)\n', (3237, 3246), True, 'import matplotlib.pylab as plt\n'), ((3610, 3645), 'PIL.ImageFilter.GaussianBlur', 'ImageFilter.GaussianBlur', ([], {'radius': 'sz'}), '(radius=sz)\n', (3634, 3645), False, 'from PIL import Image, ImageFilter\n'), ((3805, 3838), 'PIL.ImageFilter.MedianFilter', 'ImageFilter.MedianFilter', ([], {'size': 'sz'}), '(size=sz)\n', (3829, 3838), False, 'from PIL import Image, ImageFilter\n'), ((3991, 4022), 'PIL.ImageFilter.ModeFilter', 'ImageFilter.ModeFilter', ([], {'size': 'sz'}), '(size=sz)\n', (4013, 4022), False, 'from PIL import Image, ImageFilter\n'), ((4173, 4203), 'PIL.ImageFilter.MaxFilter', 'ImageFilter.MaxFilter', ([], {'size': 'sz'}), '(size=sz)\n', (4194, 4203), False, 'from PIL import Image, ImageFilter\n'), ((7004, 7039), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (7016, 7039), False, 'import cv2\n'), ((7088, 7124), 'cv2.cvtColor', 'cv2.cvtColor', (['im1', 'cv2.COLOR_BGR2RGB'], {}), '(im1, cv2.COLOR_BGR2RGB)\n', (7100, 7124), False, 'import cv2\n'), ((10372, 10399), 
'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (10394, 10399), True, 'import SimpleITK as sitk\n'), ((10479, 10512), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_noisy'], {}), '(img_noisy)\n', (10501, 10512), True, 'import SimpleITK as sitk\n'), ((10613, 10647), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_res_TK'], {}), '(img_res_TK)\n', (10635, 10647), True, 'import SimpleITK as sitk\n'), ((10759, 10794), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_res_TK1'], {}), '(img_res_TK1)\n', (10781, 10794), True, 'import SimpleITK as sitk\n'), ((10912, 10947), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_res_TK2'], {}), '(img_res_TK2)\n', (10934, 10947), True, 'import SimpleITK as sitk\n'), ((11075, 11110), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_res_TK3'], {}), '(img_res_TK3)\n', (11097, 11110), True, 'import SimpleITK as sitk\n'), ((11856, 11872), 'matplotlib.pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (11867, 11872), True, 'import matplotlib.pylab as plt\n'), ((11901, 11917), 'matplotlib.pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (11912, 11917), True, 'import matplotlib.pylab as plt\n'), ((13763, 13772), 'skimage.morphology.square', 'square', (['(2)'], {}), '(2)\n', (13769, 13772), False, 'from skimage.morphology import binary_opening, binary_closing, skeletonize, square\n'), ((15393, 15427), 'PIL.Image.open', 'Image.open', (['"""images/Img_05_10.jpg"""'], {}), "('images/Img_05_10.jpg')\n", (15403, 15427), False, 'from PIL import Image, ImageDraw\n'), ((19481, 19519), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (19493, 19519), False, 'import cv2\n'), ((3668, 3701), 'matplotlib.pylab.subplot', 'plt.subplot', (['(4)', '(4)', '(4 * i // 2 + 1)'], {}), '(4, 4, 4 * i // 2 + 1)\n', (3679, 3701), True, 'import matplotlib.pylab as plt\n'), ((3861, 3894), 
'matplotlib.pylab.subplot', 'plt.subplot', (['(4)', '(4)', '(4 * i // 2 + 2)'], {}), '(4, 4, 4 * i // 2 + 2)\n', (3872, 3894), True, 'import matplotlib.pylab as plt\n'), ((4045, 4078), 'matplotlib.pylab.subplot', 'plt.subplot', (['(4)', '(4)', '(4 * i // 2 + 3)'], {}), '(4, 4, 4 * i // 2 + 3)\n', (4056, 4078), True, 'import matplotlib.pylab as plt\n'), ((4226, 4259), 'matplotlib.pylab.subplot', 'plt.subplot', (['(4)', '(4)', '(4 * i // 2 + 4)'], {}), '(4, 4, 4 * i // 2 + 4)\n', (4237, 4259), True, 'import matplotlib.pylab as plt\n'), ((12384, 12406), 'numpy.ma.filled', 'np.ma.filled', (['cdf_m', '(0)'], {}), '(cdf_m, 0)\n', (12396, 12406), True, 'import numpy as np\n'), ((14850, 14862), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (14858, 14862), True, 'import numpy as np\n'), ((7935, 7973), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'images[0]'], {}), '(im, images[0])\n', (7958, 7973), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((8043, 8079), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_mean'], {}), '(im, im_mean)\n', (8066, 8079), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((8153, 8191), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['im', 'im_median'], {}), '(im, im_median)\n', (8176, 8191), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((768, 784), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (782, 784), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.