markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
<h3>Visualisierung der Daten</h3>
|
# Histograms of every attribute.
dataset.hist()
pyplot.show()

# Density plot per attribute, one subplot each on an 8x8 grid.
dataset.plot(kind='density', subplots=True, layout=(8,8), sharex=False, legend=False)
pyplot.show()

# Pairwise scatter-plot matrix.
scatter_matrix(dataset)
pyplot.show()

# Correlation matrix rendered as a heat map (values clipped to [-1, 1]).
corr_fig = pyplot.figure()
corr_ax = corr_fig.add_subplot(111)
heatmap = corr_ax.matshow(dataset.corr(), vmin=-1, vmax=1, interpolation='none')
corr_fig.colorbar(heatmap)
pyplot.show()
|
18-05-14-ml-workcamp/sonar-daten/Projekt-Sonardaten-Workcamp-ML.ipynb
|
mediagit2016/workcamp-maschinelles-lernen-grundlagen
|
gpl-3.0
|
<h3>Vorbereiten der Daten: Aufteilen in Test und Trainingsdaten </h3>
|
# Split out a hold-out validation set (80/20 split).
array = dataset.values
X = array[:, 0:60].astype(float)
Y = array[:, 60]
validation_size = 0.20
seed = 7
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=validation_size, random_state=seed)
# Evaluate algorithms: test options and evaluation metric.
num_folds = 10
seed = 7
scoring = 'accuracy'
# Spot-check a suite of baseline classifiers with default settings.
models = []
models.append(('LR', LogisticRegression()))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
results = []
names = []
for name, model in models:
    # FIX: shuffle=True is required for random_state to take effect;
    # modern scikit-learn raises a ValueError when random_state is set
    # while shuffle is False.
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Compare the baseline algorithms with a box plot of CV scores.
fig = pyplot.figure()
fig.suptitle('Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
# Repeat the spot check on standardized data.  Using a Pipeline makes
# sure the scaler is fit only on the training folds (no data leakage).
pipelines = []
pipelines.append(('ScaledLR', Pipeline([('Scaler', StandardScaler()),('LR', LogisticRegression())])))
pipelines.append(('ScaledLDA', Pipeline([('Scaler', StandardScaler()),('LDA', LinearDiscriminantAnalysis())])))
pipelines.append(('ScaledKNN', Pipeline([('Scaler', StandardScaler()),('KNN', KNeighborsClassifier())])))
pipelines.append(('ScaledCART', Pipeline([('Scaler', StandardScaler()),('CART', DecisionTreeClassifier())])))
pipelines.append(('ScaledNB', Pipeline([('Scaler', StandardScaler()),('NB', GaussianNB())])))
pipelines.append(('ScaledSVM', Pipeline([('Scaler', StandardScaler()),('SVM', SVC())])))
results = []
names = []
for name, model in pipelines:
    # Same fix as above: shuffle so random_state is honoured.
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Compare the scaled algorithms.
fig = pyplot.figure()
fig.suptitle('Scaled Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
|
18-05-14-ml-workcamp/sonar-daten/Projekt-Sonardaten-Workcamp-ML.ipynb
|
mediagit2016/workcamp-maschinelles-lernen-grundlagen
|
gpl-3.0
|
Im Ergebnis sind also K-NN und SVM die Algorithmen<br>
die bessere Ergebnisse liefern<br>
In die weiteren Betrachtungen und Optimierungen werden nur<br>
noch diese Algorithmen einbezogen.<br>
|
# Tune K-NN on scaled data: vary the number of neighbors.
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
neighbors = [1,3,5,7,9,11,13,15,17,19,21]
param_grid = dict(n_neighbors=neighbors)
model = KNeighborsClassifier()
# FIX: shuffle=True is required for random_state to take effect;
# modern scikit-learn raises a ValueError otherwise.
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(rescaledX, Y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# Tune the SVM on scaled data over a grid of C values and kernels.
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
c_values = [0.1, 0.3, 0.5, 0.7, 0.9, 1.0, 1.3, 1.5, 1.7, 2.0]
kernel_values = ['linear', 'poly', 'rbf', 'sigmoid']
param_grid = dict(C=c_values, kernel=kernel_values)
model = SVC()
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
grid_result = grid.fit(rescaledX, Y_train)
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print("%f (%f) with: %r" % (mean, stdev, param))
# Spot-check ensemble methods with default settings.
ensembles = []
ensembles.append(('AB', AdaBoostClassifier()))
ensembles.append(('GBM', GradientBoostingClassifier()))
ensembles.append(('RF', RandomForestClassifier()))
ensembles.append(('ET', ExtraTreesClassifier()))
results = []
names = []
for name, model in ensembles:
    kfold = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# Compare the ensemble algorithms.
fig = pyplot.figure()
fig.suptitle('Ensemble Algorithm Comparison')
ax = fig.add_subplot(111)
pyplot.boxplot(results)
ax.set_xticklabels(names)
pyplot.show()
# Final check of the chosen model (SVM) on the held-out validation data.
# Prepare the scaler on the training data only.
scaler = StandardScaler().fit(X_train)
rescaledX = scaler.transform(X_train)
# C=1.5 comes from the grid search above.
model = SVC(C=1.5)
model.fit(rescaledX, Y_train)
# Estimate accuracy on the validation data (scaled with the same scaler).
rescaledValidationX = scaler.transform(X_validation)
predictions = model.predict(rescaledValidationX)
# Report accuracy, confusion matrix and classification report.
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
|
18-05-14-ml-workcamp/sonar-daten/Projekt-Sonardaten-Workcamp-ML.ipynb
|
mediagit2016/workcamp-maschinelles-lernen-grundlagen
|
gpl-3.0
|
Then, we set up the parameterizations for the torus and the knot, using a meshgrid from u,v.
|
# Set up the figure, a 3D axis, and the initial plot element to animate.
fig = figure()
ax = axes(projection='3d')
# Inner (tube) radius of the torus/knot.
r = .4
# Parameterization grid for both surfaces: u runs along the curve,
# v around the tube; both cover one full turn.
u = linspace(0, 2*pi, 100)
v = linspace(0, 2*pi, 100)
u,v = meshgrid(u,v)
# Torus with major radius 2 and tube radius r.
x_torus = 2*sin(u) + r*sin(u)*cos(v)
y_torus = 2*cos(u) + r*cos(u)*cos(v)
z_torus = r*sin(v)
# Trefoil-knot parameterization with the same tube radius r.
x_knot = sin(u) + 2*sin(2*u) + r*sin(u)*cos(v)
y_knot = cos(u) - 2*cos(2*u) + r*cos(u)*cos(v)
z_knot = -sin(3*u) + r*sin(v)
# Draw the starting surface (torus) and fix the axis limits so the
# animation does not rescale between frames.
ax.plot_surface(x_torus, y_torus, z_torus, color='c')
ax.set_xlim([-2*(1+r), 2*(1+r)])
ax.set_ylim([-2*(1+r), 2*(1+r)])
ax.set_zlim([-(1+r), (1+r)])
|
3D_Animation.ipynb
|
mlamoureux/PIMS_YRC
|
mit
|
We need an initialization function, an animation function, and then we call the animator to put it all together.
|
# Initialization: draw the (empty) background required by FuncAnimation.
def init():
    background = ax.plot_surface([0], [0], [0], color='c')
    return (background,)

# Per-frame callback: interpolate between the torus and the knot.
def animate(frame):
    # Interpolation weight: 0 gives the torus, 1 gives the knot.
    weight = sin(pi*frame/100)**2
    x = (1-weight)*x_torus + weight*x_knot
    y = (1-weight)*y_torus + weight*y_knot
    z = (1-weight)*z_torus + weight*z_knot
    # Redraw from scratch with fixed limits so the view never rescales.
    ax.clear()
    ax.set_xlim([-2*(1+r), 2*(1+r)])
    ax.set_ylim([-2*(1+r), 2*(1+r)])
    ax.set_zlim([-(1+r), (1+r)])
    surface = ax.plot_surface(x, y, z, color='c')
    return (surface,)

# Assemble the animation; blit=True redraws only the changed artists.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=100, interval=50, blit=True)
|
3D_Animation.ipynb
|
mlamoureux/PIMS_YRC
|
mit
|
Finally, we call the HTML code to convert the animation object into a video. (This depends on having a MovieWriter installed on your system. Should be fine on syzygy.ca but it does not work on my Mac unless I install ffmpeg.)
|
# Render the animation as an embedded HTML5 video element.  Requires a
# matplotlib MovieWriter (e.g. ffmpeg) to be installed on the system.
HTML(anim.to_html5_video())
|
3D_Animation.ipynb
|
mlamoureux/PIMS_YRC
|
mit
|
If you click on the image above, you will see there is a button that allows you to download the animation as an mp4 file directly. Or you can use the following command:
|
# Save the animation to disk as an mp4 file (also needs a MovieWriter).
anim.save('knot.mp4')
# NOTE(review): dead expression — presumably leftover notebook scratch
# used to show a cell output; confirm before removing.
2+2
|
3D_Animation.ipynb
|
mlamoureux/PIMS_YRC
|
mit
|
Graphs!
A good example of a real-world graph (because it happens to be one). For now it's just important to know that this is a graph of social interactions between 34 individuals involved in the same karate club. Drawing it less because it's informative, and more because plotting is fun.
|
# Zachary's karate club: a classic 34-node social-interaction graph.
real_graph = nx.karate_club_graph()
# Compute a force-directed layout once; reused by later drawings so the
# null-model graph can be drawn with the same node positions.
positions = nx.spring_layout(real_graph)
nx.draw(real_graph, node_color = 'blue', pos = positions)
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
Now. What's the difference between that (^) drawing of nodes and edges and a completely random assembly of dots and lines? How can we quantify the difference between a social network, which we think probably has important structure, and a completely random network, whose structure contains very little useful information? Which aspects of a network can be explained by simple statistics like average degree, the number of nodes, or the degree distribution? Which characteristics of a network depend on a structure or generative process that could reveal an underlying truth about the way the network came about?
The question to ask is: how likely is a specific network characteristic to have been generated by a random process?
Random Graph Models
The Erdös-Rényi Random Graph
The simplest random graph you can think of. For a graph $G$ with $n$ nodes, each pair of nodes gets an (undirected) edge with probability $p$. There are ${n \choose 2}$ pairs of nodes, so ${n \choose 2}$ possible edges. Then the average degree of a node in this random graph is $(n-1)p$, where $(n-1)$ is the number of possible connections for a node $i$ and $p$ is the probability of that connection existing. Call the expected average degree $\bar k = (n-1)p$.
Giant Components
One property that we see all the time in social graphs (and many other graphs) is the emergence of a "giant" connected component. The Erdös-Rényi also develops a giant component for certain parameter spaces. In fact, when the average degree is more than 1 we see a giant component emerging, and when it is more that 3 that giant component is all or almost all of the graph. That means that for a random graph with $p > \frac{1}{n-1}$ we will always start to see a giant component.
To demonstrate why this is true, consider $u$ to be the fraction of vertices not in the giant component. Then where $u$ is also the probability that a randomly chosen vertex $i$ does not belong to the giant component of the graph. For $i$ to not be a part of the giant component, for every other vertex $j$ ($n-1$ vertices), $i$ is either not connected to $j$ (with probability $1-p$), or $j$ is not connected to the giant component (with probability $pu$). Then:
$$ u = ((1-p) + (pu))^{n-1} $$
We can use $ p = \frac{\bar k}{n-1} $ to rewrite the expression as:
$$ u = (1 - \frac{\bar k(1-u)}{n-1})^{n-1} $$
And then taking the limit for large $n$ and using the fact that $\lim_{x\rightarrow\infty}(1-\frac{x}{n})^n = e^{-x}$:
$$ u = e^{-\bar k(1-u)} $$
Now if $u$ is the fraction of vertices not in the giant component, call $S = 1-u$ the fraction of vertices in the giant component. Then:
$$ S = 1 - e^{-\bar k S} $$
There is no closed-form solution to this equation, but below we can show a simulation of random graphs and the size of the largest connected component in each one.
|
# Use the same number of nodes for each example.
num_nodes = 500
# Average size of the largest component, one entry per edge probability.
big_comp = []
# Edge probabilities, sampled around the phase-transition region.
p_values = [(1-x*.0001) for x in xrange(9850,10000)]
# Average over several random graphs to get a smoother curve.
iterations = 10
for p in p_values:
    size_comps = []
    for h in xrange(0, iterations):
        edge_list = []
        # Erdos-Renyi: each unordered pair gets an edge with probability p.
        # FIX: start j at i+1 so no self-loops (i, i) are created.
        for i in xrange(0,num_nodes):
            for j in xrange(i+1,num_nodes):
                if (random() < p):
                    edge_list.append((i,j))
        G = igraph.Graph(directed = False)
        G.add_vertices(num_nodes)
        G.add_edges(edge_list)
        comps = [len(x) for x in G.clusters()]
        size_comps.append(comps)
    # FIX: use float division so the average largest-component size is not
    # truncated under Python 2 integer division.
    big_comp.append(sum([max(x) for x in size_comps])/float(len(size_comps))/float(num_nodes))
plt.plot([x*(num_nodes-1) for x in p_values], big_comp, '.')
plt.title("Phase transitions in connectedness")
plt.ylabel("Fraction of nodes in the largest component")
plt.xlabel("Average degree (k = p(n-1)), {} < p < {}".format(p_values[99],p_values[0]))
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
Clustering coefficient
The clustering coefficient is a measure of how many triangles (completely connected triples) there are in a graph. You can think about it as the probability that if Alice knows Bob and Charlie, Bob also knows Charlie. The clustering coefficient of a graph is equal to $$ C = \frac{\text{(number of closed triples)}}{\text{number of connected triples}} $$
Finding the expected value of $C$ for a random graph is simple. For any 3 vertices, the probability that they are all connected is $p^3$ and the probability that at least 2 of them are connected is $p^2$. Then the expected values of closed triples (triangles) and connected triples respectively are ${n \choose 3}p^3 $ and ${n \choose 3}p^2 $, and the expected value for $C$ is then $\frac{p^3}{p^2} = p$. Notice in the above plot that the values for $p$ are very small, even when the graph is fully connected. In a randomly generated sparse graph (a graph where a small fraction of the total possible ${n \choose 2}$ edges exist), the clustering coefficient $C$ is very low.
|
# Vector of edge probabilities across the full [0, 1) range.
p_values_clustering = [x*.01 for x in xrange(0,100)]
# One graph per probability is enough here.
iterations = 1
# (p, clustering coefficient) pairs for each generated graph.
clustering = []
for p in p_values_clustering:
    size_comps = []
    for h in xrange(0, iterations):
        edge_list = []
        # Erdos-Renyi edge sampling.
        # FIX: start j at i+1 so no self-loops (i, i) are created.
        for i in xrange(0,num_nodes):
            for j in xrange(i+1,num_nodes):
                if (random() < p):
                    edge_list.append((i,j))
        G = igraph.Graph(directed = False)
        G.add_vertices(num_nodes)
        G.add_edges(edge_list)
        # mode="zero" reports 0 (not NaN) when there are no connected triples.
        clustering.append((p, G.transitivity_undirected(mode="zero")))
plt.plot([x[0]*(num_nodes-1) for x in clustering], [x[1] for x in clustering], '.')
plt.title("Clustering coeff vs avg degree in a random graph")
plt.ylabel("Clustering coefficient")
plt.xlabel("Average degree (k = (n-1)p), 0 < p < 1")
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
Small diameter graphs
So we know that the giant component is very likely, even for sparse graphs, and also that the clustering coefficient is very low, even for relatively dense graphs. This means that the graph is almost completely connected, and that it is, at least locally, pretty similar to a tree graph (acyclic).
Consider that a graph has a mean degree $\bar k$. Now consider the number of vertices reachable from some vertex in the graph, $i$, call the number of vertices that $i$ can reach $l$. Because the clustering coefficient is very low (the graph is locally tree-like), it is likely that any neighbor of $i$'s has a completely new set of neighbors ($k$ neighbors, less $i$, $k-1$ total new neighbors). Then for each step, you reach $k-1$ new vertices. Thus the number of vertices reachable in $l$ steps from some vertex $i$ is $(k-1)^l$.
The diameter of a graph is the maximum number of steps $l$ one would have to take to reach any vertex from any other vertex, or the number of steps needed to make any vertex reachable.
$$ (k-1)^l = n $$
$$ l = \frac{1}{log(k-1)}log(n) \approx O(log(n))$$
Thus the diamater of the graph grows as $O(log(n))$, or shows "small world" characteristics.
|
# Average (over `iterations` graphs) diameter per edge probability.
diam = []
# Degree distribution of the (last generated) network per average degree.
degrees = {}
# Edge probabilities around the phase-transition region.
p_values = [(1-x*.0001) for x in xrange(9850,10000)]
# Average over several random graphs to get a smoother curve.
iterations = 10
for p in p_values:
    size_comps = []
    diameters = []
    for h in xrange(0, iterations):
        edge_list = []
        # Erdos-Renyi edge sampling.
        # FIX: start j at i+1 so no self-loops (i, i) are created.
        for i in xrange(0,num_nodes):
            for j in xrange(i+1,num_nodes):
                if (random() < p):
                    edge_list.append((i,j))
        G = igraph.Graph(directed = False)
        G.add_vertices(num_nodes)
        G.add_edges(edge_list)
        diameters.append(G.diameter())
        degrees[p*(num_nodes-1)] = G.degree()
    # FIX: float division — Python 2 integer division truncated the average.
    diam.append(sum(diameters)/float(len(diameters)))
# Plot giant-component fraction (left axis) and diameter (right axis)
# against the average degree.
fig, ax1 = plt.subplots(figsize = (8,6))
plt.title("Graph metrics vs avg degree in a random graph", size = 16)
ax1.plot([x*(num_nodes-1) for x in p_values], big_comp, 'o', color = "red", markersize=4)
ax1.set_xlabel('Average degree (k = (n-1)p), 0 < p < 1', size = 16)
ax1.set_ylim(0,1.01)
ax1.set_xlim(0,6)
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Fraction of nodes in giant component', color='red', size = 16)
ax1.grid(True)
for tl in ax1.get_yticklabels():
    tl.set_color('red')
    tl.set_size(16)
ax2 = ax1.twinx()
ax2.set_xlim(0,6)
ax2.plot([x*(num_nodes-1) for x in p_values], diam, 's', color = "blue", markersize=4)
ax2.set_ylabel('Diameter of the giant component', color='blue', size = 16)
for tl in ax2.get_yticklabels():
    tl.set_color('blue')
    tl.set_size(16)
# Degree distribution of the graph whose average degree is closest to 5.
avg_degree_near_5 = min(degrees.keys(), key = lambda x: abs(x-5))
xy = Counter(degrees[avg_degree_near_5]).items()
plt.bar([x[0] for x in xy], [x[1] for x in xy], edgecolor = "none", color = "blue")
plt.ylabel("# of nodes with degree X", size = 16)
plt.xlabel("Degree", size = 16)
plt.title("Degree distribution of the random graph", size = 16)
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
A comparison with a real social graph:
|
# Compare the real social graph against the random-graph predictions.
print("The number of nodes in the graph (all are connected): {}".format(len(real_graph.nodes())))
print("The number of edges in the graph: {}".format(len(real_graph.edges())))
# NOTE(review): under Python 2 this is integer division, so the average
# degree is truncated to an int — confirm whether a float was intended.
print("The average degree: {}".format(sum(nx.degree(real_graph).values())/len(real_graph.nodes())))
print("The clustering coefficient: {}".format(nx.average_clustering(real_graph)))
# A random graph with the same density predicts C = p = k/(n-1).
print("The clustering coefficient that a random graph with the same degree would predict (k/(n-1)): {}"
      .format(sum(nx.degree(real_graph).values())/len(real_graph.nodes())/(len(real_graph.nodes())-1)))
print("The diameter of the graph: {}".format(nx.diameter(real_graph)))
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
The Configuration Model
Another random graph model: the configuration model. Instead of generating our own degree sequence, we use a specified degree sequence (say, use the degree sequence of a social graph that we have) and change how the edges are connected. This allows us to ask the question: "how much of this characteristic is completely explained by degree?"
This is an example of using the configuration model to create a null model of our "real graph." Note that the algorithm that I am using works well for creating configuration models for large graphs, but produces more error on this smaller graph.
|
# Configuration model: list every vertex once per unit of degree ("stubs"),
# shuffle, then pair consecutive stubs into edges.
stubs = [v for v in real_graph.nodes() for x in range(0, real_graph.degree(v))]
shuffle(stubs)
# Pair stubs (2k, 2k+1) into candidate edges; drop self-loops.
candidate_edges = [(stubs[2*x], stubs[2*x+1]) for x in range(0,int(len(stubs)/2))]
rewired = set([e for e in candidate_edges if e[0]!=e[1]])
# New graph with the same node list as the original, but rewired edges.
C = real_graph.copy()
C.remove_edges_from(real_graph.edges())
C.add_edges_from(rewired)
nx.draw(C, node_color = 'blue', pos = positions)
print("The number of nodes in the graph (all are connected): {}".format(len(C.nodes())))
print("The number of edges in the graph: {}".format(len(C.edges())))
print("The average degree: {}".format(sum(nx.degree(C).values())/len(C.nodes())))
print("The clustering coefficient: {}".format(nx.average_clustering(C)))
print("The clustering coefficient that a random graph with the same degree would predict (k/(n-1)): {}"
      .format(sum(nx.degree(real_graph).values())/len(C.nodes())/(len(C.nodes())-1)))
print("The diameter of the graph: {}".format(nx.diameter(C)))
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
Asking questions using a null model
A famous example of centrality measuring on a social network is the Florentine Families graph. Padgett's research on this graph claims that the Medici family's rise to power can be explained by their high centrality on the graph of business interactions between families in Italy during that time. We will use a null model (configuration model) of the graph to rearrange how edges are placed without altering any node's degree to discover how much of the Medici's power is determined by their degree (rather than other structural components of the graph).
|
# get the graph
florentine_families = igraph.Nexus.get("padgett")["PADGB"]
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
First, let's show the relative rankings of the families with respect to vertex degree in the network and with respect to our chosen centrality measure, harmonic centrality. I won't go into various centrality measures here, beyond to say that harmonic centrality is formulated:
$$ c_i = \frac{1}{n-1}\sum_{i,i\neq j}^{n-1}\frac{1}{d_{ij}} $$
where $d_{ij}$ is the geodesic distance between vertices $i$ and $j$. Basically, harmonic centrality is a measure of how close a vertex is to every other vertex.
|
# Degree centrality: rank families by raw vertex degree.
d = florentine_families.degree()
d_rank = [(x, florentine_families.vs[x]['name'], d[x]) for x in range(0,len(florentine_families.vs()))]
d_rank.sort(key = itemgetter(2), reverse = True)
# Harmonic centrality: mean of inverse geodesic distances to all other
# vertices (zeros excluded to skip the vertex itself).
distances = florentine_families.shortest_paths_dijkstra()
# NOTE(review): `1/x` assumes igraph returns float distances; with int
# distances Python 2 division would truncate — confirm.
h = [sum([1/x for x in dist if x != 0])/(len(distances)-1) for dist in distances]
h_rank = [(x, florentine_families.vs[x]['name'], h[x]) for x in range(0,len(florentine_families.vs()))]
h_rank.sort(key = itemgetter(2), reverse = True)
# Build a side-by-side ranking table (values truncated to 5 characters).
d_table = []
d_table.append(["Rank (by degree)", "degree", "Rank (h centrality)", "harmonic"])
for n in xrange(0,len(florentine_families.vs())):
    table_row = []
    table_row.extend([d_rank[n][1], str(d_rank[n][2])[0:5]])
    table_row.extend([h_rank[n][1], str(h_rank[n][2])[0:5]])
    #table_row.extend([e_rank[n][1], str(e_rank[n][2])[0:5]])
    #table_row.extend([b_rank[n][1], str(b_rank[n][2])[0:5]])
    d_table.append(table_row)
print tabulate(d_table)
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
Now the fun (?) part. Create a bunch of different random configuration models based on the Florentine families graph, then measure the harmonic centrality on those graphs. The harmonic centrality of a node on the null model will depend only on its degree (as the graph structure is now random).
|
# For 1000 random configuration-model rewirings of the Florentine graph,
# record how much each family's harmonic centrality differs from its
# centrality on the null model (which depends only on degree).
config_model_centrality = [[] for x in florentine_families.vs()]
config_model_means = []
# One list of (real - null) centrality differences per vertex (16 families).
hc_differences = [[] for x in range(0,16)]
for i in xrange(0,1000):
    # build a random graph based on the configuration model
    C = florentine_families.copy()
    # graph with the same edge list as G
    C.delete_edges(None)
    # print C.summary()
    # Stub list: each vertex appears once per unit of degree.
    A = []
    for v in florentine_families.vs().indices:
        for x in range(0,florentine_families.degree(v)):
            A.append(v)
    shuffle(A)
    # print A
    # Pair consecutive stubs into edges; drop self-loops.
    _E = [(A[2*x], A[2*x+1]) for x in range(0,int(len(A)/2))]
    E = set([x for x in _E if x[0]!=x[1]])
    # add the edges to C
    # print E
    C.add_edges(E)
    # Harmonic centrality of every vertex on the rewired graph.
    C_distances = C.shortest_paths_dijkstra()
    C_h = [sum([1/x for x in dist if x != 0])/(len(C_distances)-1) for dist in C_distances]
    del C
    for vertex in range(0,16):
        # h is the real graph's harmonic centrality computed earlier.
        hc_differences[vertex].append(h[vertex] - C_h[vertex])
# Plot the median and interquartile band of the differences per family.
plt.plot([percentile(diff, 50) for diff in hc_differences], '--')
plt.plot([percentile(diff, 25) for diff in hc_differences], 'r--')
plt.plot([percentile(diff, 75) for diff in hc_differences], 'g--')
plt.xticks(range(0,16))
plt.gca().set_xticklabels(florentine_families.vs()['name'])
plt.xticks(rotation = 90)
plt.gca().grid(True)
plt.ylabel("(centrality) - (centrality on the null model)")
plt.title("How much of harmonic centrality is explained by degree?")
|
networks-201/network_analysis.ipynb
|
blehman/Data-Science-45min-Intros
|
unlicense
|
For the classification task, we will build a ridge regression model, and train it on a part of the full dataset
|
# FIX: import only the names we use — wildcard imports pollute the
# namespace and hide where names come from.
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import train_test_split

# Ridge classifier with a fixed random_state for reproducibility.
clf = RidgeClassifier(random_state = 1960)
# Hold out 20% of the rows as a test set, with a reproducible split.
X_train, X_test, y_train, y_test = train_test_split(df[lFeatures].values, df['TGT'].values, test_size=0.2, random_state=1960)
clf.fit(X_train , y_train)
|
doc/sklearn_reason_codes.ipynb
|
antoinecarme/sklearn_explain
|
bsd-3-clause
|
This is a standard linear model, that assigns a coefficient to each predictor value, these coefficients can be seen as global importance indicators for the predictors.
|
coefficients = dict(zip(ds.feature_names, [clf.coef_.ravel()[i] for i in range(clf.coef_.shape[1])]))
df_var_importance = pd.DataFrame()
df_var_importance['variable'] = list(coefficients.keys())
df_var_importance['importance'] = df_var_importance['variable'].apply(coefficients.get)
%matplotlib inline
df_var_importance.plot('variable' , ['importance'], kind='bar')
df_var_importance.head()
|
doc/sklearn_reason_codes.ipynb
|
antoinecarme/sklearn_explain
|
bsd-3-clause
|
To put it simply, this is a global view of all the individuals. The most important variable is 'mean radius': the higher the radius of the tumor, the higher the score of being malignant. On the opposite side, the higher the 'mean perimeter' is, the lower the score.
Model Explanation
The goal here is to be able to quantify, for a given individual, the impact of each predictor on the final score.
For our model, the score is a linear combination of predictor values:
$$ Score = \alpha_1 X_1 + \alpha_2 X_2 + \alpha_3 X_3 + \alpha_4 X_4 + \beta $$
One can see $\alpha_1 X_1$ as the contribution of the predictor $X_1$, $\alpha_2 X_2$ as the contribution of the predictor $X_2$, etc
These contributions can be seen as partial scores and their sum is the final score (used to assign positive or negative decision).
The intercept $\beta$ being constant, it can be ignored when analyzing individual effects.
In scikit-learn , the score is computed by a decision_function method of the classifier, an individual is detected as positive if the score has a positive value.
|
# Score is the raw linear decision function; the predicted class is
# positive exactly when the score is positive.
df['Score'] = clf.decision_function(df[lFeatures].values)
df['Decision'] = clf.predict(df[lFeatures].values)
# Show a reproducible sample of individuals with their score/decision.
df.sample(6, random_state=1960)
|
doc/sklearn_reason_codes.ipynb
|
antoinecarme/sklearn_explain
|
bsd-3-clause
|
Predictor Effects
Predictor effects describe the impact of specific predictor values on the partial score. For example, some values of a predictor can increase or decrease the partial score (and hence the score) by 10 or more points and change the negative decision to a positive one.
The effect reflects how a specific predictor increases the score (above the mean contribution of this variable).
|
# Per-predictor effect: the predictor's contribution to the linear score,
# centered on its mean so effects are comparable across predictors.
for feature in lFeatures:
    contribution = df[feature] * coefficients[feature]
    df[feature + '_Effect'] = contribution - contribution.mean()
df.sample(6, random_state=1960)
|
doc/sklearn_reason_codes.ipynb
|
antoinecarme/sklearn_explain
|
bsd-3-clause
|
The previous sample, shows that the first individual lost 1.148856 score points due to the feature $X_1$, gained 2.076852 with the feature $X_3$, etc
Reason Codes
The reason codes are a user-oriented representation of the decision making process. These are the predictors ranked by their effects.
|
import numpy as np
# Rank predictors per row by their centered effect; argsort yields column
# indices from smallest to largest effect along each row.
reason_codes = np.argsort(df[[col + '_Effect' for col in lFeatures]].values, axis=1)
# NC is the number of reason codes to report — defined elsewhere in the
# notebook (NOTE(review): confirm NC <= len(lFeatures)).
df_rc = pd.DataFrame(reason_codes, columns=['reason_' + str(NC-c) for c in range(NC)])
# Reverse so reason_1 holds the strongest (largest-effect) predictor.
df_rc = df_rc[list(reversed(df_rc.columns))]
df = pd.concat([df , df_rc] , axis=1)
# Replace column indices with the actual feature names.
for c in range(NC):
    df['reason_' + str(c+1)] = df['reason_' + str(c+1)].apply(lambda x : lFeatures[x])
df.sample(6, random_state=1960)
df[['reason_' + str(NC-c) for c in range(NC)]].describe()
|
doc/sklearn_reason_codes.ipynb
|
antoinecarme/sklearn_explain
|
bsd-3-clause
|
Implement Sarsa(λ) in 21s.
[x] Initialise the value function to zero.
[x] Use the same step-size and exploration schedules as in the previous section.
[x] Run the algorithm with parameter values λ ∈ {0, 0.1, 0.2, ..., 1}.
[x] Stop each run after 1000 episodes
and report the mean-squared error over all states and actions,
comparing the true values Q∗(s,a) computed in the previous section with the estimated values Q(s, a) computed by Sarsa.
[x] Plot the mean- squared error against λ.
[x] For λ = 0 and λ = 1 only, plot the learning curve of mean-squared error against episode number.
|
class Sarsa_Agent:
def __init__(self, environment, n0, mlambda):
self.n0 = float(n0)
self.env = environment
self.mlambda = mlambda
# N(s) is the number of times that state s has been visited
# N(s,a) is the number of times that action a has been selected from state s.
self.N = np.zeros((self.env.dealer_values_count,
self.env.player_values_count,
self.env.actions_count))
self.Q = np.zeros((self.env.dealer_values_count,
self.env.player_values_count,
self.env.actions_count))
self.E = np.zeros((self.env.dealer_values_count, self.env.player_values_count, self.env.actions_count))
# Initialise the value function to zero.
self.V = np.zeros((self.env.dealer_values_count, self.env.player_values_count))
self.count_wins = 0
self.iterations = 0
# get optimal action, with epsilon exploration (epsilon dependent on number of visits to the state)
# ε-greedy exploration strategy with εt = N0/(N0 + N(st)),
def train_get_action(self, state):
dealer_idx = state.dealer-1
player_idx = state.player-1
try:
n_visits = sum(self.N[dealer_idx, player_idx, :])
except:
n_visits = 0
# epsilon = N0/(N0 + N(st)
curr_epsilon = self.n0 / (self.n0 + n_visits)
# epsilon greedy policy
if random.random() < curr_epsilon:
r_action = Actions.hit if random.random()<0.5 else Actions.stick
# if (dealer_idx == 0 and player_idx == 0):
# print ("epsilon:%s, random:%s " % (curr_epsilon, r_action))
return r_action
else:
action = Actions.to_action(np.argmax(self.Q[dealer_idx, player_idx, :]))
# if (dealer_idx == 0 and player_idx == 0):
# print ("epsilon:%s Qvals:%s Q:%s" % (curr_epsilon, self.Q[dealer_idx, player_idx, :], action))
return action
def get_action(self, state):
action = Actions.to_action(np.argmax(self.Q[state.dealer_idx(), state.player_idx(), :]))
return action
def validate(self, iterations):
wins = 0;
# Loop episodes
for episode in xrange(iterations):
s = self.env.get_start_state()
while not s.term:
# execute action
a = self.get_action(s)
s, r = self.env.step(s, a)
wins = wins+1 if r==1 else wins
win_percentage = float(wins)/iterations*100
return win_percentage
def train(self, iterations):
    """Run Sarsa(lambda) for *iterations* episodes, updating Q in place."""
    # Loop episodes
    for episode in xrange(iterations):
        # Fresh eligibility traces at the start of every episode.
        self.E = np.zeros((self.env.dealer_values_count, self.env.player_values_count, self.env.actions_count))
        # get initial state for current episode
        s = self.env.get_start_state()
        a = self.train_get_action(s)
        a_next = a
        # Execute until game ends
        while not s.term:
            # update visits
            self.N[s.dealer_idx(), s.player_idx(), Actions.as_int(a)] += 1
            # execute action
            s_next, r = self.env.step(s, a)
            q = self.Q[s.dealer_idx(), s.player_idx(), Actions.as_int(a)]
            if not s_next.term:
                # choose next action with epsilon greedy policy
                a_next = self.train_get_action(s_next)
                next_q = self.Q[s_next.dealer_idx(), s_next.player_idx(), Actions.as_int(a_next)]
                # TD error; no explicit discount factor (gamma is 1 here).
                delta = r + next_q - q
            else:
                # Terminal successor: the target is just the reward.
                delta = r - q
            # alpha = 1.0 / (self.N[s.dealer_idx(), s.player_idx(), Actions.as_int(a)])
            # update = alpha * delta
            # self.Q[s.dealer_idx(), s.player_idx(), Actions.as_int(a)] += update
            # Accumulating trace for the visited (s, a) pair.
            self.E[s.dealer_idx(), s.player_idx(), Actions.as_int(a)] += 1
            # Step size 1/N(s,a); the whole Q table is nudged in
            # proportion to the eligibility traces.
            alpha = 1.0 / (self.N[s.dealer_idx(), s.player_idx(), Actions.as_int(a)])
            update = alpha * delta * self.E
            self.Q += update
            # Decay all traces by lambda (gamma assumed 1).
            self.E *= self.mlambda
            # reassign s and a
            s = s_next
            a = a_next
        #if episode%10000==0: print "Episode: %d, Reward: %d" %(episode, my_state.rew)
        # r is the reward from the episode's final step (win = +1).
        self.count_wins = self.count_wins+1 if r==1 else self.count_wins
    self.iterations += iterations
    # print float(self.count_wins)/self.iterations*100
    # Derive value function
    for d in xrange(self.env.dealer_values_count):
        for p in xrange(self.env.player_values_count):
            self.V[d,p] = max(self.Q[d, p, :])
def plot_frame(self, ax):
    """Draw the value function V as a 3D surface on *ax*; return the surface."""
    dealer_axis = np.arange(0, self.env.dealer_values_count, 1)
    player_axis = np.arange(0, self.env.player_values_count, 1)
    dealer_grid, player_grid = np.meshgrid(dealer_axis, player_axis)
    # Look V up element-wise over the meshgrid.
    values = self.V[dealer_grid, player_grid]
    return ax.plot_surface(dealer_grid, player_grid, values,
                           rstride=1, cstride=1, cmap=cm.coolwarm,
                           linewidth=0, antialiased=False)
# Train a Sarsa(lambda=0.9) agent in ten batches of 50k episodes.
N0 = 100
agent = Sarsa_Agent(Environment(), N0, 0.9)
for _ in xrange(10):
    agent.train(50000)
|
Joe #3 TD Learning in Easy21/Joe #3 TD Learning in Easy21.ipynb
|
analog-rl/Easy21
|
mit
|
Plot the mean- squared error against λ.
Stop each run after 1000 episodes and report the mean-squared error over all states and actions, comparing the true values Q∗(s,a) computed in the previous section with the estimated values Q(s, a) computed by Sarsa.
|
# Ground truth: Q* estimated by a long Monte-Carlo run (previous section).
mc_agent = MC_Agent(Environment(), 100)
mc_agent.train(1000000)
N0 = 100
lambdas = [0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1]
agent_list = []
sme_list = []
# Number of (dealer, player, action) entries, used to normalise the error.
n_elements = mc_agent.Q.shape[0]*mc_agent.Q.shape[1]*2
for l in lambdas:
    agent = Sarsa_Agent(Environment(), N0, l)
    # BUG FIX: the original appended the lambda value `l` here, leaving
    # agent_list useless; keep the trained agent instead.
    agent_list.append(agent)
    agent.train(1000)
    # Mean-squared error between Sarsa's Q and the Monte-Carlo Q*.
    sme = np.sum(np.square(agent.Q-mc_agent.Q))/float(n_elements)
    sme_list.append(sme)
|
Joe #3 TD Learning in Easy21/Joe #3 TD Learning in Easy21.ipynb
|
analog-rl/Easy21
|
mit
|
For λ = 0 and λ = 1 only, plot the learning curve of mean-squared error against episode number.
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
%matplotlib inline
# Plot MSE against lambda. The slice [1:10] drops the lambda=0 and
# lambda=1 endpoints, which get their own learning-curve plots below.
fig = plt.figure("N100")
surf = plt.plot(lambdas[1:10], sme_list[1:10])
plt.show()
# Learning curve for lambda = 0: MSE against episode number.
N0 = 100
l = 0.0
learning_rate = []
learning_rate_i = []
# NOTE(review): n_elements is recomputed here as len(mc_agent.Q) (first
# dimension only) and then never used -- the MSE below divides by the
# literal 1000. Presumably both should use the full element count as in
# the sweep above; verify intent.
n_elements = len(mc_agent.Q)
agent = Sarsa_Agent(Environment(), N0, l)
for i in xrange(1000):
    learning_rate_i.append(i)
    # Train one episode at a time so the error can be sampled per episode.
    agent.train(1)
    sme = np.sum(np.square(agent.Q-mc_agent.Q))/float(1000)
    learning_rate.append(sme)
fig = plt.figure("0.0")
surf = plt.plot(learning_rate_i, learning_rate)
plt.show()
# Learning curve for lambda = 1: MSE against episode number.
N0 = 100
l = 1.0
learning_rate = []
learning_rate_i = []
agent = Sarsa_Agent(Environment(), N0, l)
for i in xrange(1000):
    learning_rate_i.append(i)
    # One episode per step so the error can be sampled per episode.
    agent.train(1)
    sme = np.sum(np.square(agent.Q-mc_agent.Q))/float(1000)
    learning_rate.append(sme)
# BUG FIX: the figure was labelled "0.0" (copy-paste from the lambda=0
# cell), which also made matplotlib reuse that figure.
fig = plt.figure("1.0")
surf = plt.plot(learning_rate_i, learning_rate)
plt.show()
|
Joe #3 TD Learning in Easy21/Joe #3 TD Learning in Easy21.ipynb
|
analog-rl/Easy21
|
mit
|
plot from #2
|
def animate(frame):
    # Grow the training batch with the number of episodes already seen,
    # clamped to [1, 2**16]: early frames train little, later frames more.
    i = agent.iterations
    step_size = i
    step_size = max(1, step_size)
    step_size = min(step_size, 2 ** 16)
    agent.train(step_size)
    # Redraw the value-function surface for this frame.
    ax.clear()
    surf = agent.plot_frame(ax)
    plt.title('MC score:%s frame:%s step_size:%s ' % (float(agent.count_wins)/agent.iterations*100, frame, step_size) )
    # plt.draw()
    fig.canvas.draw()
    print "done ", frame, step_size, i
    return surf
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
%matplotlib inline
# Animate the training of a fresh Sarsa(lambda=0.2) agent and save a GIF.
N0 = 100
mlambda = 0.2
agent = Sarsa_Agent(Environment(), N0, mlambda)
fig = plt.figure("N100")
ax = fig.add_subplot(111, projection='3d')
# ani = animation.FuncAnimation(fig, animate, 32, repeat=False)
ani = animation.FuncAnimation(fig, animate, 500, repeat=False)
# note: requires gif writer; swap with plt.show()
ani.save('Sarsa_Agent.gif', writer='imagemagick', fps=3)
# plt.show()
from IPython.display import Image
Image(url="Sarsa_Agent.gif")
# for i in xrange (10):
#     agent.train(50000)
# Evaluate the trained greedy policy over 50k episodes.
agent.validate(50000)
# 100, .75, 520 itterations = 52.8
# 1000, .75, 520 itterations =
# Per-state summaries of the learned policy: the greedy action map
# (+1 = hit, -1 = stick), the state values, and value maps split by action.
stick_v = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
hit_v = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
actions = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
values = np.zeros((agent.env.dealer_values_count, agent.env.player_values_count))
for d in xrange(agent.env.dealer_values_count):
    for p in xrange(agent.env.player_values_count):
        best = Actions.to_action(np.argmax(agent.Q[d, p, :]))
        v = agent.V[d, p]
        values[d, p] = v
        if best == Actions.stick:
            stick_v[d, p] = v
            hit_v[d, p] = 0
            actions[d, p] = -1
        else:
            hit_v[d, p] = v
            stick_v[d, p] = 0
            actions[d, p] = 1
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
%matplotlib inline
# Heatmaps: greedy-action map, stick-only values, hit-only values, and V.
# fig = plt.figure("N100")
# ax = fig.add_subplot()
# bx = fig.add_subplot()
fig, ax = plt.subplots()
fig2, bx = plt.subplots()
fig3, cx = plt.subplots()
fig4, dx = plt.subplots()
heatmap3 = ax.pcolor(actions, cmap=plt.cm.hot, alpha=0.8)
heatmap = bx.pcolor(stick_v, cmap=plt.cm.afmhot, alpha=0.8)
heatmap2 = cx.pcolor(hit_v, cmap=plt.cm.afmhot, alpha=0.8)
heatmap4 = dx.pcolor(values, cmap=plt.cm.afmhot, alpha=0.8)
plt.show()
|
Joe #3 TD Learning in Easy21/Joe #3 TD Learning in Easy21.ipynb
|
analog-rl/Easy21
|
mit
|
Time to build the network
Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
<img src="assets/neural_network.png" width=300px>
The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation.
We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation.
Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
Below, you have these tasks:
1. Implement the sigmoid function to use as the activation function. Set self.activation_function in __init__ to your sigmoid function.
2. Implement the forward pass in the train method.
3. Implement the backpropagation algorithm in the train method, including calculating the output error.
4. Implement the forward pass in the run method.
|
class NeuralNetwork(object):
    """Two-layer network: sigmoid hidden layer, identity output layer.

    Trained with plain batch gradient descent; gradients are accumulated
    over the batch and applied once per call to ``train``.
    """

    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Layer sizes.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Weights drawn from N(0, 1/sqrt(fan_in)).
        self.weights_input_to_hidden = np.random.normal(
            0.0, self.input_nodes ** -0.5,
            (self.input_nodes, self.hidden_nodes))
        self.weights_hidden_to_output = np.random.normal(
            0.0, self.hidden_nodes ** -0.5,
            (self.hidden_nodes, self.output_nodes))
        self.lr = learning_rate

        # Hidden-layer activation: the logistic sigmoid.
        self.activation_function = lambda z: 1 / (1 + np.exp(-z))

    def train(self, features, targets):
        ''' Train the network on batch of features and targets.

            Arguments
            ---------
            features: 2D array, each row is one data record, each column is a feature
            targets: 1D array of target values
        '''
        n_records = features.shape[0]
        grad_i_h = np.zeros(self.weights_input_to_hidden.shape)
        grad_h_o = np.zeros(self.weights_hidden_to_output.shape)
        for X, y in zip(features, targets):
            ### Forward pass ###
            hidden_in = np.dot(X, self.weights_input_to_hidden)
            hidden_out = self.activation_function(hidden_in)
            # Output activation is the identity f(x) = x.
            final_out = np.dot(hidden_out, self.weights_hidden_to_output)

            ### Backward pass ###
            # Output error; identity activation means f'(x) = 1.
            error = y - final_out
            output_error_term = error * 1.0
            # Hidden layer's share of the error, scaled by sigmoid'.
            hidden_error = np.dot(self.weights_hidden_to_output, output_error_term)
            hidden_error_term = hidden_error * hidden_out * (1 - hidden_out)
            # Accumulate weight steps over the batch.
            grad_i_h += hidden_error_term * X[:, None]
            grad_h_o += hidden_out[:, None] * output_error_term

        # Apply the averaged gradient-descent step.
        self.weights_hidden_to_output += self.lr * grad_h_o / n_records
        self.weights_input_to_hidden += self.lr * grad_i_h / n_records

    def run(self, features):
        ''' Run a forward pass through the network with input features.

            Arguments
            ---------
            features: 1D array of feature values
        '''
        hidden_out = self.activation_function(
            np.dot(features, self.weights_input_to_hidden))
        # Identity output activation: the prediction is the weighted sum.
        return np.dot(hidden_out, self.weights_hidden_to_output)
def MSE(y, Y):
    """Mean squared error between predictions *y* and targets *Y*."""
    diff = y - Y
    return np.mean(diff ** 2)
|
first-neural-network/Your_first_neural_network.ipynb
|
arturops/deep-learning
|
mit
|
Unit tests
Run these unit tests to check the correctness of your network implementation. This will help you be sure your network was implemented correctly before you start trying to train it. These tests must all be successful to pass the project.
|
import unittest

# Fixed fixtures shared by the network tests below.
sample_inputs = np.array([[0.5, -0.2, 0.1]])
sample_targets = np.array([[0.4]])
fixed_w_i_h = np.array([[0.1, -0.2],
                        [0.4, 0.5],
                        [-0.3, 0.2]])
fixed_w_h_o = np.array([[0.3],
                        [-0.1]])

class TestMethods(unittest.TestCase):
    """Sanity checks for data loading and the NeuralNetwork implementation."""

    ##########
    # Data loading
    ##########
    def test_data_path(self):
        # The dataset path must be left untouched.
        self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')

    def test_data_loaded(self):
        # The ride data must have loaded into a DataFrame.
        self.assertTrue(isinstance(rides, pd.DataFrame))

    ##########
    # Network functionality
    ##########
    def test_activation(self):
        # The activation function must be the logistic sigmoid.
        net = NeuralNetwork(3, 2, 1, 0.5)
        self.assertTrue(np.all(net.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))

    def test_train(self):
        # One training step from fixed weights must land on known values.
        net = NeuralNetwork(3, 2, 1, 0.5)
        net.weights_input_to_hidden = fixed_w_i_h.copy()
        net.weights_hidden_to_output = fixed_w_h_o.copy()
        net.train(sample_inputs, sample_targets)
        print(net.weights_hidden_to_output)
        self.assertTrue(np.allclose(net.weights_hidden_to_output,
                                    np.array([[ 0.37275328],
                                              [-0.03172939]])))
        print(net.weights_input_to_hidden)
        self.assertTrue(np.allclose(net.weights_input_to_hidden,
                                    np.array([[ 0.10562014, -0.20185996],
                                              [0.39775194, 0.50074398],
                                              [-0.29887597, 0.19962801]])))

    def test_run(self):
        # Forward pass from fixed weights must reproduce the known output.
        net = NeuralNetwork(3, 2, 1, 0.5)
        net.weights_input_to_hidden = fixed_w_i_h.copy()
        net.weights_hidden_to_output = fixed_w_h_o.copy()
        self.assertTrue(np.allclose(net.run(sample_inputs), 0.09998924))

suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
|
first-neural-network/Your_first_neural_network.ipynb
|
arturops/deep-learning
|
mit
|
Training the network
Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
Choose the number of iterations
This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, if you use too many iterations, then the model will not generalize well to other data; this is called overfitting. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. As you start overfitting, you'll see the training loss continue to decrease while the validation loss starts to increase.
Choose the learning rate
This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
Choose the number of hidden nodes
The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
|
import sys

### Set the hyperparameters here ###
iterations = 30000
learning_rate = 0.1
hidden_nodes = 14
output_nodes = 1
# Earlier experiments (validation error / loss vs. settings):
#   good: 1000, 0.5, 2; 10000, 0.1, 4; 10000, 0.1, 6
#   ve=0.052, vl=0.170 w/ 100000, 0.1, 6
#   ve=0.058, vl=0.142 w/ 25000, 0.15, 8
#   ve=0.051, vl=0.133 w/ 25000, 0.15, 12
#   ve=0.059, vl=0.128 w/ 30000, 0.1, 14  (current)

N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)

losses = {'train':[], 'validation':[]}
for ii in range(iterations):
    # SGD: train on a random batch of 128 records each iteration.
    batch = np.random.choice(train_features.index, size=128)
    # FIX: DataFrame.ix was deprecated and later removed from pandas;
    # .loc performs the same label-based row lookup here.
    X, y = train_features.loc[batch].values, train_targets.loc[batch]['cnt']
    network.train(X, y)

    # Printing out the training progress
    train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values)
    sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \
                     + "% ... Training loss: " + str(train_loss)[:5] \
                     + " ... Validation loss: " + str(val_loss)[:5])
    sys.stdout.flush()

    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)

plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
_ = plt.ylim()
|
first-neural-network/Your_first_neural_network.ipynb
|
arturops/deep-learning
|
mit
|
Importing the datasets
|
# Red-wine quality dataset (UCI); features are ';'-separated in the raw CSV.
redSetPath = "classification/winequality-red.csv"
# whiteSetPath = "classification/winequality-white.csv"
#Reading in the raw data. Note that the features are separated by ';' character
redSet = pd.read_csv(redSetPath, sep=';')
# whiteSet = pd.read_csv(whiteSetPath, sep=';')
# redSet.drop(['index'], axis=1, inplace=True)
# Peek at the first rows to sanity-check the parse.
redSet.head()
# whiteSet.head()
|
vagrant/dataset/hw2/ClassificationDataset.ipynb
|
justiceamoh/ENGS108
|
apache-2.0
|
Breaking the datasets into training and testing sets
|
# Breaking the dataset into 70% training and 30% testing; 20% of the
# training portion is then held out as a validation set.
red_train, red_test = train_test_split(redSet,test_size=0.30)
red_train, red_valid = train_test_split(red_train,test_size=0.20)
# white_train, white_test = train_test_split(whiteSet,test_size=0.30)
# white_train, white_valid = train_test_split(white_train,test_size=0.20)
|
vagrant/dataset/hw2/ClassificationDataset.ipynb
|
justiceamoh/ENGS108
|
apache-2.0
|
Saving the train and test datasets
|
# Red Wine: persist the train/validation/test splits (index column omitted).
red_train_path = "classification/red_train.csv"
red_valid_path = "classification/red_valid.csv"
red_test_path = "classification/red_test.csv"
# # White Wine
# white_train_path = "classification/white_train.csv"
# white_valid_path = "classification/white_valid.csv"
# white_test_path = "classification/white_test.csv"
for frame, out_path in ((red_train, red_train_path),
                        (red_valid, red_valid_path),
                        (red_test, red_test_path)):
    frame.to_csv(path_or_buf=out_path, index=False)
# white_train.to_csv(path_or_buf=white_train_path, sep=';')
# white_valid.to_csv(path_or_buf=white_valid_path, sep=';')
# white_test.to_csv(path_or_buf=white_test_path, sep=';')
|
vagrant/dataset/hw2/ClassificationDataset.ipynb
|
justiceamoh/ENGS108
|
apache-2.0
|
Checking the saved data and their shapes:
|
print 'Red Wine - Number of Instances Per Set'
print 'Training Set: %d'%(len(red_train))
print 'Validation Set: %d'%(len(red_valid))
print 'Testing Set: %d'%(len(red_test))
# print ''
# print ''
# print 'White Wine - Number of Instances Per Set'
# print 'Training Set: %d'%(len(white_train))
# print 'Validation Set: %d'%(len(white_valid))
# print 'Testing Set: %d'%(len(white_test))
|
vagrant/dataset/hw2/ClassificationDataset.ipynb
|
justiceamoh/ENGS108
|
apache-2.0
|
Set the random seed:
|
class linear_regression(nn.Module):
    """Linear model y_hat = Wx + b, wrapping a single nn.Linear layer."""

    def __init__(self, input_size, output_size):
        super(linear_regression, self).__init__()
        # One affine map covers any number of inputs/targets.
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        # The prediction is just the affine transform of the input.
        return self.linear(x)
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
create a linear regression object, as our input and output will be two we set the parameters accordingly
|
# Two inputs and two outputs, matching the diagram below.
model=linear_regression(2,2)
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
we can use the diagram to represent the model or object
<img src = "https://ibm.box.com/shared/static/icmwnxru7nytlhnq5x486rffea9ncpk7.png" width = 600, align = "center">
we can see the parameters
|
# Inspect the randomly initialised weight matrix and bias vector.
list(model.parameters())
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
we can create a tensor with two rows representing one sample of data
|
# A single sample: one row with two features.
x=torch.tensor([[1.0,3.0]])
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
we can make a prediction
|
# Forward pass for the single sample; displays the prediction.
yhat=model(x)
yhat
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
each row in the following tensor represents a different sample
|
# Three samples, one per row.
X=torch.tensor([[1.0,1.0],[1.0,2.0],[1.0,3.0]])
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
we can make a prediction using multiple samples
|
# Batched prediction: one output row per input row.
Yhat=model(X)
Yhat
|
DL0110EN/2.6.3.multi-target_linear_regression.ipynb
|
atlury/deep-opencl
|
lgpl-3.0
|
QGrid
Interactive pandas dataframes: https://github.com/quantopian/qgrid
pip install qgrid --upgrade
|
# Aggregate mining data by state; df2 additionally drops the Wyoming outlier.
df2 = df[df['Mine_State'] != "Wyoming"].groupby('Mine_State').sum()
df3 = df.groupby('Mine_State').sum()
# have to run this from the home dir of this repo
# cd insight/
# python setup.py develop
%aimport insight.plotting
insight.plotting.plot_prod_vs_hours(df3, color_index=1)
# insight.plotting.plot_prod_vs_hours(df2, color_index=1)
def plot_prod_vs_hours(
    df, color_index=0, output_file="../img/production-vs-hours-worked.png"
):
    """Scatter + regression of production vs. labor hours; saves a PNG."""
    figure, axes = plt.subplots(figsize=(10, 8))
    sns.regplot(
        df["Labor_Hours"],
        df["Production_short_tons"],
        ax=axes,
        color=sns.color_palette()[color_index],
    )
    axes.set_xlabel("Labor Hours Worked")
    axes.set_ylabel("Total Amount Produced")
    # Fixed limits so successive plots share an identical frame.
    axes.set_xlim(-9506023.213266129, 204993853.21326613)
    axes.set_ylim(-51476801.43653282, 746280580.4034251)
    figure.tight_layout()
    figure.savefig(output_file)
# Plot both the filtered (no Wyoming) and full aggregates.
plot_prod_vs_hours(df2, color_index=0)
plot_prod_vs_hours(df3, color_index=1)
# make a change via qgrid
# NOTE(review): qgrid_widget is not defined in this cell -- presumably a
# qgrid.show_grid(...) call elsewhere creates it; verify before running.
df3 = qgrid_widget.get_changed_df()
|
notebooks/08-old.ipynb
|
jbwhit/jupyter-tips-and-tricks
|
mit
|
Deep Dream
This notebook contains the code samples found in Chapter 8, Section 2 of Deep Learning with Python. Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
[...]
Implementing Deep Dream in Keras
We will start from a convnet pre-trained on ImageNet. In Keras, we have many such convnets available: VGG16, VGG19, Xception, ResNet50...
albeit the same process is doable with any of these, your convnet of choice will naturally affect your visualizations, since different
convnet architectures result in different learned features. The convnet used in the original Deep Dream release was an Inception model, and
in practice Inception is known to produce very nice-looking Deep Dreams, so we will use the InceptionV3 model that comes with Keras.
|
from keras.applications import inception_v3
from keras import backend as K

# We will not be training our model,
# so we use this command to disable all training-specific operations
K.set_learning_phase(0)

# Build the InceptionV3 network without its classification head.
# The model will be loaded with pre-trained ImageNet weights.
model = inception_v3.InceptionV3(weights='imagenet',
                                 include_top=False)
|
keras-notebooks/advanced/8.2-deep-dream.ipynb
|
infilect/ml-course1
|
mit
|
Next, we compute the "loss", the quantity that we will seek to maximize during the gradient ascent process. In Chapter 5, for filter
visualization, we were trying to maximize the value of a specific filter in a specific layer. Here we will simultaneously maximize the
activation of all filters in a number of layers. Specifically, we will maximize a weighted sum of the L2 norm of the activations of a
set of high-level layers. The exact set of layers we pick (as well as their contribution to the final loss) has a large influence on the
visuals that we will be able to produce, so we want to make these parameters easily configurable. Lower layers result in
geometric patterns, while higher layers result in visuals in which you can recognize some classes from ImageNet (e.g. birds or dogs).
We'll start from a somewhat arbitrary configuration involving four layers --
but you will definitely want to explore many different configurations later on:
|
# Weight per layer: how much each layer's activation contributes to the
# loss we will maximise. Names are the built-in InceptionV3 layer names
# (list them with `model.summary()`).
layer_contributions = dict(
    mixed2=0.2,
    mixed3=3.0,
    mixed4=2.0,
    mixed5=1.5,
)
|
keras-notebooks/advanced/8.2-deep-dream.ipynb
|
infilect/ml-course1
|
mit
|
Now let's define a tensor that contains our loss, i.e. the weighted sum of the L2 norm of the activations of the layers listed above.
|
# Get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers])

# Define the loss.
loss = K.variable(0.)
for layer_name in layer_contributions:
    # Add the L2 norm of the features of a layer to the loss.
    coeff = layer_contributions[layer_name]
    activation = layer_dict[layer_name].output

    # We avoid border artifacts by only involving non-border pixels in the loss.
    # Dividing by the tensor's element count keeps large layers from
    # dominating purely by size.
    scaling = K.prod(K.cast(K.shape(activation), 'float32'))
    loss += coeff * K.sum(K.square(activation[:, 2: -2, 2: -2, :])) / scaling
|
keras-notebooks/advanced/8.2-deep-dream.ipynb
|
infilect/ml-course1
|
mit
|
Now we can set up the gradient ascent process:
|
# This holds our generated image
dream = model.input

# Compute the gradients of the dream with regard to the loss.
grads = K.gradients(loss, dream)[0]

# Normalize gradients; the 1e-7 floor guards against division by zero.
grads /= K.maximum(K.mean(K.abs(grads)), 1e-7)

# Set up function to retrieve the value
# of the loss and gradients given an input image.
outputs = [loss, grads]
fetch_loss_and_grads = K.function([dream], outputs)

def eval_loss_and_grads(x):
    # Run the compiled Keras function and split its two outputs.
    outs = fetch_loss_and_grads([x])
    loss_value = outs[0]
    grad_values = outs[1]
    return loss_value, grad_values

def gradient_ascent(x, iterations, step, max_loss=None):
    """Ascend *x* along the loss gradient for up to *iterations* steps.

    Stops early once the loss exceeds *max_loss* (if given); the check
    runs before the step, so the crossing update is never applied.
    """
    for i in range(iterations):
        loss_value, grad_values = eval_loss_and_grads(x)
        if max_loss is not None and loss_value > max_loss:
            break
        print('...Loss value at', i, ':', loss_value)
        x += step * grad_values
    return x
|
keras-notebooks/advanced/8.2-deep-dream.ipynb
|
infilect/ml-course1
|
mit
|
Finally, here is the actual Deep Dream algorithm.
First, we define a list of "scales" (also called "octaves") at which we will process the images. Each successive scale is larger than
previous one by a factor 1.4 (i.e. 40% larger): we start by processing a small image and we increasingly upscale it:
Then, for each successive scale, from the smallest to the largest, we run gradient ascent to maximize the loss we have previously defined,
at that scale. After each gradient ascent run, we upscale the resulting image by 40%.
To avoid losing a lot of image detail after each successive upscaling (resulting in increasingly blurry or pixelated images), we leverage a
simple trick: after each upscaling, we reinject the lost details back into the image, which is possible since we know what the original
image should look like at the larger scale. Given a small image S and a larger image size L, we can compute the difference between the
original image (assumed larger than L) resized to size L and the original resized to size S -- this difference quantifies the details lost
when going from S to L.
The code below leverages the following straightforward auxiliary Numpy functions, which all do just as their name suggests. They
require to have SciPy installed.
|
import scipy
from keras.preprocessing import image
def resize_img(img, size):
    """Resize a batched image tensor (1, H, W, C) to *size* = (H', W')."""
    img = np.copy(img)
    # Zoom only the two spatial axes; batch and channel dims keep factor 1.
    zoom_factors = (1,
                    float(size[0]) / img.shape[1],
                    float(size[1]) / img.shape[2],
                    1)
    return scipy.ndimage.zoom(img, zoom_factors, order=1)
def save_img(img, fname):
    # Convert the float tensor back to a uint8 RGB image and write it.
    # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2+; modern
    # environments need imageio.imwrite (or PIL) instead -- confirm the
    # pinned SciPy version before running.
    pil_img = deprocess_image(np.copy(img))
    scipy.misc.imsave(fname, pil_img)
def preprocess_image(image_path):
    # Util function to open, resize and format pictures
    # into appropriate tensors.
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    # Add the batch dimension expected by the network: (1, H, W, C).
    img = np.expand_dims(img, axis=0)
    # InceptionV3 preprocessing (scales pixel values for the network).
    img = inception_v3.preprocess_input(img)
    return img
def deprocess_image(x):
    """Undo InceptionV3 preprocessing; return a uint8 (H, W, 3) image."""
    if K.image_data_format() == 'channels_first':
        # (1, 3, H, W) -> (H, W, 3)
        x = x.reshape((3, x.shape[2], x.shape[3])).transpose((1, 2, 0))
    else:
        # (1, H, W, 3) -> (H, W, 3)
        x = x.reshape((x.shape[1], x.shape[2], 3))
    # Map the preprocessed range back to [0, 255].
    x = (x / 2. + 0.5) * 255.
    return np.clip(x, 0, 255).astype('uint8')
import numpy as np

# Playing with these hyperparameters will also allow you to achieve new effects
step = 0.01  # Gradient ascent step size
num_octave = 3  # Number of scales at which to run gradient ascent
octave_scale = 1.4  # Size ratio between scales
iterations = 20  # Number of ascent steps per scale

# If our loss gets larger than 10,
# we will interrupt the gradient ascent process, to avoid ugly artifacts
max_loss = 10.

# Fill this to the path to the image you want to use
base_image_path = '/home/ubuntu/data/original_photo_deep_dream.jpg'

# Load the image into a Numpy array
img = preprocess_image(base_image_path)

# We prepare a list of shape tuples
# defining the different scales at which we will run gradient ascent
original_shape = img.shape[1:3]
successive_shapes = [original_shape]
for i in range(1, num_octave):
    # Each octave shrinks the previous shape by octave_scale.
    shape = tuple([int(dim / (octave_scale ** i)) for dim in original_shape])
    successive_shapes.append(shape)

# Reverse list of shapes, so that they are in increasing order
successive_shapes = successive_shapes[::-1]

# Resize the Numpy array of the image to our smallest scale
original_img = np.copy(img)
shrunk_original_img = resize_img(img, successive_shapes[0])

for shape in successive_shapes:
    print('Processing image shape', shape)
    # Run gradient ascent at this scale.
    img = resize_img(img, shape)
    img = gradient_ascent(img,
                          iterations=iterations,
                          step=step,
                          max_loss=max_loss)
    # Reinject detail lost by the shrink/upscale round-trip: the gap
    # between the true image at this scale and the upscaled small version.
    upscaled_shrunk_original_img = resize_img(shrunk_original_img, shape)
    same_size_original = resize_img(original_img, shape)
    lost_detail = same_size_original - upscaled_shrunk_original_img

    img += lost_detail
    shrunk_original_img = resize_img(original_img, shape)
    save_img(img, fname='dream_at_scale_' + str(shape) + '.png')

save_img(img, fname='final_dream.png')

from matplotlib import pyplot as plt
plt.imshow(deprocess_image(np.copy(img)))
plt.show()
|
keras-notebooks/advanced/8.2-deep-dream.ipynb
|
infilect/ml-course1
|
mit
|
What TensorFlow actually did in that single line was to add new operations to the computation graph. These operations included ones to compute gradients, compute parameter update steps, and apply update steps to the parameters.
The returned operation train_step, when run, will apply the gradient descent updates to the parameters. Training the model can therefore be accomplished by repeatedly running train_step.
|
# Train with stochastic gradient descent: 1000 steps, 50 examples per step.
num_steps, batch_sz = 1000, 50
for _ in range(num_steps):
    batch = data_sets.train.next_batch(batch_sz)
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Evaluate the Model
How well did our model do?
First we'll figure out where we predicted the correct label. tf.argmax is an extremely useful function which gives you the index of the highest entry in a tensor along some axis. For example, tf.argmax(y,1) is the label our model thinks is most likely for each input, while tf.argmax(y_,1) is the true label. We can use tf.equal to check if our prediction matches the truth.
|
# Per-example correctness: compare the predicted class (argmax of y) with
# the true class (argmax of the one-hot label y_).
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Finally, we can evaluate our accuracy on the test data. (On MNIST this should be about 91% correct.)
|
# Evaluate accuracy once on the held-out test split.
print(accuracy.eval(feed_dict={x: data_sets.test.images, y_: data_sets.test.labels}))
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Build a Multilayer Convolutional Network
Getting 91% accuracy on MNIST is bad. It's almost embarrassingly bad. In this section, we'll fix that, jumping from a very simple model to something moderately sophisticated: a small convolutional neural network. This will get us to around 99.2% accuracy -- not state of the art, but respectable.
Weight Initialization
To create this model, we're going to need to create a lot of weights and biases. One should generally initialize weights with a small amount of noise for symmetry breaking, and to prevent 0 gradients. Since we're using ReLU neurons, it is also good practice to initialize them with a slightly positive initial bias to avoid "dead neurons." Instead of doing this repeatedly while we build the model, let's create two handy functions to do it for us.
|
def weight_variable(shape):
    """Create a weight Variable initialised with small truncated-normal
    noise (stddev 0.1) for symmetry breaking and non-zero gradients."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Create a bias Variable initialised to a slightly positive constant
    (0.1) so ReLU units do not start out dead."""
    return tf.Variable(tf.constant(0.1, shape=shape))
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
To apply the layer, we first reshape x to a 4d tensor, with the second and third dimensions corresponding to image width and height, and the final dimension corresponding to the number of color channels.
|
# Reshape the flat input into a 4-D batch of single-channel images.
x_image = tf.reshape(x, [-1, width, height,1]) # XXX not sure which is width and which is height
# We then convolve x_image with the weight tensor, add the bias, apply the ReLU function, and finally max pool.
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Densely Connected Layer
Now that the image size has been reduced to 7x7, we add a fully-connected layer with 1024 neurons to allow processing on the entire image. We reshape the tensor from the pooling layer into a batch of vectors, multiply by a weight matrix, add a bias, and apply a ReLU.
XXX where is the 7x7 coming from?
when bumping to width, height of 50 each:
InvalidArgumentError: Input to reshape is a tensor with 540800 values, but the requested shape requires a multiple of 3136
7 x 7 x 64 = 3136
540800 / 64. = 8450
13 x 13 x 50 x 64 = 540800
On MNIST, if I change the densely connected layer to fail (change the 7x7x64 to 7x7x65 in both W_fcl and h_pool2_flat
for example, then I get the following error as soon as start to train:
InvalidArgumentError: Input to reshape is a tensor with 156800 values, but the requested shape requires a multiple of 3185
note 3185 = 7x7x65
156800 = 7 * 7 * 64 * 50
50 is batch size
with width & height = 70:
Input to reshape is a tensor with 1036800 values, but the requested shape requires a multiple of 10816
with width & height = 150:
Input to reshape is a tensor with 4620800 values, but the requested shape requires a multiple of 20736
|
def get_size_reduced_to_from_input_tensor_size(input_tensor_size):
    """Back out the post-pooling spatial side length from the tensor size
    reported in a reshape error message.

    The reported tensor holds batch_size examples with 64 channels each,
    so side**2 = input_tensor_size / 64 / batch_size.
    """
    # NOTE(review): relies on the notebook-global `batch_size`; the original
    # comment said the last divisor is 50 and is "pretty sure it's batch size".
    per_example_area = input_tensor_size / 64. / batch_size
    return math.sqrt(per_example_area)
# Back out the post-pooling spatial size from observed reshape-error values.
print(get_size_reduced_to_from_input_tensor_size(4620800))
print(get_size_reduced_to_from_input_tensor_size(1036800))
if use_MNIST_instead_of_our_data:
    size_reduced_to = 7
else:
    # for width & height = 50, size_reduced_to seems to be 13
    # for width & height = 70, size_reduced_to seems to be 18
    # for width & height = 150, size_reduced_to seems to be 38
    size_reduced_to = 18
#W_fc1 = weight_variable([7 * 7 * 64, 1024])
# Fully-connected layer: flatten the pooled feature maps (64 channels) and
# project them to 1024 hidden units.
W_fc1 = weight_variable([size_reduced_to * size_reduced_to * 64, 1024])
b_fc1 = bias_variable([1024])
#h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_pool2_flat = tf.reshape(h_pool2, [-1, size_reduced_to*size_reduced_to*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Readout Layer
Finally, we add a softmax layer, just like for the one layer softmax regression above.
|
# Readout layer: project the 1024 hidden units (after dropout) onto the
# class logits and apply softmax.
W_fc2 = weight_variable([1024, num_labels])
b_fc2 = bias_variable([num_labels])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Train and Evaluate the Model
How well does this model do? To train and evaluate it we will use code that is nearly identical to that for the simple one layer SoftMax network above. The differences are that: we will replace the steepest gradient descent optimizer with the more sophisticated ADAM optimizer; we will include the additional parameter keep_prob in feed_dict to control the dropout rate; and we will add logging to every 100th iteration in the training process.
|
# Cross-entropy loss over the softmax outputs.
# NOTE(review): tf.log(y_conv) is numerically fragile when y_conv -> 0;
# kept as-is to preserve the original behaviour.
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
for i in range(num_training_steps):
    batch = data_sets.train.next_batch(batch_size)
    if i%100 == 0:
        # Report accuracy on the current batch with dropout disabled.
        train_accuracy = accuracy.eval(feed_dict={x:batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
# Bug fix: evaluate on data_sets.test (the dataset this model was trained on,
# see the earlier cells), not the unrelated `mnist` object copied from the
# upstream tutorial.
print("test accuracy %g"%accuracy.eval(feed_dict={x: data_sets.test.images, y_: data_sets.test.labels, keep_prob: 1.0}))
|
deep_polygoggles.ipynb
|
silberman/polygoggles
|
mit
|
Unit Tests
Overview and Principles
Testing is the process by which you exercise your code to determine if it performs as expected. The code you are testing is referred to as the code under test.
There are two parts to writing tests.
1. invoking the code under test so that it is exercised in a particular way;
1. evaluating the results of executing code under test to determine if it behaved as expected.
The collection of tests performed are referred to as the test cases. The fraction of the code under test that is executed as a result of running the test cases is referred to as test coverage.
For dynamical languages such as Python, it's extremely important to have a high test coverage. In fact, you should try to get 100% coverage. This is because little checking is done when the source code is read by the Python interpreter. For example, the code under test might contain a line that has a function that is undefined. This would not be detected until that line of code is executed.
Test cases can be of several types. Below are listed some common classifications of test cases.
- Smoke test. This is an invocation of the code under test to see if there is an unexpected exception. It's useful as a starting point, but this doesn't tell you anything about the correctness of the results of a computation.
- One-shot test. In this case, you call the code under test with arguments for which you know the expected result.
- Edge test. The code under test is invoked with arguments that should cause an exception, and you evaluate if the expected exception occurs.
- Pattern test - Based on your knowledge of the calculation (not implementation) of the code under test, you construct a suite of test cases for which the results are known or there are known patterns in these results that are used to evaluate the results returned.
Another principle of testing is to limit what is done in a single test case. Generally, a test case should focus on one use of one function. Sometimes, this is a challenge since the function being tested may call other functions that you are testing. This means that bugs in the called functions may cause failures in the tests of the calling functions. Often, you sort this out by knowing the structure of the code and focusing first on failures in lower level tests. In other situations, you may use more advanced techniques called mocking. A discussion of mocking is beyond the scope of this course.
A best practice is to develop your tests while you are developing your code. Indeed, one school of thought in software engineering, called test-driven development, advocates that you write the tests before you implement the code under test so that the test cases become a kind of specification for what the code under test should do.
Examples of Test Cases
This section presents examples of test cases. The code under test is the calculation of entropy.
Entropy of a set of probabilities
$$
H = -\sum_i p_i \log(p_i)
$$
where $\sum_i p_i = 1$.
|
import numpy as np
# Code Under Test
def entropy(ps):
    """Compute the Shannon entropy H = -sum(p_i * log(p_i)) of the
    probability vector *ps* (natural logarithm).

    :param iterable-float ps: probabilities; each must lie in [0, 1] and
        they must sum to at most 1 (within floating point tolerance)
    :return float: the entropy (non-negative)
    :raises ValueError: if any p is outside [0, 1], or the sum exceeds 1
        beyond floating point tolerance
    """
    ps = np.asarray(ps, dtype=float)
    if np.any((ps < 0.0) | (ps > 1.0)):
        raise ValueError("Bad input.")
    # Fix: use a tolerance so valid distributions whose float sum is
    # marginally above 1 (accumulated rounding) are not rejected.
    total = ps.sum()
    if total > 1 and not np.isclose(total, 1.0):
        raise ValueError("Bad input.")
    # Suppress the log(0) warning; 0 * log(0) yields nan below.
    with np.errstate(divide='ignore', invalid='ignore'):
        items = ps * np.log(ps)
    # By convention 0 * log(0) contributes 0 to the entropy.
    items = np.where(np.isnan(items), 0.0, items)
    return np.abs(-np.sum(items))
# Scratch checks exploring the validation logic used inside entropy().
ps = [.8, .2]
ps = [ 1.00000001, 0] # NOTE(review): immediately overwrites the line above
# The range check used inside entropy():
[(p < 0.0) or (p > 1.0) for p in ps]
# np.isclose offers a tolerance-based comparison for float sums near 1.
np.isclose(1.1, 1)
# Smoke test
entropy([0.2, 0.8])
# One shot test
entropy([1, 0, 0, 0])
|
Spring2019/04a_Exceptions_and_Testing/unit-tests.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
You see that there are many, many cases to test. So far, we've been writing special codes for each test case. We can do better.
Testing Data Producing Codes
Much of your python (or R) codes will be creating and/or transforming dataframes. A dataframe is structured like a table with:
Columns that have values of the same type
Rows that have a value for each column
An index that uniquely identifies a row.
|
def makeProbabilityMatrix(column_names, nrows):
    """
    Makes a dataframe with the specified column names such that each
    cell is a value in [0, 1] and columns sum to 1.
    :param list-str column_names: names of the columns
    :param int nrows: number of rows
    """
    raw = np.random.uniform(0, 1, (nrows, len(column_names)))
    df = pd.DataFrame(raw)
    df.columns = column_names
    # Normalise each column by its total so every column sums to 1.
    for name in df.columns:
        df[name] = df[name] / df[name].sum()
    return df
# Smoke test
makeProbabilityMatrix(['a', 'b'], 3)
# Test 2: Check columns
COLUMNS = ['a', 'b']
df = makeProbabilityMatrix(COLUMNS, 3)
# Compare as sets: column order is not part of the property being checked.
set(COLUMNS) == set(df.columns)
|
Spring2019/04a_Exceptions_and_Testing/unit-tests.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
Exercise
Write a function that tests the following:
- The returned dataframe has the expected columns
- The returned dataframe has the expected rows
- Values in columns are of the correct type and range
- Values in column sum to 1
Unittest Infrastructure
There are several reasons to use a test infrastructure:
- If you have many test cases (which you should!), the test infrastructure will save you from writing a lot of code.
- The infrastructure provides a uniform way to report test results, and to handle test failures.
- A test infrastructure can tell you about coverage so you know what tests to add.
We'll be using the unittest framework. This is a separate Python package. Using this infrastructure, requires the following:
1. import the unittest module
1. define a class that inherits from unittest.TestCase
1. write methods that run the code to be tested and check the outcomes.
The last item has two subparts. First, we must identify which methods in the class inheriting from unittest.TestCase are tests. You indicate that a method is to be run as a test by having the method name begin with "test".
Second, the "test methods" should communicate with the infrastructure the results of evaluating output from the code under test. This is done by using assert statements. For example, self.assertEqual takes two arguments. If these are objects for which == returns True, then the test passes. Otherwise, the test fails.
|
import unittest
# Define a class in which the tests will run
class UnitTests(unittest.TestCase):
    # Each method in the class to execute a test
    def test_success(self):
        self.assertEqual(1, 1)
    def test_success1(self):
        # Same check as above, expressed with assertTrue instead.
        self.assertTrue(1 == 1)
    def test_failure(self):
        # NOTE(review): despite its name this assertion passes (1 < 2);
        # presumably it was meant to demonstrate a failing test — confirm.
        self.assertLess(1, 2)
# Load the test methods from the class and run them with a text reporter.
suite = unittest.TestLoader().loadTestsFromTestCase(UnitTests)
_ = unittest.TextTestRunner().run(suite)
# Function the handles test loading
#def test_setup(argument ?):
Spring2019/04a_Exceptions_and_Testing/unit-tests.ipynb
|
UWSEDS/LectureNotes
|
bsd-2-clause
|
LinSpace:0 means output of LinSpace. TensorFlow doesn't compute the values immediately. It only specifies the nature of the output of a TF operation, also called an Op node.
|
# LinSpace only *declares* the op in the graph; nothing is computed until a
# Session runs it.
x = tf.linspace(-3.0, 3.0, 100) # Doesn't compute immediately
# Note that tf.linspace(-3, 3, 5) gives an error because datatypes are
# mismatched
print (x)
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
We can get the elements of the graph by doing as follows. We can also get the output of a certain node in the graph
|
# Inspect the default graph: every op added so far is listed here.
g = tf.get_default_graph()
# Use the print() function (works in both Python 2 and 3; the py2-only
# `print [...]` statement form is a syntax error under Python 3 and was
# inconsistent with the print() calls used elsewhere in this notebook).
print([op.name for op in g.get_operations()]) # List of ops
# This next step would not work because the tensor doesn't exist yet, we will compute it later.
### print g.get_tensor_by_name('LinSpace_1:0')
# Note that LinSpace has a :0 at the end of it. Without :0, it refers to the Node itself, with :0 it refers to the
# tensor.
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Session
In order to get run a TF program, we need a session. The session computes the graph we construct. Here's an example.
|
# A Session executes (parts of) the graph and returns concrete values.
sess = tf.Session()
# We can ask a session to compute the value of a node
computed_x = sess.run(x)
# print (computed_x)
# Or we can ask the node to compute itself using the session
computed_x = x.eval(session = sess)
# print computed_x
# We can close the session by doing this
sess.close()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
We can ask TF to create a new graph and have it be connected to another session. We are allowed to have multiple sessions running at the same time.
|
g = tf.get_default_graph() # Fetch the default graph
# A second, independent graph bound to its own session.
g2 = tf.Graph()
# print() instead of the py2-only print statement, for Python 3 compatibility
# and consistency with the rest of the notebook.
print(g2)
sess2 = tf.Session(graph = g2)
print(sess2)
sess2.close()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Interactive Session - This is a way to run session in environments like notebooks where you don't want to pass around a session variable. But it's just like a session. Here's how to create one. Also this behaves more like a normal python program. You have to recompute the formula if you want updates. For example, z is defined below in the gaussian curve example. You have to rerun the formula after changing x to get new z. Just running z.eval() won't do it. However, in a normal session, it will.
Without interactive session, whenever you call x.eval(), you have to pass session as x.eval(session = sess)
|
# InteractiveSession installs itself as the default, so .eval() needs no
# explicit session argument.
sess = tf.InteractiveSession()
# print x.eval()
# print() instead of the py2-only print statement, for Python 3 compatibility.
print(x.get_shape()) # x.shape
print(x.get_shape().as_list()) # x.shape.tolist()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Example - Creating a Gaussian Curve
|
mean = 0
sigma = 1.0
# Gaussian probability density; use np.pi rather than the 3.14 approximation
# so the normalisation constant is exact.
z = 1.0/(tf.sqrt(2*np.pi)*sigma) * (tf.exp(-1*(tf.pow(x-mean, 2)/(2*tf.pow(sigma, 2)))))
res = z.eval() # Note that x is already defined from above
plt.plot(res)
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Making it into a 2D Gaussian
|
# Outer product of the 1-D Gaussian with itself yields a 2-D Gaussian kernel.
l = z.get_shape().as_list()[0]
res2d = tf.matmul(tf.reshape(z, [l, 1]), tf.reshape(z, [1, l])).eval()
plt.imshow(res2d)
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Convolution
Loading 'camera' images from sklearn
|
# Load the built-in 'camera' test image as float32 (TF tensors must be
# float16/32/64).
from skimage import data
img = data.camera().astype(np.float32)
plt.imshow(img, cmap='gray')
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Convolution operation in TF takes in a 4d tensor for images. The dimensions are (Batch x Height x Width x Channel). Our image is grayscale. So we reshape it using numpy into 4d as shown below.
Tensors must be float16, float32 or float64.
|
# Image shape is 512x512
# Conv2d expects Batch x Height x Width x Channel; add singleton batch and
# channel dimensions to the grayscale image.
img4d = tf.reshape(img, [1, img.shape[0], img.shape[1], 1])
# print() instead of the py2-only print statement, for Python 3 compatibility.
print(img4d.get_shape())
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
For the convolution operation we need to provide the specifics of the kernels - Height x Width x Channel x Number of kernels. Let's now convert our gaussian kernel in this format and convolve our image.
|
# Kernels are specified as Height x Width x InChannels x NumKernels.
l = res2d.shape[0]
kernel = tf.reshape(res2d, [l, l, 1, 1])
# print() instead of the py2-only print statement, for Python 3 compatibility.
print(kernel.get_shape())
# Convolution operation
convolved = tf.nn.conv2d(img4d, kernel, strides = [1, 1, 1, 1],
                         padding = 'SAME')
plt.imshow(convolved.eval()[0, :, :, 0], cmap = 'gray')
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Gabor Kernel
We can take a sin wave and modulate it with the gaussian kernel to get a gabor kernel.
|
# A sine wave that will later modulate the Gaussian to form a Gabor kernel.
ksize = 100
xs = tf.linspace(-3.0, 3.0, ksize)
ys = tf.sin(xs+2)
# The following two statements are equivalent to
# plt.plot(xs.eval(), ys.eval())
plt.figure()
plt.plot(ys.eval())
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
We need to convert this sine wave into a matrix and multiply with the gaussian kernel. That will be the gabor filter.
|
# Broadcast the sine vector into a square matrix (each column repeats the wave)
# via an outer product with a row of ones.
ys = tf.reshape(ys, [ksize, 1])
ones = tf.ones([1, ksize])
mat = tf.matmul(ys, ones)
plt.imshow(mat.eval(), cmap = 'gray')
plt.show()
# Multiply with the gaussian kernel
# kernel is 4 dimensional, res2d is the 2d version
gabor = tf.matmul(mat, res2d)
plt.imshow(gabor.eval(), cmap = 'gray')
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Convolution using Placeholders
We can specify parameters that we expect to fit in the graph later on, now, by using placeholders. Convolution using placeholders is presented below.
|
# Placeholders defer the actual values until eval/run time (via feed_dict).
img = tf.placeholder(tf.float32, shape = [None, None], name = 'img')
# Reshaping inbuilt function
img3d = tf.expand_dims(img, 2)
# print() instead of the py2-only print statement, for Python 3 compatibility.
print(img3d.get_shape())
img4d = tf.expand_dims(img3d, 0)
print(img4d.get_shape())
mean = tf.placeholder(tf.float32, name = 'mean')
sigma = tf.placeholder(tf.float32, name = 'sigma')
ksize = tf.placeholder(tf.int32, name = 'ksize')
# Giving formula for x, gaussian kernel, gabor kernel etc..
x = tf.linspace(-3.0, 3.0, ksize)
# Use np.pi rather than the 3.14 approximation so the Gaussian normalisation
# constant is exact.
z = 1.0/(tf.sqrt(2*np.pi)*sigma) * (tf.exp(-1*(tf.pow(x-mean, 2)/(2*tf.pow(sigma, 2)))))
# 2-D Gaussian as the outer product of the 1-D Gaussian with itself.
z2d = tf.matmul(tf.reshape(z, [ksize, 1]), tf.reshape(z, [1, ksize]))
xs = tf.linspace(-3.0, 3.0, ksize)
ys = tf.sin(xs)
ys = tf.reshape(ys, [ksize, 1])
ones = tf.ones([1, ksize])
mat = tf.matmul(ys, ones)
# Gabor kernel: sine matrix modulated by the Gaussian.
gabor = tf.matmul(mat, z2d)
gabor4d = tf.reshape(gabor, [ksize, ksize, 1, 1])
convolved = tf.nn.conv2d(img4d, gabor4d, strides = [1, 1, 1, 1],
                         padding = 'SAME')
# We defined the graph above, now we are going to evaluate it.
result = convolved.eval(feed_dict = {
    img: data.camera(),
    mean: 0.0,
    sigma: 1.0,
    ksize: 5
})
plt.imshow(result[0, :, :, 0], cmap = 'gray')
plt.title('Gabor filter output')
plt.show()
|
Introduction/TensorFlow Basics.ipynb
|
aliasvishnu/TensorFlow-Creative-Applications
|
gpl-3.0
|
Leveraging Quantile Regression For A/B Test
When launching new features to our product, we often times leverage experiments, or so called A/B tests in order to understand and quantify their impact. Popular statistical methods such as the t-test often focus on calculating average treatment effects. Not that there's anything wrong with the approach; it's just that because an average reduces an entire distribution into one single number, any heterogeneity in our distribution may go unnoticed. Keep in mind that in real world settings, negative experiences often times stick inside people's heads longer and stronger than positive ones. The goal of this notebook is to show how to leverage quantile regression to calculate quantile treatment effects, which offers a more precise alternative to only estimating average treatment effects.
We will use the NYCflights13 data to conduct our experiments, which contains over 300,000 observations of flights departing NYC in 2013. We will focus on a single variable, the delay time of flights arrival in minutes.
|
# constant column names used across the notebook
arr_delay = 'arr_delay'   # arrival delay in minutes
airline_name = 'name'     # airline carrier name
def read_data(path='flights.csv'):
    """Load the flights data from [path], downloading and caching it
    locally first if the file is not already present."""
    columns = [arr_delay, airline_name]
    if os.path.exists(path):
        # Cached copy available: read it directly.
        return pd.read_csv(path, usecols=columns)
    base_url = 'https://media.githubusercontent.com/media/WillKoehrsen/Data-Analysis/master'
    url = base_url + '/univariate_dist/data/formatted_flights.csv'
    df = pd.read_csv(url, usecols=columns)
    # Cache for subsequent runs.
    df.to_csv(path, index=False)
    return df
# Load (or download) the data and preview it.
df = read_data()
print(df.shape)
df.head()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
To start with, we'll use density plot to visualize the distribution of the arr_delay field. For those that are not familiar, think of it as a continuous version of histogram (The plot below overlays density plot on top of histogram).
|
# change default style figure and font size
plt.rcParams['figure.figsize'] = 10, 8
plt.rcParams['font.size'] = 12
# Histogram of arrival delay with a kernel-density estimate overlaid.
sns.distplot(df[arr_delay], hist=True, kde=True,
             hist_kws={'edgecolor':'black'},
             kde_kws={'linewidth': 4})
plt.show()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
Now, let's say we would like to use this data and compare the arrival time delay of two airlines, and once again we'll use our good old density plot to visualize and compare the two airlines' arrival time distributions.
Not affiliated with any one of the airline in anyway and neither is this data guaranteed to be up to date with the status quo.
|
endeavor_airline = 'Endeavor Air Inc.'
us_airway_airline = 'US Airways Inc.'
# Overlay the two airlines' delay densities on one axis for comparison.
for airline in [endeavor_airline, us_airway_airline]:
    subset = df[df[airline_name] == airline]
    sns.distplot(subset[arr_delay], hist=False, kde=True,
                 kde_kws={'shade': False, 'linewidth': 3}, label=airline)
plt.legend(prop={'size': 12}, title='Airline')
plt.title('Density Plot of Arrival Delays')
plt.xlabel('Delay (min)')
plt.ylabel('Density')
plt.show()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
After visualizing the arrival time on the two airlines, we can see that although both distribution seems to be centered around the same area, the tail-end of both side tells a different story, where one of the airline shows that it has a larger tendency of resulting in a delay.
If we were to leverage the two sample t-test to compare the means of the two separate samples, we would see that the statistical test tells us we can't reject the null hypothesis: there is no statistically significant difference between the arrival delay times of the two airlines.
|
# Two-sample t-test on mean arrival delay (assumes equal variances).
airline1_delay = df.loc[df[airline_name] == endeavor_airline, arr_delay]
airline2_delay = df.loc[df[airline_name] == us_airway_airline, arr_delay]
result = stats.ttest_ind(airline1_delay, airline2_delay, equal_var=True)
result
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
We can also leverage a single binary variable linear regression to arrive at the same conclusion as the two sample t-test above. The step to do this is to convert our airline variable into a dummy variable and fit a linear regression using the dummy variable as the input feature and the arrival delay time as the response variable.
|
# Keep only the two airlines of interest and dummy-encode the airline name
# (drop_first leaves a single 0/1 treatment indicator column).
mask = df[airline_name].isin([endeavor_airline, us_airway_airline])
df_airline_delay = df[mask].reset_index(drop=True)
y = df_airline_delay[arr_delay]
X = pd.get_dummies(df_airline_delay[airline_name], drop_first=True)
X.head()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
We'll be using statsmodel to build the linear regression as it gives R-like statistical output. For people coming from scikit-learn, y variable comes first in statsmodel and by default, it doesn't automatically fit a constant/intercept, so we'll need to add it ourselves.
|
# ordinary least square
# statsmodels does not add an intercept automatically, hence add_constant.
model = sm.OLS(y, sm.add_constant(X))
result = model.fit()
result.summary()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
Notice that the numbers for the t-statistic and p-value matches the two-sample t-test result above. The benefit of using a linear regression is that, we can include many other features to see if they are the reasons behind the arrival delay.
By looking at average treatment effect, we can see that we would be drawing the conclusion that there is no statistical difference in the two airlines' arrival time, however, based on looking that the distribution of two airline's arrival delay, our hunch tells us that we're probably missing something. This is where estimating quantile treatment effect really provide additional insights not found by simply looking at the average treatment effects. To do so, we change our model to a quantile regression and specify the quantile we are interested in.
|
# Quantile regression at the 0.9 quantile of the arrival delay distribution.
model = sm.QuantReg(y, sm.add_constant(X))
result = model.fit(q=0.9, kernel='gau')
result.summary()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
At 0.9 quantile, we were able to detect a statistically significant effect!
We can, of course, also compute this across multiple quantiles and plot the quantile treatment effect in a single figure to get much more nuanced insights into the treatment effect of our experiment at different quantiles. This allows us to detect extreme scenarios where a single two-sample t-test would fail to raise any concerns about the experiment.
|
def compute_quantile_treatment_effect(X, y, quantiles):
    """Fit a quantile regression at each requested quantile and collect
    the treatment coefficient and its p-value.

    :param X: dummy-encoded treatment indicator (DataFrame/array)
    :param y: response variable (e.g. arrival delay)
    :param quantiles: iterable of quantiles in (0, 1)
    :return: DataFrame with one row per quantile containing the quantile,
        the treatment coefficient (column named after the dummy) and its
        p-value
    """
    # The design matrix and the model object are loop-invariant; build them
    # once instead of re-creating them for every quantile.
    X_const = sm.add_constant(X)
    model = sm.QuantReg(y, X_const)
    coefs = []
    pvalues = []
    for q in quantiles:
        result = model.fit(q=q, kernel='gau')
        # index 1 is the treatment dummy (index 0 is the intercept)
        coefs.append(result.params[1])
        pvalues.append(result.pvalues[1])
    coef_name = result.params.index[1]
    df_quantile_effect = pd.DataFrame({
        'quantile': quantiles,
        coef_name: coefs,
        'pvalue': pvalues
    })
    return df_quantile_effect
# Effect size and p-value at each decile from 0.1 to 0.9.
quantiles = np.arange(0.1, 1.0, 0.1)
df_quantile_effect = compute_quantile_treatment_effect(X, y, quantiles)
df_quantile_effect
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111)
ax.plot(df_quantile_effect['quantile'], df_quantile_effect['US Airways Inc.'])
ax.set_xlabel('quantiles')
ax.set_ylabel('effect size')
plt.show()
|
ab_tests/quantile_regression/ab_test_regression.ipynb
|
ethen8181/machine-learning
|
mit
|
Step 3 training the network
|
# Build the autoencoder and sanity-check that the test split exists on disk.
model = PriceHistoryAutoencoder(rng=random_state, dtype=dtype, config=config)
npz_test = npz_path + '_test.npz'
assert path.isfile(npz_test)
path.abspath(npz_test)
def experiment():
    # Wrap the training run so it can be cached/replayed by get_or_run_nn.
    return model.run(npz_path=npz_path,
                     epochs=50,
                     batch_size = 53,
                     enc_num_units = 450,
                     dec_num_units = 450,
                     ts_len=max_seq_len,
                     learning_rate = 1e-3,
                     preds_gather_enabled = True,
                     )
#%%time
# dyn_stats_dic, preds_dict, targets, twods = experiment()
dyn_stats_dic, preds_dict, targets, twods = get_or_run_nn(experiment, filename='035_autoencoder_001',
                                                          nn_runs_folder = data_path + "/nn_runs")
dyn_stats_dic['dyn_stats'].plotStats()
plt.show()
dyn_stats_dic['dyn_stats_diff'].plotStats()
plt.show()
# Score every reconstruction and inspect the single worst one (lowest R^2).
r2_scores = [r2_score(y_true=targets[ind], y_pred=preds_dict[ind])
             for ind in range(len(targets))]
ind = np.argmin(r2_scores)
ind
reals = targets[ind]
preds = preds_dict[ind]
r2_score(y_true=reals, y_pred=preds)
#sns.tsplot(data=dp.inputs[ind].flatten())
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
%%time
dtw_scores = [fastdtw(targets[ind], preds_dict[ind])[0]
              for ind in range(len(targets))]
np.mean(dtw_scores)
coint(preds, reals)
# Also eyeball a randomly chosen reconstruction.
cur_ind = np.random.randint(len(targets))
reals = targets[cur_ind]
preds = preds_dict[cur_ind]
fig = plt.figure(figsize=(15,6))
plt.plot(reals, 'b', label='reals')
plt.plot(preds, 'g')
plt.legend(['reals','preds'])
plt.show()
|
04_time_series_prediction/35a_price_history_autoencoder_dyn_rnn_with_diff.ipynb
|
pligor/predicting-future-product-prices
|
agpl-3.0
|
This much we knew already. Now we just have to produce an instance of the BestMSM.MSM class that has the same count and transition matrices.
|
import bestmsm.msm as msm
bhsmsm = msm.MSM(keys = range(4))
bhsmsm.count = bhs.count
bhsmsm.keep_states = range(4)
bhsmsm.keep_keys = range(4)
bhsmsm.trans = bhs.trans
bhsmsm.rate = bhs.K
bhsmsm.tauK, bhsmsm.peqK = bhsmsm.calc_eigsK()
|
example/fourstate/fourstate_to_bestmsm.ipynb
|
daviddesancho/BestMSM
|
gpl-2.0
|
So now we already have a transition matrix, eigenvalues and eigenvectors which are the same for the Fourstate and BestMSM.MSM class. The important bits start next, with the flux and pfold estimations.
|
bhs.run_commit()
bhsmsm.do_pfold(UU=[0], FF=[3])
print " These are the flux matrices"
print bhs.J
print bhsmsm.J
print " And these are the total flux values"
print " from Fourstate: %g"%bhs.sum_flux
print " from BestMSM.MSM: %g"%bhs.sum_flux
|
example/fourstate/fourstate_to_bestmsm.ipynb
|
daviddesancho/BestMSM
|
gpl-2.0
|
It really looks like we have got the same exact thing, as intended. Next comes the generation of paths. This implies defining a function in the case of the Fourstate object.
|
def gen_path_lengths(keys, J, pfold, flux, FF, UU):
    """Build edge lengths for a highest-flux path search on the flux network.

    :param keys: state identifiers (only their count is used)
    :param J: flux matrix, J[j, i] = flux from state i to state j
    :param pfold: committor value per state
    :param flux: total flux from UU to FF
    :param FF: folded (target) states
    :param UU: unfolded (source) states
    :return: (Jnode, Jpath) where Jnode[i] is the flux through node i and
        Jpath[j, i] is the edge length -log of the branching probability
        of edge i -> j, shifted by +1
    """
    nkeys = len(keys)
    intermediate = [s for s in range(nkeys) if s not in FF + UU]
    # Flux through each node: incoming flux from nodes of lower committor.
    Jnode = [np.sum([J[i, x] for x in range(nkeys) if pfold[x] < pfold[i]])
             for i in range(nkeys)]
    Jpath = np.zeros((nkeys, nkeys), float)
    # Edges leaving the unfolded set carry the total flux as reference.
    for src in UU:
        for dst in intermediate + FF:
            if J[dst, src] > 0:
                Jpath[dst, src] = np.log(flux / J[dst, src]) + 1  # +1 shift
    # Edges between intermediate/folded states, committor-increasing only.
    for src in intermediate:
        uphill = [s for s in FF + intermediate if pfold[s] > pfold[src]]
        for dst in uphill:
            if J[dst, src] > 0:
                Jpath[dst, src] = np.log(Jnode[dst] / J[dst, src]) + 1  # +1 shift
    return Jnode, Jpath
# Compute edge lengths for the 4-state network, then enumerate every simple
# path from the unfolded state (0) to the folded state (3).
Jnode, Jpath = gen_path_lengths(range(4), bhs.J, bhs.pfold, \
        bhs.sum_flux, [3], [0])
JpathG = nx.DiGraph(Jpath.transpose())
tot_flux = 0
for path in nx.all_simple_paths(JpathG, 0, 3):
    print path
    # Flux carried by the path: flux of the first edge, multiplied by the
    # branching probability J/Jnode at every subsequent hop.
    f = bhs.J[path[1],path[0]]
    print "%2i -> %2i: %10.4e "%(path[0], path[1], \
        bhs.J[path[1],path[0]])
    for i in range(2, len(path)):
        print "%2i -> %2i: %10.4e %10.4e"%(path[i-1], path[i], \
            bhs.J[path[i],path[i-1]], Jnode[path[i-1]])
        f *= bhs.J[path[i],path[i-1]]/Jnode[path[i-1]]
    tot_flux +=f
    print " J(path) = %10.4e"%f
    print
print " Commulative flux: %10.4e"%tot_flux
|
example/fourstate/fourstate_to_bestmsm.ipynb
|
daviddesancho/BestMSM
|
gpl-2.0
|
For BestMSM.MSM everything should be built in. First we obtain the 3 highest flux paths (npath=3 below).
|
# npath=3: enumerate the three highest-flux paths from state 0 to state 3.
bhsmsm.do_dijkstra(UU=[0], FF=[3], npath=3)
|
example/fourstate/fourstate_to_bestmsm.ipynb
|
daviddesancho/BestMSM
|
gpl-2.0
|
Then we use the alternative mechanism of giving a cutoff for the flux left. Here we want to account for 80% of the flux.
|
# Recompute committors, then enumerate paths until only 20% of the total
# flux remains unassigned (cut=0.2), i.e. account for 80% of the flux.
bhsmsm.do_pfold(UU=[0], FF=[3])
bhsmsm.do_dijkstra(UU=[0], FF=[3], cut=0.2)
|
example/fourstate/fourstate_to_bestmsm.ipynb
|
daviddesancho/BestMSM
|
gpl-2.0
|
Discriminator
Implement discriminator to create a discriminator neural network that discriminates on images. This function should be able to reuse the variabes in the neural network. Use tf.variable_scope with a scope name of "discriminator" to allow the variables to be reused. The function should return a tuple of (tensor output of the discriminator, tensor logits of the discriminator).
|
def discriminator(images, reuse=False):
    """
    Create the discriminator network
    :param images: Tensor of input image(s)
    :param reuse: Boolean if the weights should be reused
    :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator)
    """
    alpha = 0.2  # leaky-ReLU negative slope
    # A named variable scope lets a second call share the same weights
    # when reuse=True, as required by the exercise.
    with tf.variable_scope('discriminator', reuse=reuse):
        # 28x28xC -> 14x14x64; strided conv instead of pooling (DCGAN style)
        h1 = tf.layers.conv2d(images, 64, 5, strides=2, padding='same')
        h1 = tf.maximum(alpha * h1, h1)
        # 14x14x64 -> 7x7x128; batch norm on every layer except the first
        h2 = tf.layers.conv2d(h1, 128, 5, strides=2, padding='same')
        h2 = tf.layers.batch_normalization(h2, training=True)
        h2 = tf.maximum(alpha * h2, h2)
        # 7x7x128 -> 4x4x256
        h3 = tf.layers.conv2d(h2, 256, 5, strides=2, padding='same')
        h3 = tf.layers.batch_normalization(h3, training=True)
        h3 = tf.maximum(alpha * h3, h3)
        # Flatten and project to a single real/fake logit.
        flat = tf.reshape(h3, (-1, 4 * 4 * 256))
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)
    return out, logits
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_discriminator(discriminator, tf)
|
face_generation/dlnd_face_generation.ipynb
|
greg-ashby/deep-learning-nanodegree
|
mit
|
NumPy binary files (NPY, NPZ)
Q1. Save x into temp.npy and load it.
|
# Q1: save x into 'temp.npy' and load it back.
x = np.arange(10)
np.save('temp.npy', x)  # np.save appends '.npy' only if it is missing

# Check if there exists the 'temp.npy' file.
import os
if os.path.exists('temp.npy'):
    x2 = np.load('temp.npy')
    print(np.array_equal(x, x2))  # True
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Q2. Save x and y into a single file 'temp.npz' and load it.
|
# Q2: save x and y into a single archive 'temp.npz' and load it back.
x = np.arange(10)
y = np.arange(11, 20)
np.savez('temp.npz', x=x, y=y)  # keyword names become the archive keys
# NpzFile is a context manager; the file handle is closed on exit.
with np.load('temp.npz') as data:
    x2 = data['x']
    y2 = data['y']
print(np.array_equal(x, x2))  # True
print(np.array_equal(y, y2))  # True
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Text files
Q3. Save x to 'temp.txt' in string format and load it.
|
# Q3: save x to 'temp.txt' in string format (with a header) and load it.
x = np.arange(10).reshape(2, 5)
header = 'num1 num2 num3 num4 num5'
np.savetxt('temp.txt', x, fmt="%d", header=header)
# loadtxt skips '#'-prefixed header lines automatically; values come back as floats.
x2 = np.loadtxt('temp.txt')
print(x2)
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Q4. Save x, y, and z to 'temp.txt' in string format line by line, then load it.
|
# Q4: save x, y, and z to 'temp.txt' line by line, then load them back.
x = np.arange(10)
y = np.arange(11, 21)
z = np.arange(22, 32)
# Passing a sequence of 1-D arrays writes one array per line.
np.savetxt('temp.txt', (x, y, z), fmt='%d')
# loadtxt returns a (3, 10) array; unpack it row by row.
x2, y2, z2 = np.loadtxt('temp.txt')
print(x2, y2, z2)
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Q5. Convert x into bytes, and load it as array.
|
# Q5: convert x into bytes, and load it back as an array.
x = np.array([1, 2, 3, 4])
x_bytes = x.tobytes()  # raw C-order buffer of the array
# frombuffer needs the original dtype to reinterpret the bytes correctly.
x2 = np.frombuffer(x_bytes, dtype=x.dtype)
print(np.array_equal(x, x2))  # True
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Q6. Convert a into an ndarray and then convert it into a list again.
|
# Q6: convert a into an ndarray and then back into a (nested) list.
a = [[1, 2], [3, 4]]
x = np.array(a)
a2 = x.tolist()  # tolist() recursively converts to plain Python ints/lists
print(a == a2)  # True
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
String formatting
Q7. Convert x to a string, and revert it.
|
# Q7: convert x to a string, and revert it.
x = np.arange(10).reshape(2,5)
x_str = np.array_str(x)
print(x_str, "\n", type(x_str))
x_str = x_str.replace("[", "") # [] must be stripped
x_str = x_str.replace("]", "")
# Split on whitespace, cast back to the original dtype, restore the shape.
x2 = np.array(x_str.split()).astype(x.dtype).reshape(x.shape)
assert np.array_equal(x, x2)
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
Text formatting options
Q8. Print x such that all elements are displayed with precision=1, no suppress.
|
# Q8: print x so that ALL elements are displayed with precision=1, no suppress.
x = np.random.uniform(size=[10,100])
# threshold=np.inf disables summarization ("..."), so every element prints.
np.set_printoptions(precision=1, threshold=np.inf, suppress=False)
print(x)
|
numpy/numpy_exercises_from_kyubyong/Input_and_Output.ipynb
|
mohanprasath/Course-Work
|
gpl-3.0
|
So, this is a microstructure evolution problem, and final microstructures look very similar to each other (just looking at them). Can we check it using PyMKS tools? We have 200 files (microstructure outputs) for each simulation at every fixed Monte-Carlo step, so we can also take a look at path each simulation takes.
Microstructure Statistics
To get started, we are going to perform 2-point statistics first for couple of microstructures using correlate from pymks.stats :
The correlations can be plotted using draw_autocorrelations from pymks.tools. Here the 10th step is plotted, since the initial microstructure is completely random and its statistics do not look exciting. So, we are going to take a look at the 10th Monte-Carlo step output.
|
from pymks import PrimitiveBasis
from pymks.stats import correlate
from pymks.tools import draw_autocorrelations
# Two local states (black/white phases), encoded with a primitive basis.
p_basis = PrimitiveBasis(n_states=2,domain=[1, 2])
# Periodic 2-point autocorrelations for each phase: (0,0) and (1,1).
X_auto = correlate(X, p_basis, periodic_axes=(0, 1), correlations=[(0, 0),(1, 1)])
X_auto.shape
correlations = [('black', 'black'), ('white', 'white')]
# Plot the statistics of the 10th snapshot (the initial one is pure noise).
draw_autocorrelations(X_auto[10], autocorrelations=correlations)
# The autocorrelation value at the center pixel is printed below as the
# volume fraction of each phase.
center = (X_auto.shape[2]) / 2
print 'Volume fraction of black phase', X_auto[0, center, center, 0]
print 'Volume fraction of white phase', X_auto[0, center, center, 1]
|
notebooks/structure_ising_2D.ipynb
|
davidbrough1/pymks
|
mit
|
Reduced-order representations (PCA)
Using MKSStructureAnalysis we can perform 2-point statistics and dimensionality reduction (PCA) right after. So we are not going to use what we have done in the previous section; it was just to show what 2-point statistics look like for our data.
So, total we have 5 simulations and they already have been concatenated into X_con.
|
from pymks import MKSStructureAnalysis
# Performs 2-point statistics and PCA dimensionality reduction in one object;
# both spatial axes are treated as periodic.
analyzer = MKSStructureAnalysis(basis=p_basis, periodic_axes=[0,1])
# Fit on the concatenated simulations and return their PCA component scores.
XY_PCA=analyzer.fit_transform(X_con)
XY_PCA.shape
|
notebooks/structure_ising_2D.ipynb
|
davidbrough1/pymks
|
mit
|
R1 and R2 are two different simulation results with the same initial microstructure, but different seeds for random number generation for Monte-Carlo simulations. The hope is to see that the same initial microstructure will take two different paths and will end up in quite the same spot. Let's check it!
So let's take a look at PCA plot:
|
from pymks.tools import draw_components_scatter
# Scatter plot of the first 3 PCA components; each 201-row slice is one
# simulation trajectory (presumably 201 snapshots per run, matching the
# slicing below -- confirm against the data loading step).
draw_components_scatter([XY_PCA[0:201, :3], XY_PCA[201:402, :3], XY_PCA[402:603, :3],
                         XY_PCA[603:804, :3], XY_PCA[804:1005, :3]],
                        ['ising 50%', 'ising 30%', 'ising 10%',
                         'ising 40% run#1', 'ising 40% run#2'],
                        view_angles=(30, 100), legend_outside=True, fig_size=(10,8))
|
notebooks/structure_ising_2D.ipynb
|
davidbrough1/pymks
|
mit
|
Looks cool but not clear! Now, let's plot only initial and final structures.
|
# Same scatter plot, but the step-200 slicing keeps only the first and
# last snapshot of each trajectory (initial vs final microstructure).
draw_components_scatter([XY_PCA[:201:200, :3], XY_PCA[201:402:200, :3],
                         XY_PCA[402:603:200, :3], XY_PCA[603:804:200, :3],
                         XY_PCA[804:1005:200, :3]],
                        ['ising 50%', 'ising 30%', 'ising 10%',
                         'ising 40% run#1', 'ising 40% run#2'],
                        view_angles=(30, 100), legend_outside=True,
                        fig_size=(10,8), title='Initial and Final microstructures')
|
notebooks/structure_ising_2D.ipynb
|
davidbrough1/pymks
|
mit
|
机器学习模型的学习效果评价基于测试集,而不依赖于训练集;过拟合的含义是模型可以很好的匹配训练集,但是对于未知的训练集数据效果不佳;下面的代码是之前的分类器模型可视化:
|
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the 2-D decision regions of a fitted classifier.

    Parameters
    ----------
    X : array, shape (n_samples, 2)
        Feature matrix (exactly two features, used as plot axes).
    y : array, shape (n_samples,)
        Class labels.
    classifier : fitted estimator exposing ``predict``.
    test_idx : optional index range of test samples to highlight.
    resolution : float, grid step for the decision surface mesh.
    """
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])
    # plot the decision surface: predict on a dense grid covering the
    # data range plus a 1-unit margin on each side
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())
    # plot all samples, one scatter call per class
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl)
    # highlight test samples
    # (fix: the redundant unconditional X_test/y_test assignment that ran
    # even when test_idx was None has been removed -- it belongs here only)
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='',
                    alpha=1.0, linewidth=1, marker='o', s=55, label='test set')
# Visualize the perceptron's decision regions on the combined
# (train + test) standardized data; samples 105-149 are the test set
# and get highlighted by plot_decision_regions.
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined, classifier=ppn, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
感知器算法对于无法线性分割的数据集,是不收敛的,因此实际中很少只用感知器算法。后面将会介绍更强大的线性分类器,对无法线性分割的数据集可以收敛到最佳程度
1.3 使用sklearn逻辑回归实现分类器
  前面使用最简单的感知器实现了分类,存在的一个巨大缺陷是分类器对于无法线性分割的数据无法收敛。逻辑回归是另一种用于解决线性/二进制分类问题的算法,虽然名为逻辑回归,却是分类器模型,而非回归模型。逻辑回归在工业中使用很广泛
|
from sklearn.linear_model import LogisticRegression
# Large C means weak L2 regularization.
lr = LogisticRegression(C=1000.0, random_state=0)
lr.fit(X_train_std, y_train)
# Plot regions on combined data; samples 105-149 are the test set.
plot_decision_regions(X_combined_std, y_combined, classifier=lr, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
使用正规化解决过拟合
  过拟合是机器学习很常见的问题,在过拟合时,模型对训练数据集表现良好而对测试数据集表现欠佳。可能的原因是包含太多参数导致模型过于复杂;同样的欠拟合是模型过于简单,对训练数据集和测试数据集表现都不理想;使用正规化可以从数据中去除噪声从而防止过拟合
1.4 使用sklearn SVM实现分类器
  另一个有效且广泛实用的学习算法是SVM(支持向量机),可以认为是感知器的扩展。使用感知器算法可以最小化误分类,而使用SVM可以最小化类间距(松弛变量),并解决非线性分割问题。
|
from sklearn.svm import SVC
# Linear-kernel SVM; C controls the penalty on margin violations.
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train_std, y_train)
# Plot regions on combined data; samples 105-149 are the test set.
plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
核函数SVM解决非线性分类问题
  SVM算法另一个吸引人的地方是可以使用核函数解决非线性分类问题。典型的非线性问题例子如下图。
|
# XOR toy data: label +1 where exactly one coordinate is positive.
# This set is not linearly separable.
np.random.seed(0)
X_xor = np.random.randn(200, 2)
y_xor = np.logical_xor(X_xor[:, 0] > 0, X_xor[:, 1] > 0)
y_xor = np.where(y_xor, 1, -1)
plt.scatter(X_xor[y_xor==1, 0], X_xor[y_xor==1, 1], c='b', marker='x', label='1')
plt.scatter(X_xor[y_xor==-1, 0], X_xor[y_xor==-1, 1], c='r', marker='s', label='-1')
plt.ylim(-3.0)
plt.legend()
plt.show()
# RBF-kernel SVM can separate the XOR data; gamma sets the kernel width.
svm = SVC(kernel='rbf', random_state=0, gamma=0.10, C=10.0)
svm.fit(X_xor, y_xor)
plot_decision_regions(X_xor, y_xor, classifier=svm)
plt.legend(loc='upper left')
plt.show()
# RBF SVM on the standardized iris data; samples 105-149 are the test set.
svm = SVC(kernel='rbf', random_state=0, gamma=0.2, C=1.0)
svm.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined, classifier=svm, test_idx=range(105,150))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
|
jupyter/machine_learning_1.ipynb
|
lichao890427/lichao890427.github.io
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.