code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
```
# default_exp eval
#hide
%load_ext autoreload
%autoreload 2
%matplotlib inline
```
# Eval
> This module contains all the necessary functions for evaluating different video duplication detection techniques.
```
#export
import cv2
import ffmpeg
import pickle
import numpy as np
from fastprogress.fastprogress import progress_bar
from matplotlib import pyplot as plt
from pathlib import Path
from tango.prep import *
from sklearn.cluster import KMeans
# hide
from nbdev.showdoc import *
#hide
path = Path("<path>")
video_paths = sorted(path.glob("**/*.mp4")); video_paths[:6]
#export
def calc_tf_idf(tfs, dfs):
    """Compute TF-IDF weights for a vocabulary of visual words.

    Each term frequency is normalized by the total count over ``tfs``, and the
    inverse document frequency uses ``log(len(tfs) / (df + 1))`` (the ``+ 1``
    avoids division by zero for unseen terms).

    Args:
        tfs: per-term raw frequencies (1-D array-like).
        dfs: per-term document frequencies, aligned with ``tfs``.

    Returns:
        1-D numpy array of TF-IDF weights, one per term.
    """
    tfs = np.asarray(tfs, dtype=float)
    dfs = np.asarray(dfs, dtype=float)
    # Vectorized form of the original per-element loop; the loop built the
    # result with np.append, which reallocates the array each iteration (O(n^2)).
    tf = tfs / np.sum(tfs)
    idf = np.log(len(tfs) / (dfs + 1))
    return tf * idf
#export
def cosine_similarity(a, b):
    """Return the cosine similarity between vectors ``a`` and ``b``."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
# export
def hit_rate_at_k(rs, k):
    """Fraction of result lists in ``rs`` with a relevant item in the top ``k``.

    Relevance is binary: any nonzero entry in ``r[:k]`` counts as a hit.
    """
    hits = sum(1 for r in rs if np.sum(r[:k]) > 0)
    return hits / len(rs)
```
## Following methods from: https://gist.github.com/bwhite/3726239
```
# export
def mean_reciprocal_rank(rs):
    """Mean reciprocal rank of the first relevant item across result lists.

    The first element of each list is rank 1; relevance is binary (any
    nonzero entry is relevant). A list with no relevant item contributes 0.

    >>> mean_reciprocal_rank([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
    0.61111111111111105

    Args:
        rs: iterator of relevance-score lists/arrays in rank order.

    Returns:
        Mean reciprocal rank over all lists.
    """
    reciprocal_ranks = []
    for r in rs:
        hit_positions = np.asarray(r).nonzero()[0]
        if hit_positions.size:
            reciprocal_ranks.append(1. / (hit_positions[0] + 1))
        else:
            reciprocal_ranks.append(0.)
    return np.mean(reciprocal_ranks)
def r_precision(r):
    """Precision measured at the rank of the last relevant document.

    Relevance is binary (nonzero is relevant); returns 0.0 when nothing
    in ``r`` is relevant.

    >>> r_precision([0, 1, 0])
    0.5

    Args:
        r: relevance scores (list or numpy) in rank order.

    Returns:
        R-Precision.
    """
    relevant = np.asarray(r) != 0
    hit_positions = relevant.nonzero()[0]
    if hit_positions.size == 0:
        return 0.
    last_hit = hit_positions[-1]
    return np.mean(relevant[:last_hit + 1])
def precision_at_k(r, k):
    """Precision of the top ``k`` results.

    Relevance is binary (nonzero is relevant).

    >>> precision_at_k([0, 0, 1], 3)
    0.33333333333333331

    Args:
        r: relevance scores (list or numpy) in rank order.
        k: cutoff; must satisfy 1 <= k <= len(r).

    Returns:
        Precision @ k.

    Raises:
        ValueError: when len(r) < k.
    """
    assert k >= 1
    top = np.asarray(r)[:k] != 0
    if top.size != k:
        raise ValueError('Relevance score length < k')
    return np.mean(top)
def average_precision(r):
    """Average precision (area under the precision-recall curve).

    Relevance is binary (nonzero is relevant): the precision of the top-k
    prefix is averaged over every rank k at which a relevant item occurs.
    Returns 0.0 when no item is relevant.

    >>> average_precision([1, 1, 0, 1, 0, 1, 0, 0, 0, 1])
    0.78333333333333333

    Args:
        r: relevance scores (list or numpy) in rank order.

    Returns:
        Average precision.
    """
    relevant = np.asarray(r) != 0
    if not relevant.any():
        return 0.
    # Precision of each top-k prefix: (# relevant in first k) / k.
    running_precision = np.cumsum(relevant) / np.arange(1, relevant.size + 1)
    # Average only over the ranks where a relevant item appears.
    return np.mean(running_precision[relevant])
def mean_average_precision(rs):
    """Mean of the average precision over several result lists.

    Relevance is binary (nonzero is relevant).

    >>> mean_average_precision([[1, 1, 0, 1, 0, 1, 0, 0, 0, 1]])
    0.78333333333333333

    Args:
        rs: iterator of relevance-score lists/arrays in rank order.

    Returns:
        Mean average precision.
    """
    per_list_scores = [average_precision(r) for r in rs]
    return np.mean(per_list_scores)
def recall_at_k(r, k, l):
    """Recall of the top ``k`` results against ``l`` total relevant items.

    Relevance is binary (nonzero is relevant).

    >>> recall_at_k([0, 0, 1], 3, 2)
    0.5

    Args:
        r: relevance scores (list or numpy) in rank order.
        k: cutoff; must satisfy 1 <= k <= len(r).
        l: total number of relevant items for the query (>= 1).

    Returns:
        Recall @ k.

    Raises:
        ValueError: when len(r) < k.
    """
    assert k >= 1
    assert l >= 1
    top = np.asarray(r)[:k] != 0
    if top.size != k:
        raise ValueError('Relevance score length < k')
    return np.sum(top) / l
# Quick interactive sanity checks of the metric implementations on toy lists.
rs = [[1, 0, 0], [0, 1, 0], [0, 0, 0]]
mean_reciprocal_rank(rs)
r = [1, 1, 0, 1, 0, 1, 0, 0, 0, 1]
average_precision(r)
mean_average_precision(rs)
# export
def rank_stats(rs):
    """Summary statistics of the first-hit rank across result lists.

    Each element of ``rs`` must be a numpy array containing at least one
    nonzero entry (otherwise ``nonzero()[0][0]`` raises IndexError).

    Args:
        rs: iterable of binary relevance arrays in rank order.

    Returns:
        Tuple ``(std, mean, median, mean reciprocal rank)`` of the 1-based
        rank of the first relevant item in each list.
    """
    first_hit_ranks = np.asarray([r.nonzero()[0][0] + 1 for r in rs])
    reciprocal_ranks = 1 / first_hit_ranks
    return (np.std(first_hit_ranks), np.mean(first_hit_ranks),
            np.median(first_hit_ranks), np.mean(reciprocal_ranks))
# export
def evaluate(rankings, top_k = [1, 5, 10]):
    """Compute ranking metrics per report, per bug, and per app.

    Args:
        rankings: nested dict of the form
            ``{app: {'elapsed_time': seconds,
                     bug: {report: {labels: score, ...}, ...}, ...}}``
            where each report maps candidate ``labels`` (a sequence whose
            first element is a bug id) to a similarity score, iterated in
            rank order (best candidate first).
        top_k: cutoffs for the Hit@k metrics (read-only; never mutated, so
            the mutable default is safe here).

    Returns:
        dict mirroring ``rankings`` with per-report first-hit 'rank' and
        'average_precision', plus per-bug and per-app aggregates
        (std/mean/median rank, mRR, mAP, Hit@k).
    """
    output = {}
    for app in rankings:
        output[app] = {}
        app_rs = []
        for bug in rankings[app]:
            if bug == 'elapsed_time': continue
            output[app][bug] = {}
            bug_rs = []
            for report in rankings[app][bug]:
                output[app][bug][report] = {'ranks': []}
                r = []
                # Binary relevance in rank order: 1 where the candidate's
                # bug id matches the ground-truth bug.
                for labels, score in rankings[app][bug][report].items():
                    output[app][bug][report]['ranks'].append((labels, score))
                    if labels[0] == bug: r.append(1)
                    else: r.append(0)
                r = np.asarray(r)
                # 1-based rank of the first correct candidate; assumes each
                # report has at least one match (IndexError otherwise).
                output[app][bug][report]['rank'] = r.nonzero()[0][0] + 1
                output[app][bug][report]['average_precision'] = average_precision(r)
                bug_rs.append(r)
            bug_rs_std, bug_rs_mean, bug_rs_med, bug_mRR = rank_stats(bug_rs)
            bug_mAP = mean_average_precision(bug_rs)
            output[app][bug]['Bug std rank'] = bug_rs_std
            output[app][bug]['Bug mean rank'] = bug_rs_mean
            output[app][bug]['Bug median rank'] = bug_rs_med
            output[app][bug]['Bug mRR'] = bug_mRR
            output[app][bug]['Bug mAP'] = bug_mAP
            for k in top_k:
                # Bug fix: store the per-bug hit rate under the bug's own
                # dict, alongside the other per-bug aggregates. It was
                # previously written to output[app][f'Bug Hit@{k}'], which
                # silently overwrote the value on every bug iteration.
                output[app][bug][f'Bug Hit@{k}'] = hit_rate_at_k(bug_rs, k)
            app_rs.extend(bug_rs)
        app_rs_std, app_rs_mean, app_rs_med, app_mRR = rank_stats(app_rs)
        app_mAP = mean_average_precision(app_rs)
        output[app]['App std rank'] = app_rs_std
        output[app]['App mean rank'] = app_rs_mean
        output[app]['App median rank'] = app_rs_med
        output[app]['App mRR'] = app_mRR
        output[app]['App mAP'] = app_mAP
        print(f'{app} Elapsed Time in Seconds', rankings[app]['elapsed_time'])
        print(f'{app} σ Rank', app_rs_std)
        print(f'{app} μ Rank', app_rs_mean)
        print(f'{app} Median Rank', app_rs_med)
        print(f'{app} mRR:', app_mRR)
        print(f'{app} mAP:', app_mAP)
        for k in top_k:
            app_hit_rate = hit_rate_at_k(app_rs, k)
            output[app][f'App Hit@{k}'] = app_hit_rate
            print(f'{app} Hit@{k}:', app_hit_rate)
    return output
# export
def get_eval_results(evals, app, item):
    """Print metric ``item`` for every report entry of every bug of ``app``.

    Iterates the nested output of ``evaluate``; entries that do not carry
    ``item`` are skipped (e.g. the per-bug aggregate keys, whose values are
    plain floats and cannot be indexed).
    """
    for bug in evals[app]:
        if bug == 'elapsed_time': continue
        for vid in evals[app][bug]:
            try:
                print(evals[app][bug][vid][item])
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and any real programming error.
            # Only the expected lookup failures are skipped now: KeyError
            # (missing item), TypeError/IndexError (value is a scalar or a
            # sequence without this key).
            except (KeyError, TypeError, IndexError):
                continue
# export
def evaluate_ranking(ranking, ground_truth):
    """Score a single ranked list of documents against a ground-truth set.

    Args:
        ranking: documents in rank order (best first).
        ground_truth: collection of relevant documents.

    Returns:
        dict with 'first_rank', 'recip_rank', 'avg_precision', plus
        rr@k / p@k / r@k for k = 1..10.
    """
    # Binary relevance vector aligned with the ranking.
    relevance = []
    for doc in ranking:
        if doc in ground_truth:
            relevance.append(1)
        else:
            relevance.append(0)
    r = np.asarray(relevance)
    # NOTE(review): raises IndexError when no ranked document is in
    # ground_truth — confirm callers guarantee at least one relevant doc.
    first_rank = int(r.nonzero()[0][0] + 1)
    avg_precision = average_precision(r)
    recip_rank = 1 / first_rank
    ranks = []
    precisions = []
    recalls = []
    limit = 10
    for k in range(1, limit + 1):
        # 1 when the first relevant doc appears within the top k.
        ranks.append(1 if first_rank <= k else 0)
        # NOTE(review): precision_at_k raises ValueError when len(ranking) < k,
        # so rankings shorter than `limit` (10) make this loop fail — verify
        # callers always pass at least 10 candidates.
        precisions.append(precision_at_k(r, k))
        recalls.append(recall_at_k(r, k, len(ground_truth)))
    results = {
        'first_rank': first_rank,
        'recip_rank': recip_rank,
        'avg_precision': avg_precision
    }
    # Flatten the per-k lists into keyed entries: rr@k, p@k, r@k.
    for i in range(limit):
        k = i + 1
        results["rr@" + str(k)] = ranks[i]
        results["p@" + str(k)] = precisions[i]
        results["r@" + str(k)] = recalls[i]
    return results
# Export the cells marked with #export into the tango package (nbdev build step).
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D5_DimensionalityReduction/student/W1D5_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 1, Day 5, Tutorial 1
# Dimensionality Reduction: Geometric view of data
---
Tutorial objectives
In this notebook we'll explore how multivariate data can be represented in different orthonormal bases. This will help us build intuition that will be helpful in understanding PCA in the following tutorial.
Steps:
1. Generate correlated multivariate data.
2. Define an arbitrary orthonormal basis.
3. Project data onto new basis.
---
```
#@title Video: Geometric view of data
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="emLW0F-VUag", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
# Setup
Run these cells to get the tutorial started.
```
#library imports
import time # import time
import numpy as np # import numpy
import scipy as sp # import scipy
import math # import basic math functions
import random # import basic random number generator functions
import matplotlib.pyplot as plt # import matplotlib
from IPython import display
#@title Figure Settings
%matplotlib inline
fig_w, fig_h = (8, 8)
plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})
plt.style.use('ggplot')
%config InlineBackend.figure_format = 'retina'
#@title Helper functions
def get_data(cov_matrix):
    """Draw 1000 samples from a zero-mean bivariate Gaussian.

    The returned samples are sorted in ascending order of the first
    random variable.

    Args:
        cov_matrix (numpy array of floats): desired 2x2 covariance matrix

    Returns:
        (numpy array of floats): (1000, 2) sample matrix, one column per
        random variable
    """
    zero_mean = np.array([0, 0])
    X = np.random.multivariate_normal(zero_mean, cov_matrix, size=1000)
    # Reorder rows so the first column is ascending.
    return X[np.argsort(X[:, 0]), :]
def plot_data(X):
    """
    Plots bivariate data. Includes a plot of each random variable, and a scatter
    plot of their joint activity. The title indicates the sample correlation
    calculated from the data.

    Args:
      X (numpy array of floats): Data matrix
        each column corresponds to a different random variable

    Returns:
      Nothing.
    """
    fig = plt.figure(figsize=[8,4])
    gs = fig.add_gridspec(2,2)
    # Top-left panel: time series of the first variable.
    ax1 = fig.add_subplot(gs[0,0])
    ax1.plot(X[:,0],color='k')
    plt.ylabel('Neuron 1')
    plt.title('Sample var 1: {:.1f}'.format(np.var(X[:,0])))
    ax1.set_xticklabels([])
    # Bottom-left panel: time series of the second variable.
    ax2 = fig.add_subplot(gs[1,0])
    ax2.plot(X[:,1],color='k')
    plt.xlabel('Sample Number')
    plt.ylabel('Neuron 2')
    plt.title('Sample var 2: {:.1f}'.format(np.var(X[:,1])))
    # Right panel: joint scatter of the two variables (spans both rows).
    ax3 = fig.add_subplot(gs[:, 1])
    ax3.plot(X[:,0],X[:,1],'.',markerfacecolor=[.5,.5,.5], markeredgewidth=0)
    ax3.axis('equal')
    plt.xlabel('Neuron 1 activity')
    plt.ylabel('Neuron 2 activity')
    plt.title('Sample corr: {:.1f}'.format(np.corrcoef(X[:,0],X[:,1])[0,1]))
def plot_basis_vectors(X,W):
    """
    Plots bivariate data as well as new basis vectors.

    Args:
      X (numpy array of floats): Data matrix
        each column corresponds to a different random variable
      W (numpy array of floats): Square matrix representing new orthonormal basis
        each column represents a basis vector

    Returns:
      Nothing.
    """
    plt.figure(figsize=[4,4])
    plt.plot(X[:,0],X[:,1],'.',color=[.5,.5,.5],label='Data')
    plt.axis('equal')
    plt.xlabel('Neuron 1 activity')
    plt.ylabel('Neuron 2 activity')
    # Draw each basis vector as a line segment from the origin
    # (columns of W are the basis vectors).
    plt.plot([0,W[0,0]],[0,W[1,0]],color='r',linewidth=3,label = 'Basis vector 1')
    plt.plot([0,W[0,1]],[0,W[1,1]],color='b',linewidth=3,label = 'Basis vector 2')
    plt.legend()
def plot_data_new_basis(Y):
    """
    Plots bivariate data after transformation to new bases. Similar to plot_data
    but with colors corresponding to projections onto basis 1 (red) and basis 2
    (blue). The title indicates the sample correlation calculated from the data.

    Note that samples are expected in ascending order of the first random
    variable of the original data.

    Args:
      Y (numpy array of floats): Data matrix in new basis
        each column corresponds to a different random variable

    Returns:
      Nothing.
    """
    fig = plt.figure(figsize=[8,4])
    gs = fig.add_gridspec(2,2)
    # Top-left: projection onto basis vector 1 (red).
    ax1 = fig.add_subplot(gs[0,0])
    ax1.plot(Y[:,0],'r')
    # Fix: removed a stray bare `plt.xlabel` statement here — it referenced
    # the function without calling it, so it was a silent no-op.
    plt.ylabel('Projection \n basis vector 1')
    plt.title('Sample var 1: {:.1f}'.format(np.var(Y[:,0])))
    ax1.set_xticklabels([])
    # Bottom-left: projection onto basis vector 2 (blue).
    ax2 = fig.add_subplot(gs[1,0])
    ax2.plot(Y[:,1],'b')
    plt.xlabel('Sample number')
    plt.ylabel('Projection \n basis vector 2')
    plt.title('Sample var 2: {:.1f}'.format(np.var(Y[:,1])))
    # Right: joint scatter in the new basis (spans both rows).
    ax3 = fig.add_subplot(gs[:, 1])
    ax3.plot(Y[:,0],Y[:,1],'.',color=[.5,.5,.5])
    ax3.axis('equal')
    plt.xlabel('Projection basis vector 1')
    plt.ylabel('Projection basis vector 2')
    plt.title('Sample corr: {:.1f}'.format(np.corrcoef(Y[:,0],Y[:,1])[0,1]))
```
# Generate correlated multivariate data
```
#@title Video: Multivariate data
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="YOan2BQVzTQ", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
To study multivariate data, first we generate it. In this exercise we generate data from a *bivariate normal distribution*. This is an extension of the one-dimensional normal distribution to two dimensions, in which each $x_i$ is marginally normal with mean $\mu_i$ and variance $\sigma_i^2$:
\begin{align}
x_i \sim \mathcal{N}(\mu_i,\sigma_i^2)
\end{align}
Additionally, the joint distribution for $x_1$ and $x_2$ has a specified correlation coefficient $\rho$. Recall that the correlation coefficient is a normalized version of the covariance, and ranges between -1 and +1.
\begin{align}
\rho = \frac{\text{cov}(x_1,x_2)}{\sqrt{\sigma_1^2 \sigma_2^2}}
\end{align}
For simplicity, we will assume that the mean of each variable has already been subtracted, so that $\mu_i=0$. The remaining parameters can be summarized in the covariance matrix:
\begin{equation*}
{\bf \Sigma} =
\begin{pmatrix}
\text{var}(x_1) & \text{cov}(x_1,x_2) \\
\text{cov}(x_1,x_2) &\text{var}(x_2)
\end{pmatrix}
\end{equation*}
Note that this is a symmetric matrix with the variances $\text{var}(x_i) = \sigma_i^2$ on the diagonal, and the covariance on the off-diagonal.
### Exercise
We have provided code to draw random samples from a zero-mean bivariate normal distribution. These samples could be used to simulate changes in firing rates for two neurons. Fill in the function below to calculate the covariance matrix given the desired variances and correlation coefficient. The covariance can be found by rearranging the equation above:
\begin{align}
\text{cov}(x_1,x_2) = \rho \sqrt{\sigma_1^2 \sigma_2^2}
\end{align}
Use these functions to generate and plot data while varying the parameters. You should get a feel for how changing the correlation coefficient affects the geometry of the simulated data.
**Suggestions**
* Fill in the function `calculate_cov_matrix` to calculate the covariance.
* Generate and plot the data for $\sigma_1^2 =1$, $\sigma_2^2 =1$, and $\rho = .8$. Try plotting the data for different values of the correlation coefficient: $\rho = -1, -.5, 0, .5, 1$.
```
help(plot_data)
help(get_data)
def calculate_cov_matrix(var_1,var_2,corr_coef):
    """
    Calculates the covariance matrix based on the variances and correlation
    coefficient.

    Args:
      var_1 (scalar): variance of the first random variable
      var_2 (scalar): variance of the second random variable
      corr_coef (scalar): correlation coefficient

    Returns:
      (numpy array of floats) : covariance matrix
    """
    ###################################################################
    ## Insert your code here to:
    ## calculate the covariance from the variances and correlation
    # cov = ...
    # NOTE(review): `cov` is undefined until the student fills it in, so the
    # next line raises NameError if the NotImplementedError is removed
    # without also defining cov above.
    cov_matrix = np.array([[var_1,cov],[cov,var_2]])
    #uncomment once you've filled in the function
    raise NotImplementedError("Student excercise: calculate the covariance matrix!")
    ###################################################################
    # NOTE(review): this returns `cov`, but the matrix is assembled in
    # `cov_matrix` above — the completed solution should presumably return
    # cov_matrix; verify against the posted solution link.
    return cov
###################################################################
## Insert your code here to:
## generate and plot bivariate Gaussian data with variances of 1
## and a correlation coefficients of: 0.8
## repeat while varying the correlation coefficient from -1 to 1
###################################################################
# Exercise parameters: unit variances with strong positive correlation.
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
#uncomment to test your code and plot
#cov_matrix = calculate_cov_matrix(variance_1,variance_2,corr_coef)
#X = get_data(cov_matrix)
#plot_data(X)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_62df7ae6.py)
*Example output:*
<img alt='Solution hint' align='left' width=510 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_62df7ae6_0.png>
# Define a new orthonormal basis
```
#@title Video: Orthonormal bases
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="dK526Nbn2Xo", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Next, we will define a new orthonormal basis of vectors ${\bf u} = [u_1,u_2]$ and ${\bf w} = [w_1,w_2]$. As we learned in the video, two vectors are orthonormal if:
1. They are orthogonal (i.e., their dot product is zero):
\begin{equation}
{\bf u\cdot w} = u_1 w_1 + u_2 w_2 = 0
\end{equation}
2. They have unit length:
\begin{equation}
||{\bf u} || = ||{\bf w} || = 1
\end{equation}
In two dimensions, it is easy to make an arbitrary orthonormal basis. All we need is a random vector ${\bf u}$, which we have normalized. If we now define the second basis vector to be ${\bf w} = [-u_2,u_1]$, we can check that both conditions are satisfied:
\begin{equation}
{\bf u\cdot w} = - u_1 u_2 + u_2 u_1 = 0
\end{equation}
and
\begin{equation}
{|| {\bf w} ||} = \sqrt{(-u_2)^2 + u_1^2} = \sqrt{u_1^2 + u_2^2} = 1,
\end{equation}
where we used the fact that ${\bf u}$ is normalized. So, with an arbitrary input vector, we can define an orthonormal basis, which we will write in matrix form by stacking the basis vectors horizontally:
\begin{equation}
{{\bf W} } =
\begin{pmatrix}
u_1 & w_1 \\
u_2 & w_2
\end{pmatrix}.
\end{equation}
### Exercise
In this exercise you will fill in the function below to define an orthonormal basis, given a single arbitrary 2-dimensional vector as an input.
**Suggestions**
* Modify the function `define_orthonormal_basis` to first normalize the first basis vector $\bf u$.
* Then complete the function by finding a basis vector $\bf w$ that is orthogonal to $\bf u$.
* Test the function using initial basis vector ${\bf u} = [3,1]$. Plot the resulting basis vectors on top of the data scatter plot using the function `plot_basis_vectors`. (For the data, use $\sigma_1^2 =1$, $\sigma_2^2 =1$, and $\rho = .8$).
```
help(plot_basis_vectors)
def define_orthonormal_basis(u):
    """
    Calculates an orthonormal basis given an arbitrary vector u.

    Args:
      u (numpy array of floats): arbitrary 2-dimensional vector used for new basis

    Returns:
      (numpy array of floats) : new orthonormal basis
        columns correspond to basis vectors
    """
    ###################################################################
    ## Insert your code here to:
    ## normalize vector u
    ## calculate vector w that is orthogonal to u
    #u = ....
    #w = ...
    #W = np.column_stack((u,w))
    #comment this once you've filled the function
    raise NotImplementedError("Student excercise: implement the orthonormal basis function")
    ###################################################################
    return W
# Generate correlated data and an initial (unnormalized) basis direction.
variance_1 = 1
variance_2 = 1
corr_coef = 0.8
# NOTE(review): this call raises NotImplementedError until the
# calculate_cov_matrix exercise above has been completed.
cov_matrix = calculate_cov_matrix(variance_1,variance_2,corr_coef)
X = get_data(cov_matrix)
u = np.array([3,1])
#uncomment and run below to plot the basis vectors
##define_orthonormal_basis(u)
#plot_basis_vectors(X,W)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_c9ca4afa.py)
*Example output:*
<img alt='Solution hint' align='left' width=286 height=281 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_c9ca4afa_0.png>
# Project data onto new basis
```
#@title Video: Change of basis
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="5MWSUtpbSt0", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
Finally, we will express our data in the new basis that we have just found. Since $\bf W$ is orthonormal, we can project the data into our new basis using simple matrix multiplication :
\begin{equation}
{\bf Y = X W}.
\end{equation}
We will explore the geometry of the transformed data $\bf Y$ as we vary the choice of basis.
#### Exercise
In this exercise you will fill in the function below to project the data onto the new orthonormal basis.
**Suggestions**
* Complete the function `change_of_basis` to project the data onto the new basis.
* Plot the projected data using the function `plot_data_new_basis`.
* What happens to the correlation coefficient in the new basis? Does it increase or decrease?
* What happens to variance?
```
def change_of_basis(X,W):
    """
    Projects data onto new basis W.

    Args:
      X (numpy array of floats) : Data matrix
        each column corresponding to a different random variable
      W (numpy array of floats): new orthonormal basis
        columns correspond to basis vectors

    Returns:
      (numpy array of floats) : Data matrix expressed in new basis
    """
    ###################################################################
    ## Insert your code here to:
    ## project data onto new basis described by W
    ## (hint: since W is orthonormal, this is a single matrix product)
    #Y = ...
    #comment this once you've filled the function
    raise NotImplementedError("Student excercise: implement change of basis")
    ###################################################################
    return Y
# Uncomment below to transform the data by projecting it into the new basis
## Plot the projected data
# Y = change_of_basis(X,W)
# plot_data_new_basis(Y)
# disp(...)
```
[*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W1D5_DimensionalityReduction/solutions/W1D5_Tutorial1_Solution_b434bc0d.py)
*Example output:*
<img alt='Solution hint' align='left' width=544 height=303 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W1D5_DimensionalityReduction/static/W1D5_Tutorial1_Solution_b434bc0d_0.png>
#### Exercise
To see what happens to the correlation as we change the basis vectors, run the cell below. The parameter $\theta$ controls the angle of $\bf u$ in degrees. Use the slider to rotate the basis vectors.
**Questions**
* What happens to the projected data as you rotate the basis?
* How does the correlation coefficient change? How does the variance of the projection onto each basis vector change?
* Are you able to find a basis in which the projected data is uncorrelated?
```
###### MAKE SURE TO RUN THIS CELL VIA THE PLAY BUTTON TO ENABLE SLIDERS ########
import ipywidgets as widgets
def refresh(theta = 0):
    # Build a direction at angle `theta` (degrees) — x-component fixed at 1,
    # so define_orthonormal_basis is expected to normalize it — then re-plot
    # the data (notebook global X) in that basis.
    u = [1,np.tan(theta * np.pi/180.)]
    W = define_orthonormal_basis(u)
    Y = change_of_basis(X,W)
    plot_basis_vectors(X,W)
    plot_data_new_basis(Y)
# Interactive slider over the basis angle, in 5-degree steps.
_ = widgets.interact(refresh,
                     theta = (0, 90, 5))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/Tharushal/Automated-diagnosis-system-using-A.I/blob/master/Final_Project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Install Faiss,TF,git datasets
```
#To use CPU FAISS use
!wget https://anaconda.org/pytorch/faiss-cpu/1.2.1/download/linux-64/faiss-cpu-1.2.1-py36_cuda9.0.176_1.tar.bz2
#To use GPU FAISS use
# !wget https://anaconda.org/pytorch/faiss-gpu/1.2.1/download/linux-64/faiss-gpu-1.2.1-py36_cuda9.0.176_1.tar.bz2
!tar xvjf faiss-cpu-1.2.1-py36_cuda9.0.176_1.tar.bz2
!cp -r lib/python3.6/site-packages/* /usr/local/lib/python3.6/dist-packages/
!pip install mkl
!pip install tensorflow
!pip install tensorflow-gpu==2.0.0-alpha0
!pip install https://github.com/re-search/DocProduct/archive/v0.2.0_dev.zip
!pip install gpt2-estimator
!pip install pyarrow
import tensorflow as tf
!pip install flask-ngrok
!pip install Flask-JSON
```
Download all model checkpoints and question/answer data.
```
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) file from Google Drive to ``destination``.

    Handles Google's "can't scan for viruses" interstitial: when the first
    response carries a confirmation cookie, the request is re-issued with
    the confirmation token attached.
    """
    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': id}, stream=True)
    token = get_confirm_token(response)
    if token:
        confirmed_params = {'id': id, 'confirm': token}
        response = session.get(URL, params=confirmed_params, stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, or None."""
    return next(
        (value for key, value in response.cookies.items()
         if key.startswith('download_warning')),
        None,
    )
def save_response_content(response, destination):
    """Stream the HTTP response body to ``destination`` in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out:
        for chunk in response.iter_content(chunk_size):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if not chunk:
                continue
            out.write(chunk)
import os
import requests
import urllib.request
# Download and unpack the pre-trained BioBERT weights.
# Download the file from `url` and save it locally under `file_name`:
urllib.request.urlretrieve('https://github.com/naver/biobert-pretrained/releases/download/v1.0-pubmed-pmc/biobert_v1.0_pubmed_pmc.tar.gz', 'BioBert.tar.gz')
if not os.path.exists('BioBertFolder'):
    os.makedirs('BioBertFolder')
import tarfile
tar = tarfile.open("BioBert.tar.gz")
tar.extractall(path='BioBertFolder/')
tar.close()
# Pre-computed answer embeddings, fetched from Google Drive by file id.
file_id = '1uCXv6mQkFfpw5txGnVCsl93Db7t5Z2mp'
download_file_from_google_drive(file_id, 'Float16EmbeddingsExpanded5-27-19.pkl')
# Model checkpoint + Q&A data archive (direct OneDrive download URL, despite
# the variable name `file_id`).
file_id = 'https://onedrive.live.com/download?cid=9DEDF3C1E2D7E77F&resid=9DEDF3C1E2D7E77F%2132792&authkey=AEQ8GtkcDbe3K98'
urllib.request.urlretrieve( file_id, 'DataAndCheckpoint.zip')
if not os.path.exists('newFolder'):
    os.makedirs('newFolder')
import zipfile
zip_ref = zipfile.ZipFile('DataAndCheckpoint.zip', 'r')
zip_ref.extractall('newFolder')
zip_ref.close()
```
Load model weights and Q&A data.
```
from docproduct.predictor import RetreiveQADoc
# Paths to the extracted BioBERT weights, the fine-tuned BERT-FFN checkpoint,
# and the pre-computed answer embeddings downloaded above.
pretrained_path = 'BioBertFolder/biobert_v1.0_pubmed_pmc/'
# ffn_weight_file = None
bert_ffn_weight_file = 'newFolder/models/bertffn_crossentropy/bertffn'
embedding_file = 'Float16EmbeddingsExpanded5-27-19.pkl'
# Build the retrieval QA engine used by the Flask endpoint below.
doc = RetreiveQADoc(pretrained_path=pretrained_path,
                    ffn_weight_file=None,
                    bert_ffn_weight_file=bert_ffn_weight_file,
                    embedding_file=embedding_file)
```
Type in your question
```
from flask_ngrok import run_with_ngrok
from flask import Flask
from flask import jsonify
app = Flask(__name__)
run_with_ngrok(app) #starts ngrok when the app is run
# NOTE(review): `result` is module-level shared state rebuilt on every
# request; concurrent requests would interleave — confirm single-threaded use.
result = ""
@app.route('/<string:question_text>', methods=['GET'])
def get_answer(question_text):
    """GET endpoint: return the top retrieved answers for the question as JSON."""
    global result
    result=[]
    def get_result():
        # Retrieval configuration (originally Colab #@param form fields).
        search_similarity_by = 'answer' #@param ['answer', "question"]
        number_results_toReturn=10 #@param {type:"number"}
        answer_only=True #@param ["False", "True"] {type:"raw"}
        # `doc` is the RetreiveQADoc built in the cell above.
        returned_results = doc.predict( question_text ,
        search_by=search_similarity_by, topk=number_results_toReturn, answer_only=answer_only)
        #print('')
        for jk in range(len(returned_results)):
            #print("Result ", jk+1)
            #print(returned_results[jk])
            global result
            result.append(returned_results[jk])
            #print('')
        return result
    return jsonify({"result": get_result()})
app.run()
```
| github_jupyter |
# Django UnChained
<img src="images/django.jpg">
# View
<img src="https://mdn.mozillademos.org/files/13931/basic-django.png">
# EXP1
# URLs
```
from django.conf.urls import url
from . import views
# URLconf for the polls app; question_id is captured as a numeric group
# and passed to the view as a keyword argument.
app_name = 'polls'
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^(?P<question_id>[0-9]+)/$', views.detail, name='detail'),
    url(r'^(?P<question_id>[0-9]+)/results/$', views.results, name='results'),
    url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
```
# models
```
from django.db import models
class Question(models.Model):
    # The poll question text and its publication timestamp.
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')
class Choice(models.Model):
    # Each choice belongs to exactly one question; deleting the question
    # cascades to its choices. `votes` tallies ballots for this choice.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
```
# HTTP Request, HTTP Response
```
from django.http import HttpResponse
def index(request):
    """Plain-text placeholder for the polls index page."""
    return HttpResponse("Hello, world. You're at the polls index.")
def detail(request, question_id):
    """Plain-text placeholder for one question's detail page."""
    return HttpResponse("You're looking at question %s." % question_id)
def results(request, question_id):
    """Plain-text placeholder for one question's results page."""
    response = "You're looking at the results of question %s."
    return HttpResponse(response % question_id)
def vote(request, question_id):
    """Plain-text placeholder for one question's voting page."""
    return HttpResponse("You're voting on question %s." % question_id)
```
# views and models
```
def list_view(request):
    # Example list view: fetch every object of a model.
    objs=models.ModelName.objects.all()
    # NOTE(review): the format string has no %s placeholder, so applying
    # `%objs` raises TypeError at runtime — verify against the intended
    # tutorial snippet.
    return HttpResponse("You're looking at list_view" %objs)
from django.shortcuts import get_object_or_404
def detail_view(request, pk):
    # Example detail view: return 404 when the object is missing.
    obj = get_object_or_404(ModelName, pk=pk)
    # NOTE(review): HttpResponse takes one content argument; the extra
    # pk/obj positional args land on content_type/status — confirm intent.
    return HttpResponse("You're looking at detail_view", pk, obj)
def index(request):
    # Render the five most recent questions as a comma-separated string.
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    output = ', '.join([q.question_text for q in latest_question_list])
    return HttpResponse(output)
```
# views and templates
```
def index(request):
    # Template-loader version: load polls/index.html and render it explicitly.
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    template = loader.get_template('polls/index.html')
    context = {
        'latest_question_list': latest_question_list,
    }
    return HttpResponse(template.render(context, request))
def index(request):
    # Shortcut version of the same view using render(); in a tutorial cell
    # both definitions are shown, the second shadows the first.
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    context = {'latest_question_list': latest_question_list}
    return render(request, 'polls/index.html', context)
{% if latest_question_list %}
<ul>
{% for question in latest_question_list %}
<li><a href="/polls/{{ question.id }}/">{{ question.question_text }}</a></li>
<li><a href="{% url 'detail' question.id %}">{{ question.question_text }}</a></li>
<li><a href="{% url 'polls:detail' question.id %}">{{ question.question_text }}</a></li>
{% endfor %}
</ul>
{% else %}
<p>No polls are available.</p>
{% endif %}
```
# detail_view
```
from django.shortcuts import render
def detail_view(request, pk):
    # Generic pattern: fetch-or-404, then render a template with the object.
    obj = get_object_or_404(ModelName, pk=pk)
    return render(request, 'app/template.html', {'obj': obj})
def detail(request, question_id):
    # Polls detail view: show one question and its choices.
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/detail.html', {'question': question})
<h1>{{ question.question_text }}</h1>
<ul>
{% for choice in question.choice_set.all %}
<li>{{ choice.choice_text }}</li>
{% endfor %}
</ul>
```
# Forms
```
<h1>{{ question.question_text }}</h1>
{% if error_message %}<p><strong>{{ error_message }}</strong></p>{% endif %}
<form action="{% url 'polls:vote' question.id %}" method="post">
{% csrf_token %}
{% for choice in question.choice_set.all %}
<input type="radio" name="choice" id="choice{{ forloop.counter }}" value="{{ choice.id }}" />
<label for="choice{{ forloop.counter }}">{{ choice.choice_text }}</label><br />
{% endfor %}
<input type="submit" value="Vote" />
</form>
def vote(request, question_id):
    """Record a vote for the submitted choice, then redirect to the results page."""
    question = get_object_or_404(Question, pk=question_id)
    try:
        # KeyError when the form posted no 'choice'; DoesNotExist when the
        # posted id matches no choice of this question.
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
```
# Final Step
```
def results(request, question_id):
    """Render the results page showing vote counts for a Question."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': question})
<h1>{{ question.question_text }}</h1>
<ul>
{% for choice in question.choice_set.all %}
<li>{{ choice.choice_text }} -- {{ choice.votes }} vote{{ choice.votes|pluralize }}</li>
{% endfor %}
</ul>
<a href="{% url 'polls:detail' question.id %}">Vote again?</a>
```
# Built-in class-based views API
### Base vs Generic views¶
Base class-based views can be thought of as parent views, which can be used by themselves or inherited from. They may not provide all the capabilities required for projects, in which case there are Mixins which extend what base views can do.
Django’s generic views are built off of those base views, and were developed as a shortcut for common usage patterns such as displaying the details of an object. They take certain common idioms and patterns found in view development and abstract them so that you can quickly write common views of data without having to repeat yourself.
https://docs.djangoproject.com/en/2.0/ref/class-based-views/
# Generic display views
```
class IndexView(generic.ListView):
    """List the five most recently published questions."""

    # Override the default template name (polls/question_list.html).
    template_name = 'polls/index.html'
    # Name under which the queryset is exposed to the template.
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Display a single Question; the pk is captured from the URL."""

    model = Question
    template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
    """Display the vote tallies for a single Question."""

    model = Question
    template_name = 'polls/results.html'
app_name = 'polls'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
]
```
| github_jupyter |
# Introduction to C++
## Hello world
There are many lessons in writing a simple "Hello world" program
- C++ programs are normally written using a text editor or integrated development environment (IDE) - we use the %%file magic to simulate this
- The #include statement literally pulls in and prepends the source code from the `iostream` header file
- Types must be declared - note the function return type is `int`
- There is a single function called `main` - every program has `main` as the entry point although you can write libraries without a `main` function
- Notice the use of braces to delimit blocks
- Notice the use of semi-colons to delimit expressions
- Unlike Python, white space is not used to delimit blocks or expressions only tokens
- Note the use of the `std` *namespace* - this is similar to Python except C++ uses `::` rather than `.` (like R)
- The I/O shown here uses *streaming* via the `<<` operator to send output to `cout`, which is the name for the standard output
- `std::endl` provides a line break and flushes the output buffer
```
%%file hello.cpp
#include <iostream>
int main() {
std::cout << "Hello, world!" << std::endl;
}
```
### Compilation
- The source file must be compiled to machine code before it can be executed
- Compilation is done with a C++ compiler - here we use one called `g++`
- By default, the output of compilation is called `a.out` - we use `-o` to change the output executable filename to `hello.exe`
- Note the use of `.exe` is a Windows convention; Unix executables typically have no extension - for example, just the name `hello`
```
%%bash
g++ hello.cpp -o hello.exe
```
### Execution
```
%%bash
./hello.exe
```
### C equivalent
Before we move on, we briefly show the similar `Hello world` program in C. C is a precursor to C++ that is still widely used. While C++ is derived from C, it is a much richer and more complex language. We focus on C++ because the intent is to show how to wrap C++ code using `pybind11` and take advantage of C++ numerical libraries that do not exist in C.
```
%%file hello01.c
#include <stdio.h>
int main() {
printf("Hello, world from C!\n");
}
%%bash
gcc hello01.c
%%bash
./a.out
```
## Namespaces
Just like Python, C++ has namespaces that allow us to build large libraries without worrying about name collisions. In the `Hello world` program, we used the explicit name `std::cout` indicating that `cout` is a member of the standard workspace. We can also use the `using` keyword to import selected functions or classes from a namespace.
```c++
using std::cout;
int main()
{
cout << "Hello, world!\n";
}
```
For small programs, we sometimes import the entire namespace for convenience, but this may cause namespace collisions in larger programs.
```c++
using namespace std;
int main()
{
cout << "Hello, world!\n";
}
```
You can easily create your own namespace.
```c++
namespace sta_663 {
const double pi=2.14159;
void greet(string name) {
cout << "\nTraditional first program\n";
cout << "Hello, " << name << "\n";
}
}
int main()
{
cout << "\nUsing namespaces\n";
string name = "Tom";
cout << sta_663::pi << "\n";
sta_663::greet(name);
}
```
#### Using qualified imports
```
%%file hello02.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
cout << "Hello, world!" << endl;
}
%%bash
g++ hello02.cpp -o hello02
%%bash
./hello02
```
#### Global imports of a namespace
Wholesale imports of namespace is generally frowned upon, similar to how `from X import *` is frowned upon in Python.
```
%%file hello03.cpp
#include <iostream>
using namespace std;
int main() {
cout << "Hello, world!" << endl;
}
%%bash
g++ hello03.cpp -o hello03
%%bash
./hello03
```
## Types
```
%%file dtypes.cpp
#include <iostream>
#include <complex>
using std::cout;
int main() {
// Boolean
bool a = true, b = false;
cout << "and " << (a and b) << "\n";
cout << "&& " << (a && b) << "\n";
cout << "or " << (a or b) << "\n";
cout << "|| " << (a || b) << "\n";
cout << "not " << not (a or b) << "\n";
cout << "! " << !(a or b) << "\n";
// Integral numbers
cout << "char " << sizeof(char) << "\n";
cout << "short int " << sizeof(short int) << "\n";
cout << "int " << sizeof(int) << "\n";
cout << "long " << sizeof(long) << "\n";
// Floating point numbers
cout << "float " << sizeof(float) << "\n";
cout << "double " << sizeof(double) << "\n";
cout << "long double " << sizeof(long double) << "\n";
cout << "complex double " << sizeof(std::complex<double>) << "\n";
// Characters and strings
char c = 'a'; // Note single quotes
char word[] = "hello"; // C char arrays
std::string s = "hello"; // C++ string
cout << c << "\n";
cout << word << "\n";
cout << s << "\n";
}
%%bash
g++ dtypes.cpp -o dtypes.exe
./dtypes.exe
```
## Type conversions
Converting between types can get pretty complicated in C++. We will show some simple versions.
```
%%file type.cpp
#include <iostream>
using std::cout;
using std::string;
using std::stoi;
int main() {
char c = '3'; // A char is an integer type
string s = "3"; // A string is not an integer type
int i = 3;
float f = 3.1;
double d = 3.2;
cout << c << "\n";
cout << i << "\n";
cout << f << "\n";
cout << d << "\n";
cout << "c + i is " << c + i << "\n";
cout << "c + i is " << c - '0' + i << "\n";
// Casting string to number
cout << "s + i is " << stoi(s) + i << "\n"; // Use std::stod to convert to double
// Two ways to cast float to int
cout << "f + i is " << f + i << "\n";
cout << "f + i is " << int(f) + i << "\n";
cout << "f + i is " << static_cast<int>(f) + i << "\n";
}
%%bash
g++ -o type.exe type.cpp -std=c++14
%%bash
./type.exe
```
## Header, source, and driver files
C++ allows separate compilation of functions and programs that use those functions. The way it does this is to write functions in *source* files that can be compiled. To use these compiled functions, the calling program includes *header* files that contain the function signatures - this provides enough information for the compiler to link to the compiled function machine code when executing the program.
- Here we show a toy example of typical C++ program organization
- We build a library of math functions in `my_math.cpp`
- We add a header file for the math functions in `my_math.hpp`
- We build a library of stats functions in `my_stats.cpp`
- We add a header file for the stats functions in `my_stats.hpp`
- We write a program that uses math and stats functions called `my_driver.cpp`
- We pull in the function signatures with `#include` for the header files
- Once you understand the code, move on to see how compilation is done
- Note that it is customary to include the header file in the source file itself to let the compiler catch any mistakes in the function signatures
```
%%file my_math.hpp
#pragma once
int add(int a, int b);
int multiply(int a, int b);
%%file my_math.cpp
#include "my_math.hpp"
int add(int a, int b) {
return a + b;
}
int multiply(int a, int b) {
return a * b;
}
%%file my_stats.hpp
#pragma once
int mean(int xs[], int n);
%%file my_stats.cpp
#include "my_stats.hpp"  // include the *matching* header (was my_math.hpp)
                         // so the compiler checks the signature, as the
                         // surrounding text describes

// Return the integer mean of the first n elements of xs.
// Accumulates in a double, but the final division truncates toward
// zero because the return type is int.
int mean(int xs[], int n) {
    if (n <= 0) {
        return 0;  // guard: avoid division by zero on empty input
    }
    double s = 0;
    for (int i=0; i<n; i++) {
        s += xs[i];
    }
    return s/n;
}
%%file my_driver.cpp
#include <iostream>
#include "my_math.hpp"
#include "my_stats.hpp"
int main() {
int xs[] = {1,2,3,4,5};
int n = 5;
int a = 3, b= 4;
std::cout << "sum = " << add(a, b) << "\n";
std::cout << "prod = " << multiply(a, b) << "\n";
std::cout << "mean = " << mean(xs, n) << "\n";
}
```
Compilation
- Notice in the first 2 compile statements, that the source files are compiled to *object* files with default extension `.o` by using the flag `-c`
- The 3rd compile statement builds an *executable* by linking the `main` file with the recently created object files
- The function signatures in the included header files tells the compiler how to match the function calls `add`, `multiply` and `mean` with the matching compiled functions
```
%%bash
g++ -c my_math.cpp
g++ -c my_stats.cpp
g++ my_driver.cpp my_math.o my_stats.o
%%bash
./a.out
```
### Using `make`
As building C++ programs can quickly become quite complicated, there are *builder* programs that help simplify this task. One of the most widely used is `make`, which uses a file normally called `Makefile` to coordinate the instructions for building a program
- Note that `make` can be used for more than compiling programs; for example, you can use it to automatically rebuild tables and figures for a manuscript whenever the data is changed
- Another advantage of `make` is that it keeps track of dependencies, and only re-compiles files that have changed or depend on another changed file since the last compilation
We will build a simple `Makefile` to build the `my_driver` executable:
- Each section consists of a make target denoted by `<target>:` followed by files the target depends on
- The next line is the command given to build the target. This must begin with a TAB character (it MUST be a TAB and not spaces)
- If a target has dependencies that are not met, `make` will see if each dependency itself is a target and build that first
- It uses timestamps to decide whether to rebuild a target (not actual content changes)
- By default, `make` builds the first target, but can also build named targets
How to get the TAB character. Copy and paste the blank space between `a` and `b`.
```
! echo "a\tb"
%%file Makefile
driver: my_math.o my_stats.o
g++ my_driver.cpp my_math.o my_stats.o -o my_driver
my_math.o: my_math.cpp my_math.hpp
g++ -c my_math.cpp
my_stats.o: my_stats.cpp my_stats.hpp
g++ -c my_stats.cpp
```
- We first start with a clean slate
```
%%capture logfile
%%bash
rm *\.o
rm my_driver
%%bash
make
%%bash
./my_driver
```
- Re-building does not trigger re-compilation of source files since the timestamps have not changed
```
%%bash
make
%%bash
touch my_stats.hpp
```
- As `my_stats.hpp` was listed as a dependency of the target `my_stats.o`, `touch`, which updates the timestamp, forces a recompilation of `my_stats.o`
```
%%bash
make
```
#### Use of variables in Makefile
```
%%file Makefile2
CC=g++
CFLAGS=-Wall -std=c++14
driver: my_math.o my_stats.o
$(CC) $(CFLAGS) my_driver.cpp my_math.o my_stats.o -o my_driver2
my_math.o: my_math.cpp my_math.hpp
$(CC) $(CFLAGS) -c my_math.cpp
my_stats.o: my_stats.cpp my_stats.hpp
$(CC) $(CFLAGS) -c my_stats.cpp
```
### Compilation
Note that no re-compilation occurs!
```
%%bash
make -f Makefile2
```
### Execution
```
%%bash
./my_driver2
```
## Input and output
#### Arguments to main
```
%%file main_args.cpp
#include <iostream>
using std::cout;
int main(int argc, char* argv[]) {
for (int i=0; i<argc; i++) {
cout << i << ": " << argv[i] << "\n";
}
}
%%bash
g++ main_args.cpp -o main_args
%%bash
./main_args hello 1 2 3
```
**Exercise**
Write, compile and execute a program called `greet` that when called on the command line with
```bash
greet Santa 3
```
gives the output
```
Hello Santa!
Hello Santa!
Hello Santa!
```
#### Reading from files
```
%%file data.txt
9 6
%%file io.cpp
#include <fstream>
#include "my_math.hpp"
int main() {
std::ifstream fin("data.txt");
std::ofstream fout("result.txt");
double a, b;
fin >> a >> b;
fin.close();
fout << add(a, b) << std::endl;
fout << multiply(a, b) << std::endl;
fout.close();
}
%%bash
g++ io.cpp -o io.exe my_math.cpp
%%bash
./io.exe
! cat result.txt
```
## Arrays
```
%%file array.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
int N = 3;
double counts[N];
counts[0] = 1;
counts[1] = 3;
counts[2] = 3;
double avg = (counts[0] + counts[1] + counts[2])/3;
cout << avg << endl;
}
%%bash
g++ -o array.exe array.cpp
%%bash
./array.exe
```
## Loops
```
%%file loop.cpp
#include <iostream>
using std::cout;
using std::endl;
using std::begin;
using std::end;
int main()
{
int x[] = {1, 2, 3, 4, 5};
cout << "\nTraditional for loop\n";
for (int i=0; i < sizeof(x)/sizeof(x[0]); i++) {
cout << i << endl;
}
cout << "\nUsing iterators\n";
for (auto it=begin(x); it != end(x); it++) {
cout << *it << endl;
}
cout << "\nRanged for loop\n\n";
for (auto const &i : x) {
cout << i << endl;
}
}
%%bash
g++ -o loop.exe loop.cpp -std=c++14
%%bash
./loop.exe
```
## Function arguments
- A value argument means that the argument is copied in the body of the function
- A reference argument means that the address of the value is used in the function. Reference or pointer arguments are used to avoid copying large objects.
```
%%file func_arg.cpp
#include <iostream>
using std::cout;
using std::endl;
// Value parameter
void f1(int x) {
x *= 2;
cout << "In f1 : x=" << x << endl;
}
// Reference parameter
void f2(int &x) {
x *= 2;
cout << "In f2 : x=" << x << endl;
}
/* Note
If you want to avoid side effects
but still use references to avoid a copy operation
use a const refernece like this to indicate that x cannot be changed
void f2(const int &x)
*/
/* Note
Raw pointers are prone to error and
generally avoided in modern C++
See unique_ptr and shared_ptr
*/
// Raw pointer parameter
void f3(int *x) {
*x *= 2;
cout << "In f3 : x=" << *x << endl;
}
int main() {
int x = 1;
cout << "Before f1: x=" << x << "\n";
f1(x);
cout << "After f1 : x=" << x << "\n";
cout << "Before f2: x=" << x << "\n";
f2(x);
cout << "After f2 : x=" << x << "\n";
cout << "Before f3: x=" << x << "\n";
f3(&x);
cout << "After f3 : x=" << x << "\n";
}
%%bash
c++ -o func_arg.exe func_arg.cpp --std=c++14
%%bash
./func_arg.exe
```
## Arrays, pointers and dynamic memory
A pointer is a number that represents an address in computer memory. What is stored at the address is a bunch of binary numbers. How those binary numbers are interpreted depends on the type of the pointer. To get the value at the pointer address, we *dereference* the pointer using `*ptr`. Pointers are often used to indicate the start of a block of values - the name of a plain C-style array is essentially a pointer to the start of the array.
For example, the argument `char** argv` means that `argv` has type pointer to pointer to `char`. The pointer to `char` can be thought of as an array of `char`, hence the argument is also sometimes written as `char* argv[]` to indicate pointer to `char` array. So conceptually, it refers to an array of `char` arrays - or a collection of strings.
We generally avoid using raw pointers in C++, but this is standard in C and you should at least understand what is going on.
In C++, we typically use smart pointers, STL containers or convenient array constructs provided by libraries such as Eigen and Armadillo.
### Pointers and addresses
```
%%file p01.cpp
#include <iostream>
using std::cout;
int main() {
int x = 23;
int *xp;
xp = &x;
cout << "x " << x << "\n";
cout << "Address of x " << &x << "\n";
cout << "Pointer to x " << xp << "\n";
cout << "Value at pointer to x " << *xp << "\n";
}
%%bash
g++ -o p01.exe p01.cpp -std=c++14
./p01.exe
```
### Arrays
```
%%file p02.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
    int xs[] = {1,2,3,4,5};
    int ys[3];
    // Fill only the 3 valid slots of ys. The original loop ran i<5,
    // writing past the end of the 3-element array: undefined behavior.
    for (int i=0; i<3; i++) {
        ys[i] = i*i;
    }
    // Print each array using iterator begin/end pairs.
    for (auto x=begin(xs); x!=end(xs); x++) {
        cout << *x << " ";
    }
    cout << "\n";
    for (auto x=begin(ys); x!=end(ys); x++) {
        cout << *x << " ";
    }
    cout << "\n";
}
%%bash
g++ -o p02.exe p02.cpp -std=c++14
./p02.exe
```
### Dynamic memory
- Use `new` and `delete` for dynamic memory allocation in C++.
- Do not use the C style `malloc`, `calloc` and `free`
- Absolutely never mix the C++ and C style dynamic memory allocation
```
%%file p03.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
// declare memory
int *z = new int; // single integer
*z = 23;
// Allocate on heap
int *zs = new int[3]; // array of 3 integers
for (int i=0; i<3; i++) {
zs[i] = 10*i;
}
cout << *z << "\n";
for (int i=0; i < 3; i++) {
cout << zs[i] << " ";
}
cout << "\n";
// need for manual management of dynamically assigned memory
delete z;
delete[] zs;
}
%%bash
g++ -o p03.exe p03.cpp -std=c++14
./p03.exe
```
### Pointer arithmetic
When you increment or decrement a pointer, it moves to the preceding or next location in memory as appropriate for the pointer type. You can also add or subtract a number, since that is equivalent to multiple increments/decrements. This is known as pointer arithmetic.
```
%%file p04.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
int xs[] = {100,200,300,400,500,600,700,800,900,1000};
cout << xs << ": " << *xs << "\n";
cout << &xs << ": " << *xs << "\n";
cout << &xs[3] << ": " << xs[3] << "\n";
cout << xs+3 << ": " << *(xs+3) << "\n";
}
%%bash
g++ -std=c++11 -o p04.exe p04.cpp
./p04.exe
```
### C style dynamic memory for jagged array ("matrix")
```
%%file p05.cpp
#include <iostream>
using std::cout;
using std::begin;
using std::end;
int main() {
int m = 3;
int n = 4;
int **xss = new int*[m]; // assign memory for m pointers to int
for (int i=0; i<m; i++) {
xss[i] = new int[n]; // assign memory for array of n ints
for (int j=0; j<n; j++) {
xss[i][j] = i*10 + j;
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
cout << xss[i][j] << "\t";
}
cout << "\n";
}
// Free memory
for (int i=0; i<m; i++) {
delete[] xss[i];
}
delete[] xss;
}
%%bash
g++ -std=c++11 -o p05.exe p05.cpp
./p05.exe
```
## Functions
```
%%file func01.cpp
#include <iostream>
double add(double x, double y) {
return x + y;
}
double mult(double x, double y) {
return x * y;
}
int main() {
double a = 3;
double b = 4;
std::cout << add(a, b) << std::endl;
std::cout << mult(a, b) << std::endl;
}
%%bash
g++ -o func01.exe func01.cpp -std=c++14
./func01.exe
```
### Function parameters
In the example below, the space allocated *inside* a function is deleted *outside* the function. Such code in practice will almost certainly lead to memory leakage. This is why C++ functions often put the *output* as an argument to the function, so that all memory allocation can be controlled outside the function.
```
void add(double *x, double *y, double *res, n)
```
```
%%file func02.cpp
#include <iostream>
// Element-wise sum of two length-n arrays.
// Allocates the result with new[]; ownership transfers to the caller,
// who is responsible for freeing it with delete[] (see the caller in
// this same file). This allocation pattern is exactly the book-keeping
// hazard the surrounding text warns about.
double* add(double *x, double *y, int n) {
    double *res = new double[n];
    for (int i=0; i<n; i++) {
        res[i] = x[i] + y[i];
    }
    return res;
}
int main() {
double a[] = {1,2,3};
double b[] = {4,5,6};
int n = 3;
double *c = add(a, b, n);
for (int i=0; i<n; i++) {
std::cout << c[i] << " ";
}
std::cout << "\n";
delete[] c; // Note difficulty of book-keeping when using raw pointers!
}
%%bash
g++ -o func02.exe func02.cpp -std=c++14
./func02.exe
%%file func03.cpp
#include <iostream>
using std::cout;
// Using value
void foo1(int x) {
x = x + 1;
}
// Using pointer
void foo2(int *x) {
*x = *x + 1;
}
// Using ref
void foo3(int &x) {
x = x + 1;
}
int main() {
int x = 0;
cout << x << "\n";
foo1(x);
cout << x << "\n";
foo2(&x);
cout << x << "\n";
foo3(x);
cout << x << "\n";
}
%%bash
g++ -o func03.exe func03.cpp -std=c++14
./func03.exe
```
## Generic programming with templates
In C, you need to write a *different* function for each input type - hence resulting in duplicated code like
```C
int iadd(int a, int b)
float fadd(float a, float b)
```
In C++, you can make functions *generic* by using *templates*.
Note: When you have a template function, the entire function must be written in the header file, and not the source file. Hence, heavily templated libraries are often "header-only".
```
%%file template.cpp
#include <iostream>
// Generic addition: both arguments must share one type T, and T must
// support operator+ (deduced at each call site).
template<typename T>
T add(T a, T b) {
    return a + b;
}
int main() {
int m =2, n =3;
double u = 2.5, v = 4.5;
std::cout << add(m, n) << std::endl;
std::cout << add(u, v) << std::endl;
}
%%bash
g++ -o template.exe template.cpp
%%bash
./template.exe
```
## Anonymous functions
```
%%file lambda.cpp
#include <iostream>
using std::cout;
using std::endl;
int main() {
int a = 3, b = 4;
int c = 0;
// Lambda function with no capture
auto add1 = [] (int a, int b) { return a + b; };
// Lambda function with value capture
auto add2 = [c] (int a, int b) { return c * (a + b); };
// Lambda funciton with reference capture
auto add3 = [&c] (int a, int b) { return c * (a + b); };
// Change value of c after function definition
c += 5;
cout << "Lambda function\n";
cout << add1(a, b) << endl;
cout << "Lambda function with value capture\n";
cout << add2(a, b) << endl;
cout << "Lambda function with reference capture\n";
cout << add3(a, b) << endl;
}
%%bash
c++ -o lambda.exe lambda.cpp --std=c++14
%%bash
./lambda.exe
```
## Function pointers
```
%%file func_pointer.cpp
#include <iostream>
#include <vector>
#include <functional>
using std::cout;
using std::endl;
using std::function;
using std::vector;
int main()
{
cout << "\nUsing generalized function pointers\n";
using func = function<double(double, double)>;
auto f1 = [](double x, double y) { return x + y; };
auto f2 = [](double x, double y) { return x * y; };
auto f3 = [](double x, double y) { return x + y*y; };
double x = 3, y = 4;
vector<func> funcs = {f1, f2, f3,};
for (auto& f : funcs) {
cout << f(x, y) << "\n";
}
}
%%bash
g++ -o func_pointer.exe func_pointer.cpp -std=c++14
%%bash
./func_pointer.exe
```
## Standard template library (STL)
The STL provides templated containers and generic algorithms acting on these containers with a consistent API.
```
%%file stl.cpp
#include <iostream>
#include <vector>
#include <map>
#include <unordered_map>
using std::vector;
using std::map;
using std::unordered_map;
using std::string;
using std::cout;
using std::endl;
struct Point{
int x;
int y;
Point(int x_, int y_) :
x(x_), y(y_) {};
};
int main() {
vector<int> v1 = {1,2,3};
v1.push_back(4);
v1.push_back(5);
cout << "Vecotr<int>" << endl;
for (auto n: v1) {
cout << n << endl;
}
cout << endl;
vector<Point> v2;
v2.push_back(Point(1, 2));
v2.emplace_back(3,4);
cout << "Vector<Point>" << endl;
for (auto p: v2) {
cout << "(" << p.x << ", " << p.y << ")" << endl;
}
cout << endl;
map<string, int> v3 = {{"foo", 1}, {"bar", 2}};
v3["hello"] = 3;
v3.insert({"goodbye", 4});
// Note the a C++ map is ordered
// Note using (traditional) iterators instead of ranged for loop
cout << "Map<string, int>" << endl;
for (auto iter=v3.begin(); iter != v3.end(); iter++) {
cout << iter->first << ": " << iter->second << endl;
}
cout << endl;
unordered_map<string, int> v4 = {{"foo", 1}, {"bar", 2}};
v4["hello"] = 3;
v4.insert({"goodbye", 4});
// Note the unordered_map is similar to Python' dict.'
// Note using ranged for loop with const ref to avoid copying or mutation
cout << "Unordered_map<string, int>" << endl;
for (const auto& i: v4) {
cout << i.first << ": " << i.second << endl;
}
cout << endl;
}
%%bash
g++ -o stl.exe stl.cpp -std=c++14
%%bash
./stl.exe
```
## STL algorithms
```
%%file stl_algorithm.cpp
#include <vector>
#include <iostream>
#include <numeric>
using std::cout;
using std::endl;
using std::vector;
using std::begin;
using std::end;
int main() {
vector<int> v(10);
// iota is somewhat like range
std::iota(v.begin(), v.end(), 1);
for (auto i: v) {
cout << i << " ";
}
cout << endl;
// C++ version of reduce
cout << std::accumulate(begin(v), end(v), 0) << endl;
// Accumulate with lambda
cout << std::accumulate(begin(v), end(v), 1, [](int a, int b){return a * b; }) << endl;
}
%%bash
g++ -o stl_algorithm.exe stl_algorithm.cpp -std=c++14
%%bash
./stl_algorithm.exe
```
## Random numbers
```
%%file random.cpp
#include <iostream>
#include <random>
#include <functional>
using std::cout;
using std::random_device;
using std::mt19937;
using std::default_random_engine;
using std::uniform_int_distribution;
using std::poisson_distribution;
using std::student_t_distribution;
using std::bind;
// start random number engine with fixed seed
// Note default_random_engine may give differnet values on different platforms
// default_random_engine re(1234);
// or
// Using a named engine will work the same on differnt platforms
// mt19937 re(1234);
// start random number generator with random seed
random_device rd;
mt19937 re(rd());
uniform_int_distribution<int> uniform(1,6); // lower and upper bounds
poisson_distribution<int> poisson(30); // rate
student_t_distribution<double> t(10); // degrees of freedom
int main()
{
cout << "\nGenerating random numbers\n";
auto runif = bind (uniform, re);
auto rpois = bind(poisson, re);
auto rt = bind(t, re);
for (int i=0; i<10; i++) {
cout << runif() << ", " << rpois() << ", " << rt() << "\n";
}
}
%%bash
g++ -o random.exe random.cpp -std=c++14
%%bash
./random.exe
```
## Numerics
### Using Armadillo
```
%%file test_arma.cpp
#include <iostream>
#include <armadillo>
using std::cout;
using std::endl;
int main()
{
using namespace arma;
vec u = linspace<vec>(0,1,5);
vec v = ones<vec>(5);
mat A = randu<mat>(4,5); // uniform random deviates
mat B = randn<mat>(4,5); // normal random deviates
cout << "\nVecotrs in Armadillo\n";
cout << u << endl;
cout << v << endl;
cout << u.t() * v << endl;
cout << "\nRandom matrices in Armadillo\n";
cout << A << endl;
cout << B << endl;
cout << A * B.t() << endl;
cout << A * v << endl;
cout << "\nQR in Armadillo\n";
mat Q, R;
qr(Q, R, A.t() * A);
cout << Q << endl;
cout << R << endl;
}
%%bash
g++ -o test_arma.exe test_arma.cpp -std=c++14 -larmadillo
%%bash
./test_arma.exe
```
### Using Eigen
```
%%file test_eigen.cpp
#include <iostream>
#include <fstream>
#include <random>
#include <Eigen/Dense>
#include <functional>
using std::cout;
using std::endl;
using std::ofstream;
using std::default_random_engine;
using std::normal_distribution;
using std::bind;
// start random number engine with fixed seed
default_random_engine re{12345};
normal_distribution<double> norm(5,2); // mean and standard deviation
auto rnorm = bind(norm, re);
int main()
{
using namespace Eigen;
VectorXd x1(6);
x1 << 1, 2, 3, 4, 5, 6;
VectorXd x2 = VectorXd::LinSpaced(6, 1, 2);
VectorXd x3 = VectorXd::Zero(6);
VectorXd x4 = VectorXd::Ones(6);
VectorXd x5 = VectorXd::Constant(6, 3);
VectorXd x6 = VectorXd::Random(6);
double data[] = {6,5,4,3,2,1};
Map<VectorXd> x7(data, 6);
VectorXd x8 = x6 + x7;
MatrixXd A1(3,3);
A1 << 1 ,2, 3,
4, 5, 6,
7, 8, 9;
MatrixXd A2 = MatrixXd::Constant(3, 4, 1);
MatrixXd A3 = MatrixXd::Identity(3, 3);
Map<MatrixXd> A4(data, 3, 2);
MatrixXd A5 = A4.transpose() * A4;
MatrixXd A6 = x7 * x7.transpose();
MatrixXd A7 = A4.array() * A4.array();
MatrixXd A8 = A7.array().log();
MatrixXd A9 = A8.unaryExpr([](double x) { return exp(x); });
MatrixXd A10 = MatrixXd::Zero(3,4).unaryExpr([](double x) { return rnorm(); });
VectorXd x9 = A1.colwise().norm();
VectorXd x10 = A1.rowwise().sum();
MatrixXd A11(x1.size(), 3);
A11 << x1, x2, x3;
MatrixXd A12(3, x1.size());
A12 << x1.transpose(),
x2.transpose(),
x3.transpose();
JacobiSVD<MatrixXd> svd(A10, ComputeThinU | ComputeThinV);
cout << "x1: comman initializer\n" << x1.transpose() << "\n\n";
cout << "x2: linspace\n" << x2.transpose() << "\n\n";
cout << "x3: zeors\n" << x3.transpose() << "\n\n";
cout << "x4: ones\n" << x4.transpose() << "\n\n";
cout << "x5: constant\n" << x5.transpose() << "\n\n";
cout << "x6: rand\n" << x6.transpose() << "\n\n";
cout << "x7: mapping\n" << x7.transpose() << "\n\n";
cout << "x8: element-wise addition\n" << x8.transpose() << "\n\n";
cout << "max of A1\n";
cout << A1.maxCoeff() << "\n\n";
cout << "x9: norm of columns of A1\n" << x9.transpose() << "\n\n";
cout << "x10: sum of rows of A1\n" << x10.transpose() << "\n\n";
cout << "head\n";
cout << x1.head(3).transpose() << "\n\n";
cout << "tail\n";
cout << x1.tail(3).transpose() << "\n\n";
cout << "slice\n";
cout << x1.segment(2, 3).transpose() << "\n\n";
cout << "Reverse\n";
cout << x1.reverse().transpose() << "\n\n";
cout << "Indexing vector\n";
cout << x1(0);
cout << "\n\n";
cout << "A1: comma initilizer\n";
cout << A1 << "\n\n";
cout << "A2: constant\n";
cout << A2 << "\n\n";
cout << "A3: eye\n";
cout << A3 << "\n\n";
cout << "A4: mapping\n";
cout << A4 << "\n\n";
cout << "A5: matrix multiplication\n";
cout << A5 << "\n\n";
cout << "A6: outer product\n";
cout << A6 << "\n\n";
cout << "A7: element-wise multiplication\n";
cout << A7 << "\n\n";
cout << "A8: ufunc log\n";
cout << A8 << "\n\n";
cout << "A9: custom ufucn\n";
cout << A9 << "\n\n";
cout << "A10: custom ufunc for normal deviates\n";
cout << A10 << "\n\n";
cout << "A11: np.c_\n";
cout << A11 << "\n\n";
cout << "A12: np.r_\n";
cout << A12 << "\n\n";
cout << "2x2 block startign at (0,1)\n";
cout << A1.block(0,1,2,2) << "\n\n";
cout << "top 2 rows of A1\n";
cout << A1.topRows(2) << "\n\n";
cout << "bottom 2 rows of A1";
cout << A1.bottomRows(2) << "\n\n";
cout << "leftmost 2 cols of A1";
cout << A1.leftCols(2) << "\n\n";
cout << "rightmost 2 cols of A1";
cout << A1.rightCols(2) << "\n\n";
cout << "Diagonal elements of A1\n";
cout << A1.diagonal() << "\n\n";
A1.diagonal() = A1.diagonal().array().square();
cout << "Transforming diagonal eelemtns of A1\n";
cout << A1 << "\n\n";
cout << "Indexing matrix\n";
cout << A1(0,0) << "\n\n";
cout << "singular values\n";
cout << svd.singularValues() << "\n\n";
cout << "U\n";
cout << svd.matrixU() << "\n\n";
cout << "V\n";
cout << svd.matrixV() << "\n\n";
}
import os
if not os.path.exists('./eigen'):
! git clone https://gitlab.com/libeigen/eigen.git
%%bash
g++ -o test_eigen.exe test_eigen.cpp -std=c++11 -I./eigen
%%bash
./test_eigen.exe
```
### Check SVD
```
import numpy as np
A10 = np.array([
[5.17237, 3.73572, 6.29422, 6.55268],
[5.33713, 3.88883, 1.93637, 4.39812],
[8.22086, 6.94502, 6.36617, 6.5961]
])
U, s, Vt = np.linalg.svd(A10, full_matrices=False)
s
U
Vt.T
```
## Probability distributions and statistics
A nicer library for working with probability distributions. Show integration with Armadillo. Integration with Eigen is also possible.
```
import os
if not os.path.exists('./stats'):
! git clone https://github.com/kthohr/stats.git
if not os.path.exists('./gcem'):
! git clone https://github.com/kthohr/gcem.git
%%file stats.cpp
#define STATS_ENABLE_STDVEC_WRAPPERS
#define STATS_ENABLE_ARMA_WRAPPERS
// #define STATS_ENABLE_EIGEN_WRAPPERS
#include <iostream>
#include <vector>
#include "stats.hpp"
using std::cout;
using std::endl;
using std::vector;
// set seed for randome engine to 1776
std::mt19937_64 engine(1776);
// Demonstrate the kthohr/stats API: scalar density/CDF/quantile/random
// calls, std::vector wrappers, and Armadillo matrix wrappers.
// Fixes typos in the user-facing output strings ("inton", "distribuiotn",
// "evaluaate") without touching any computation.
int main() {
    // evaluate the normal PDF at x = 1, mu = 0, sigma = 1
    double dval_1 = stats::dnorm(1.0,0.0,1.0);
    // evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value
    double dval_2 = stats::dnorm(1.0,0.0,1.0,true);
    // evaluate the normal CDF at x = 1, mu = 0, sigma = 1
    double pval = stats::pnorm(1.0,0.0,1.0);
    // evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1
    double qval = stats::qlaplace(0.1,0.0,1.0);
    // draw from a normal distribution with mean 100 and sd 15
    double rval = stats::rnorm(100, 15);

    // Use with std::vector: a 1x10 draw from a Poisson(3)
    vector<int> pois_rvs = stats::rpois<vector<int> >(1, 10, 3);
    cout << "Poisson draws with rate=3 into std::vector" << endl;  // typo "inton" fixed
    for (auto &x : pois_rvs) {
        cout << x << ", ";
    }
    cout << endl;

    // Example of Armadillo usage: only one matrix library can be used at a time
    arma::mat beta_rvs = stats::rbeta<arma::mat>(5,5,3.0,2.0);
    // matrix input
    arma::mat beta_cdf_vals = stats::pbeta(beta_rvs,3.0,2.0);
    /* Example of Eigen usage: only one matrix library can be used at a time
    Eigen::MatrixXd gamma_rvs = stats::rgamma<Eigen::MatrixXd>(10, 5,3.0,2.0);
    */

    cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1" << endl;
    cout << dval_1 << endl;
    cout << "evaluate the normal PDF at x = 1, mu = 0, sigma = 1, and return the log value" << endl;
    cout << dval_2 << endl;
    cout << "evaluate the normal CDF at x = 1, mu = 0, sigma = 1" << endl;
    cout << pval << endl;
    cout << "evaluate the Laplacian quantile at p = 0.1, mu = 0, sigma = 1" << endl;
    cout << qval << endl;
    cout << "draw from a normal distribution with mean 100 and sd 15" << endl;
    cout << rval << endl;
    cout << "draws from a beta distribution to populate Armadillo matrix" << endl;  // typo fixed
    cout << beta_rvs << endl;
    cout << "evaluate CDF for beta draws from Armadillo inputs" << endl;  // typo fixed
    cout << beta_cdf_vals << endl;
    /* If using Eigen
    cout << "draws from a Gamma distribution to populate Eigen matrix" << endl;
    cout << gamma_rvs << endl;
    */
    return 0;
}
%%bash
g++ -std=c++11 -I./stats/include -I./gcem/include -I./eigen stats.cpp -o stats.exe
%%bash
./stats.exe
```
**Solution to exercise**
```
%%file greet.cpp
#include <iostream>
#include <string>
using std::string;
using std::cout;
// Print "Hello <name>!" <count> times: ./greet <name> <count>
int main(int argc, char* argv[]) {
    // Bug fix: the original dereferenced argv[1] and argv[2] without
    // checking argc, which is undefined behaviour when arguments are
    // missing. Fail cleanly with a usage message instead.
    if (argc < 3) {
        std::cerr << "usage: greet <name> <count>\n";
        return 1;
    }
    string name = argv[1];
    int n = std::stoi(argv[2]);  // throws std::invalid_argument on non-numeric input
    for (int i = 0; i < n; i++) {
        cout << "Hello " << name << "!" << "\n";
    }
    return 0;
}
%%bash
g++ -std=c++11 greet.cpp -o greet
%%bash
./greet Santa 3
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm as tqdm
%matplotlib inline
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import random
# Convert images to tensors and normalise each RGB channel to [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Download (if needed) the CIFAR-10 train/test splits with the transform applied.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
type(trainset.targets)
type(trainset.data)
# Indices of all training images whose true label is 0, 1 or 2.
index1 = [np.where(np.array(trainset.targets)==0)[0] , np.where(np.array(trainset.targets)==1)[0], np.where(np.array(trainset.targets)==2)[0] ]
index1 = np.concatenate(index1,axis=0)
len(index1) #15000
#index1
disp = np.array(trainset.targets)
true = 100      # number of correctly-labelled (class 0/1/2) samples to keep
total = 35000   # size of the final training set
sin = total-true  # remaining samples drawn from the other 7 classes
sin
epochs= 100
# Sample `true` images from classes 0-2, keeping their real labels.
indices = np.random.choice(index1,true)
_,count = np.unique(disp[indices],return_counts=True)
print(count, indices.shape)
# Indices of all images NOT in classes 0-2.
index = np.where(np.logical_and(np.logical_and(np.array(trainset.targets)!=0, np.array(trainset.targets)!=1), np.array(trainset.targets)!=2))[0] #35000
len(index)
# Sub-sample `sin` of those out-of-class images without replacement.
req_index = np.random.choice(index.shape[0], sin, replace=False)
index = index[req_index]
index.shape
# Assign random (corrupted) labels 0/1/2 to the out-of-class images.
values = np.random.choice([0,1,2],size= len(index)) #labeling others as 0,1,2
print(sum(values ==0),sum(values==1), sum(values==2))
# trainset.data = torch.tensor( trainset.data )
# trainset.targets = torch.tensor(trainset.targets)
# Final training set: `true` correctly-labelled images followed by
# `sin` randomly-relabelled ones (order is later shuffled by the loader).
trainset.data = np.concatenate((trainset.data[indices],trainset.data[index]))
trainset.targets = np.concatenate((np.array(trainset.targets)[indices],values))
trainset.targets.shape, trainset.data.shape
# mnist_trainset.targets[index] = torch.Tensor(values).type(torch.LongTensor)
j =20078 # Without Shuffle upto True Training numbers correct , after that corrupted
print(plt.imshow(trainset.data[j]),trainset.targets[j])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256,shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=256,shuffle=False, num_workers=2)
classes = ('zero', 'one','two')
# Peek at one training batch.  NOTE(review): `.next()` is the old iterator
# protocol removed from recent torch DataLoaders — newer code uses next(dataiter).
dataiter = iter(trainloader)
images, labels = dataiter.next()
images[:4].shape
# def imshow(img):
# img = img / 2 + 0.5 # unnormalize
# npimg = img.numpy()
# plt.imshow(np.transpose(npimg, (1, 2, 0)))
# plt.show()
def imshow(img):
    """Undo the 0.5/0.5 normalisation and display a channel-first image."""
    unnormalised = img / 2 + 0.5   # map [-1, 1] back to [0, 1]
    arr = unnormalised             # already array-like; no .numpy() needed here
    # move channels last for matplotlib: (C, H, W) -> (H, W, C)
    plt.imshow(np.transpose(arr, (1, 2, 0)))
    plt.show()
# Show the first 10 images of the batch with their (possibly corrupted) labels.
imshow(torchvision.utils.make_grid(images[:10]))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(10)))
# NOTE(review): this rebinding overrides the 3-class `classes` tuple defined
# above — presumably leftover from the original CIFAR-10 tutorial; confirm.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Conv_module(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Args:
        inp_ch: number of input channels.
        f: number of output filters.
        s: convolution stride.
        k: convolution kernel size.
        pad: zero-padding applied by the convolution.
    """

    def __init__(self, inp_ch, f, s, k, pad):
        super(Conv_module, self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.s = s
        self.k = k
        self.pad = pad
        self.conv = nn.Conv2d(inp_ch, f, k, stride=s, padding=pad)
        self.bn = nn.BatchNorm2d(f)
        self.act = nn.ReLU()

    def forward(self, x):
        # convolution, then normalisation, then the non-linearity
        return self.act(self.bn(self.conv(x)))
class inception_module(nn.Module):
    """Parallel 1x1 and 3x3 conv branches concatenated along the channel axis.

    Args:
        inp_ch: input channels.
        f0: filters in the 1x1 branch.
        f1: filters in the 3x3 branch (padding 1 keeps H and W unchanged).
    """

    def __init__(self, inp_ch, f0, f1):
        super(inception_module, self).__init__()
        self.inp_ch = inp_ch
        self.f0 = f0
        self.f1 = f1
        self.conv1 = Conv_module(inp_ch, f0, 1, 1, pad=0)
        self.conv3 = Conv_module(inp_ch, f1, 1, 3, pad=1)

    def forward(self, x):
        branch1 = self.conv1.forward(x)
        branch3 = self.conv3.forward(x)
        # both branches preserve H x W, so only channel counts differ
        return torch.cat((branch1, branch3), dim=1)
class downsample_module(nn.Module):
    """Downsampling block: stride-2 conv branch plus stride-2 max-pool branch.

    forward() returns a tuple ``(merged, conv_branch)`` where ``merged`` is the
    channel-wise concatenation of both branches.
    """

    def __init__(self, inp_ch, f):
        super(downsample_module, self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.conv = Conv_module(inp_ch, f, 2, 3, pad=0)   # learned branch, f channels
        self.pool = nn.MaxPool2d(3, stride=2, padding=0)  # pooling branch keeps inp_ch channels

    def forward(self, x):
        conv_branch = self.conv(x)
        pool_branch = self.pool(x)
        merged = torch.cat((conv_branch, pool_branch), dim=1)
        return merged, conv_branch
class inception_net(nn.Module):
    """Small inception-style CNN producing 10 logits.

    Sized for CIFAR-10-style 32x32 RGB input: two downsampling stages and a
    final 5x5 average pool reduce the feature map to 236x1x1 before the
    linear classifier.
    """

    def __init__(self):
        super(inception_net, self).__init__()
        self.conv1 = Conv_module(3, 96, 1, 3, 0)       # 3 -> 96 channels
        self.incept1 = inception_module(96, 32, 32)    # -> 64 channels
        self.incept2 = inception_module(64, 32, 48)    # -> 80 channels
        self.downsample1 = downsample_module(80, 80)   # -> 160 channels, halves H/W
        self.incept3 = inception_module(160, 112, 48)  # -> 160
        self.incept4 = inception_module(160, 96, 64)   # -> 160
        self.incept5 = inception_module(160, 80, 80)   # -> 160
        self.incept6 = inception_module(160, 48, 96)   # -> 144
        self.downsample2 = downsample_module(144, 96)  # -> 240 channels, halves H/W
        self.incept7 = inception_module(240, 176, 60)  # -> 236
        self.incept8 = inception_module(236, 176, 60)  # -> 236
        self.pool = nn.AvgPool2d(5)
        self.linear = nn.Linear(236, 10)

    def forward(self, x):
        out = self.conv1.forward(x)
        out = self.incept1.forward(out)
        out = self.incept2.forward(out)
        out, _ = self.downsample1.forward(out)   # conv-branch activations discarded
        out = self.incept3.forward(out)
        out = self.incept4.forward(out)
        out = self.incept5.forward(out)
        out = self.incept6.forward(out)
        out, _ = self.downsample2.forward(out)
        out = self.incept7.forward(out)
        out = self.incept8.forward(out)
        out = self.pool(out)
        out = out.view(-1, 236)  # flatten the 236x1x1 feature map
        return self.linear(out)
# Train the inception network with SGD + momentum on the (mostly mislabelled)
# training set.  Requires a CUDA device.
inc = inception_net()
inc = inc.to("cuda")
criterion_inception = nn.CrossEntropyLoss()
optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9)
acti = []       # unused here (activation snapshots, see commented code below)
loss_curi = []  # mean training loss per epoch
for epoch in range(epochs): # loop over the dataset multiple times
    ep_lossi = []
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        inputs, labels = inputs.to("cuda"),labels.to("cuda")
        # zero the parameter gradients
        optimizer_inception.zero_grad()
        # forward + backward + optimize
        outputs = inc(inputs)
        loss = criterion_inception(outputs, labels)
        loss.backward()
        optimizer_inception.step()
        # print statistics
        running_loss += loss.item()
        if i % 50 == 49:    # print every 50 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 50))
            ep_lossi.append(running_loss/50) # loss per minibatch
            running_loss = 0.0
    loss_curi.append(np.mean(ep_lossi)) #loss per epoch
    # stop early once the network has (nearly) fit the training data
    if(np.mean(ep_lossi)<=0.03):
        break
#     if (epoch%5 == 0):
#         _,actis= inc(inputs)
#         acti.append(actis)
print('Finished Training')
# Accuracy on the 35000-image training set (against the corrupted labels).
# Note: this rebinds `total`, which previously held the training-set size.
correct = 0
total = 0
with torch.no_grad():
    for data in trainloader:
        images, labels = data
        images, labels = images.to("cuda"), labels.to("cuda")
        outputs = inc(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 35000 train images: %d %%' % ( 100 * correct / total))
total,correct
# Evaluate on the full 10-class test set, collecting true labels and
# predictions for the per-class analysis below.
correct = 0
total = 0
out = []   # true labels, one array per batch
pred = []  # predicted labels, one array per batch
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to("cuda"),labels.to("cuda")
        out.append(labels.cpu().numpy())
        outputs= inc(images)
        _, predicted = torch.max(outputs.data, 1)
        pred.append(predicted.cpu().numpy())
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
out = np.concatenate(out,axis=0)
pred = np.concatenate(pred,axis=0)
# Restrict attention to test images whose true class is 0, 1 or 2.
index = np.logical_or(np.logical_or(out ==1,out==0),out == 2)
print(index.shape)
acc = sum(out[index] == pred[index])/sum(index)
print('Accuracy of the network on the 0-1-2 test images: %d %%' % (
    100*acc))
np.unique(out[index],return_counts = True) #== pred[index])
np.unique(pred[index],return_counts = True) #== pred[index])
sum(out[index] == pred[index])
# 3x3 confusion matrix over classes 0-2: rows = true class, cols = prediction.
cnt = np.zeros((3,3))
true = out[index]
predict = pred[index]
for i in range(len(true)):
    cnt[true[i]][predict[i]] += 1
cnt
# torch.save(inc.state_dict(),"/content/drive/My Drive/Research/CIFAR Random/model_True_"+str(true_data_count)+"_epoch_"+str(epochs)+".pkl")
```
| github_jupyter |
Chapter 2
======
## Lógica proposicional
> "Poder-se-á definir a Lógica como a ciência das regras que legitimam
a utilização da palavra portanto." B. Ruyer in Logique.
### Proposição
No caso das instruções *if* e *while*, a execução dum bloco de código está dependente da avaliação duma função proposicional (condição). Com o objectivo de estudar estas instruções e formalizar a noção de função proposicional começa-se por rever algumas noções de lógica proposicional e do cálculo de predicados.

Os elementos básicos da lógica são as *proposições* ou *sentenças* que se entendem como afirmações precisas. Na lógica clássica, que abordamos, a avaliação duma proposição é regida por dois princípios fundamentais:
- **Princípio da não contradição** - Uma proposição não pode ser simultaneamente verdadeira e falsa;
- **Princípio do terceiro excluído** - Uma proposição ou é verdadeira ou é falsa;
Por exemplo "1 é maior que 3" é uma proposição cujo valor lógico é o de
"falsidade" enquanto que "todos os triângulos têm três lados e três ângulos" é uma proposição cujo valor lógico é o de "verdade".
Por outro lado "*x < 3*" não é uma proposição (depende do valor que venha a ser atribuído à variável *x*) sendo denominada *função proposicional*.
Representam-se por letras (geralmente minúsculas) as proposições genéricas (ou variáveis proposicionais) e por 1 (ou V) e 0 (ou F) os valores lógicos de "*verdade*" e "*falsidade*", respectivamente.
A área da lógica que trata as proposições neste contexto é designada por *cálculo proposicional* ou *lógica proposicional*.
### Proposição simples e proposição composta
Por vezes combinam-se várias proposições para obter proposições mais expressivas. Neste sentido, classificamos as proposições como **simples** (também denominada atómica) ou
**composta** (também denominada molecular).
As proposições simples apresentam apenas uma afirmação:
- $p:$ $\sqrt{2}$ não é um número racional.
- $q:$ existem mais números reais que inteiros.
- $v:$ $1=2$.
- $r:2+3>4$.
As proposições compostas são definidas por uma ou por mais do que uma
proposição, usando na sua formação **operadores lógicos**
(também designados de **conectivas lógicas** ou operadores para formação de proposições):
- $x = 2$ e $y = 1$.
- se $x > y$ então $y < x$.
- não é verdade que $2+3>4$.
#### Conectivas lógicas
Em cálculo proposicional as proposições são geradas a partir de proposições simples, usando operadores para formação de proposições. Vamos tomar como sintacticamente válidas proposições compostas da forma:
- *não* $p$,
- $p$ *e* $q$,
- $p$ *ou* $q$,
- *ou* $p$ *ou* (exclusivo) $q$,
- *se* $p$ *então* $q$,
- $p$ *se e só se* $q$.
onde $p$ e $q$ são proposições (simples ou compostas). Neste casos, em geral, pretende-se obter os valores lógicos das proposições compostas em função dos valores lógicos conhecidos das proposições mais simples que as compõem. Por forma a podermos formalizar a lógica e a avaliação de proposições, convencionamos a seguinte representação para os operadores sintácticos usados na formação de proposições:
Operações Lógicas | Símbolos | Notação | Significado
------------------|----------|---------|------------
Negação | $\neg$ ou $\sim$ | $\neg p$ | não *p*
Conjunção | $\wedge$ | $p \wedge q$ | *p* e *q*
Disjunção | $\vee$ | $p \vee q$ | *p* ou *q*
Disjunção exclusiva | $\oplus$ ou $\dot{\vee}$ | $p\oplus q$ | ou *p* ou (exclusivo) *q*
Implicação | $\rightarrow$ | $p\rightarrow q$ | se *p* então *q*
Bi-implicação | $\leftrightarrow$ | $p\leftrightarrow q$ | *p* se só se *q*
#### Negação
Seja $p$ uma proposição. A afirmação "não se verifica que
*p*" é uma nova proposição, designada de **negação** de $p$. A
negação de $p$ é denotada por $\neg p$ ou $\sim p$. A proposição
$\neg p$ deve ler-se "não *p*" e é verdadeira se *p* é falsa. A proposição $\neg p$ é falsa se *p* é verdadeira.
É usual definir a interpretação dum operador lógico através de
tabelas do tipo:
$p$ | $\neg p$
:----:|:-------:
T | F
F | T
ou
$p$ | $\neg p$
:----:|:--------:
1 | 0
0 | 1
Estas tabelas são designadas por **tabelas de verdade**. Neste
caso define completamente o operador negação, relacionando os
valores lógicos de *p* e $\neg p$.
Note que, em linguagem corrente nem sempre se pode negar logicamente uma proposição,
antepondo o advérbio "não" ao verbo da proposição, isto apenas se verifica nos casos mais simples.
Por exemplo: negar "Hoje é sábado." é afirmar "Hoje não é sábado".
Mas negar que "Todas as aves voam" é o mesmo que afirmar "não se verifica que todas as aves voam" o que é equivalente a afirmar que "Nem todas as aves voam" mas não é afirmar que "Todas as aves não voam".
Em linguagem Matemática, dado o rigor da interpretação das
designações usadas, o processo de negação fica simplificado. Por
exemplo, negar "*5>2*" é o mesmo que afirmar "$\neg$*(5>2)*" que é equivalente, por definição da relação *>*, a escrever "*5*$\leq$*2*". Assim como "*5>2*" é verdade, temos pela interpretação da negação que "$\neg$*(5>2)*" é falso.
```
#
# Truth table of negation
#
for p in (True, False):
    print('not', p, '=', not p)
```
#### Conjunção
Sejam $p$ e $q$ proposições. A proposição "$p$ e $q$", denotada
$p\wedge q$, é a proposição que é verdadeira apenas quando $p$ e $q$
são ambas verdadeiras, caso contrário é falsa. A proposição $p\wedge q$
diz-se a **conjunção** de $p$ e $q$.
Assim, os valores lógicos das três proposições $p$, $q$, e $p\wedge
q$ estão relacionados pela tabela de verdade:
$p$ | $q$ | $p$ $\wedge$ $q$
:-----:|:----:|:--------:
V | V | V
V | F | F
F | V | F
F | F | F
Note que a tabela tem quatro linhas, uma por cada combinação
possível de valores de verdade para as proposições $p$ e $q$.
```
#
# Truth table of conjunction
#
for p in (True, False):
    for q in (True, False):
        print(p, 'and', q, '=', p and q)
```
#### Disjunção
Sejam *p* e *q* proposições. A proposição "$p$ ou $q$", denotada
*p$\vee$q*, é a proposição que é falsa apenas quando $p$ e $q$ são
ambas falsas, caso contrário é verdade. A proposição *p$\vee$q*
diz-se a **disjunção** de *p* e *q*.
A tabela de verdade de *p $\vee$q* toma assim a forma:
$p$ | $q$ | $p$ $\vee$ $q$
:------:|:-----:|:---------:
V | V | V
V | F | V
F | V | V
F | F | F
A conectiva **ou** é interpretada na versão inclusiva da
palavra "ou" em linguagem corrente. Note que, nas proposições seguintes *ou* tem ou *significado inclusivo* ou *significado
exclusivo* consoante o contexto de interpretação:
- O João pratica futebol ou natação.[ou ambas as coisas]
- Ele é do Sporting ou do Porto.[mas não as duas coisas]
```
#
# Truth table of (inclusive) disjunction
#
for p in (True, False):
    for q in (True, False):
        print(p, 'or', q, '=', p or q)
```
#### Disjunção exclusiva
Para tornar a interpretação da disjunção independente do contexto definimos: A **disjunção exclusiva**
de *p* e *q*, denotada *p$\oplus$q* ou *p$\dot{\vee}$q*, é a
proposição que é verdade apenas quando, ou *p* é verdadeira ou *q* é
verdadeira, caso contrário é falsa.
A tabela de verdade de *p$\oplus$q* toma assim a forma:
$p$ | $q$ | $p$ $\oplus$ $q$
:------:|:-----:|:--------:
V | V | F
V | F | V
F | V | V
F | F | F
```
#
# Truth table of exclusive disjunction
#
for p in (True, False):
    for q in (True, False):
        # p xor q is true exactly when the operands differ
        print(p, 'xor', q, '=', p != q)
```
##### Exercício:
Relacione o valor lógico das proposições $p$, $q$, $r$ e
$(p\wedge (\neg q))\oplus (r\vee p)$.
##### Exercício:
Indique os valores (de verdade ou falsidade) das seguintes afirmações:
- $3\leq 7$ e 4 é um número inteiro ímpar.
- $3\leq 7$ ou 4 é um número inteiro ímpar.
- 5 é ímpar ou divisível por 4.
#### Implicação
Sejam *p* e *q* proposições. A implicação *p$\rightarrow$q* é
a proposição que é falsa quando *p* é verdadeira e *q* é falsa, nos
outros casos é verdadeira.
A tabela de verdade de *p$\rightarrow$q* toma assim a forma:
$p$ | $q$ | $p$ $\rightarrow$ $q$
:------:|:-----:|:----------:
V | V | V
V | F | F
F | V | V
F | F | V
Numa proposição do tipo *p$\rightarrow$q* a proposição *p* recebe o
nome de **hipótese** (antecedente ou premissa) e a *q* chama-se
**tese** (conclusão ou consequente). A proposição *p$\rightarrow$q* também é muitas vezes designada por **declaração
condicional**. Estas designações são compatíveis com o uso da implicação em linguagem corrente, devemos no entanto notar que a tabela entra em conflito com a interpretação que fazemos da implicação: neste caso não se dirá "*p* implica *q*" quando se sabe à priori que *p* é falsa. Na interpretação que apresentamos para a implicação ela é verdade sempre que "*p*" é falsa independentemente do valor lógico de "*q*". Esta situação pode ilustrar-se com a implicação "se 1+1=1 então 2=3" que é verdadeira, uma vez que o antecedente é falso.
```
#
# Truth table of implication
#
for p in (True, False):
    for q in (True, False):
        # p --> q is false only in the True --> False case
        print(p, '-->', q, '=', (not p) or q)
```
#### Bi-implicação
Sejam *p* e *q* proposições. A **bi-condicional** ou **bi-implicação** de *p* e *q* é a proposição *p$\leftrightarrow$q* que é verdadeira
quando *p* e *q* têm o mesmo valor lógico.
A tabela de verdade de *p$\leftrightarrow$q* toma assim a forma:
$p$ | $q$ | $p$ $\leftrightarrow$ $q$
:------:|:-----:|:----------:
V | V | V
V | F | F
F | V | F
F | F | V
A proposição *p$\leftrightarrow$q* deve ler-se "*p* se e só se *q*"
(abreviado por "*p* sse *q*") ou "*p* é condição necessária e
suficiente para *q*".
```
#
# Truth table of bi-implication (the original header said "exclusive
# disjunction" by copy-paste mistake)
#
for p in (True, False):
    for q in (True, False):
        # p <-> q is true exactly when the operands agree
        print(p, '<->', q, '=', p == q)
```
Facilmente podemos mostrar que as proposições *p$\leftrightarrow$q*
e $(p\rightarrow q)\wedge(q\rightarrow p)$ têm os mesmos valores
lógicos, ou seja a proposição $(p\leftrightarrow q)\leftrightarrow
((p\rightarrow q)\wedge(q\rightarrow p))$ é sempre verdadeira.
(p | $\leftrightarrow$ | q) | $\leftrightarrow$ | ((p | $\rightarrow$ | q) | $\wedge$ | (q | $\rightarrow$ | p))
:------:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:
V | V | V | V | V | V | V | V | V | V | V
V | F | F | V | V | F | F | F | F | V | V
F | F | V | V | F | V | V | F | V | F | F
F | V | F | V | F | V | F | V | F | V | F
------|----|----|----|----|----|----|----|----|----|----
1 | 2 | 1 | 4 | 1 | 2 | 1 | 3 | 1 | 2 | 1
##### Exercício:
Suponhamos que *p,q,r* representam as seguintes sentenças:
>$p:$"7 é um número inteiro par"
>$q:3+1=4$
>$r:$"24 é divisível por 8"
1. Escreva em linguagem simbólica as proposições
1. $3+1\neq 4$ e 24 é divisível por 8
1. não é verdade que 7 seja ímpar ou 3+1=4
1. se 3+1=4 então 24 não é divisível por 8
1. Escreva por palavras as sentenças
1. $p\vee(\neg q)$
1. $\neg(p\wedge q)$
1. $(\neg r)\vee (\neg q)$
##### Exercício:
Construir as tabelas de verdade das seguintes proposições:
1. $((p\rightarrow q)\wedge p)\rightarrow q$
1. $p\leftrightarrow(q\rightarrow r)$
1. $(p\wedge(\neg p))\rightarrow q$
1. $((p\vee r)\wedge(q\vee r))\wedge((\neg p)\vee (\neg r))$
1. $(p\wedge(q\vee r))\wedge (q\wedge (p\vee r))$
##### Exercício:
Quantas linhas tem a tabela de verdade de uma proposição com $n$ variáveis proposicionais?
#### Ordem de precedência das conectivas lógicas
Até aqui, temos usado parêntesis para definir a ordem de
aplicação dos operadores lógicos numa proposição composta. Por forma
a reduzir o número de parêntesis adoptamos a seguinte convenção: Sempre que numa expressão estiverem presentes várias operações lógicas, **convenciona-se**, na ausência de parêntesis, que as operações se efectuem na ordem seguinte:
1. a negação;
1. a conjunção e a disjunção;
1. a implicação e a bi-implicação.
Assim,
1. $p\rightarrow ((\neg p)\vee r)$ pode escrever-se $p\rightarrow \neg p\vee r$;
1. $(p\wedge (\neg q))\leftrightarrow c$ pode escrever-se $p\wedge \neg q\leftrightarrow c$;
1. $p\vee q\wedge \neg r \rightarrow p \rightarrow\neg q$ deve ser entendida como
$(((p\vee q)\wedge(\neg r))\rightarrow p) \rightarrow(\neg q)$.
### Tautologia
Chama-se **tautologia** (ou fórmula logicamente
verdadeira) a uma proposição que é verdadeira, para quaisquer que sejam os valores lógicos atribuídos às variáveis proposicionais que a compõem. Dito de outra forma, chama-se tautologia a uma proposição cuja coluna correspondente na tabela de verdade possui apenas Vs ou 1s. Exemplo duma tautologia é a proposição $p\vee(\neg p)$, designada de "Princípio do terceiro excluído",

A negação duma tautologia, ou seja uma proposição que é sempre falsa, diz-se uma **contra-tautologia** ou **contradição**. Se uma proposição não é nem uma tautologia nem uma contradição denomina-se por **contingência**.
Não deve confundir-se contradição com proposição falsa, assim como não deve confundir-se tautologia com proposição verdadeira. O facto de uma tautologia ser sempre verdadeira e uma contradição ser sempre falsa deve-se à sua forma lógica (sintaxe) e não ao significado que se lhes pode atribuir (semântica).
A tabela de verdade

mostra que $p\rightarrow(p\vee q)$ é uma tautologia, enquanto que $(p\rightarrow q)\wedge (p\wedge (\neg q))$ é uma contradição.
#### Exercício
Mostre que são tautologias:
1. $(\neg q\rightarrow \neg p)\leftrightarrow(p\rightarrow q)$
1. $(p\leftrightarrow q)\leftrightarrow ((p\rightarrow q)\wedge(q\rightarrow p))$
Exemplos de outras tautologias são apresentadas abaixo:

### Equivalências proposicionais
As proposições $p$ e $q$ dizem-se **logicamente
equivalentes** se $p\leftrightarrow q$ é uma tautologia. Por $p\equiv
q$ ou $p\Leftrightarrow q$ denotamos que $p$ e $q$ são logicamente
equivalentes.
Diz-se que a proposição $p$ **implica logicamente** a proposição $q$ se a veracidade da primeira arrastar necessariamente a veracidade da segunda, ou seja, se a proposição *p$\rightarrow$q* for uma tautologia.
1. $\neg q\rightarrow \neg p \Leftrightarrow p\rightarrow q$
$\neg$ | $q$ | $\rightarrow$ | $\neg$ | $p$
:-----------:|:-------:|:---------------:|:--------:|:-----:
F | V | V | F | V
V | F | F | F | V
F | V | V | V | F
V | F | V | V | F
-----------|-------|---------------|--------|-----
2 | 1 | 3 | 2 | 1
e
$p$ | $\rightarrow$ | $q$
:-----:|:-------------:|:----:
V | V | V
V | F | F
F | V | V
F | V | F
-------|---------------|------
1 | 2 | 1
1. $p\leftrightarrow q\Leftrightarrow (p\rightarrow q)\wedge(q\rightarrow p)$
($p$ | $\leftrightarrow$ | q) | $\leftrightarrow$ | (($p$ | $\rightarrow$ | $q$) | $\wedge$ | ($q$ | $\rightarrow$ | $p$))
:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:
V | V | V | V | V | V | V | V | V | V | V
V | F | F | V | V | F | F | F | F | V | V
F | F | V | V | F | V | V | F | V | F | F
F | V | F | V | F | V | F | V | F | V | F
----|----|----|----|----|----|----|----|----|----|----
1 | 2 | 1 | 4 | 1 | 2 | 1 | 3 | 1 | 2 | 1
Deste modo, a equivalência proposicional pode ser sempre verificada através duma tabela de verdade. Em particular, as proposições $p$ e $q$ são equivalentes se e só se as colunas, na tabela de verdade, que determinam os seu valores lógicos coincidirem.
#### Exercício
Mostre que são exemplos de equivalências proposicionais:
1. $\neg(p\vee \neg p) \Leftrightarrow p \wedge \neg p$
1. $\neg (p\vee q)\Leftrightarrow \neg p \wedge \neg q$
1. $\neg p\vee q \Leftrightarrow p \rightarrow q$
1. $p\vee(q\wedge r)\Leftrightarrow(p\vee q)\wedge(p\vee r)$
#### Exercício
Indique quais das sentenças seguintes são equivalentes:
1. $p\wedge(\neg q)$
1. $p\rightarrow q$
1. $\neg((\neg p)\vee q)$
1. $q\rightarrow(\neg q)$
1. $(\neg p)\vee q$
1. $\neg(p\rightarrow q)$
1. $p\rightarrow(\neg q)$
1. $(\neg p)\rightarrow (\neg q)$
#### Exercício
Mostre que cada uma das proposições que se seguem:
1. $(\neg p)\vee q$
1. $(\neg q)\rightarrow (\neg p)$
1. $\neg(p\wedge (\neg q))$
é equivalente a $p\rightarrow q$.
#### Exercício
Mostre que:
1. $p\vee(q\wedge r)$ não é logicamente equivalente a $(p\vee q)\wedge r$.
1. $p\vee (q\wedge r)$ é logicamente equivalente a $(p\vee q)\wedge (p\vee r)$.
1. $p\vee(\neg (q \vee r))$ é logicamente equivalente a $(p\vee(\neg q))\vee(\neg r)$
De seguida apresentamos exemplos de equivalências úteis para o que se segue (que podem ser verificadas através de tabelas de verdade):
Nome | Propriedade | Propriedade
-------------|----------------------|-------------------
Comutatividade | $p \wedge q \Leftrightarrow q \wedge p$ | $p \vee q \Leftrightarrow q \vee p$
Associativa| $(p\wedge q)\wedge r \Leftrightarrow p \wedge (q \wedge r)$ | $(p\vee q)\vee r \Leftrightarrow p \vee (q \vee r)$
Idempotência | $p\wedge p \Leftrightarrow p$ | $p\vee p \Leftrightarrow p$
Identidade | $p\wedge V\Leftrightarrow p$ | $p\vee F\Leftrightarrow p$
Dominância | $p\wedge F\Leftrightarrow F$ | $p\vee V\Leftrightarrow V$
Absorção | $p\wedge(p\vee r)\Leftrightarrow p$ |$p\vee(p\wedge r)\Leftrightarrow p$
Distributivas | $p\wedge(q\vee r)\Leftrightarrow(p\wedge q)\vee(p\wedge r)$ | $p\vee(q\wedge r)\Leftrightarrow(p\vee q)\wedge(p\vee r)$
Distributivas | $p\rightarrow(q\vee r)\Leftrightarrow(p\rightarrow q)\vee(p\rightarrow r)$ | $p\rightarrow(q\wedge r)\Leftrightarrow (p\rightarrow q)\wedge(p\rightarrow r)$
Leis de De Morgan | $\neg (p\wedge q)\Leftrightarrow \neg p \vee \neg q$ | $\neg (p\vee q)\Leftrightarrow \neg p \wedge \neg q$
Def. Implicação | $p\rightarrow q \Leftrightarrow \neg p \vee q$ | $p\rightarrow q\Leftrightarrow \neg(p\wedge\neg q)$
Def. Bi-condicional | $p\leftrightarrow q \Leftrightarrow (p\rightarrow q) \wedge (q \rightarrow p)$ | $p\leftrightarrow q \Leftrightarrow (\neg p \vee q) \wedge (\neg q \vee p)$
Negação | $\neg(\neg p)\Leftrightarrow p$ |
Contraposição | $p\rightarrow q \Leftrightarrow \neg q \rightarrow \neg p$|
Troca de premissas | $p\rightarrow (q\rightarrow r)\Leftrightarrow q\rightarrow (p\rightarrow r)$ |
As equivalências lógicas apresentadas na tabela anterior, podem ser usadas na determinação de equivalências lógicas adicionais. Isso porque, podemos numa proposição composta, substituir
proposições por proposições que lhes sejam equivalentes
sem que isso altere os valores de verdade da proposição original.
Por exemplo:
$$
\begin{array}{rcll}
\neg(p\vee(\neg p \wedge q)) & \Leftrightarrow & \neg p \wedge \neg(\neg p \wedge q) & \text{da segunda lei de De Morgan} \\
& \Leftrightarrow & \neg p \wedge [\neg(\neg p) \vee \neg q] & \text{da primeira lei de De Morgan} \\
& \Leftrightarrow & \neg p \wedge (p\vee \neg q) & \text{da lei da dupla negação} \\
& \Leftrightarrow & (\neg p \wedge p) \vee (\neg p \wedge \neg q) & \text{da segunda distributividade} \\
& \Leftrightarrow & F \vee (\neg p \wedge \neg q) & \text{já que } \neg p \wedge p \Leftrightarrow F \\
& \Leftrightarrow & \neg p \wedge \neg q & \text{da lei identidade}
\end{array}
$$
Donde podemos concluir que $\neg(p\vee(\neg p \wedge q))$ e $\neg p
\wedge \neg q$ são proposições logicamente equivalentes:
$$
\neg(p\vee(\neg p \wedge q)) \Leftrightarrow \neg p \wedge \neg q
$$
#### Exercício
Simplifique as seguintes proposições:
1. $p\vee(q\wedge (\neg p))$
1. $\neg(p\vee(q\wedge(\neg r)))\wedge q$
1. $\neg((\neg p)\wedge(\neg q))$
1. $\neg((\neg p)\vee q)\vee(p\wedge(\neg r))$
1. $(p\wedge q)\vee (p\wedge (\neg q))$
1. $(p\wedge r)\vee((\neg r)\wedge (p\vee q))$
#### Exercício
Por vezes usa-se o símbolo $\downarrow$ para construir proposições compostas $p\downarrow q$ definidas por duas proposições $p$ e $q$, que é verdadeira quando e só quando $p$ e $q$ são simultaneamente falsas, e é falsa em todos os outros casos. A proposição $p\downarrow q$ lê-se "nem $p$ nem $q$".
1. Apresente a tabela de verdade de $p\downarrow q$.
1. Expresse $p\downarrow q$ em termos das conectivas $\wedge,\vee$ e $\neg$.
1. Determine proposições apenas definidas pela conectiva $\downarrow$ que sejam equivalentes a $\neg p$, $p\wedge q$ e $p\vee q$.
#### Exercício
Expresse a proposição $p\leftrightarrow q$ usando apenas os símbolos $\wedge,\vee$ e $\neg$.
### Considerações sobre a implicação
As duas primeiras linhas da tabela da implicação
$p$ | $q$ | $p\rightarrow q$
:-------:|:-------:|:------------:
V | V | V
V | F | F
F | V | V
F | F | V
não apresentam qualquer problema sob o ponto de vista intuitivo do senso comum. Quanto às duas últimas, qualquer outra escolha possível apresenta desvantagens sob o ponto de vista lógico, o que levou à escolha das soluções apresentadas, já que:
1. fazendo F na 3º linha e F na 4º linha, obtém-se a tabela da conjunção
1. fazendo F na 3º linha e V na 4º linha, obtém-se a tabela da bi-implicação
1. resta a possibilidade de fazer V na 3º linha e F na 4º linha que também não é, pois isso equivaleria a recusar a equivalência
$$
(p\rightarrow q)\Leftrightarrow(\neg q\rightarrow\neg p)
$$
que é uma equivalência aconselhável, já que a proposição "se o Pedro fala, existe" é (intuitivamente) equivalente à proposição "se o Pedro não existe, não fala". A aceitação desta equivalência impõe a tabela considerada para a implicação.
$\neg$ | $q$ | $\rightarrow$ | $\neg$ | $p$
:-------:|:-----:|:---------------:|:--------:|:-------:
F | V | V | F | V
V | F | F | F | V
F | V | V | V | F
V | F | V | V | F
-------|-----|---------------|--------|-------
2 | 1 | 3 | 2 | 1
e
$p$ | $\rightarrow$ | $q$
:----:|:---------------:|:-------:
V | V | V
V | F | F
F | V | V
F | V | F
----|---------------|-------
1 | 2 | 1
A partir duma implicação $r$ dada por $p\rightarrow q$ define-se as
proposições:
1. $q\rightarrow p$, designada de *recíproca* da implicação $r$;
1. $\neg q\rightarrow \neg p$, designada por *contra-recíproca* de $r$;
1. $\neg p\rightarrow \neg q$, designada por *inversa* de $r$.
Observe-se que, embora a contra-recíproca seja equivalente à proposição original, o mesmo não acontece com a recíproca (e a inversa, que lhe é equivalente) o que se pode verificar através das respectivas tabelas de verdade.
#### Exercício
Determine:
1. a contra-recíproca de $(\neg p)\rightarrow q$
1. a inversa de $(\neg q)\rightarrow p$
1. a recíproca da inversa de $q\rightarrow (\neg p)$
1. a negação de $p\rightarrow (\neg q)$
## Exercícios de python
##### Exercício:
Implemente os operadores de implicação e bi-implicação, através de funções
imp(bool,bool)->bool e biimp(bool,bool)->bool.
```
def imp(p,q):
    '''imp(bool,bool)->bool
    Logical implication operator: only a true antecedent with a
    false consequent makes the implication false.'''
    return q if p else True
def biimp(p,q):
    '''biimp(bool,bool)->bool
    Logical biconditional: true exactly when p and q imply each other.'''
    forward = imp(p, q)
    backward = imp(q, p)
    return forward and backward
imp(False,True)
biimp(False,True)
```
##### Exercício:
Apresente as tabelas de verdade da implicação da bi-implicação e da proposição $P4:(p\rightarrow q)\vee h$. Por exemplo, tal que
>>> TabelaP4()
-----------------------------
p | q | h | (p->q)|h
-----------------------------
False|False|False| True
False|False| True| True
False| True|False| True
False| True| True| True
True|False|False| False
True|False| True| True
True| True|False| True
True| True| True| True
```
def TabelaP4():
    '''TabelaP4() -> None
    Print the truth table of the proposition P4: (p->q)|h.'''
    header_cells = ['p'.center(5), 'q'.center(5), 'h'.center(5)]
    print('|'.join(header_cells) + '| (p->q)|h')
    print('-' * 27)
    # Enumerate every valuation of the three variables.
    for p in [False, True]:
        for q in [False, True]:
            for h in [False, True]:
                row_value = imp(p, q) or h
                cells = [str(p).center(5), str(q).center(5), str(h).center(5)]
                print('|'.join(cells) + '|' + str(row_value).center(10))
TabelaP4()
```
##### Exercício:
Defina a função
cab(list)->
em que dado uma lista de strings ['p1','p2','p3',...,'pn'], imprima o cabeçalho duma tabela de verdade. Por exemplo, tal que
>>> cab(['p1','p2','imp(p1,p2)'])
-------------------------
p1 | p2 | imp(p1,p2)
-------------------------
```
def cab(lista):
    '''cab(list) -> None
    Print the header of a truth table for the given column labels:
    a dashed ruler, the labels separated by '|', and another ruler.'''
    ruler = '-' * (5 * (len(lista) + 1))
    print(ruler)
    body = ''.join(label.center(5) + '|' for label in lista[:-1])
    print(body + lista[-1])  # last label is printed without centring
    print(ruler)
cab(['p1','p2','imp(p1,p2)'])
```
##### Exercício:
Defina a função
linha(list)->
em que dada uma lista de valores lógicos ['p1','p2','p3',...,'pn'], imprima uma linha 'p1|p2|p3|...|pn' duma tabela de verdade, onde cada valor lógico está numa string com 5 posições. Por exemplo, tal que
>>> linha([True,False,True])
True|False| True
```
def linha(lista):
    '''linha(list) -> None
    Print one row of a truth table: each value centred in a 5-column
    field and separated by '|'; the final value is not centred.'''
    row = ''.join(str(value).center(5) + '|' for value in lista[:-1])
    print(row + str(lista[-1]))  # final element, uncentred
linha([True,False,True])
```
##### Exercício:
Defina uma função trad(string)->string que faça a tradução duma expressão proposicional codificada, usando os símbolos 0,1,\&,$|$ e $\sim$, numa expressão proposicional no Python usando False, True, and, or e not. Por exemplo, tal que
>>> trad('(p&~(q|w))')
'(p and not (q or w))'
```
def trad(exp):
    '''trad(str) -> str
    Translate a coded propositional expression that uses the symbols
    0, 1, &, | and ~ into a Python expression using False, True,
    and, or and not.'''
    replacements = (
        ('0', 'False'),
        ('1', 'True'),
        ('&', ' and '),
        ('|', ' or '),
        ('~', ' not '),
    )
    for symbol, python_token in replacements:
        exp = exp.replace(symbol, python_token)
    return exp
trad('(p&~(q|w))')
```
##### Exercício:
Defina a função
Eval(string,list)->bool
que avalia a expressão proposicional, na sintaxe do Python, associando a cada variável usada <var> o valor lógico <bool>. A associação entre variáveis e valores lógicos deve ser descrita por pares (<var>,<bool>) na lista que serve de argumento.
Eval('(p1 and not (p2 or p3))',[('p1',True),('p2',False),('p3',True)])} avalia '(True and not (False or True))'.
Por exemplo, tal que
>>> Eval('not(p1 and p2) or p1',[('p1',True),('p2',False)])
True
```
def Eval(exp, atrib):
    '''Eval(string, list) -> bool
    Evaluate a propositional expression written in Python syntax,
    binding each variable <var> to the logical value <bool>.
    The bindings are given as (<var>, <bool>) pairs in `atrib`.

    Each variable name is substituted as a whole word, so a binding
    for 'q' no longer corrupts the 'u' inside an already substituted
    'True', and a binding for 'p' cannot clobber a distinct 'p1'.
    NOTE: eval() executes arbitrary code -- only use trusted input.
    '''
    import re  # local import keeps this notebook cell self-contained
    for name, value in atrib:
        exp = re.sub(r'\b%s\b' % re.escape(name), str(value), exp)
    return eval(exp)
Eval('not(p1 and p2) or p1',[('p1',True),('p2',False)])
```
##### Exercício:
Represente em representação binária os números de $2^n-1$ até zero. Exemplo:
>>> binlist(3)
111
110
101
100
011
010
001
000
```
def binlist(nvar):
    '''binlist(int) -> None
    Print the numbers from 2**nvar - 1 down to 0 in binary,
    zero-padded to nvar digits.'''
    for value in reversed(range(2 ** nvar)):
        print(format(value, '0%db' % nvar))
binlist(3)
```
##### Exercício:
Usando as funções anteriores, defina uma função
tabela(string, list)->
que imprima a tabela de verdade da proposição $q$, descrita pela string, assumindo que as suas variáveis estão na lista $[p1,p2,...,pn]$. (USANDO: a linguagem proposicional de símbolos 0,1,\&,$|$ e $\sim$, mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool))
Por exemplo, tal que
>>> tabela('imp(u,q)|w',['u','q','w'])
-------------------------
u | q | w |imp(u,q)|w
-------------------------
True| True| True|True
True| True|False|True
True|False| True|True
True|False|False|False
False| True| True|True
False| True|False|True
False|False| True|True
False|False|False|True
```
def tabela(exp,var):
    '''tabela(str, list) -> None
    Print the truth table of the proposition described by `exp`,
    assuming its variables are the names listed in `var`.
    USES: the propositional language with symbols 0, 1, &, | and ~,
    plus the helpers imp(bool,bool)->bool and biimp(bool,bool)->bool.
    '''
    cab(var + [exp])
    nvar = len(var)
    python_exp = trad(exp)  # translate once instead of once per row
    for n in range(2 ** nvar - 1, -1, -1):
        bits = bin(n)[2:].rjust(nvar, '0')
        # One (name, bool) binding per variable, in column order.
        atrib = [(name, bool(int(bit))) for name, bit in zip(var, bits)]
        valores = [value for _, value in atrib]
        linha(valores + [Eval(python_exp, atrib)])
tabela('imp(u,q)|w',['u','q','w'])
```
##### Exercício:
Usando as funções anteriores, defina uma função
tautologia(string, list)->bool
que verifica se a proposição $q$, descrita pela string, é uma tautologia e assumindo que as suas variáveis estão descritas na lista $[p1,p2,...p_n]$. (USANDO: a linguagem proposicional de símbolos 0,1,\&,$|$ e $\sim$, mais as funções imp(bool,bool)->bool e biimp(bool,bool)->bool)
>>> tautologia('biimp(~q | w, imp(q,w))',['q','w'])
False
```
def tautologia(exp,var):
    '''tautologia(str, list) -> bool
    Check whether the proposition described by `exp` is a tautology,
    assuming its variables are the names listed in `var`.
    USES: the propositional language with symbols 0, 1, &, | and ~,
    plus the helpers imp(bool,bool)->bool and biimp(bool,bool)->bool.
    '''
    nvar = len(var)
    # Translate the coded symbols first, as tabela() does; evaluating
    # them raw would use Python's *bitwise* ~ and | and give wrong
    # truth values (the previous version had exactly this bug).
    python_exp = trad(exp)
    for n in range(2 ** nvar - 1, -1, -1):
        bits = bin(n)[2:].rjust(nvar, '0')
        atrib = [(name, bool(int(bit))) for name, bit in zip(var, bits)]
        if not Eval(python_exp, atrib):
            return False  # one false row refutes a tautology
    return True
tautologia('biimp(~q | w, imp(q,w))',['q','w'])
```
| github_jupyter |
# LES Band Data Analysis
---
### Carter J. Humphreys
Email: [chumphre@oswego.edu](mailto:chumphre@oswego.edu) | GitHub:[@HumphreysCarter](https://github.com/HumphreysCarter) | Website: [carterhumphreys.com](http://carterhumphreys.com/)
```
import os
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import numpy as np
from metpy.plots import USCOUNTIES
from datetime import datetime
from scipy.stats import gaussian_kde
dataDIR='../data/BAND_POSITION'
def find_nthIndex(fullString, find, n):
    '''Return the index of the n-th occurrence of `find` in `fullString`,
    or -1 if there are fewer than n occurrences (n is 1-based).'''
    position = fullString.find(find)
    for _ in range(n - 1):
        if position < 0:
            break  # ran out of occurrences
        position = fullString.find(find, position + len(find))
    return position
# Accumulate every band-position row from all 36 events into one DataFrame.
data=[]
eventCount=0  # counts data files; name says "event" but it increments per file
for eventID in range(1, 37):  # events are numbered 01..36 on disk
    eventDIR=f'{dataDIR}/Ontario_LES_Event{str(eventID).zfill(2)}'
    for dataFile in os.listdir(eventDIR):
        eventCount+=1
        posData = pd.read_csv(f'{eventDIR}/{dataFile}')
        # Parse radar site and timestamp out of the file name
        # (assumes format <prefix>_<site>_<YYYYMMDD>_<HHMMSS>.csv -- TODO confirm)
        radarSite=dataFile[(find_nthIndex(dataFile, '_', 1)+1):find_nthIndex(dataFile, '_', 2)]
        dateTime=dataFile[(find_nthIndex(dataFile, '_', 2)+1):find_nthIndex(dataFile, '.csv', 1)]
        dateTime=datetime.strptime(dateTime, '%Y%m%d_%H%M%S')
        # NOTE(review): radarSite/dateTime are parsed but never attached to the
        # rows -- the insert calls below are commented out.
        for index, row in posData.iterrows():
            #posData.insert(0, 0, dateTime)
            #posData.insert(0, 1, radarSite)
            #posData=posData.values[i]
            data.append(row)
df = pd.DataFrame(data, columns = ['Latitude', 'Longitude', 'Azimuth [deg]', 'Range [km]', 'Data Value [dBZ]'])
df  # notebook cell output: preview the combined table
# Plot extent: [west, east, south, north] in degrees
plotExtent = [-78.5, -73.5, 42.5, 45]
# Create the figure and an axes set to the projection
# (stereographic projection centred on the midpoint of the extent)
proj = ccrs.Stereographic(central_longitude=((plotExtent[1]-plotExtent[0])/2+plotExtent[0]), central_latitude=((plotExtent[3]-plotExtent[2])/2+plotExtent[2]))
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(1, 1, 1, projection=proj)
ax.set_extent(plotExtent)
# Add geographic features (counties, state and country borders)
ax.add_feature(USCOUNTIES.with_scale('5m'), edgecolor='gray', linewidth=0.25)
state_borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_1_states_provinces_lakes', scale='10m', facecolor='none')
ax.add_feature(state_borders, edgecolor='black', linewidth=0.5)
country_borders = cfeature.NaturalEarthFeature(category='cultural', name='admin_0_countries', scale='10m', facecolor='none')
ax.add_feature(country_borders, edgecolor='black', linewidth=1.0)
# Get point data
lat = df['Latitude'].values
lon = df['Longitude'].values
dbz = df['Data Value [dBZ]'].values  # NOTE(review): dbz is extracted but unused below
# Calculate the point density with a Gaussian kernel density estimate
xy = np.vstack([lon, lat])
z = gaussian_kde(xy)(xy)
# Sort the points by density, so that the densest points are plotted last
idx = z.argsort()
lon, lat, z = lon[idx], lat[idx], z[idx]
scatter=ax.scatter(lon, lat, c=z, cmap='plasma', transform=ccrs.PlateCarree(), marker='s', s=25)
cbar=plt.colorbar(scatter)
cbar.ax.set_ylabel('Frequency')
# Set a title and show the plot
# NOTE(review): matplotlib documents loc values 'left'/'right' (lower case);
# confirm 'Left'/'Right' are accepted by the matplotlib version in use.
ax.set_title('Hourly Lake-Effect Snow Band Positions', loc='Left')
ax.set_title(f'{eventCount} Events (October 2015 - April 2019)', loc='Right')
# Export fig
plt.show()
```
| github_jupyter |
# RandomForestClassifier
© Thomas Robert Holy 2019
<br>
Version 1.0
<br><br>
Visit me on GitHub: https://github.com/trh0ly
<br>
Kaggle Link: https://www.kaggle.com/c/dda-p2/leaderboard
## Package Import
```
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.metrics import auc
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import datetime as dt
from IPython.core.display import display, HTML
from scipy.spatial.distance import euclidean
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.svm import SVC
```
## Hilfsfunktionen
### Funktion zur Betrachtung der Konfusinsmatrix
```
# Definition einer Funktion, welche eine Konfusionsmatrix und einen Klassifikationsreport
# zurückgibt. Die Konfusionsmatrix kann, wenn ein Wert für c gegeben ist, für beliebige
# Werte von c betrachtet werden.
#------------
# Argumente:
# - X: DataFrame auf welchem die Prognose durchgeführt werden soll (ohne die Zielgröße)
# - y_true: Zum DataFrame X gehörige Werte der Zielgröße
# - model: Modell auf Basis dessen die Konfusionsmatrix berechnet werden soll
# - class_names: Bezeichnung für die Spalten des Dataframes (default=['0', '1'], mit 0 = negativ und 1 = positiv)
# - c:
# ---> Wenn None, dann wird die Konfusionsmatrix ohne die Einbeziehung von c bestimmt
# ---> Wenn != None, dann wird die Konfusionsmatrix in Abhängigkeit von c bestimmt
#------------
def get_confusion_matrix(X, y_true, model, class_names=['0', '1'], c=None):
    """Return a confusion matrix (as a labelled DataFrame) and a
    classification report for `model`'s predictions on `X`.

    Args:
        X: feature DataFrame to predict on (without the target column).
        y_true: true target values belonging to X.
        model: fitted classifier; must provide predict() and, when a
            threshold is used, predict_proba().
        class_names: row/column labels, default ['0', '1']
            (0 = negative, 1 = positive).
        c: optional decision threshold; when given, class 1 is predicted
            for every sample with P(class 1) >= c instead of the model's
            default decision rule.

    Returns:
        (confusion-matrix DataFrame, classification-report string)
    """
    if c is not None:
        # Apply the custom probability threshold to the positive class.
        pred_probability = model.predict_proba(X)
        y_pred = (pred_probability >= c)[:, 1].astype(int)
    else:
        # No threshold given: use the model's own decision rule.
        y_pred = model.predict(X)
    conf_mat = confusion_matrix(y_true, y_pred)
    # Wrap in a MultiIndex DataFrame so rows read "true value" and
    # columns read "model prediction".
    df_index = pd.MultiIndex.from_tuples([('Wahrer Wert', cn) for cn in class_names])
    df_cols = pd.MultiIndex.from_tuples([('Prognose des Modells', cn) for cn in class_names])
    df_conf_mat = pd.DataFrame(conf_mat, index=df_index, columns=df_cols)
    return df_conf_mat, classification_report(y_true, y_pred)
```
### Funktion zur Betrachtung der ROC-Kurve
```
# Definition einer Funktion, welche auf Basis eines gegeben Modells und zweier zusammengehöriger
# DataFrames die receiver operating characteristic curve (ROC-Curve) visualisiert
#------------
# Argumente:
# - X: DataFrame auf welchem die Prognose durchgeführt werden soll (ohne die Zielgröße)
# - y_true: Zum DataFrame X gehörige Werte der Zielgröße
# - model: Modell auf Basis dessen die ROC-Curve berechnet werden soll
#------------
def roc_curve_func(X, y_true, model):
    '''Plot the ROC curve (with the AUC in the legend) for `model`
    evaluated on X / y_true.'''
    # Scores for the positive class, then FPR/TPR/AUC via sklearn.
    scores = model.predict_proba(X)[:, 1]
    fpr, tpr, _ = roc_curve(y_true, scores)
    area = auc(fpr, tpr)
    # Draw the curve plus the diagonal no-skill reference line.
    plt.figure()
    plt.plot(fpr, tpr, color='red', lw=2, label='ROC-Kurve (AUC = %0.5f)' % area)
    plt.plot([0, 1], [0, 1], color='black', lw=2, linestyle='--')
    plt.xlim([-0.005, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC-Kurve')
    plt.legend(loc="lower right")
    plt.show()
```
### Funktion für das Submitten
```
# Definition einer Funktion, welche das Submitten der Prognose auf dem Testdatensatz erleichtert
#------------
# Argumente:
# - model: Modell auf Basis dessen die Prognose erfolgt
# - c:
# ---> Wenn None, dann wird die Prognose ohne die Berücksichtung von c vorgenommen
# ---> Wenn != None, dann wird Prognose mit der Berücksichtung von c vorgenommen
# - save:
# ---> Wenn False, dann werden die prognostizierten Daten nicht gespeichert
# ---> Wenn True, dann werden die prognostizierten Daten als .csv gespeichert
# - manu_name:
# ---> Wenn None, dann wird ein nicht eindeutiger Standardname als Bezeichnung der .csv gewählt
# ---> Wenn != None, dann wird die zu speichernde .csv mit einem timestamp versehen
#------------
def submit(model, c=None, save=False, manu_name=False):
    """Predict on the Kaggle test set and build the submission table.

    Args:
        model: fitted classifier used for the predictions.
        c: optional probability threshold; when given, class 1 is
            predicted for P(class 1) >= c, otherwise model.predict()
            is used.
        save: when True, write the submission to a .csv file.
        manu_name: when True, append a timestamp to the file name so
            each saved file is unique; when False, use the fixed
            default name (overwrites previous runs).

    Returns:
        (head of the submission DataFrame, rows predicted as defective)
    """
    # Read the raw test data (first column is the index).
    X_test = pd.read_csv('test.csv', index_col=0)
    # Predict, with or without the custom threshold c.
    if c is not None:
        predicted_test = (model.predict_proba(X_test) >= c)[:,1].astype(int)
    else:
        predicted_test = model.predict(X_test)
    # Fill the sample submission with the predicted labels.
    submission = pd.read_csv('sample_submission.csv')
    submission['Fehlerhaft'] = predicted_test
    if save:
        if manu_name:
            # Unique, timestamped file name.
            import datetime
            now = datetime.datetime.now()
            name = now.strftime('%Y-%m-%dT%H%M%S') + ('-%02d' % (now.microsecond / 10000))
            submission.to_csv('./predicted_values_' + str(name) + '.csv', index=False)
        else:
            # Fixed default file name.
            submission.to_csv('./predicted_values.csv', index=False)
    return submission.head(), submission.loc[submission['Fehlerhaft'] == 1]
```
### Funktion zum Filtern von Quantilen
```
# Definition einer Funktion, welche einen gegeben DataFrame
# um untere und obere Quantile beschneiden kann
#------------
# Argumente:
# - orignal_df: DataFrame welcher bearbeitet werden soll
# - quantile_low: Unteres Quantil bis zu welchem orignal_df beschnitten werden soll
# - quantile_high: Oberes Quantil welchem orignal_df beschnitten werden soll
# - colum_to_drop: Spalte des orignal_df, welche während des Vorgangs gedroppt werden soll
#------------
def filter_my_df(orignal_df, quantile_low, quantile_high, colum_to_drop):
    '''Trim a DataFrame to the rows whose values lie strictly between
    the given lower and upper quantiles (computed per column), leaving
    the column `colum_to_drop` out of the quantile computation and
    re-attaching it afterwards. Rows that fall outside the band in any
    column are dropped.'''
    # Work on every column except the one protected from filtering.
    numeric_part = orignal_df.loc[:, orignal_df.columns != colum_to_drop]
    # Per-column quantile bounds.
    bounds = numeric_part.quantile([quantile_low, quantile_high])
    # Keep only values strictly inside the (low, high) band;
    # everything else becomes NaN via index alignment.
    inside_band = lambda col: col[(col > bounds.loc[quantile_low, col.name]) &
                                  (col < bounds.loc[quantile_high, col.name])]
    numeric_part = numeric_part.apply(inside_band, axis=0)
    # Re-attach the protected column and drop rows that lost any value.
    result = pd.concat([orignal_df.loc[:, colum_to_drop], numeric_part], axis=1)
    result.dropna(inplace=True)
    return result
```
## Datensatz einlesen (bereinigigen) und betrachten
### Datensatz einlesen
```
#----------------------------
# Read the training data set; the first CSV column is used as the index.
data = pd.read_csv('train.csv', index_col=0)
```
### Optionale Datensatzbereinigung
```
"""
#----------------------------
# Datensatz unterteilen
df_fehlerfrei = data.loc[data['Fehlerhaft'] == 0]
df_fehlerhaft = data.loc[data['Fehlerhaft'] == 1]
"""
"""
#----------------------------
# Fehlerfreie Stückgüter
colum_to_drop = 'Fehlerhaft'
orignal_df = df_fehlerfrei
low = .0 # Unteres Quantil
high = .99 # Oberes Quantil
df_fehlerfrei_filtered = filter_my_df(df_fehlerfrei, low, high, colum_to_drop)
#----------------------------
# Fehlerhafte Stückgüter
colum_to_drop = 'Fehlerhaft'
orignal_df = df_fehlerhaft
low = .018333 # Unteres Quantil
high = 1. # Oberes Quantil
df_fehlerhaft_filtered = filter_my_df(df_fehlerhaft, low, high, colum_to_drop)
#----------------------------
# Teil-DataFrames zusammenführen
data_filtered = pd.concat([df_fehlerhaft_filtered, df_fehlerfrei_filtered], sort=False)
"""
```
### Beschreibung der separierten Datensätze (Betrachtung Min-/Maximum und Quantile)
```
"""
df_fehlerfrei.describe()
"""
"""
df_fehlerhaft.describe()
"""
# Use the raw data (swap the comment to use the filtered variant instead).
data_new = data #_filtered
data_new['Fehlerhaft'].value_counts()  # class balance of the target column
```
### Betrachtung Korrelationsmatrix
```
data_new = data #_filtered
#----------------------------
# For faster runtime and clearer plots: draw a sample of the data
data_sample = data_new.sample(2000, random_state=28) # random_state makes the sample reproducible, so it is identical for everyone
# Scatter matrix coloured by the target, to eyeball feature separation.
_ = pd.plotting.scatter_matrix(data_sample, c=data_sample['Fehlerhaft'], cmap='seismic', figsize=(16, 20))
```
### Datensatz in Trainings- und Validierungsteil splitten
```
# Split features (X) from the target (y), then hold out 20% for validation.
X = data_new.drop('Fehlerhaft', axis=1)
y = data_new['Fehlerhaft']
X_train, X_validierung, y_train, y_validierung = train_test_split(X, y, test_size=0.2, random_state=2121)
```
## Modell aufstellen
```
# Definition einer Funktion, welche eine Gittersuche mit einem RandomForestClassifier durchführt
# und nach einer 5-fach Kreuzvalidierung das beste Modell zurückgibt
#------------
# Argumente:
# - i: Fügt X^i der Featurematrix hinzu
# - X: DataFrame auf welchem die Prognose durchgefürt werden soll (ohne die Zielgröße)
# - y_true: Zum DataFrame X gehörige Werte der Zielgröße
# - my_scaler: Zu verwendender Scaler; per default MinMaxScaler; weitere Scaler: RobustScaler, Standardscaler
# - max_features: Anzahl der Features die einbezogen werden sollen (default=['auto']=sqrt(n_features))
# - n_estimators: Anzahl der "Bäume" im "Wald"
# - jobs: Anzahl der Threads die für den Durchlauf zur Verfügung stehen
# - gs_scoring: Scoring Verfahren im Rahmen der GridSearch
# - folts: Komplexität der Kreuzvalidierung
#------------
def rndfrst_1(i, X, y_true, my_scaler=MinMaxScaler, jobs=-3, gs_scoring='f1', folts=5, n_estimators=None, max_features=None):
    """Grid-search a RandomForestClassifier inside a scaling/polynomial
    pipeline and return the fitted search plus its best CV score.

    Args:
        i: polynomial degree; adds X^i terms to the feature matrix.
        X: feature DataFrame (without the target column).
        y_true: target values belonging to X.
        my_scaler: scaler class to use (default MinMaxScaler;
            alternatives: RobustScaler, StandardScaler).
        jobs: number of parallel jobs (negative: all-but-N cores).
        gs_scoring: scoring metric used by the grid search.
        folts: number of folds of the stratified cross-validation.
        n_estimators: list of tree counts to try; defaults to
            range(10, 201, 10) when None.
        max_features: number of features considered per split
            (None = all features).

    Returns:
        (fitted GridSearchCV, best cross-validation score)
    """
    # Default computed here instead of using a mutable default argument.
    if n_estimators is None:
        n_estimators = list(range(10, 200 + 1, 10))
    # Pipeline: scale -> polynomial features -> random forest
    prediction_pipe = Pipeline([('scaler', my_scaler()),
                                ('add_x_square', PolynomialFeatures(degree=i)),
                                ('classifier' , RandomForestClassifier(n_jobs=jobs))
                               ])
    # Parameter grid for the forest
    param_grid = [{'classifier' : [RandomForestClassifier()],
                   'classifier__n_estimators' : n_estimators,
                   'classifier__max_features' : [max_features],
                   'classifier__random_state': [2111]}
                 ]
    # StratifiedKFold because the data set is imbalanced
    scv = StratifiedKFold(n_splits=folts)
    # Grid search over the pipeline
    # NOTE(review): `iid` was deprecated in scikit-learn 0.22 and removed
    # in 0.24 -- drop this argument when upgrading sklearn.
    grid_search = GridSearchCV(
        estimator=prediction_pipe,
        param_grid=param_grid,
        scoring=gs_scoring,
        cv=scv,
        verbose=True,
        n_jobs=jobs,
        iid=False)
    # Fit on the full training data
    model = grid_search.fit(X, y_true)
    return model, grid_search.best_score_
```
### Modelaufruf und Scoring
#### Modell 1
```
# Model 1: MinMax scaling, F1-scored grid search over the tree count.
rndfrst_model, rndfrst_score = rndfrst_1(1, X_train, y_train, my_scaler=MinMaxScaler, jobs=-3, gs_scoring='f1', folts=5, n_estimators=list(range(10, 200 + 1,10)), max_features=None)
rndfrst_score  # best cross-validated F1 score
```
#### Modell 2
```
# Model 2: same search with StandardScaler.
rndfrst_model2, rndfrst_score2 = rndfrst_1(1, X_train, y_train, my_scaler=StandardScaler, jobs=-3, gs_scoring='f1', folts=5, n_estimators=list(range(10, 200 + 1,10)), max_features=None)
rndfrst_score2
# Model 3: same search with RobustScaler.
rndfrst_model3, rndfrst_score3 = rndfrst_1(1, X_train, y_train, my_scaler=RobustScaler, jobs=-3, gs_scoring='f1', folts=5, n_estimators=list(range(10, 200 + 1,10)), max_features=None)
rndfrst_score3
```
#### Scoring Model 1 / Modell 2
```
# Best hyper-parameters found for each of the three scaler variants.
model = rndfrst_model
print(model.best_params_)
model = rndfrst_model2
print(model.best_params_)
model = rndfrst_model3
print(model.best_params_)
# Evaluate model 1 on the training data with a 0.35 decision threshold.
model = rndfrst_model
c = 0.35
class_names = ['Stückgut fehlerfrei', 'Stückgut fehlerhaft']
confusion_matrix1, report1 = get_confusion_matrix(X_train, y_train, model, class_names, c)
confusion_matrix1
roc_curve_func(X_train, y_train, model)
print(report1)
```
#### Scoring auf Validierungsdatensatz
```
# Same evaluation on the held-out validation split.
model = rndfrst_model
c = 0.35
class_names = ['Stückgut fehlerfrei', 'Stückgut fehlerhaft']
confusion_matrix2, report2 = get_confusion_matrix(X_validierung, y_validierung, model, class_names, c)
confusion_matrix2
roc_curve_func(X_validierung, y_validierung, model)
print(report2)
```
## Submit
### Kontrolle Modellwahl (Modell 1 oder 2) anhand der Konfusionsmatrix
```
"""
model = rndfrst_model
c = 0.35
class_names = ['Stückgut fehlerfrei', 'Stückgut fehlerhaft']
confusion_matrix3, report3 = get_confusion_matrix(X_validierung, y_validierung, model, class_names, c)
confusion_matrix3
"""
```
### Submit der Prognose
```
"""
submission_head, submission_fehlerhaft = submit(model, c, save=True, manu_name=True)
submission_head
"""
```
### Ausgabe DataFrame mit als defekt klassifizierten Stückgütern im Testdatensatz
```
"""
submission_fehlerhaft
"""
```
| github_jupyter |
## Observations and Insights
1. The data and charts below show that the biggest contributing factor to increased tumor volume is the weight of the mouse.
2. Male and female mice were almost evenly represented, and sex does not seem to affect the outcomes on each drug regimen or the total tumor volume at the beginning or end of the study.
3. When comparing Capomulin against the other three regimens of interest it appears this initial data shows that Ramicane may be the superior regimen for decreasing tumor volume. Both Capomulin and Ramicane had the most time points evaluated. In determining the most effective regimen at the lowest cost other data points would need to be evaluated such as cost to administer each regimen and additional timepoints to determine if a longer study could factor into the efficacy of each drug regimen.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
from scipy.stats import linregress
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset (outer join keeps mice present in either file)
combined_df = pd.merge(mouse_metadata,study_results, on = "Mouse ID", how = "outer")
# Display the data table for preview
combined_df
# Checking the number of mice.
combined_df["Mouse ID"].nunique()
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_df = combined_df[combined_df[["Mouse ID", "Timepoint"]].duplicated() == True]
duplicate_df[["Mouse ID", "Timepoint"]]
# Optional: Get all the data for the duplicate mouse ID.
duplicate_df = combined_df[combined_df.duplicated(["Mouse ID", "Timepoint"], keep = False)] #https://www.geeksforgeeks.org/find-duplicate-rows-in-a-dataframe-based-on-all-or-selected-columns/
duplicate_df
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# NOTE(review): this drops only the duplicated (Mouse ID, Timepoint) rows and
# keeps the first of each pair; the affected mouse's other rows remain --
# confirm whether the whole mouse should be excluded instead.
clean_df = combined_df.drop_duplicates(subset=['Mouse ID', 'Timepoint'], keep='first')
clean_df
# Checking the number of mice in the clean DataFrame.
clean_df["Mouse ID"].nunique()
# double checking if any duplicates left
clean_df1 = clean_df[clean_df[["Mouse ID", "Timepoint"]].duplicated() == True]
clean_df1
```
## Summary Statistics
```
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
print("Key statistics for the tumor volume for each regimen:")  # fixed typo "statistcis"; plain string, no f-prefix needed
group = clean_df.groupby("Drug Regimen")
group_df = pd.DataFrame({"Mean": group["Tumor Volume (mm3)"].mean()})
group_df["Median"] = group["Tumor Volume (mm3)"].median()
group_df["Variance"] = group["Tumor Volume (mm3)"].var()
group_df["St Deviation"] = group["Tumor Volume (mm3)"].std()
group_df["SEM"] = group["Tumor Volume (mm3)"].sem()
group_df
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
clean_df.groupby("Drug Regimen").agg({"Tumor Volume (mm3)": ['mean','median','var','std','sem']})
```
## Bar and Pie Charts
```
# Generate a bar plot showing the total number of timepoints for all mice tested for each drug regimen using Pandas.
tpoint_count = pd.DataFrame({"Count Timepoint": group["Timepoint"].count()})
print(tpoint_count)
tpoint_count.plot(kind = "bar", figsize = (10,5), color = "g", legend=False)
plt.title("Number of Timepoints per Drug Regimen", fontsize = 13, fontweight = 'bold')
plt.ylabel("# Timepoints")
plt.show()
# reset_index so "Drug Regimen" becomes a plain column for the pyplot version below
tpoint_count.reset_index(inplace=True)
# Generate a bar plot showing the total number of timepoints for all mice tested for each drug regimen using pyplot.
y_axis = tpoint_count["Count Timepoint"]
x_axis = tpoint_count["Drug Regimen"]
plt.figure(figsize = (10,5))
plt.bar(x_axis, y_axis, color = 'y')
plt.title("Number of Timepoints per Drug Regimen", fontsize = 13, fontweight = 'bold')
plt.ylabel("# Timepoints")
plt.xlabel("Drug Regimen")
plt.xticks(rotation=90)
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using Pandas
# nunique() counts each mouse once, regardless of its number of timepoints.
group_gender = clean_df.groupby("Sex")
gender = pd.DataFrame(group_gender["Mouse ID"].nunique())
print(gender)
colors = ["orange", "grey"]
gender.plot(kind = "pie", colors = colors, figsize = (10,5), subplots = True, legend = False, autopct = "%1.1f%%")
plt.title("Gender Distribution", fontsize = 13, fontweight = 'bold')
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pyplot
gender.reset_index(inplace=True) # reset index to be able to take names for the chart
plt.figure(figsize = (10,5))
plt.pie(gender["Mouse ID"], labels = gender["Sex"], colors = colors, autopct = "%1.1f%%")
plt.title("Gender Distribution", fontsize = 13, fontweight = 'bold')
plt.show()
```
## Quartiles, Outliers and Boxplots
```
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
group_id = clean_df.groupby(["Mouse ID", "Drug Regimen"])
group_id_df = pd.DataFrame({"Timepoint": group_id["Timepoint"].max()})
group_id_df.reset_index(inplace=True)
group_id_df
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# (inner join on mouse/regimen/timepoint keeps exactly one final row per mouse)
maxpoint_df = pd.merge(group_id_df,clean_df, on = ["Mouse ID","Drug Regimen","Timepoint"], how = "inner")
maxpoint_df
# Put treatments into a list for the loop (and later for the box-plot labels)
regimens = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
# Final tumor-volume series per regimen (also consumed by the box plot below)
tumvoldata = []
for regimen in regimens:
    # Rows of mice on this regimen, final tumor volume only
    subset = maxpoint_df.loc[maxpoint_df["Drug Regimen"] == regimen, :]
    tumvoldata.append(subset["Tumor Volume (mm3)"])
# IQR-based outlier screen for each regimen.
# (Output text previously said "occupancy" -- copied from another exercise --
# and is corrected here to describe tumor volume.)
for regimen, volumes in zip(regimens, tumvoldata):
    d = pd.DataFrame(volumes)
    quartiles = d['Tumor Volume (mm3)'].quantile([.25, .5, .75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq - lowerq
    # 1.5 * IQR fences
    lower_bound = lowerq - (1.5 * iqr)
    upper_bound = upperq + (1.5 * iqr)
    print(f"{regimen}:")
    print(f"The lower quartile of tumor volume is: {lowerq}")
    print(f"The upper quartile of tumor volume is: {upperq}")
    print(f"The interquartile range of tumor volume is: {iqr}")
    print(f"The median of tumor volume is: {quartiles[0.5]} ")
    print(f"Values below {lower_bound} could be outliers.")
    print(f"Values above {upper_bound} could be outliers.")
    # Rows outside the fences
    outliers = d.loc[(d['Tumor Volume (mm3)'] < lower_bound) | (d['Tumor Volume (mm3)'] > upper_bound)]
    print(f"Values outside the boundaries: ")
    print(outliers["Tumor Volume (mm3)"])
    print("--------------------------------")
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# (tumvoldata and regimens were built in the previous cell)
fig1, ax1 = plt.subplots()
# Orange circles mark outliers beyond the whiskers
flierprops = dict(marker='o', markerfacecolor='orange', markersize=8,
                  markeredgecolor='none')
ax1.set_title("Tumor Volume (mm3)")
ax1.set_ylabel("Tumor Volume (mm3)")
ax1.boxplot(tumvoldata,labels=regimens,notch=True, flierprops = flierprops)
plt.show()
```
## Line and Scatter Plots
```
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
mouse = "m601"  # regimens[0] == "Capomulin" (list defined in the outlier cell)
capomulin_plot = pd.DataFrame(clean_df.loc[(clean_df["Drug Regimen"] == regimens[0]) & (clean_df["Mouse ID"] == mouse),:])
capomulin_plot
plt.plot(capomulin_plot["Timepoint"],capomulin_plot["Tumor Volume (mm3)"], color = "g", marker = "o")
plt.title(f"Dynamics of the tumor volume for mouse id {mouse} treated with {regimens[0]}", fontsize = 13, fontweight = 'bold',y=1.05)
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.show()
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# Mean of every numeric column per mouse (tumor volume, weight, ...)
capomulin_plot_sc = pd.DataFrame(clean_df.loc[(clean_df["Drug Regimen"] == regimens[0]),:])
capomulin_plot_sc = capomulin_plot_sc.groupby(['Mouse ID']).mean()
capomulin_plot_sc
# NOTE(review): tumor volume is on the x-axis and weight on the y-axis,
# consistent with the axis labels below.
plt.scatter(capomulin_plot_sc["Tumor Volume (mm3)"],capomulin_plot_sc["Weight (g)"], marker = "o",facecolors = 'green')
plt.title(f"Average tumor volume vs. mouse weight for {regimens[0]} regimen", fontsize = 13, fontweight = 'bold', y=1.05)
plt.xlabel("Tumor Volume (mm3)")
plt.ylabel("Weight (g)")
plt.ylim(10,max(capomulin_plot_sc["Weight (g)"])+5)
plt.xlim(32,max(capomulin_plot_sc["Tumor Volume (mm3)"])+2)
plt.show()
```
## Correlation and Regression
```
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
x_values = capomulin_plot_sc["Tumor Volume (mm3)"]
y_values = capomulin_plot_sc["Weight (g)"]
correlation = st.pearsonr(x_values,y_values)  # (r, p-value) tuple
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept  # fitted line at the data points
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values, marker = "o",facecolors = 'g')
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(40,19),fontsize=11,color="r")
plt.title(f"Average tumor volume vs. mouse weight for {regimens[0]} regimen", fontsize = 13, fontweight = 'bold', y=1.05)
plt.xlabel("Tumor Volume (mm3)")
plt.ylabel("Weight (g)")
plt.ylim(10,max(capomulin_plot_sc["Weight (g)"])+5)
plt.xlim(32,max(capomulin_plot_sc["Tumor Volume (mm3)"])+2)
print(f"The correlation between mouse weight and average tumor volume for the Capomulin regimen is {round(correlation[0],2)}" )
plt.show()
```
| github_jupyter |
# Introduction to Python programming
J.R. Johansson (jrjohansson at gmail.com)
The latest version of this [IPython notebook](http://ipython.org/notebook.html) lecture is available at [http://github.com/jrjohansson/scientific-python-lectures](http://github.com/jrjohansson/scientific-python-lectures).
The other notebooks in this lecture series are indexed at [http://jrjohansson.github.io](http://jrjohansson.github.io).
## Python program files
* Python code is usually stored in text files with the file ending "`.py`":
myprogram.py
* Every line in a Python program file is assumed to be a Python statement, or part thereof.
* The only exception is comment lines, which start with the character `#` (optionally preceded by an arbitrary number of white-space characters, i.e., tabs or spaces). Comment lines are usually ignored by the Python interpreter.
* To run our Python program from the command line we use:
$ python myprogram.py
* On UNIX systems it is common to define the path to the interpreter on the first line of the program (note that this is a comment line as far as the Python interpreter is concerned):
#!/usr/bin/env python
If we do, and if we additionally set the file script to be executable, we can run the program like this:
$ myprogram.py
### Example:
```
ls scripts/hello-world*.py
cat scripts/hello-world.py
!python scripts/hello-world.py
```
### Character encoding
The standard character encoding is ASCII, but we can use any other encoding, for example UTF-8. To specify that UTF-8 is used we include the special line
# -*- coding: UTF-8 -*-
at the top of the file.
```
cat scripts/hello-world-in-swedish.py
!python scripts/hello-world-in-swedish.py
```
Other than these two *optional* lines in the beginning of a Python code file, no additional code is required for initializing a program.
## IPython notebooks
This file - an IPython notebook - does not follow the standard pattern with Python code in a text file. Instead, an IPython notebook is stored as a file in the [JSON](http://en.wikipedia.org/wiki/JSON) format. The advantage is that we can mix formatted text, Python code and code output. It requires the IPython notebook server to run it though, and therefore isn't a stand-alone Python program as described above. Other than that, there is no difference between the Python code that goes into a program file or an IPython notebook.
## Modules
Most of the functionality in Python is provided by *modules*. The Python Standard Library is a large collection of modules that provides *cross-platform* implementations of common facilities such as access to the operating system, file I/O, string management, network communication, and much more.
### References
* The Python Language Reference: http://docs.python.org/2/reference/index.html
* The Python Standard Library: http://docs.python.org/2/library/
To use a module in a Python program it first has to be imported. A module can be imported using the `import` statement. For example, to import the module `math`, which contains many standard mathematical functions, we can do:
```
import math
```
This includes the whole module and makes it available for use later in the program. For example, we can do:
```
import math
x = math.cos(2 * math.pi)
print(x)
```
Alternatively, we can choose to import all symbols (functions and variables) in a module to the current namespace (so that we don't need to use the prefix "`math.`" every time we use something from the `math` module):
```
from math import *
x = cos(2 * pi)
print(x)
```
This pattern can be very convenient, but in large programs that include many modules it is often a good idea to keep the symbols from each module in their own namespaces, by using the `import math` pattern. This would eliminate potentially confusing problems with namespace collisions.
As a third alternative, we can choose to import only a few selected symbols from a module by explicitly listing which ones we want to import instead of using the wildcard character `*`:
```
from math import cos, pi
x = cos(2 * pi)
print(x)
```
### Looking at what a module contains, and its documentation
Once a module is imported, we can list the symbols it provides using the `dir` function:
```
import math
print(dir(math))
```
And using the function `help` we can get a description of each function (almost .. not all functions have docstrings, as they are technically called, but the vast majority of functions are documented this way).
```
help(math.log)
log(10)
log(10, 2)
```
We can also use the `help` function directly on modules: Try
help(math)
Some very useful modules form the Python standard library are `os`, `sys`, `math`, `shutil`, `re`, `subprocess`, `multiprocessing`, `threading`.
A complete lists of standard modules for Python 2 and Python 3 are available at http://docs.python.org/2/library/ and http://docs.python.org/3/library/, respectively.
## Variables and types
### Symbol names
Variable names in Python can contain alphanumerical characters `a-z`, `A-Z`, `0-9` and some special characters such as `_`. Normal variable names must start with a letter.
By convention, variable names start with a lower-case letter, and Class names start with a capital letter.
In addition, there are a number of Python keywords that cannot be used as variable names. These keywords are:
and, as, assert, break, class, continue, def, del, elif, else, except,
exec, finally, for, from, global, if, import, in, is, lambda, not, or,
pass, print, raise, return, try, while, with, yield
Note: Be aware of the keyword `lambda`, which could easily be a natural variable name in a scientific program. But being a keyword, it cannot be used as a variable name.
### Assignment
The assignment operator in Python is `=`. Python is a dynamically typed language, so we do not need to specify the type of a variable when we create one.
Assigning a value to a new variable creates the variable:
```
# variable assignments
x = 1.0
my_variable = 12.2
```
Although not explicitly specified, a variable does have a type associated with it. The type is derived from the value that was assigned to it.
```
type(x)
```
If we assign a new value to a variable, its type can change.
```
x = 1
type(x)
```
If we try to use a variable that has not yet been defined we get an `NameError`:
```
print(y)
```
### Fundamental types
```
# integers
x = 1
type(x)
# float
x = 1.0
type(x)
# boolean
b1 = True
b2 = False
type(b1)
# complex numbers: note the use of `j` to specify the imaginary part
x = 1.0 - 1.0j
type(x)
print(x)
print(x.real, x.imag)
```
### Type utility functions
The module `types` contains a number of type name definitions that can be used to test if variables are of certain types:
```
import types
# print all types defined in the `types` module
print(dir(types))
x = 1.0
# check if the variable x is a float
type(x) is float
# check if the variable x is an int
type(x) is int
```
We can also use the `isinstance` method for testing types of variables:
```
isinstance(x, float)
```
### Type casting
```
x = 1.5
print(x, type(x))
x = int(x)
print(x, type(x))
z = complex(x)
print(z, type(z))
x = float(z)
```
Complex variables cannot be cast to floats or integers. We need to use `z.real` or `z.imag` to extract the part of the complex number we want:
```
y = bool(z.real)
print(z.real, " -> ", y, type(y))
y = bool(z.imag)
print(z.imag, " -> ", y, type(y))
```
## Operators and comparisons
Most operators and comparisons in Python work as one would expect:
* Arithmetic operators `+`, `-`, `*`, `/`, `//` (integer division), '**' power
```
1 + 2, 1 - 2, 1 * 2, 1 / 2
1.0 + 2.0, 1.0 - 2.0, 1.0 * 2.0, 1.0 / 2.0
# Integer division of float numbers
3.0 // 2.0
# Note! The power operators in python isn't ^, but **
2 ** 2
```
Note: The `/` operator always performs a floating point division in Python 3.x.
This is not true in Python 2.x, where the result of `/` is always an integer if the operands are integers.
to be more specific, `1/2 = 0.5` (`float`) in Python 3.x, and `1/2 = 0` (`int`) in Python 2.x (but `1.0/2 = 0.5` in Python 2.x).
* The boolean operators are spelled out as the words `and`, `not`, `or`.
```
True and False
not False
True or False
```
* Comparison operators `>`, `<`, `>=` (greater or equal), `<=` (less or equal), `==` equality, `is` identical.
```
2 > 1, 2 < 1
2 > 2, 2 < 2
2 >= 2, 2 <= 2
# equality
[1,2] == [1,2]
# objects identical?
l1 = l2 = [1,2]
l1 is l2
```
## Compound types: Strings, List and dictionaries
### Strings
Strings are the variable type that is used for storing text messages.
```
s = "Hello world"
type(s)
# length of the string: the number of characters
len(s)
# replace a substring in a string with something else
s2 = s.replace("world", "test")
print(s2)
```
We can index a character in a string using `[]`:
```
s[0]
```
**Heads up MATLAB users:** Indexing start at 0!
We can extract a part of a string using the syntax `[start:stop]`, which extracts characters between index `start` and `stop` -1 (the character at index `stop` is not included):
```
s[0:5]
s[4:5]
```
If we omit either (or both) of `start` or `stop` from `[start:stop]`, the default is the beginning and the end of the string, respectively:
```
s[:5]
s[6:]
s[:]
```
We can also define the step size using the syntax `[start:end:step]` (the default value for `step` is 1, as we saw above):
```
s[::1]
s[::2]
```
This technique is called *slicing*. Read more about the syntax here: http://docs.python.org/release/2.7.3/library/functions.html?highlight=slice#slice
Python has a very rich set of functions for text processing. See for example http://docs.python.org/2/library/string.html for more information.
#### String formatting examples
```
print("str1", "str2", "str3") # The print statement concatenates strings with a space
print("str1", 1.0, False, -1j) # The print statements converts all arguments to strings
print("str1" + "str2" + "str3") # strings added with + are concatenated without space
print("value = %f" % 1.0) # we can use C-style string formatting
# this formatting creates a string
s2 = "value1 = %.2f. value2 = %d" % (3.1415, 1.5)
print(s2)
# alternative, more intuitive way of formatting a string
s3 = 'value1 = {0}, value2 = {1}'.format(3.1415, 1.5)
print(s3)
```
### List
Lists are very similar to strings, except that each element can be of any type.
The syntax for creating lists in Python is `[...]`:
```
l = [1,2,3,4]
print(type(l))
print(l)
```
We can use the same slicing techniques to manipulate lists as we could use on strings:
```
print(l)
print(l[1:3])
print(l[::2])
```
**Heads up MATLAB users:** Indexing starts at 0!
```
l[0]
```
Elements in a list do not all have to be of the same type:
```
l = [1, 'a', 1.0, 1-1j]
print(l)
```
Python lists can be inhomogeneous and arbitrarily nested:
```
nested_list = [1, [2, [3, [4, [5]]]]]
nested_list
```
Lists play a very important role in Python. For example they are used in loops and other flow control structures (discussed below). There are a number of convenient functions for generating lists of various types, for example the `range` function:
```
start = 10
stop = 30
step = 2
range(start, stop, step)
# in python 3 range generates an iterator, which can be converted to a list using 'list(...)'.
# It has no effect in python 2
list(range(start, stop, step))
list(range(-10, 10))
s
# convert a string to a list by type casting:
s2 = list(s)
s2
# sorting lists
s2.sort()
print(s2)
```
#### Adding, inserting, modifying, and removing elements from lists
```
# create a new empty list
l = []
# add an elements using `append`
l.append("A")
l.append("d")
l.append("d")
print(l)
```
We can modify lists by assigning new values to elements in the list. In technical jargon, lists are *mutable*.
```
l[1] = "p"
l[2] = "p"
print(l)
l[1:3] = ["d", "d"]
print(l)
```
Insert an element at an specific index using `insert`
```
l.insert(0, "i")
l.insert(1, "n")
l.insert(2, "s")
l.insert(3, "e")
l.insert(4, "r")
l.insert(5, "t")
print(l)
```
Remove first element with specific value using 'remove'
```
l.remove("A")
print(l)
```
Remove an element at a specific location using `del`:
```
del l[7]
del l[6]
print(l)
```
See `help(list)` for more details, or read the online documentation
### Tuples
Tuples are like lists, except that they cannot be modified once created, that is they are *immutable*.
In Python, tuples are created using the syntax `(..., ..., ...)`, or even `..., ...`:
```
point = (10, 20)
print(point, type(point))
point = 10, 20
print(point, type(point))
```
We can unpack a tuple by assigning it to a comma-separated list of variables:
```
x, y = point
print("x =", x)
print("y =", y)
```
If we try to assign a new value to an element in a tuple we get an error:
```
point[0] = 20
```
### Dictionaries
Dictionaries are also like lists, except that each element is a key-value pair. The syntax for dictionaries is `{key1 : value1, ...}`:
```
params = {"parameter1" : 1.0,
"parameter2" : 2.0,
"parameter3" : 3.0,}
print(type(params))
print(params)
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
params["parameter1"] = "A"
params["parameter2"] = "B"
# add a new entry
params["parameter4"] = "D"
print("parameter1 = " + str(params["parameter1"]))
print("parameter2 = " + str(params["parameter2"]))
print("parameter3 = " + str(params["parameter3"]))
print("parameter4 = " + str(params["parameter4"]))
```
## Control Flow
### Conditional statements: if, elif, else
The Python syntax for conditional execution of code uses the keywords `if`, `elif` (else if), `else`:
```
statement1 = False
statement2 = False
if statement1:
print("statement1 is True")
elif statement2:
print("statement2 is True")
else:
print("statement1 and statement2 are False")
```
For the first time, here we encounter a peculiar and unusual aspect of the Python programming language: program blocks are defined by their indentation level.
Compare to the equivalent C code:
if (statement1)
{
printf("statement1 is True\n");
}
else if (statement2)
{
printf("statement2 is True\n");
}
else
{
printf("statement1 and statement2 are False\n");
}
In C, blocks are defined by the enclosing curly brackets `{` and `}`, and the level of indentation (white space before the code statements) does not matter (it is completely optional).
But in Python, the extent of a code block is defined by the indentation level (usually a tab or say four white spaces). This means that we have to be careful to indent our code correctly, or else we will get syntax errors.
#### Examples:
```
statement1 = statement2 = True
if statement1:
if statement2:
print("both statement1 and statement2 are True")
# Bad indentation!
if statement1:
if statement2:
print("both statement1 and statement2 are True") # this line is not properly indented
statement1 = False
if statement1:
print("printed if statement1 is True")
print("still inside the if block")
if statement1:
print("printed if statement1 is True")
print("now outside the if block")
```
## Loops
In Python, loops can be programmed in a number of different ways. The most common is the `for` loop, which is used together with iterable objects, such as lists. The basic syntax is:
### **`for` loops**:
```
for x in [1,2,3]:
print(x)
```
The `for` loop iterates over the elements of the supplied list, and executes the containing block once for each element. Any kind of list can be used in the `for` loop. For example:
```
for x in range(4): # by default range start at 0
print(x)
```
Note: `range(4)` does not include 4 !
```
for x in range(-3,3):
print(x)
for word in ["scientific", "computing", "with", "python"]:
print(word)
```
To iterate over key-value pairs of a dictionary:
```
for key, value in params.items():
print(key + " = " + str(value))
```
Sometimes it is useful to have access to the indices of the values when iterating over a list. We can use the `enumerate` function for this:
```
for idx, x in enumerate(range(-3,3)):
print(idx, x)
```
### List comprehensions: Creating lists using `for` loops:
A convenient and compact way to initialize lists:
```
l1 = [x**2 for x in range(0,5)]
print(l1)
```
### `while` loops:
```
i = 0
while i < 5:
print(i)
i = i + 1
print("done")
```
Note that the `print("done")` statement is not part of the `while` loop body because of the difference in indentation.
## Functions
A function in Python is defined using the keyword `def`, followed by a function name, a signature within parentheses `()`, and a colon `:`. The following code, with one additional level of indentation, is the function body.
```
def func0():
print("test")
func0()
```
Optionally, but highly recommended, we can define a so-called "docstring", which is a description of the function's purpose and behavior. The docstring should follow directly after the function definition, before the code in the function body.
```
def func1(s):
"""
Print a string 's' and tell how many characters it has
"""
print(s + " has " + str(len(s)) + " characters")
help(func1)
func1("test")
```
Functions that return a value use the `return` keyword:
```
def square(x):
"""
Return the square of x.
"""
return x ** 2
square(4)
```
We can return multiple values from a function using tuples (see above):
```
def powers(x):
"""
Return a few powers of x.
"""
return x ** 2, x ** 3, x ** 4
powers(3)
x2, x3, x4 = powers(3)
print(x3)
```
### Default argument and keyword arguments
In a definition of a function, we can give default values to the arguments the function takes:
```
def myfunc(x, p=2, debug=False):
    """Return x raised to the power p, optionally printing a debug trace."""
    if debug:
        # Debug output is opt-in so the default call stays silent.
        print("evaluating myfunc for x = " + str(x) + " using exponent p = " + str(p))
    return x**p
```
If we don't provide a value of the `debug` argument when calling the function `myfunc` it defaults to the value provided in the function definition:
```
myfunc(5)
myfunc(5, debug=True)
```
If we explicitly list the names of the arguments in the function calls, they do not need to come in the same order as in the function definition. These are called *keyword* arguments, and they are often very useful in functions that take a lot of optional arguments.
```
myfunc(p=3, debug=True, x=7)
```
### Unnamed functions (lambda function)
In Python we can also create unnamed functions, using the `lambda` keyword:
```
f1 = lambda x: x**2
# is equivalent to
def f2(x):
return x**2
f1(2), f2(2)
```
This technique is useful for example when we want to pass a simple function as an argument to another function, like this:
```
# map is a built-in python function
map(lambda x: x**2, range(-3,4))
# in python 3 we can use `list(...)` to convert the iterator to an explicit list
list(map(lambda x: x**2, range(-3,4)))
```
## Classes
Classes are the key features of object-oriented programming. A class is a structure for representing an object and the operations that can be performed on the object.
In Python a class can contain *attributes* (variables) and *methods* (functions).
A class is defined almost like a function, but using the `class` keyword, and the class definition usually contains a number of class method definitions (a function in a class).
* Each class method should have an argument `self` as its first argument. This object is a self-reference.
* Some class method names have special meaning, for example:
* `__init__`: The name of the method that is invoked when the object is first created.
* `__str__` : A method that is invoked when a simple string representation of the class is needed, as for example when printed.
* There are many more, see http://docs.python.org/2/reference/datamodel.html#special-method-names
```
class Point:
    """
    A point in a 2D Cartesian coordinate system.

    Attributes:
        x: horizontal coordinate.
        y: vertical coordinate.
    """

    def __init__(self, x, y):
        """Initialize the point at coordinates (x, y)."""
        self.x = x
        self.y = y

    def translate(self, dx, dy):
        """Shift the point by dx along x and dy along y (in place)."""
        self.x = self.x + dx
        self.y = self.y + dy

    def __str__(self):
        # Fixed-point formatting matches the original "%f" output exactly.
        return f"Point at [{self.x:f}, {self.y:f}]"
```
To create a new instance of a class:
```
p1 = Point(0, 0) # this will invoke the __init__ method in the Point class
print(p1) # this will invoke the __str__ method
```
To invoke a class method in the class instance `p`:
```
p2 = Point(1, 1)
p1.translate(0.25, 1.5)
print(p1)
print(p2)
```
Note that calling class methods can modify the state of that particular class instance, but does not affect other class instances or any global variables.
That is one of the nice things about object-oriented design: code such as functions and related variables are grouped in separate and independent entities.
## Modules
One of the most important concepts in good programming is to reuse code and avoid repetitions.
The idea is to write functions and classes with a well-defined purpose and scope, and reuse these instead of repeating similar code in different part of a program (modular programming). The result is usually that readability and maintainability of a program is greatly improved. What this means in practice is that our programs have fewer bugs, are easier to extend and debug/troubleshoot.
Python supports modular programming at different levels. Functions and classes are examples of tools for low-level modular programming. Python modules are a higher-level modular programming construct, where we can collect related variables, functions and classes in a module. A python module is defined in a python file (with file-ending `.py`), and it can be made accessible to other Python modules and programs using the `import` statement.
Consider the following example: the file `mymodule.py` contains simple example implementations of a variable, function and a class:
```
%%file mymodule.py
"""
Example of a python module. Contains a variable called my_variable,
a function called my_function, and a class called MyClass.
"""

# Module-level variable, shared by my_function and MyClass below.
my_variable = 0

def my_function():
    """
    Example function
    """
    # Returns the current module-level value (0 unless reassigned).
    return my_variable

class MyClass:
    """
    Example class.
    """
    def __init__(self):
        # Seed the instance attribute from the module-level variable.
        self.variable = my_variable

    def set_variable(self, new_value):
        """
        Set self.variable to a new value
        """
        self.variable = new_value

    def get_variable(self):
        # Accessor for the instance attribute set above.
        return self.variable
```
We can import the module `mymodule` into our Python program using `import`:
```
import mymodule
```
Use `help(module)` to get a summary of what the module provides:
```
help(mymodule)
mymodule.my_variable
mymodule.my_function()
my_class = mymodule.MyClass()
my_class.set_variable(10)
my_class.get_variable()
```
If we make changes to the code in `mymodule.py`, we need to reload it using `reload`:
```
reload(mymodule) # works only in python 2
```
## Exceptions
In Python errors are managed with a special language construct called "Exceptions". When errors occur exceptions can be raised, which interrupts the normal program flow and fallback to somewhere else in the code where the closest try-except statement is defined.
To generate an exception we can use the `raise` statement, which takes an argument that must be an instance of the class `BaseException` or a class derived from it.
```
raise Exception("description of the error")
```
A typical use of exceptions is to abort functions when some error condition occurs, for example:
def my_function(arguments):
if not verify(arguments):
raise Exception("Invalid arguments")
# rest of the code goes here
To gracefully catch errors that are generated by functions and class methods, or by the Python interpreter itself, use the `try` and `except` statements:
try:
# normal code goes here
except:
# code for error handling goes here
# this code is not executed unless the code
# above generated an error
For example:
```
try:
print("test")
# generate an error: the variable test is not defined
print(test)
except:
print("Caught an exception")
```
To get information about the error, we can access the `Exception` class instance that describes the exception by using for example:
except Exception as e:
```
try:
print("test")
# generate an error: the variable test is not defined
print(test)
except Exception as e:
print("Caught an exception:" + str(e))
```
## Further reading
* http://www.python.org - The official web page of the Python programming language.
* http://www.python.org/dev/peps/pep-0008 - Style guide for Python programming. Highly recommended.
* http://www.greenteapress.com/thinkpython/ - A free book on Python programming.
* [Python Essential Reference](http://www.amazon.com/Python-Essential-Reference-4th-Edition/dp/0672329786) - A good reference book on Python programming.
## Versions
```
%load_ext version_information
%version_information
```
| github_jupyter |
**※ GPU環境で利用してください**
```
!pip install timm
import argparse
import operator
import os
import time
from collections import OrderedDict
import timm
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from timm.data import create_dataset, create_loader, resolve_data_config
from timm.optim import create_optimizer
from timm.utils import AverageMeter, accuracy
from timm.utils.summary import update_summary
from torch.autograd import Variable
from IPython.display import display
# Build an argparse-style namespace of training hyper-parameters. argparse is
# used (rather than plain constants) because timm's create_optimizer() expects
# an object exposing .opt, .lr, .momentum and .weight_decay attributes.
parser = argparse.ArgumentParser(description="Training Config", add_help=False)
parser.add_argument(
    "--opt",
    default="sgd",
    type=str,
    metavar="OPTIMIZER",
    help='Optimizer (default: "sgd")',  # fixed: help text was missing its closing parenthesis
)
parser.add_argument(
    "--weight-decay", type=float, default=0.0001, help="weight decay (default: 0.0001)"
)
parser.add_argument(
    "--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)"
)
parser.add_argument(
    "--momentum",
    type=float,
    default=0.9,
    metavar="M",
    help="Optimizer momentum (default: 0.9)",
)
parser.add_argument(
    "--input-size",
    default=None,
    nargs=3,
    type=int,
    metavar="N N N",
    help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty",
)
# Parse a fixed argument list (notebook context: there is no real CLI).
args = parser.parse_args(["--input-size", "3", "224", "224"])
EPOCHS = 30
BATCH_SIZE = 32
NUM_WORKERS = 2
# 適宜GoogleColab上のデータセットディレクトリ(train, validation, testが含まれれるディレクトリ)のパスを指定してください
dataset_path = '/content/drive/MyDrive/VisionTransformer/'
# 対応モデルを確認
model_names = timm.list_models(pretrained=True)
model_names
NUM_FINETUNE_CLASSES = 2 # {'clear': 0, 'cloudy': 1} の2種類
model = timm.create_model('vit_base_patch16_224', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
model.cuda()
data_config = resolve_data_config(vars(args), model=model)
dataset_train = create_dataset('train', root=os.path.join(dataset_path, 'train'), is_training=True, batch_size=BATCH_SIZE)
dataset_eval = create_dataset('validation', root=os.path.join(dataset_path, 'validation'), is_training=False, batch_size=BATCH_SIZE)
dataset_test = create_dataset('test', root=os.path.join(dataset_path, 'test'), is_training=False, batch_size=BATCH_SIZE)
loader_train = create_loader(dataset_train, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=True, num_workers=NUM_WORKERS)
loader_eval = create_loader(dataset_eval, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=False, num_workers=NUM_WORKERS)
loader_test = create_loader(dataset_test, input_size=data_config['input_size'], batch_size=BATCH_SIZE, is_training=False, num_workers=NUM_WORKERS)
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = create_optimizer(args, model)
def train_one_epoch(epoch, model, loader, optimizer, loss_fn, args, output_dir=None):
    """
    Train `model` for one epoch over `loader` and return the average loss.

    Args:
        epoch: zero-based epoch index (used only to seed `num_updates`).
        model: the network to train (must already be on the target device).
        loader: training data loader yielding (input, target) batches.
            NOTE(review): no explicit .cuda() transfer happens here, unlike in
            validate(); presumably timm's create_loader prefetches batches to
            the GPU — confirm against the loader configuration.
        optimizer: optimizer created by timm's create_optimizer.
        loss_fn: criterion mapping (output, target) -> scalar loss.
        args: parsed hyper-parameter namespace (unused here, kept for API parity).
        output_dir: unused; kept for backward compatibility with callers.

    Returns:
        OrderedDict with key "loss" -> mean training loss for the epoch.
    """
    # Some optimizers (e.g. AdaHessian) need create_graph=True on backward.
    second_order = hasattr(optimizer, "is_second_order") and optimizer.is_second_order
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    losses_m = AverageMeter()
    model.train()
    end = time.time()
    num_updates = epoch * len(loader)
    for _, (input, target) in enumerate(loader):
        data_time_m.update(time.time() - end)
        output = model(input)
        loss = loss_fn(output, target)
        optimizer.zero_grad()
        loss.backward(create_graph=second_order)
        optimizer.step()
        torch.cuda.synchronize()
        # BUG FIX: losses_m was never updated, so the returned "loss" (and the
        # training-loss column written to summary.csv) was always 0.0.
        losses_m.update(loss.item(), input.size(0))
        num_updates += 1
        batch_time_m.update(time.time() - end)
        end = time.time()
    if hasattr(optimizer, "sync_lookahead"):
        optimizer.sync_lookahead()
    return OrderedDict([("loss", losses_m.avg)])
def validate(model, loader, loss_fn, args):
    """
    Run one evaluation pass over `loader` and return average loss and accuracy.

    Args:
        model: the network to evaluate (moved to eval mode here).
        loader: validation data loader yielding (input, target) batches.
        loss_fn: criterion mapping (output, target) -> scalar loss.
        args: parsed hyper-parameter namespace (unused, kept for API parity).

    Returns:
        OrderedDict with keys "loss" and "accuracy" (top-1, averaged over samples).
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    accuracy_m = AverageMeter()
    model.eval()
    end = time.time()
    # no_grad: inference only — skip autograd bookkeeping to save memory/time.
    with torch.no_grad():
        for _, (input, target) in enumerate(loader):
            input = input.cuda()
            target = target.cuda()
            output = model(input)
            # Some models return auxiliary outputs; keep only the main logits.
            if isinstance(output, (tuple, list)):
                output = output[0]
            loss = loss_fn(output, target)
            # topk=(1, 2): top-2 is the maximum possible with 2 classes; only
            # top-1 is kept.
            acc1, _ = accuracy(output, target, topk=(1, 2))
            reduced_loss = loss.data
            # Ensure GPU work is finished before reading timings/values.
            torch.cuda.synchronize()
            losses_m.update(reduced_loss.item(), input.size(0))
            accuracy_m.update(acc1.item(), output.size(0))
            batch_time_m.update(time.time() - end)
            end = time.time()
    metrics = OrderedDict([("loss", losses_m.avg), ("accuracy", accuracy_m.avg)])
    return metrics
num_epochs = EPOCHS
eval_metric = "accuracy"
best_metric = None
best_epoch = None
compare = operator.gt
# 学習結果CSVファイルやファインチューニング後のモデルデータの出力先
output_dir = "/content/drive/MyDrive/VisionTransformer/output"
for epoch in range(0, num_epochs):
train_metrics = train_one_epoch(
epoch, model, loader_train, optimizer, train_loss_fn, args, output_dir=output_dir
)
eval_metrics = validate(model, loader_eval, validate_loss_fn, args)
if output_dir is not None:
update_summary(
epoch,
train_metrics,
eval_metrics,
os.path.join(output_dir, "summary.csv"),
write_header=best_metric is None,
)
metric = eval_metrics[eval_metric]
if best_metric is None or compare(metric, best_metric):
best_metric = metric
best_epoch = epoch
torch.save(model.state_dict(), os.path.join(output_dir, "best_model.pth"))
print(epoch)
print(eval_metrics)
print("Best metric: {0} (epoch {1})".format(best_metric, best_epoch))
model.load_state_dict(
torch.load(
os.path.join(output_dir, "best_model.pth"), map_location=torch.device("cuda")
)
)
model.eval()
image_size = data_config["input_size"][-1]
loader = transforms.Compose([transforms.Resize(image_size), transforms.ToTensor()])
def image_loader(image_name):
    """
    Load one image from `image_name` and return it as a 1-image batch
    tensor on the GPU, ready to be fed to the model.
    """
    image = Image.open(image_name).convert("RGB")  # force 3 channels (drop alpha/grayscale)
    tensor = loader(image)  # module-level transform: Resize + ToTensor
    # NOTE: the original wrapped the tensor in the long-deprecated
    # torch.autograd.Variable with requires_grad=True; that is unnecessary for
    # inference and has been removed — model outputs are unchanged.
    return tensor.unsqueeze(0).cuda()  # add a batch dimension of size 1
m = nn.Softmax(dim=1)
clear_image_path = os.path.join(dataset_path, 'test/clear/12_3542_1635.png')
predicted_clear_image = image_loader(clear_image_path)
display(Image.open(clear_image_path))
m(model(predicted_clear_image))
cloudy_image_path = os.path.join(dataset_path, 'test/cloudy/12_3503_1735.png')
predicted_cloudy_image = image_loader(cloudy_image_path)
display(Image.open(cloudy_image_path))
m(model(predicted_cloudy_image))
def test(model, loader, args):
    """
    Evaluate `model` on the test `loader` and return its top-1 accuracy.

    Returns:
        dict with a single key "accuracy" (sample-weighted average, percent).
    """
    timing_meter = AverageMeter()
    top1_meter = AverageMeter()
    model.eval()
    last_tick = time.time()
    # Inference only: disable autograd for the whole pass.
    with torch.no_grad():
        for _, (input, target) in enumerate(loader):
            input = input.cuda()
            target = target.cuda()
            output = model(input)
            # Keep only the main logits if auxiliary outputs are returned.
            if isinstance(output, (tuple, list)):
                output = output[0]
            # topk=(1, 2): top-2 is the max possible with 2 classes; only top-1 kept.
            acc1, _ = accuracy(output, target, topk=(1, 2))
            torch.cuda.synchronize()
            top1_meter.update(acc1.item(), output.size(0))
            timing_meter.update(time.time() - last_tick)
            last_tick = time.time()
    return {'accuracy': top1_meter.avg}
test(model, loader_test, args)
```
| github_jupyter |
# Tutorial 6: Population Level Modeling (with PopNet)
In this tutorial we will focus on modeling of populations and population firing rates. This is done with the PopNet simulator application of bmtk which uses [DiPDE](https://github.com/AllenInstitute/dipde) engine as a backend. We will first build our networks using the bmtk NetworkBuilder and save them into the SONATA data format. Then we will show how to simulate the firing rates over a given time-source.
Requirements:
* BMTK
* DiPDE
## 1. Building the network
#### Converting existing networks
Like BioNet for biophysically detailed modeling, and PointNet with point-based networks, PopNet stores networks in the SONATA data format. PopNet supports simulating networks of individual cells at the population level. The first thing you have to do is modify the node-types and edge-types of an existing network to use population-level models (rather than models of individual cells).
<div class="alert alert-warning">
**WARNING** - Converting a network of individual nodes into population of nodes is good for a quick and naive simulation, but for faster and more reliable results it's best to build a network from scratch (next section).
</div>
Here is the node-types csv file of a network set to work with BioNet
```
import pandas as pd
pd.read_csv('sources/chapter06/converted_network/V1_node_types_bionet.csv', sep=' ')
```
vs the equivelent form for PopNet
```
pd.read_csv('sources/chapter06/converted_network/V1_node_types_popnet.csv', sep=' ')
```
Some things to note:
* **model_type** is now a population for all nodes, rather than individual biophysical/point types
* We have set **model_template** to dipde:Internal which will tell the simulator to use special DiPDE model types
* We are using new **dynamic_params** files with parameters that have been adjusted to an appropriate range for DiPDE models.
* **morphology_file** and **model_processing**, which were used to set and process individual cell morphologies, are no longer applicable.
We must make similar adjustments to our edge_types.csv files. And finally when we run the simulation we must tell PopNet to cluster nodes together using the **group_by** property
```python
network = popnet.PopNetwork.from_config(configure, group_by='node_type_id')
```
#### Building a network
We will create a network of two populations, one population of excitatory cells and another of inhibitory cells. Then we will save the network into SONATA formatted data files.
The first step is to use the NetworkBuilder to instantiate a new network with two populations:
```
from bmtk.builder import NetworkBuilder
net = NetworkBuilder('V1')
net.add_nodes(pop_name='excitatory', # name of specific population optional
ei='e', # Optional
location='VisL4', # Optional
model_type='population', # Required, indicates what types of cells are being model
model_template='dipde:Internal', # Required, instructs what DiPDE objects will be created
dynamics_params='exc_model.json' # Required, contains parameters used by DiPDE during initialization of object
)
net.add_nodes(pop_name='inhibitory',
ei='i',
model_type='population',
model_template='dipde:Internal',
dynamics_params='inh_model.json')
```
Next we will create connections between the two populations:
```
net.add_edges(source={'ei': 'e'}, target={'ei': 'i'},
syn_weight=0.005,
nsyns=20,
delay=0.002,
dynamics_params='ExcToInh.json')
net.add_edges(source={'ei': 'i'}, target={'ei': 'e'},
syn_weight=-0.002,
nsyns=10,
delay=0.002,
dynamics_params='InhToExc.json')
```
and finally we must build and save the network
```
net.build()
net.save_nodes(output_dir='network')
net.save_edges(output_dir='network')
```
##### External Nodes
The *dipde:Internal* nodes we created don't carry intrinsic firing rates; instead we will use external populations to drive the network activity. To do this we will create a separate network of 'virtual' populations (or, alternatively, use model_type=dipde:External) that connect to our excitatory population.
Note: we could add 'virtual' populations directly to our V1 network. However, creating them as a separate network provides a great advantage if/when we want to replace our external connections with a different model (or if we want to remove the recurrent connections and simulate with only feed-forward activity).
```
input_net = NetworkBuilder('LGN')
input_net.add_nodes(pop_name='tON',
ei='e',
model_type='virtual')
input_net.add_edges(target=net.nodes(ei='e'),
syn_weight=0.0025,
nsyns=10,
delay=0.002,
dynamics_params='input_ExcToExc.json')
input_net.build()
input_net.save_nodes(output_dir='network')
input_net.save_edges(output_dir='network')
```
## 2. Setting up the PopNet environment
Before running the simulation we need to set up our simulation environment, including setting up run scripts, configuration parameters, and placing our parameter files in their appropriate location. The easiest way to do this is through the command line:
```bash
$ python -m bmtk.utils.sim_setup -n network --run-time 1500.0 popnet
```
Which creates initial files to run a 1500 ms simulation using the network files found in our ./network directory.
#### Inputs
We next need to set the firing rates of the External Population. There are multiple ways to set this value which will be discussed later. The best way is to set the firing rates using a input-rates file for each External Population, we can fetch an existing one using the command:
```bash
$ wget https://github.com/AllenInstitute/bmtk/raw/develop/docs/examples/pop_2pops/lgn_rates.csv
```
Then we must open the simulation_config.json file with a text editor and add the lgn_rates.csv file as a part of our inputs:
```json
{
"inputs": {
"LGN_pop_rates": {
"input_type": "csv",
"module": "pop_rates",
"rates": "${BASE_DIR}/lgn_rates.csv",
"node_set": "LGN"
}
}
}
```
## 3. Running the simulation
The call to sim_setup created a file run_popnet.py which we can run directly from the command line:
```bash
$ python run_popnet.py config.json
```
Or we can run it directly using the following python code:
```
from bmtk.simulator import popnet
configure = popnet.config.from_json('simulation_config.json')
configure.build_env()
network = popnet.PopNetwork.from_config(configure)
sim = popnet.PopSimulator.from_config(configure, network)
sim.run()
```
## 4. Analyzing results
As specified in the "output" section of simulation_config.json, the results will be written to output/firing_rates.csv. The BMTK analyzer includes code for plotting and analyzing the firing rates of our network:
```
from bmtk.analyzer.visualization.spikes import plot_rates_popnet
plot_rates_popnet('network/V1_node_types.csv', 'output/firing_rates.csv', model_keys='pop_name')
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import plotly
import matplotlib.pyplot as plt
from random import seed
from random import randrange
from csv import reader
from google.colab import drive
drive.mount('/content/drive')
'''
We proceed as follows:
1. Compute the Gini Index of the Dataset.
2. Create a split of each node based on the Gini Index calculated.
3. Build a decision tree.
4. Make predictions with the decision tree.
'''
def Gini_Index(groups, classes):
    """Compute the weighted Gini impurity of a candidate split.

    groups is a list of row groups (each row's last element is the class
    label); classes is the list of possible class values. The impurity of
    each non-empty group is weighted by its share of the total row count.
    """
    total_rows = float(sum(len(g) for g in groups))
    weighted_impurity = 0.0
    for group in groups:
        n = float(len(group))
        if n == 0:
            continue  # an empty side contributes nothing
        labels = [row[-1] for row in group]
        purity = sum((labels.count(c) / n) ** 2 for c in classes)
        weighted_impurity += (1.0 - purity) * (n / total_rows)
    return weighted_impurity
print(Gini_Index([[[1, 1], [1, 0]], [[1, 1], [1, 0]]], [0, 1]))
def test_split(index, value, dataset):
left, right = list(), list()
for row in dataset:
if row[index] < value:
left.append(row)
else:
right.append(row)
return left, right
def get_split(dataset):
    """Exhaustively search every (feature, value) split for the lowest Gini.

    Every value of every feature is tried as a threshold; the split with
    the smallest impurity wins. Returns a node dict with the chosen
    feature index, threshold value and the resulting (left, right) groups.
    """
    class_values = list(set(row[-1] for row in dataset))
    best_index, best_value, best_gini, best_groups = 999, 999, 999, None
    n_features = len(dataset[0]) - 1  # last column is the class label
    for feature in range(n_features):
        for candidate_row in dataset:
            threshold = candidate_row[feature]
            groups = test_split(feature, threshold, dataset)
            gini = Gini_Index(groups, class_values)
            if gini < best_gini:
                best_index, best_value = feature, threshold
                best_gini, best_groups = gini, groups
    return {'index': best_index, 'value': best_value, 'groups': best_groups}
def to_terminal(group):
    """Collapse a leaf group of rows into a single prediction (majority vote)."""
    votes = [row[-1] for row in group]
    winner, winner_count = None, -1
    for label in set(votes):
        count = votes.count(label)
        if count > winner_count:
            winner, winner_count = label, count
    return winner
def split(node, max_depth, min_size, depth):
    """Recursively grow the tree below `node` in place.

    A child becomes a terminal (leaf) when the split is degenerate, the
    depth limit is reached, or it holds at most `min_size` rows; otherwise
    it is split again one level deeper.
    """
    left, right = node.pop('groups')
    # Degenerate split: one side is empty, so both children collapse
    # into the same leaf built from all rows.
    if not left or not right:
        node['left'] = node['right'] = to_terminal(left + right)
        return
    # Depth cap reached: terminate both branches.
    if depth >= max_depth:
        node['left'] = to_terminal(left)
        node['right'] = to_terminal(right)
        return
    # Otherwise recurse on each side that is still large enough.
    for side, rows in (('left', left), ('right', right)):
        if len(rows) <= min_size:
            node[side] = to_terminal(rows)
        else:
            node[side] = get_split(rows)
            split(node[side], max_depth, min_size, depth + 1)
def build_tree(train, max_depth, min_size):
    """Train a CART decision tree on the training rows.

    Finds the best root split, then grows the tree recursively via split().
    max_depth caps the tree depth; min_size is the smallest node that may
    still be split. Returns the root node as a nested dict.
    """
    root = get_split(train)
    split(root, max_depth, min_size, 1)
    return root
def predict(node, row):
    """Walk the tree from `node` down to a leaf for `row`; return its label.

    Internal nodes are dicts with 'index'/'value'/'left'/'right'; leaves
    are plain class labels.
    """
    branch = node['left'] if row[node['index']] < node['value'] else node['right']
    if isinstance(branch, dict):
        return predict(branch, row)
    return branch
def load_csv(filename):
    """Read a CSV file into a list of row lists (all cells as strings).

    Fix: the original opened the file without ever closing it; the context
    manager guarantees the handle is released deterministically.
    """
    with open(filename, "rt") as file:
        return list(reader(file))
def str_column_to_float(dataset, column):
    """In place, convert the given column of every row from string to float."""
    for row in dataset:
        cleaned = row[column].strip()
        row[column] = float(cleaned)
def cross_validation_split(dataset, n_folds):
    """Randomly partition the dataset into n_folds equal folds.

    Rows are drawn without replacement from a working copy; any remainder
    rows (len(dataset) % n_folds) are left unassigned, as in standard
    truncating k-fold splitting.
    """
    pool = list(dataset)
    fold_size = len(dataset) // n_folds
    folds = []
    for _ in range(n_folds):
        fold = []
        while len(fold) < fold_size:
            pick = randrange(len(pool))
            fold.append(pool.pop(pick))
        folds.append(fold)
    return folds
def accuracy_metric(actual, predicted):
    """Return the percentage of positions where predicted matches actual."""
    correct = sum(1 for i in range(len(actual)) if actual[i] == predicted[i])
    return correct / float(len(actual)) * 100.0
def evaluate_algorithm(dataset, algorithm, n_folds, *args):
    """Estimate `algorithm`'s accuracy with k-fold cross-validation.

    Each fold in turn is held out for testing while the remaining folds
    form the training set; the class label of each test row is blanked
    before prediction. Returns the list of per-fold accuracies (percent).
    """
    folds = cross_validation_split(dataset, n_folds)
    scores = []
    for fold in folds:
        remaining = list(folds)
        remaining.remove(fold)            # train on every other fold
        training_rows = sum(remaining, [])
        # Hide the ground-truth label from the learner.
        masked_rows = []
        for row in fold:
            hidden = list(row)
            hidden[-1] = None
            masked_rows.append(hidden)
        predicted = algorithm(training_rows, masked_rows, *args)
        actual = [row[-1] for row in fold]
        scores.append(accuracy_metric(actual, predicted))
    return scores
def decision_tree(train, test, max_depth, min_size):
    """CART entry point: fit a tree on `train`, predict each row of `test`."""
    tree = build_tree(train, max_depth, min_size)
    return [predict(tree, row) for row in test]
# Load the banknote-authentication dataset from Google Drive and coerce
# every column (four features + class label) from string to float.
file_path = "/content/drive/My Drive/banknotes.csv"
dataset = load_csv(file_path)
for i in range(len(dataset[0])):
    str_column_to_float(dataset, i)
# Hyper-parameters: 5-fold CV, trees grown to depth 5, and nodes of <= 10
# rows become leaves.
n_folds = 5
max_depth = 5
min_size = 10
scores = evaluate_algorithm(dataset, decision_tree, n_folds, max_depth, min_size)
print('Scores: %s' % scores)
print('Mean Accuracy: %.3f%%' % (sum(scores)/float(len(scores))))
```
| github_jupyter |
# Problem Description
- The competition is called : **Corporacion Favorita Grocery Sales Forecasting**.
- The task is to predict sales in the stores of an Ecuadorian supermarket chain so that they can avoid overstocking.
- The given data is a table with the following variables/features: date, store id, item id, sales volume, promotion.
- We can see the data as N time series, one per (store, item) combination. Many of these time series are most likely correlated to each other and some sort of <b>dimensional reduction</b> will be most welcome here.
- The company also offers some other data sets, such as a list of stores, a time series of daily transactions per store, a list of holidays and events, a list of products by category, and the price of oil, of which a good chunk of the ecuadorian economy is allegedly tied to. These are additional tools to simplify and/or enhance the predictions, and some other external data could also be used in this regard.
# Set-up
```
# DATA MANIPULATION
import numpy as np # linear algebra
import random as rd
import pandas as pd # data processing
import datetime # manipulating date formats
from operator import add # elementwise addition
# VIZUALIZATION
import matplotlib.pyplot as plt # basic plotting
import seaborn # for prettier plots
#import folium # plotting data on interactive maps
%matplotlib inline
# SUPERVISED LEARNING
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
# Reading daily transfers per store
sales = pd.read_csv('../input/transactions.csv', parse_dates=['date'])
# Reading store list
stores = pd.read_csv('../input/stores.csv')
stores.type=stores.type.astype('category')
# Reading the holiday and events schedule
holidays=pd.read_csv('../input/holidays_events.csv', parse_dates=['date'])
# Reading oil
oil=pd.read_csv('../input/processed/oil.csv', parse_dates=['date'])
# Merge datasets
def merge_sales(sales):
    """Left-join store metadata and oil prices onto the transactions table.

    Relies on the module-level `stores` and `oil` DataFrames; join keys
    are inferred by pandas from the shared column names.
    """
    enriched = pd.merge(sales, stores, how='left')
    enriched = pd.merge(enriched, oil, how='left')
    return enriched
sales = merge_sales(sales)
```
# Feature engineering
### Date
First, let's work on creating features derived from the date, which is expected to be an important field as we are working with time series
```
def add_date_features(sales):
    """Derive calendar features from the 'date' column, in place.

    Adds year/month/day/week/dow/dayofyear columns plus a boolean
    'dayoff' flag for weekends (dow 5 = Saturday, 6 = Sunday).
    """
    dt = sales['date'].dt
    sales['year'] = dt.year
    sales['month'] = dt.month
    sales['day'] = dt.day
    # Fix: Series.dt.week was deprecated and removed in pandas 2.0;
    # isocalendar().week is the supported equivalent (same ISO week
    # numbers, unsigned-integer dtype).
    sales['week'] = dt.isocalendar().week
    sales['dow'] = dt.dayofweek
    sales['dayofyear'] = dt.dayofyear
    sales['dayoff'] = sales['dow'].isin([5, 6])  # weekend flag
add_date_features(sales)
sales.head()
```
### Holiday events
This events are expected to be correlated with high volume sales
```
holidays.head()
# Enable all holidays
def enable_holidays(sales):
    """Flip the 'dayoff' flag according to the holiday calendar, in place.

    Real holidays (anything other than 'Work Day' and 'Event') mark the
    matching rows as days off at the appropriate scope (national,
    regional by state, or local by city); 'Work Day' and 'Event' entries
    force the flag back to False. Relies on the module-level `holidays`.
    """
    records = zip(holidays.date, holidays.type, holidays.locale, holidays.locale_name)
    for date_, type_, locale_, name_ in records:
        if type_ == 'Work Day' or type_ == 'Event':
            sales.loc[sales.date == date_, 'dayoff'] = False
        elif locale_ == 'National':
            sales.loc[sales.date == date_, 'dayoff'] = True
        elif locale_ == 'Regional':
            sales.loc[(sales.date == date_) & (sales.state == name_), 'dayoff'] = True
        else:
            sales.loc[(sales.date == date_) & (sales.city == name_), 'dayoff'] = True
enable_holidays(sales)
## Some manual verifications
sales.loc[lambda df: df.date=='2015-01-10'].head()
#sales.loc[lambda df: (df.date=='2017-04-13') & (df.city=='Cuenca')].head()
#sales.loc[lambda df: (df.date=='2013-04-01') & (df.state=='Cotopaxi')].head()
```
### Transactions
```
# Transformation
def transform_transactions(sales):
    """Replace raw transaction counts with log1p-transformed values, in place."""
    sales['transactions'] = np.log1p(sales['transactions'])
transform_transactions(sales)
# Normalized
#sales['transactions3'] = (sales['transactions2'] - sales['transactions2'].mean()) / sales['transactions2'].std()
# Normalize independently
# Histograms
#plt.figure(figsize=(15,5))
#sales.transactions.hist(ax=plt.subplot(1,3,1))
#sales.transactions2.hist(ax=plt.subplot(1,3,2))
#sales.transactions3.hist(ax=plt.subplot(1,3,3))
# Denormalize
def denormalize_target(sales, target, transform=False):
    """Invert the z-score normalisation (and optionally the log1p transform).

    Normalisation parameters are the mean and std of sales['transactions2'];
    when transform=True the result is also passed through expm1 to undo the
    log1p step.
    """
    scale = sales['transactions2'].std()
    shift = sales['transactions2'].mean()
    restored = target * scale + shift
    return np.expm1(restored) if transform else restored
#test = denormalize_target(sales, sales.transactions3, True)
#test.hist()
```
Both distributions are skewed. But the transformed looks more normal
### Categorical features
Use one-hot encoding for city, state, type.
This might create hundreds of features, which could be restricting given the amount of data.
```
def encode(df, column) -> pd.DataFrame:
    """One-hot encode a single column, prefixing new columns with its name."""
    #return (one_hot - one_hot.mean()) / one_hot.std()
    return pd.get_dummies(df[column], drop_first=False, prefix=column)
def encode_categorical_features(sales):
    """Append one-hot encodings of the main categorical columns.

    Each of store_nbr/city/state/type is expanded into prefixed dummy
    columns which are concatenated onto the frame; returns the widened
    DataFrame (original columns are kept).
    """
    for cat in ['store_nbr', 'city', 'state', 'type']:
        dummies = pd.get_dummies(sales[cat], drop_first=False, prefix=cat)
        sales = pd.concat([sales, dummies], axis=1)
    return sales
sales = encode_categorical_features(sales)
print_cols = [c for c in sales.columns if
not c.startswith('store_nbr_') and
not c.startswith('city_') and
not c.startswith('state') and
not c.startswith('type_')]
```
### Lagged features: weekly and annual
```
#u_dates = sales.date.unique() # There are no records for some dates (eg: 25-dic)
dates_range = pd.date_range(sales.date.min(), sales.date.max())
u_stores = sales.store_nbr.unique()
def add_lag_features(sales):
    """Add weekly (lag_7) and annual (lag 364-day) transaction features.

    The frame is reindexed onto the full (date x store) Cartesian product so
    that every store has a row for every calendar day; after sorting, a
    shift of k*len(u_stores) rows corresponds to a lag of exactly k days
    for every store. Relies on the module-level `dates_range` and
    `u_stores`. Returns a new DataFrame with the index restored to columns.
    """
    ## Fill missing rows using a product between the stores and the dates (range min-max)
    sales2 = sales.copy()
    sales2.set_index(["date", "store_nbr"], inplace=True)
    sales2 = sales2.reindex(
        pd.MultiIndex.from_product(
            [dates_range, u_stores],
            names=["date", "store_nbr"]
        )
    )
    # Sorting by (date, store) is what makes the row-shift == day-lag trick valid.
    sales2.sort_index(inplace=True)
    #some_cols2 = [c for c in some_cols if c!='date' and c!='store_nbr']
    ## Lag 7: same store, same weekday one week earlier.
    sales2['lag_7']=np.nan
    sales2['lag_7']=sales2['transactions'].shift(7*len(u_stores))
    print(sales2[['transactions','lag_7']].corr())
    ## Lag 14 (kept for reference; it did not reduce the error metric)
    #sales2['lag_14']=np.nan
    #sales2['lag_14']=sales2['transactions'].shift(14*len(u_stores))
    #print(sales2[['transactions','lag_14']].corr())
    #It did not reduce error metric
    ## Lag 364: 52 whole weeks, so the lag lands on the same weekday a year earlier.
    sales2['lag_annual']= np.nan
    sales2['lag_annual']= sales2['transactions'].shift(364*len(u_stores)).values
    print(sales2[['transactions','lag_annual']].corr())
    # Weighted 364/365/366 blend (kept for reference; plain 364 worked better)
    #sales2['lag_annual']= \
    #    (1 * sales2['transactions2'].shift(364*len(u_stores)).values +
    #     1.5 * sales2['transactions2'].shift(365*len(u_stores)).values +
    #     1 *sales2['transactions2'].shift(366*len(u_stores)).values)/3.5
    # It was not better than (364 shift)
    ## Lag 364*2 (kept for reference)
    #sales2['lag_annual_']= np.nan
    #sales2['lag_annual_']= sales2['transactions'].shift(364*2*len(u_stores)).values
    #print(sales2[['transactions','lag_annual_']].corr())
    # Delete temporal df
    sales = sales2.reset_index()
    del sales2
    return sales
sales = add_lag_features(sales)
```
Why does a simple shift of 364 days provide a better R² metric? Because 364 = 52 × 7, the lag lands on the same weekday one year earlier, preserving the strong weekly seasonality.
Observations:
- There are no records for Dec 25. This is probably because the stores do not operate on that date
- Some stores have a late start / short life
- It seems safe to ignore rows with missing data in the lag features
# Prediction
### Drop nan and sort data
There is lots of them because of the creation of the lag features
```
def clean_data_for_prediction(df):
    """Drop rows with missing values, sort by (store, date) and renumber the index.

    NaN rows exist mostly because of the lag features (early dates have no
    annual lag available). Mutates `df` in place for the drop/sort steps
    and returns the cleaned, sorted DataFrame.
    """
    # Drop
    print('Dropping nan rows...')
    print("Before: ", df.shape)
    df.dropna(inplace=True)
    print("After: ", df.shape)
    # Sort
    print('Sorting')
    df.sort_values(['store_nbr', 'date'], ascending=[True, True], inplace=True)
    # Fix: df.reindex() with no arguments is a no-op; reset_index(drop=True)
    # actually renumbers the rows 0..n-1 after sorting.
    df = df.reset_index(drop=True)
    return df
sales = clean_data_for_prediction(sales)
```
### Splitting data
```
# Feature columns: everything except identifiers, raw categoricals and targets.
# NOTE(review): 'transactions' appears twice in the exclusion list while
# 'transactions2' is absent -- verify which target columns were meant to be
# excluded.
cols = [c for c in sales if c not in ['date','store_nbr','type','city','state',
                                      'transactions','transactions','transactions3',
                                      'prediction']]
# Time-based split: train on 2016-01 .. 2017-07, validate on 2017-08 onwards.
X1 = sales.loc[(sales.date<'2017-08-01') & (sales.date>='2016-01-01')].copy()
X2 = sales.loc[sales.date>='2017-08-01'].copy()
print(X1.shape)
print(X2.shape)
# The (log1p-transformed) transaction count is the prediction target.
target_column = 'transactions'
y1 = X1[target_column].values
y2 = X2[target_column].values
```
### PCA
```
from sklearn import decomposition
# Reduce the (mostly one-hot) feature space to 50 principal components.
# The PCA is fit on the training window only and then applied to both
# splits, so no validation statistics leak into the transform.
pca = decomposition.PCA(n_components=50)
pca.fit(X1[cols])
X1 = pca.transform(X1[cols])
X2 = pca.transform(X2[cols])
print(X1.shape)
print(X2.shape)
```
### Regressors
```
from sklearn import metrics
# Fixed seed so the stochastic regressors are comparable across runs.
np.random.seed(1122)
number_regressors_to_test = 3
# Train each candidate regressor on the training window (X1/y1) and report
# its RMSE on the hold-out window (X2/y2).
for method in range(1, number_regressors_to_test+1):
    print('\nmethod = ', method)
    if (method==1):
        print('Multilayer perceptron (MLP) neural network 01')
        str_method = 'MLP model01'
        r = MLPRegressor(hidden_layer_sizes=(3,), max_iter=100)
    if (method==2):
        print('Bagging Regressor 01')
        str_method = 'BaggingRegressor01'
        r = BaggingRegressor(DecisionTreeRegressor(max_depth=6,max_features=0.85))
    if (method==3):
        # Re-seed so this model is unaffected by how much randomness the
        # previous two consumed.
        np.random.seed(1122)
        print('GradientBoosting 01')
        str_method = 'GradientBoosting01'
        r = GradientBoostingRegressor(n_estimators=85, max_depth=6, learning_rate = 0.01,
                                      verbose=0, warm_start=True,
                                      subsample= 0.87, max_features = 0.8)
    r.fit(X1, y1)
    yh2 = r.predict(X2)
    # RMSE of the (log1p-transformed) transaction counts.
    m = metrics.mean_squared_error(y2, yh2)**0.5
    print("Error: %f" % (m))
```
# Conclusions
PCA demonstrated that dimensionality reduction and feature selection are very necessary, specially taking into account all the one-hot encoded categorical features
| github_jupyter |
```
import scipy.io as scio
import numpy as np
# Ground-truth annotations of the NYU hand-pose test set.
keypoint_file = '/V2V-PoseNet/V2V-PoseNet-pytorch/datasets/nyu_keypoint/joint_data.mat'
# "joint_xyz" holds per-camera 3D keypoints; [0] selects the first camera
# view (presumably the frontal one used for evaluation -- TODO confirm).
keypointsXYZ_test = scio.loadmat(keypoint_file)["joint_xyz"].astype(np.float32)
keypointsXYZ_test = keypointsXYZ_test[0]
keypointsXYZ_test
# Predicted keypoints: one text line per frame, reshaped below into
# (8252 frames, 21 joints, 3 coordinates).
source = '/V2V-PoseNet/V2V-PoseNet-pytorch/test_res.txt'
results = [line.split()[0:] for line in open(source, 'r').readlines()]
arr = np.array(results).astype(np.float32)
results1 = np.ones((8252, 21, 3))
for i in range(0,8252):
    results1[i] = arr[i].reshape([-1, 3])
# Index of each of the 21 predicted joints in the MSRA joint ordering;
# used below to pair predictions with the NYU ground-truth joints.
MSRA_correspond_joints = {"wrist": 0, "index_mcp":1, "index_pip":2, "index_dip":3,
                          "index_tip":4, "middle_mcp":5, "middle_pip":6, "middle_dip":7,
                          "middle_tip":8, "ring_mcp":9, "ring_pip":10, "ring_dip":11,
                          "ring_tip":12, "little_mcp":13, "little_pip":14, "little_dip":15,
                          "little_tip":16, "thumb_mcp":17, "thumb_pip":18, "thumb_dip":19, "thumb_tip":20}
```
# Tip
```
#PTIP
EVAL_PTIP = np.array([0])
pred_PTIP = np.array([16])
errors_PTIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_PTIP,:] - results1[::][:,pred_PTIP,:]) ** 2, axis=2))
print(np.mean(errors_PTIP))
#RTIP
EVAL_RTIP = np.array([6])
pred_RTIP = np.array([12])
errors_RTIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_RTIP,:] - results1[::][:,pred_RTIP,:]) ** 2, axis=2))
print(np.mean(errors_RTIP))
#MTIP
EVAL_MTIP = np.array([12])
pred_MTIP = np.array([8])
errors_MTIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_MTIP,:] - results1[::][:,pred_MTIP,:]) ** 2, axis=2))
print(np.mean(errors_MTIP))
#ITIP
EVAL_ITIP = np.array([18])
pred_ITIP = np.array([4])
errors_ITIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_ITIP,:] - results1[::][:,pred_ITIP,:]) ** 2, axis=2))
print(np.mean(errors_ITIP))
#TTIP
EVAL_TTIP = np.array([24])
pred_TTIP = np.array([20])
errors_TTIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_TTIP,:] - results1[::][:,pred_TTIP,:]) ** 2, axis=2))
print(np.mean(errors_TTIP))
```
# Distal
```
#PDIP
EVAL_PDIP = np.array([1])
pred_PDIP = np.array([15])
errors_PDIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_PDIP,:] - results1[::][:,pred_PDIP,:]) ** 2, axis=2))
print(np.mean(errors_PDIP))
#RDIP
EVAL_RDIP = np.array([7])
pred_RDIP = np.array([11])
errors_RDIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_RDIP,:] - results1[::][:,pred_RDIP,:]) ** 2, axis=2))
print(np.mean(errors_RDIP))
#MDIP
EVAL_MDIP = np.array([13])
pred_MDIP = np.array([7])
errors_MDIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_MDIP,:] - results1[::][:,pred_MDIP,:]) ** 2, axis=2))
print(np.mean(errors_MDIP))
#IDIP
EVAL_IDIP = np.array([19])
pred_IDIP = np.array([3])
errors_IDIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_IDIP,:] - results1[::][:,pred_IDIP,:]) ** 2, axis=2))
print(np.mean(errors_IDIP))
#TDIP
EVAL_TDIP = np.array([25])
pred_TDIP = np.array([19])
errors_TDIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_TDIP,:] - results1[::][:,pred_TDIP,:]) ** 2, axis=2))
print(np.mean(errors_TDIP))
```
# Proximal
```
#PPIP
EVAL_PPIP = np.array([4])
pred_PPIP = np.array([14])
errors_PPIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_PPIP,:] - results1[::][:,pred_PPIP,:]) ** 2, axis=2))
print(np.mean(errors_PPIP))
#RPIP
# Mean Euclidean error (mm) for the ring-finger PIP joint.
# NOTE(review): ground-truth index 11 here is identical to EVAL_RMCP in the
# Metacarpal section -- one of the two is likely a typo; verify against the
# NYU joint ordering.
EVAL_RPIP = np.array([11])
pred_RPIP = np.array([10])
errors_RPIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_RPIP,:] - results1[::][:,pred_RPIP,:]) ** 2, axis=2))
print(np.mean(errors_RPIP))
#MPIP
EVAL_MPIP = np.array([15])
pred_MPIP = np.array([6])
errors_MPIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_MPIP,:] - results1[::][:,pred_MPIP,:]) ** 2, axis=2))
print(np.mean(errors_MPIP))
#IPIP
EVAL_IPIP = np.array([21])
pred_IPIP = np.array([2])
errors_IPIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_IPIP,:] - results1[::][:,pred_IPIP,:]) ** 2, axis=2))
print(np.mean(errors_IPIP))
#TPIP
EVAL_TPIP = np.array([26])
pred_TPIP = np.array([18])
errors_TPIP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_TPIP,:] - results1[::][:,pred_TPIP,:]) ** 2, axis=2))
print(np.mean(errors_TPIP))
```
# Metacarpal
```
#PMCP
EVAL_PMCP = np.array([5])
pred_PMCP = np.array([13])
errors_PMCP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_PMCP,:] - results1[::][:,pred_PMCP,:]) ** 2, axis=2))
print(np.mean(errors_PMCP))
#RMCP
# Mean Euclidean error (mm) for the ring-finger MCP joint.
# NOTE(review): ground-truth index 11 here is identical to EVAL_RPIP in the
# Proximal section -- one of the two is likely a typo; verify against the
# NYU joint ordering.
EVAL_RMCP = np.array([11])
pred_RMCP = np.array([9])
errors_RMCP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_RMCP,:] - results1[::][:,pred_RMCP,:]) ** 2, axis=2))
print(np.mean(errors_RMCP))
#MMCP
EVAL_MMCP = np.array([17])
pred_MMCP = np.array([5])
errors_MMCP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_MMCP,:] - results1[::][:,pred_MMCP,:]) ** 2, axis=2))
print(np.mean(errors_MMCP))
#IMCP
EVAL_IMCP = np.array([23])
pred_IMCP = np.array([1])
errors_IMCP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_IMCP,:] - results1[::][:,pred_IMCP,:]) ** 2, axis=2))
print(np.mean(errors_IMCP))
#TMCP
EVAL_TMCP = np.array([28])
pred_TMCP = np.array([17])
errors_TMCP = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_TMCP,:] - results1[::][:,pred_TMCP,:]) ** 2, axis=2))
print(np.mean(errors_TMCP))
```
# Wrist
```
#WRIST
EVAL_WRIST = np.array([29])
pred_WRIST = np.array([0])
errors_wrist = np.sqrt(np.sum((keypointsXYZ_test[::][:,EVAL_WRIST,:] - results1[::][:,pred_WRIST,:]) ** 2, axis=2))
print(np.mean(errors_wrist))
mean_error = np.mean([errors_PTIP,
errors_RTIP,
errors_MTIP,
errors_ITIP,
errors_TTIP,
errors_PDIP,
errors_RDIP,
errors_MDIP,
errors_IDIP,
errors_TDIP,
errors_PPIP,
errors_RPIP,
errors_MPIP,
errors_IPIP,
errors_TPIP,
errors_PMCP,
errors_RMCP,
errors_MMCP,
errors_IMCP,
errors_TMCP,
errors_wrist])
mean_error
```
# mean_error ~= 42 mm
| github_jupyter |
Evaluation of the frame-based matching algorithm
================================================
This notebook aims at evaluating the performance of the Markov Random Field (MRF) algorithm implemented in `stereovis/framed/algorithms/mrf.py` on the three datasets presented above. For each, the following experiments have been done:
* running MRF on each dataset without any SNN-based prior
* running MRF with prior initialisation from best-performing SNN configuration
* running MRF with prior initialisation and adjustment from motion
* comparing the differences between the above scenarios and visually assessing their quality, since no ground truth is recorded or computed.
A slightly altered and abbreviated version of this notebook can also be found under `notebooks/evaluation.ipynb`.
```
%matplotlib inline
import numpy as np
import sys
import skimage.io as skio
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import Normalize
from skimage import transform, filters, feature, morphology
sys.path.append("../")
from stereovis.framed.algorithms.mrf import StereoMRF
from stereovis.spiking.algorithms.vvf import VelocityVectorField
from stereovis.utils.frames_io import load_frames, load_ground_truth, generate_frames_from_spikes, split_frames_by_time
from stereovis.utils.spikes_io import load_spikes
from stereovis.utils.config import load_config
```
In the following we define some useful functions to load, compute and plot data. They should be used for each dataset independently, and although they expose some experiment-specific parameters to the user, other configuration options are "hard-coded" into configuration files -- at least one for each dataset. They define the data paths, camera resolution, frame rate and similar parameters and can be found under `experiments/config/hybrid/experiment_name.yaml`, where `experiment_name` should be substituted with the respective name of the experiment.
```
def load_mrf_frames(config):
    """
    Load the images used for the frame-based matching.

    Args:
        config: dict, configuration object. Should be loaded beforehand.

    Returns:
        A tuple of numpy arrays with the left-camera frames, right-camera frames
        and the timestamps provided by the left-camera.
    """
    # Both cameras share every loader parameter except the input directory.
    shared_kwargs = dict(resolution=config['input']['resolution'],
                         crop_region=config['input']['crop'],
                         scale_down_factor=config['input']['scale_down_factor'],
                         simulation_time=config['simulation']['duration'],
                         timestamp_unit=config['input']['timestamp_unit'],
                         adjust_contrast=True)
    frames_left, times = load_frames(
        input_path=os.path.join('..', config['input']['frames_path'], 'left'),
        **shared_kwargs)
    frames_right, _ = load_frames(
        input_path=os.path.join('..', config['input']['frames_path'], 'right'),
        **shared_kwargs)
    return frames_left, frames_right, times
def load_retina_spikes(config, build_frames=True, pivots=None, buffer_length=10):
    """
    Load the events used for visualisation purposes.

    Args:
        config: dict, configuration object.
        build_frames: bool, whether to buffer the events into frames or return
            the continuous stream only.
        pivots: list, timestamps serving as ticks to buffer the events at
            precise locations; otherwise equidistant buffering is used.
        buffer_length: int, buffer span time in ms.

    Returns:
        The raw events when build_frames is False; otherwise a tuple of
        (raw events, left buffered frames, right buffered frames).

    Notes:
        The SNN's output is assumed fixed for this evaluation and only the
        MRF tests are performed. To experiment with the SNN, see the framework.
    """
    retina_spikes = load_spikes(input_file=os.path.join('..', config['input']['spikes_path']),
                                resolution=config['input']['resolution'],
                                crop_region=config['input']['crop'],
                                simulation_time=config['simulation']['duration'],
                                timestep_unit=config['input']['timestamp_unit'],
                                dt_thresh=1,
                                scale_down_factor=config['input']['scale_down_factor'],
                                as_spike_source_array=False)
    if not build_frames:
        return retina_spikes
    effective_resolution = (np.asarray(config['input']['resolution'])
                            / config['input']['scale_down_factor']).astype(np.int32)
    # Buffer each eye's event stream into frames; columns of the event
    # arrays are (t, x, y, polarity).
    buffered = {}
    for side in ('left', 'right'):
        events = retina_spikes[side]
        buffered[side], _ = generate_frames_from_spikes(resolution=effective_resolution,
                                                        xs=events[:, 1],
                                                        ys=events[:, 2],
                                                        ts=events[:, 0],
                                                        zs=events[:, 3],
                                                        time_interval=buffer_length,
                                                        pivots=pivots,
                                                        non_pixel_value=-1)
    assert buffered['left'].shape == buffered['right'].shape
    return retina_spikes, buffered['left'], buffered['right']
def load_snn_spikes(spikes_file, build_frames=True, pivots=None,
                    buffer_length=10, non_pixel_value=-1):
    """
    Load the SNN output events used as a prior for the frame-based matching.

    Args:
        spikes_file: str, filepath for the SNN output events.
        build_frames: bool, whether to buffer the events as frames.
        pivots: list, timestamps for the frames.
        buffer_length: int, buffered frame time span in ms.
        non_pixel_value: numerical value for frame pixels without an event.

    Returns:
        The raw events when build_frames is False; otherwise the buffered
        frames, their timestamps and the per-frame event indices.
    """
    prior = load_spikes(spikes_file)
    if not build_frames:
        return prior
    # Resolution is carried in the pickled metadata rather than the config.
    frames, timestamps, frame_indices = generate_frames_from_spikes(
        resolution=prior['meta']['resolution'],
        xs=prior['xs'],
        ys=prior['ys'],
        ts=prior['ts'],
        zs=prior['disps'],
        time_interval=buffer_length,
        pivots=pivots,
        non_pixel_value=non_pixel_value,
        return_time_indices=True)
    return frames, timestamps, frame_indices
def eval_mrf(left_img, right_img, max_disp, prior=None,
             prior_mode='adaptive', prior_const=1.0, n_iter=10,
             show_outline=False, show_plots=True):
    """
    Run the MRF frame-based matching from given frames and algorithm parameters.

    Args:
        left_img: 2d array with the pre-processed left image
        right_img: 2d array with the pre-processed right image
        max_disp: int, largest detectable disparity value
        prior: optionally a 2d array with the prior frame oriented to the left image
        prior_mode: str, mode of incorporating the prior frame. Can be 'adaptive' for mixing proportionally to the
            data cost, or 'const' for normal mixing.
        prior_const: float, if the prior mode is 'const', this is the mixing coefficient.
        n_iter: int, number of BP iterations
        show_outline: bool, whether to plot the outline of the objects (using Canny filter)
        show_plots: bool, whether to plot the results
    Returns:
        A 2d numpy array with the resulted disparity map.
    """
    img_res = left_img.shape
    mrf = StereoMRF(img_res, n_levels=max_disp)
    disp_map = mrf.lbp(left_img, right_img, prior=prior,
                       prior_influence_mode=prior_mode,
                       prior_trust_factor=prior_const,
                       n_iter=n_iter).astype(np.float32)
    # The leftmost max_disp columns cannot be matched (no counterpart in the
    # right image), so mask them out of the disparity map.
    disp_map[:, :max_disp] = np.nan
    if not show_plots:
        return disp_map
    fig, axs = plt.subplots(2, 2)
    fig.set_size_inches(10, 8)
    axs[0, 0].imshow(left_img, interpolation='none', cmap='gray')
    axs[0, 0].set_title("Left frame")
    axs[0, 1].imshow(right_img, interpolation='none', cmap='gray')
    axs[0, 1].set_title("Right frame")
    print("Image resolution is: {}".format(img_res))
    if show_outline:
        # Otsu-threshold the left image, trace its Canny edge and blank the
        # outline pixels so the object contour shows through the depth map.
        val = filters.threshold_otsu(left_img)
        ref_shape = (left_img > val).reshape(img_res).astype(np.float32)
        ref_outline = feature.canny(ref_shape, sigma=1.0) > 0
        disp_map[ref_outline] = np.nan
    # NOTE(review): set_bad mutates the shared plt.cm.jet instance, so NaN
    # rendering is changed globally for subsequent plots -- confirm intended.
    cmap = plt.cm.jet
    cmap.set_bad((1, 1, 1, 1))
    depth_map_im = axs[1, 0].imshow(disp_map, interpolation='none')
    axs[1, 0].set_title("Depth frame")
    # Place a slim colorbar immediately to the right of the depth subplot.
    depth_map_pos = axs[1, 0].get_position()
    cbaxes = plt.axes([depth_map_pos.x0*1.05 + depth_map_pos.width * 1.05,
                       depth_map_pos.y0, 0.01, depth_map_pos.height])
    fig.colorbar(depth_map_im, cax=cbaxes)
    axs[1, 1].set_visible(False)
    return disp_map
def eval_snn(experiment_name, disparity_max, frame_id, buffer_len=20):
    """
    Visualise the pre-computed SNN output along with the retina input.

    Args:
        experiment_name: str, the name of the experiment, which must also match an existing config file.
        disparity_max: int, maximum computable disparity (used here to normalise the colour scale).
        frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map.
        buffer_len: int, time in ms for the buffer length of retina events.
    Returns:
        The buffered SNN output at the timestamps of the frames.
    """
    print("Sample images from experiment: {}".format(experiment_name))
    config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml"))
    left_frames, right_frames, timestamps = load_mrf_frames(config)
    left_img = left_frames[frame_id]
    right_img = right_frames[frame_id]
    # remove the _downsampled suffix in the experiment name for the pivots
    # (the [:-12] slice assumes the name ends in '_downsampled' -- TODO confirm)
    pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12],
                                  "left", "timestamps.npy")) / 1000.
    # buffer the raw retina events into frames aligned with the image timestamps
    retina_spikes, left_retina, right_retina = \
        load_retina_spikes(config, build_frames=True,
                           pivots=pivots,
                           buffer_length=buffer_len)
    snn_spikes_file = os.path.join("..", "data", "output", "experiments",
                                   "best_snn_spikes", experiment_name + '.pickle')
    # the buffered SNN depth spikes serve as the prior frames downstream
    prior_frames, _, prior_frame_indices = \
        load_snn_spikes(snn_spikes_file, build_frames=True,
                        pivots=pivots, buffer_length=buffer_len)
    # 3x2 grid: camera frames, retina frames, and the SNN depth map
    fig, axs = plt.subplots(3, 2)
    fig.set_size_inches(11, 11)
    # fig.tight_layout()
    axs[0, 0].imshow(left_img, interpolation='none', cmap='gray')
    axs[0, 0].set_title("Left frame")
    axs[0, 1].imshow(right_img, interpolation='none', cmap='gray')
    axs[0, 1].set_title("Right frame")
    axs[1, 0].imshow(left_retina[frame_id], interpolation='none')
    axs[1, 0].set_title("Left retina frame")
    axs[1, 1].imshow(right_retina[frame_id], interpolation='none')
    axs[1, 1].set_title("Right retina frame")
    depth_map_snn = axs[2, 0].imshow(prior_frames[frame_id], interpolation='none', vmin=0, vmax=disparity_max)
    depth_map_pos = axs[2, 0].get_position()
    # place a slim colour bar just to the right of the depth-map axes
    cbaxes = plt.axes([depth_map_pos.x0*1.05 + depth_map_pos.width * 1.05,
                       depth_map_pos.y0, 0.01, depth_map_pos.height])
    fig.colorbar(depth_map_snn, cax=cbaxes)
    axs[2, 0].set_title("Network depth map")
    axs[2, 1].set_visible(False)
    return prior_frames
def compute_optical_flow(experiment_name, background=None, frame_id=None):
    """
    Estimate per-event velocities (optical flow) for the left retina events of one
    time interval and plot them as a quiver diagram.

    Args:
        experiment_name: str, name of the experiment; must match a config file
            (the [:-12] slice assumes the name ends in '_downsampled').
        background: optional 2d array plotted behind the velocity field.
        frame_id: optional int, index of the time interval to analyse. Defaults to
            the notebook-level global `frame_id_head` for backward compatibility.
    Returns:
        A tuple (xs, ys, us, vs) with the event coordinates and velocity components.
    """
    if frame_id is None:
        # NOTE(review): the original implementation silently read the notebook
        # global `frame_id_head`; keep it as the default so existing calls work,
        # but prefer passing frame_id explicitly.
        frame_id = frame_id_head
    # remove the _downsampled suffix in the experiment name for the pivots
    pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12],
                                  "left", "timestamps.npy")) / 1000.
    config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml"))
    vvf = VelocityVectorField(time_interval=20,
                              neighbourhood_size=(3, 3),
                              rejection_threshold=0.005,
                              convergence_threshold=1e-5,
                              max_iter_steps=5,
                              min_num_events_in_timespace_interval=30)
    events = load_spikes(input_file=os.path.join('..', config['input']['spikes_path']),
                         resolution=config['input']['resolution'],
                         crop_region=config['input']['crop'],
                         simulation_time=config['simulation']['duration'],
                         timestep_unit=config['input']['timestamp_unit'],
                         dt_thresh=1,
                         scale_down_factor=config['input']['scale_down_factor'],
                         as_spike_source_array=False)
    # bucket the event stream into per-frame index lists aligned with the pivots
    time_ind, _ = split_frames_by_time(ts=events['left'][:, 0],
                                       time_interval=50,
                                       pivots=pivots)
    velocities = vvf.fit_velocity_field(events['left'][time_ind[frame_id], :], assume_sorted=False,
                                        concatenate_polarity_groups=True)
    xs, ys, us, vs = events['left'][time_ind[frame_id], 1], \
                     events['left'][time_ind[frame_id], 2], \
                     velocities[:, 0], velocities[:, 1]
    fig, axs = plt.subplots(1, 1)
    if background is not None:
        plt.imshow(background)
    # colour each arrow by its direction angle
    colors = np.arctan2(us, vs)
    norm = Normalize()
    if colors.size > 0:
        norm.autoscale(colors)
    colormap = cm.inferno
    # image row 0 is at the top, so flip the y axis for plotting
    axs.invert_yaxis()
    plt.quiver(xs, ys, us, vs, angles='xy', scale_units='xy', scale=1, color=colormap(norm(colors)))
    return xs, ys, us, vs
def adjust_events_from_motion(prior_frame, velocities):
    """
    Shift the prior's disparity events according to the detected motion.

    The optical-flow algorithm operates on the 3d non-buffered retina events, so the
    velocity vectors are first rasterised onto the prior's image plane and each
    disparity pixel is then moved one step along the nearest compass direction.

    Args:
        prior_frame: ndarray, the buffered SNN output used as a prior.
        velocities: tuple (xs, ys, us, vs) -- event positions and velocity components.
    Returns:
        One adjusted prior frame (pixels with no event remain -1).
    """
    xs, ys, us, vs = velocities
    # rasterise the velocity vectors onto a per-pixel (u, v) lookup table
    flow_lut = np.zeros(prior_frame.shape + (2,))
    for x_pos, y_pos, u_vel, v_vel in zip(xs, ys, us, vs):
        flow_lut[int(y_pos), int(x_pos), :] = np.array([u_vel, v_vel])
    # unit shifts for the 8 compass directions, indexed by angle octant
    compass = np.asarray([(1, 0), (1, 1), (0, 1), (-1, 1),
                          (-1, 0), (-1, -1), (0, -1), (1, -1)], dtype=np.int32)

    def octant_shift(a, b):
        # velocities with magnitude <= 1 are treated as "no motion"
        if np.linalg.norm([a, b]) <= 1.:
            return np.array([0, 0])
        return compass[int(np.floor(np.round(8 * np.arctan2(b, a) / (2 * np.pi)))) % 8]

    shifted = np.full_like(prior_frame, -1)
    n_rows, n_cols = prior_frame.shape[0], prior_frame.shape[1]
    # move every detected disparity event along its compass shift
    for row, col in np.argwhere(prior_frame >= 0):
        u_vel, v_vel = flow_lut[row, col]
        dcol, drow = octant_shift(v_vel, u_vel)
        # moving up in the image decrements the row number, hence row - drow
        new_row, new_col = row - drow, col + dcol
        if 0 <= new_col < n_cols and 0 <= new_row < n_rows:
            shifted[new_row, new_col] = prior_frame[row, col]
    return shifted
def run_mrf_without_prior(experiment_name, disparity_max, frame_id=0, n_iter=5):
    """
    Compute a baseline MRF depth map on a pair of images without any prior knowledge.

    The experiment parameters are loaded from the corresponding configuration yaml file.

    Args:
        experiment_name: str, the name of the experiment, which must also match an existing config file.
        disparity_max: int, maximum computable disparity.
        frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map.
        n_iter: int, number of MRF BP iterations.
    Returns:
        The resolved depth map.
    """
    print("Sample images from experiment: {}".format(experiment_name))
    cfg_path = os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")
    frames_left, frames_right, _ = load_mrf_frames(load_config(cfg_path))
    return eval_mrf(frames_left[frame_id], frames_right[frame_id], disparity_max, n_iter=n_iter)
def run_mrf_with_prior(experiment_name, disparity_max, prior_frames,
                       frame_id=0, n_iter=5, prior_mode='const', prior_const=1):
    """
    Run the MRF computation on an image pair, seeding the initialisation phase with
    an SNN prior frame. The experiment parameters come from the configuration file.

    Args:
        experiment_name: str, the name of the experiment, which must also match an existing config file.
        disparity_max: int, maximum computable disparity.
        prior_frames: ndarray, list of all buffered frames from the SNN output.
        frame_id: int, the index of the frame (pair of frames) which are used to produce a depth map.
        n_iter: int, number of MRF BP iterations.
        prior_mode: str, the way of incorporating the prior. Can be `adaptive` or `const`.
        prior_const: float, influence of the prior when the chosen mode is `const`.
    Returns:
        The depth map of the MRF using the prior frame.
    """
    cfg_path = os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml")
    frames_left, frames_right, _ = load_mrf_frames(load_config(cfg_path))
    return eval_mrf(frames_left[frame_id], frames_right[frame_id], disparity_max,
                    prior=prior_frames[frame_id],
                    prior_mode=prior_mode,
                    prior_const=prior_const,
                    n_iter=n_iter, show_plots=False)
def plot_difference_prior_raw(depth_map_raw, depth_map_prior, disparity_max):
    """
    Visualise the MRF outcome with and without the prior, plus their absolute difference.

    Args:
        depth_map_raw: ndarray, depth map result of the MRF applied on the frames only.
        depth_map_prior: ndarray, depth map of the MRF applied on the image and prior frames.
        disparity_max: int, maximum detectable disparity, used to normalise the plot colors.
    """
    panels = [(depth_map_prior, "With prior"),
              (depth_map_raw, "Without prior"),
              (np.abs(depth_map_raw - depth_map_prior), "Absolute value difference")]
    fig, axs = plt.subplots(1, len(panels))
    fig.set_size_inches(12, 20)
    for axis, (image, title) in zip(axs, panels):
        axis.imshow(image, interpolation='none', vmax=disparity_max)
        axis.set_title(title)
def plot_adjusted_prior(experiment_name, frame_id=0, prior=None, adjusted=None):
    """
    Visualise the prior before and after the motion adjustment, overlaid on the reference frame.

    Args:
        experiment_name: str, name of the experiment to load.
        frame_id: int, the index of the frame to plot as background.
        prior: optional 2d array with the unadjusted prior frame. Defaults to the
            notebook global `prior_frames_head[frame_id_head]` for backward compatibility.
        adjusted: optional 2d array with the motion-adjusted prior. Defaults to the
            notebook global `adjusted_events`.
    """
    if prior is None:
        # NOTE(review): the original read the notebook globals `prior_frames_head`,
        # `frame_id_head` and `adjusted_events` directly (ignoring `frame_id` for the
        # overlay index); keep them as defaults so existing calls behave the same,
        # but prefer passing the arrays explicitly.
        prior = prior_frames_head[frame_id_head]
    if adjusted is None:
        adjusted = adjusted_events
    config = load_config(os.path.join("..", "experiments", "configs", "hybrid", experiment_name + ".yaml"))
    left_frames, _, _ = load_mrf_frames(config)
    left_img = left_frames[frame_id]
    fig, axs = plt.subplots(1, 2)
    fig.set_size_inches(10, 16)
    axs[0].imshow(left_img, interpolation='none', cmap='gray')
    axs[0].imshow(prior, interpolation='none', alpha=0.7)
    axs[0].set_title("Reference frame with prior overlayed")
    axs[1].imshow(left_img, interpolation='none', cmap='gray')
    axs[1].imshow(adjusted, interpolation='none', alpha=0.7)
    axs[1].set_title("Reference frame with adjusted prior overlayed")
```
## MRF on frames without prior information
The following experiment provides a baseline for the stereo-matching performance of the MRF algorithm.
For an algorithm test on a standard stereo benchmark dataset see the notebook `MRF_StereoMatching.ipynb`. These results also provide a baseline for the next experiment in which prior information is included. For the sake of completeness, a [third-party algorithm](http://www.ivs.auckland.ac.nz/quick_stereo/index.php) was applied on a subset of the data to compare against our MRF implementation. The results are included in the submitted data (see `data/output/demo/online_algorithm`).
### Head experiment
```
experiment_name = 'head_downsampled'
disparity_max_head = 30 # note that these should be scaled if the scale factor in the config file is changed.
frame_id_head = 40
depth_map_raw_head = run_mrf_without_prior(experiment_name, disparity_max_head, frame_id=frame_id_head, n_iter=5)
```
**Result Analysis:**
The head is mostly correctly matched, with some errors in the middle. However, if one increases the number of iterations, then in some cases (different `frame_id`s) these spots tend to disappear.
Another interesting effect is the misclassified backgorund area on the left side of the head and the correctly classified right side. This can be explained as follows: when comparing the left and right images for the zero disparity case, the background of the two images overlap and due to the homogeneity of the color, the energy values for the right-side background pixels are quite small and the algorithm correctly assigns the small disparity. On the left side however, the background, albeit not really shifted, is occluded from the object in the right image and the nearest matching point to the left of the object (the direction of search) is some 26-27 pixels apart from the reference location. This inevitably produces the wrong depth values on the left side of the reference object.
Although the situation below the head statue is different, the algorithm produces unsatisfying results due to the absence of corresponding pixels (as the shadow is not the same in the left and the right image, and the signal from neighbours above gets propagated to the lower rows of the depth image).
### Checkerboard experiment
```
experiment_name = 'checkerboard_downsampled'
disparity_max_checkerboard = 22 # note that these should be scaled if the scale factor in the config file is changed.
frame_id_checkerboard = 40
depth_map_raw_checkerboard = run_mrf_without_prior(experiment_name, disparity_max_checkerboard,
frame_id=frame_id_checkerboard, n_iter=5)
```
**Result Analysis:**
The outcome of this experiment shows that the MRF produces good results for the regions which can be matched unambiguously, such as object edges. The detected disparities for regions with homogeneous colours, e.g. the floor or the wall, are mostly incorrect. Nevertheless, the pixel correspondence there is not trivially computable, and without any additional knowledge, such as "the floor spans perpendicularly to the image plane", no matching algorithm known to us will be able to generate an adequate depth map. In the experiment with the checkerboard, a special difficulty is posed by the repetitive patterns, which in some frames (e.g. No. 40) are fully visible and therefore have a globally optimal matching configuration. There is, however, no guarantee that this configuration will be found by the algorithm, and in practice we see that only a small portion is correctly matched.
### Boxes and cones experiment
```
experiment_name = 'boxes_and_cones_downsampled'
disparity_max_boxes = 20 # note that these should be scaled if the scale factor in the config file is changed.
frame_id_boxes = 20
depth_map_raw_boxes = run_mrf_without_prior(experiment_name, disparity_max_boxes,
frame_id=frame_id_boxes, n_iter=5)
```
**Result Analysis:**
Some depth maps from frames in this dataset are particularly badly computed, as they are overexposed and wash out the object edges. Although the first several frames of the video present a table with sharply outlined edges, some parts which are present in the reference image are missing from the target one, which makes their matching impossible and hinders the correspondence assignment of the visible sections. It is worth putting more effort into pre-processing, so that the contrast is normalised locally and overexposed areas do not affect the global contrast normalisation.
## MRF on frames with prior information from SNN output
This experiment will take the pre-computed depth events from the spiking network and will run the MRF on the same data. This time however the initial state of the random field will be computed as a convex combination between the data (i.e. image differences) and the prior. The reader is encouraged to play with the parameters. The arguably well-performing parameters are set as the default in the cell below.
### Head experiment
```
experiment_name = 'head_downsampled'
prior_frames_head = eval_snn(experiment_name, disparity_max_head, frame_id=frame_id_head, buffer_len=20)
```
The prior frame, obtained from the buffered SNN output in the time interval `buffer_len` ms before the actual frames, is mixed with the data-term computed in the image difference operation. The mixing coefficient can be proportional to the difference term, which has the following interpretation: _the lower the matching confidence from the data, the higher the prior influence should be_.
```
depth_map_prior_head = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head,
frame_id=frame_id_head, n_iter=5, prior_mode='const',
prior_const=1)
plot_difference_prior_raw(depth_map_raw=depth_map_raw_head,
depth_map_prior=depth_map_prior_head,
disparity_max=disparity_max_head)
```
Part of this experiment is to evaluate the contribution of the prior with varying prior constants. Below we plot the results from several independent evaluations with `prior_const` ranging over [0, 0.5, 1, 10, 100] and finally the result from the adaptive mode.
```
prior_consts = [0, 0.5, 1, 10, 100]
depth_maps = []
fig, axs = plt.subplots(1, len(prior_consts)+1)
fig.set_size_inches(40, 40)
for i, p_c in enumerate(prior_consts):
depth_map = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head,
frame_id=frame_id_head, n_iter=5, prior_mode='const',
prior_const=p_c)
axs[i].imshow(depth_map, interpolation='none', vmax=disparity_max_head)
axs[i].set_title("Prior const: {}".format(p_c))
depth_map = run_mrf_with_prior(experiment_name, disparity_max_head, prior_frames_head,
frame_id=frame_id_head, n_iter=5, prior_mode='adaptive')
axs[i+1].imshow(depth_map, interpolation='none', vmax=disparity_max_head)
axs[i+1].set_title("Adaptive Prior const")
```
**Result Analysis:**
In some examples the prior has visually deteriorated the results (especially if taken with great influence, i.e. >> 1), and in the rest of the cases it hasn't changed much of the quality of the depth map. The former is due to the noisy output that the SNN produces on these datasets, and the latter is due to its sparsity. In any case, these results do not support the claim that using the SNN output as prior initialisation for the MRF will improve the quality of the depth map.
### Checkerboard experiment
```
experiment_name = 'checkerboard_downsampled'
prior_frames = eval_snn(experiment_name, disparity_max_checkerboard, frame_id=frame_id_checkerboard, buffer_len=20)
depth_map_prior_checkerboard = run_mrf_with_prior(experiment_name, disparity_max_checkerboard, prior_frames,
frame_id=frame_id_checkerboard, n_iter=5, prior_mode='const',
prior_const=1)
plot_difference_prior_raw(depth_map_raw=depth_map_raw_checkerboard,
depth_map_prior=depth_map_prior_checkerboard,
disparity_max=disparity_max_checkerboard)
```
**Result Analysis:**
The same observations as in the _head experiment_: prior doesn't change much, and if it does, then the depth map has not become better in quality.
### Boxes and cones experiment
```
experiment_name = 'boxes_and_cones_downsampled'
prior_frames = eval_snn(experiment_name, disparity_max_boxes, frame_id=frame_id_boxes, buffer_len=20)
depth_map_prior_boxes = run_mrf_with_prior(experiment_name, disparity_max_boxes, prior_frames,
frame_id=frame_id_boxes, n_iter=5, prior_mode='const',
prior_const=1)
plot_difference_prior_raw(depth_map_raw=depth_map_raw_boxes,
depth_map_prior=depth_map_prior_boxes,
disparity_max=disparity_max_boxes)
```
**Result Analysis:**
Same as above.
## Inspecting the spatial precision of the prior
Since the prior is accumulated information from the past, and motion is present, it can happen that the SNN output will have spikes at locations which are slightly off from the gray-scale image. If this is the case (which, by the way, is not easily detectable in an automatic fashion), then one can try to compute the motion of the object and adapt the SNN output accordingly. An optical flow algorithm on the SNN events is applied to estimate the future position of the object, and the shift is added to the prior.
We will perform this experiment on the _head_ dataset only, as this is rather unnecessary evaluation and serves only to show that this approach has been considered. Feel free to try on different frames and/or datasets. The optical flow algorithm is implemented according to _Benosman, Ryad, et al., "Event-based visual flow."_ [10], which in short is based on fitting a plane in 3D space-time (2D image space and 1D time dimensions), where the inverse of the slopes of the plane in the orthogonal _x_, _y_ directions (partial derivatives) are used to compute the velocities.
```
experiment_name = 'head_downsampled'
xs, ys, us, vs = compute_optical_flow(experiment_name)
pivots = np.load(os.path.join("..", "data", "input", "frames", experiment_name[:-12],
"left", "timestamps.npy")) / 1000.
adjusted_events = adjust_events_from_motion(prior_frames_head[frame_id_head], (xs, ys, us, vs))
plot_adjusted_prior(experiment_name, frame_id=frame_id_head)
```
**Result Analysis:**
Since the prior adjustment did not turn out to be beneficial, we decided to stop any further analysis of the performance. In a different application or under different circumstances (e.g. when immediate depth SNN spikes cannot be computed and an older result should be extrapolated into the future) this technique might prove helpful.
| github_jupyter |
<a href="https://colab.research.google.com/github/hvarS/NLPRefer/blob/main/word2vec_pipeline.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
###Imports and Data
```
import nltk
import numpy as np
#Tokenizer Download
nltk.download('punkt')
#Stopwords Download
nltk.download("stopwords")
nltk.download('wordnet')
paragraph = "Fans, for the past two weeks you have been reading about the bad break I got. Yet today I consider myself the luckiest man on the face of this earth. I have been in ballparks for seventeen years and have never received anything but kindness and encouragement from you fans.Look at these grand men. Which of you wouldn't consider it the highlight of his career just to associate with them for even one day? Sure, I'm lucky. Who wouldn't consider it an honor to have known Jacob Ruppert? Also, the builder of baseball's greatest empire, Ed Barrow? To have spent six years with that wonderful little fellow, Miller Huggins? Then to have spent the next nine years with that outstanding leader, that smart student of psychology, the best manager in baseball today, Joe McCarthy? Sure, I'm lucky.When the New York Giants, a team you would give your right arm to beat, and vice versa, sends you a gift - that's something. When everybody down to the groundskeepers and those boys in white coats remember you with trophies - that's something. When you have a wonderful mother-in-law who takes sides with you in squabbles with her own daughter - that's something. When you have a father and a mother who work all their lives so you can have an education and build your body - it's a blessing. When you have a wife who has been a tower of strength and shown more courage than you dreamed existed - that's the finest I know."
```
###Preprocessing
```
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
# Clean the raw text: keep letters only, lowercase, drop English stopwords,
# then reduce each word -- first with stemming, then with lemmatisation.
# Using Stemming
ps = PorterStemmer()
sentences = nltk.sent_tokenize(paragraph)
corpus_stemmed = []
for i in range(len(sentences)):
    # strip everything that is not a letter
    review = re.sub('[^a-zA-Z]', ' ', sentences[i])
    review = review.lower()
    review = review.split()
    # NOTE(review): set(stopwords.words('english')) is rebuilt for every sentence;
    # hoisting it out of the loop would avoid the repeated corpus reads.
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus_stemmed.append(review)
sentences
corpus_stemmed
# Same pipeline, but using WordNet lemmatisation instead of stemming.
wordnet=WordNetLemmatizer()
corpus_lemmatized = []
for i in range(len(sentences)):
    review = re.sub('[^a-zA-Z]', ' ', sentences[i])
    review = review.lower()
    review = review.split()
    review = [wordnet.lemmatize(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus_lemmatized.append(review)
sentences
corpus_lemmatized
```
###Word2Vec
```
## Tokenize sentences into words before Word2Vec
sentences = [nltk.word_tokenize(sentence) for sentence in corpus_lemmatized]
from gensim.models import Word2Vec
# min_count=1 keeps every word, even hapaxes, in the vocabulary
w2v = Word2Vec(sentences,min_count=1)
# NOTE(review): `wv.vocab` was removed in gensim 4 (use `wv.key_to_index`) -- confirm
# the gensim version pinned for this notebook.
words = w2v.wv.vocab
list(words.keys())
w2v.wv["work"].shape
#100 dimension embedding
vectorised_sentences = []
for sentence in sentences:
    s = []
    for word in sentence:
        vector = w2v.wv[word]
        s.append(vector)
    s = np.array(s)
    vectorised_sentences.append(s)
# NOTE(review): sentences have different lengths, so this is a ragged array;
# recent numpy requires dtype=object here and may raise otherwise.
vectorised_sentences = np.array(vectorised_sentences)
vectorised_sentences[0].shape #First Sentence has 8 words and each word has 100 dimensions
##To Use Google's pretrained Word2Vec
# Load Google's pre-trained Word2Vec model.
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
from sklearn.svm import LinearSVC, SVC
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
class OVRSVC():
    """One-vs-rest multi-class classifier built from binary LinearSVCs.

    One binary LinearSVC is fitted per class; prediction picks the class whose
    classifier reports the highest decision-function confidence.

    NOTE(review): `predict` indexes confidence columns with `int(c)`, so class
    labels are assumed to be the contiguous integers 0..n_classes-1 -- confirm
    against the label preprocessing (here labels are shifted by +1 to 0, 1, 2).
    """

    def __init__(self, **kwargs):
        # kwargs are forwarded verbatim to every per-class LinearSVC
        self.c2svc = {}
        self.kwargs = kwargs

    def fit(self, X, y):
        """Fit one binary (class vs. rest) classifier per distinct label in y; return self."""
        for c in set(y):
            self.c2svc[c] = LinearSVC(**self.kwargs)
            self.c2svc[c].fit(X, y == c)
        return self

    def predict(self, X):
        """Return, per sample, the label whose classifier gives the highest confidence."""
        confidences = np.zeros((X.shape[0], len(self.c2svc)))
        for c, svc in self.c2svc.items():
            confidences[:, int(c)] = svc.decision_function(X)
        return confidences.argmax(axis=1)

    def score(self, X, y):
        """Return the accuracy of predict(X) against the true labels y."""
        return np.mean(self.predict(X) == y)
# Scale features to [0, 1], project to 2D with PCA, and scatter-plot the classes.
scaler = MinMaxScaler(copy=False)
X_train = np.load('data_hw2/train_data.npy')
# labels are shifted by +1 so they become the contiguous integers 0, 1, 2
y_train = np.load('data_hw2/train_label.npy').astype(int) + 1
# X_train, X_dev, y_train, y_dev = train_test_split(X_train, y_train, test_size=0.5)
X_test = np.load('data_hw2/test_data.npy')
y_test = np.load('data_hw2/test_label.npy').astype(int) + 1
# fit the scaler on the training data only, then apply it everywhere
scaler.fit(X_train)
X_train = scaler.transform(X_train)
# X_dev = scaler.transform(X_dev)
X_test = scaler.transform(X_test)
pca = PCA(n_components=2)
pca.fit(X_train)
# pca.fit(np.concatenate((X_train, X_test)))
X_train_r = pca.transform(X_train)
# NOTE(review): X_dev is undefined because the train_test_split line above is
# commented out -- this line raises NameError as written.
X_dev_r = pca.transform(X_dev)
X_test_r = pca.transform(X_test)
np.random.shuffle(X_train_r)
np.random.shuffle(X_dev_r)
np.random.shuffle(X_test_r)
# per-class projections, shuffled so the first 50 points are a random sample
X_train_r_0 = np.random.permutation(pca.transform(X_train[y_train == 0]))
X_test_r_0 = np.random.permutation(pca.transform(X_test[y_test == 0]))
X_train_r_1 = np.random.permutation(pca.transform(X_train[y_train == 1]))
X_test_r_1 = np.random.permutation(pca.transform(X_test[y_test == 1]))
X_train_r_2 = np.random.permutation(pca.transform(X_train[y_train == 2]))
X_test_r_2 = np.random.permutation(pca.transform(X_test[y_test == 2]))
legends = []
plt.scatter(X_train_r_0[:50, 0], X_train_r_0[:50, 1], label="train_0", color='#EC5D57')
plt.scatter(X_train_r_1[:50, 0], X_train_r_1[:50, 1], label="train_1", color='#70BF41')
plt.scatter(X_train_r_2[:50, 0], X_train_r_2[:50, 1], label="train_2", color='#51A7F9')
plt.scatter(X_test_r_0[:50, 0], X_test_r_0[:50, 1], label="test_0", color='#F5D328')
plt.scatter(X_test_r_1[:50, 0], X_test_r_1[:50, 1], label="test_1", color='#B36AE2')
plt.scatter(X_test_r_2[:50, 0], X_test_r_2[:50, 1], label="test_2", color='#F39019')
plt.legend()
plt.savefig('pca.pdf')
# NOTE(review): X_dev_r_1 and X_dev_r_2 are never defined anywhere in this
# notebook -- the three cells below raise NameError as written.
plt.scatter(X_train_r_1[:, 0], X_train_r_1[:, 1], color='navy')
plt.scatter(X_dev_r_1[:, 0], X_dev_r_1[:, 1], color='turquoise')
plt.scatter(X_test_r_1[:, 0], X_test_r_1[:, 1], color='darkorange')
plt.scatter(X_train_r_2[:, 0], X_train_r_2[:, 1], color='navy')
plt.scatter(X_dev_r_2[:, 0], X_dev_r_2[:, 1], color='turquoise')
plt.scatter(X_test_r_2[:, 0], X_test_r_2[:, 1], color='darkorange')
plt.scatter(X_train_r[:70, 0], X_train_r[:70, 1], color='navy')
plt.scatter(X_dev_r[:70, 0], X_dev_r[:70, 1], color='turquoise')
plt.scatter(X_test_r[:70, 0], X_test_r[:70, 1], color='darkorange')
```
| github_jupyter |
<CENTER>
<header>
<h1>Pandas Tutorial</h1>
<h3>EuroScipy, Cambridge UK, August 27th, 2015</h3>
<h2>Joris Van den Bossche</h2>
<p></p>
Source: <a href="https://github.com/jorisvandenbossche/2015-EuroScipy-pandas-tutorial">https://github.com/jorisvandenbossche/2015-EuroScipy-pandas-tutorial</a>
</header>
</CENTER>
# About me: Joris Van den Bossche
- PhD student at Ghent University and VITO, Belgium
- bio-science engineer, air quality research
- pandas core dev
->
- https://github.com/jorisvandenbossche
- [@jorisvdbossche](https://twitter.com/jorisvdbossche)
Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)
# Content of this talk
- Why do you need pandas?
- Basic introduction to the data structures
- Guided tour through some of the pandas features with two case studies: **movie database** and a **case study about air quality**
If you want to follow along, this is a notebook that you can view or run yourself:
- All materials (notebook, data, link to nbviewer): https://github.com/jorisvandenbossche/2015-EuroScipy-pandas-tutorial
- You need `pandas` >= 0.15.2 (easy solution is using Anaconda)
Some imports:
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
pd.options.display.max_rows = 8
```
# Let's start with a showcase
## Case study: air quality in Europe
AirBase (The European Air quality dataBase): hourly measurements of all air quality monitoring stations from Europe
Starting from these hourly data for different stations:
```
# hourly air-quality measurements, one column per station, indexed by timestamp
data = pd.read_csv('data/airbase_data.csv', index_col=0, parse_dates=True)
data
```
to answering questions about this data in a few lines of code:
**Does the air pollution show a decreasing trend over the years?**
```
data['1999':].resample('A').plot(ylim=[0,100])
```
**How many exceedances of the limit values?**
```
# boolean frame marking hours above the 200 threshold, counted per year
exceedances = data > 200
exceedances = exceedances.groupby(exceedances.index.year).sum()
ax = exceedances.loc[2005:].plot(kind='bar')
# reference line at 18 exceedance hours per year -- presumably the EU limit; TODO confirm
ax.axhline(18, color='k', linestyle='--')
```
**What is the difference in diurnal profile between weekdays and weekend?**
```
data['weekday'] = data.index.weekday
# weekday numbers 5 and 6 are Saturday and Sunday
data['weekend'] = data['weekday'].isin([5, 6])
# mean diurnal profile of station FR04012, one column per weekend flag
data_weekend = data.groupby(['weekend', data.index.hour])['FR04012'].mean().unstack(level=0)
data_weekend.plot()
```
We will come back to these example, and build them up step by step.
# Why do you need pandas?
## Why do you need pandas?
When working with *tabular or structured data* (like R dataframe, SQL table, Excel spreadsheet, ...):
- Import data
- Clean up messy data
- Explore data, gain insight into data
- Process and prepare your data for analysis
- Analyse your data (together with scikit-learn, statsmodels, ...)
# Pandas: data analysis in python
For data-intensive work in Python the [Pandas](http://pandas.pydata.org) library has become essential.
What is ``pandas``?
* Pandas can be thought of as NumPy arrays with labels for rows and columns, and better support for heterogeneous data types, but it's also much, much more than that.
* Pandas can also be thought of as `R`'s `data.frame` in Python.
* Powerful for working with missing data, working with time series data, for reading and writing your data, for reshaping, grouping, merging your data, ...
It's documentation: http://pandas.pydata.org/pandas-docs/stable/
## Key features
* Fast, easy and flexible input/output for a lot of different data formats
* Working with missing data (`.dropna()`, `pd.isnull()`)
* Merging and joining (`concat`, `join`)
* Grouping: `groupby` functionality
* Reshaping (`stack`, `pivot`)
* Powerful time series manipulation (resampling, timezones, ..)
* Easy plotting
# Further reading
- the documentation: http://pandas.pydata.org/pandas-docs/stable/
- Wes McKinney's book "Python for Data Analysis"
- lots of tutorials on the internet, eg http://github.com/jvns/pandas-cookbook
# What's new in pandas
Some recent enhancements of the last year (versions 0.14 to 0.16):
- Better integration for categorical data (`Categorical` and `CategoricalIndex`)
- The same for `Timedelta` and `TimedeltaIndex`
- More flexible SQL interface based on `sqlalchemy`
- MultiIndexing using slicers
- `.dt` accessor for accessing datetime properties from columns
- Groupby enhancements
- And a lot of enhancements and bug fixes
# How can you help?
**We need you!**
Contributions are very welcome and can be in different domains:
- reporting issues
- improving the documentation
- testing release candidates and provide feedback
- triaging and fixing bugs
- implementing new features
- spreading the word
-> https://github.com/pydata/pandas
<div class="alert alert-success">
<b>JOIN</b> the sprint this Sunday!
</div>
## Thanks for listening! Questions?
- https://github.com/jorisvandenbossche
- <mailto:jorisvandenbossche@gmail.com>
- [@jorisvdbossche](https://twitter.com/jorisvdbossche)
Slides and data: Source: https://github.com/jorisvandenbossche/2015-EuroScipy-pandas-tutorial
Slides presented with 'live reveal' https://github.com/damianavila/RISE
| github_jupyter |
```
## This exercise is from "Hands on Machine Learning with Scikit-Learn & Tensor Flow",##
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "end_to_end_project"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH.

    Args:
        fig_id: base filename (without extension) for the saved image.
        tight_layout: call plt.tight_layout() before saving.
        fig_extension: image format / file extension (e.g. "png").
        resolution: DPI passed to plt.savefig.
    """
    # BUG FIX: IMAGES_PATH may not exist yet, which made savefig raise
    # FileNotFoundError; create it on first use.
    os.makedirs(IMAGES_PATH, exist_ok=True)
    path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
import os
import tarfile
from six.moves import urllib
# Create a master housing path that's reusable using the housing tgz data
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
    """Download housing.tgz from *housing_url* and extract it into *housing_path*.

    Args:
        housing_url: URL of the gzipped tarball to download.
        housing_path: local directory that receives the extracted files
            (created if missing).
    """
    os.makedirs(housing_path, exist_ok=True)
    tgz_path = os.path.join(housing_path, "housing.tgz")
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager guarantees the archive is closed even if extraction fails
    # (the original leaked the handle on any exception before close()).
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
# Call it
# NOTE(review): this re-downloads the dataset on every run; consider
# skipping when the file already exists.
fetch_housing_data()
import pandas as pd
def load_housing_data(housing_path = HOUSING_PATH):
    """Read the extracted housing.csv under *housing_path* into a DataFrame."""
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
housing = load_housing_data()
housing.head()
housing.info()
# Now we have our housing variable to hold our dataset, we can start looking for insights in the data
housing.describe()
%matplotlib inline
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
save_fig("attribute_histogram_plots")
plt.show()
# to make this notebook's output identical at every run
np.random.seed(42)
import numpy as np
# For illustration only. Sklearn has train_test_split()
def split_train_test(data, test_ratio):
    """Randomly partition *data* into (train, test) DataFrames.

    The test set receives int(len(data) * test_ratio) rows.  Row selection
    uses numpy's global RNG, so seed it beforehand for reproducibility.
    """
    n_test = int(len(data) * test_ratio)
    order = np.random.permutation(len(data))
    # First n_test shuffled positions form the test set, the rest train.
    test_idx, train_idx = order[:n_test], order[n_test:]
    return data.iloc[train_idx], data.iloc[test_idx]
# Making sure that the split worked
train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), "train +", len(test_set), "test")
from zlib import crc32
def test_set_check(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
def split_train_test_by_id(data, test_ratio, id_column):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio))
return data.loc[~in_test_set], data.loc[in_test_set]
# BUG FIX: the variable was misspelled ("housing_with_idhousing_"), so the
# next line raised NameError on `housing_with_id`.
housing_with_id = housing.reset_index()   # adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# A synthetic id built from the coordinates is stabler than the row index,
# which shifts if rows are ever deleted or reordered.
housing_with_id["id"] = housing["longitude"] * 1000 + housing["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
# And since people are really smart, we don't have to write that anymore
# We use Scikit-Learn's train_test_split to solve our problems for us
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
test_set.head()
# Values are fairly randomly sampled, but let's clean the data a bit
housing["median_income"].hist()
# Divide by 1.5 to limit the number of income categories
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
# Label those above 5 as 5
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
housing["income_cat"].value_counts()
# distribution isn't too bad, let's make sure the categories are discrete
housing["income_cat"].hist()
# Perfect, but now I'm gunna use the Scikit-Learn's stratified shuffle split to get
# a good distribution of values representative of the entire dataset
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
# n_splits=1, so this loop body runs exactly once.
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]
strat_test_set["income_cat"].value_counts() / len(strat_test_set)
housing["income_cat"].value_counts() / len(housing)
# Now we're comparing the numerous methods
def income_cat_proportions(data):
    """Return the fraction of rows in each income_cat value of *data*."""
    counts = data["income_cat"].value_counts()
    return counts / len(data)
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
# Compare the income-category mix of each sampling strategy to the full set.
compare_props = pd.DataFrame({
    "Overall": income_cat_proportions(housing),
    "Stratified": income_cat_proportions(strat_test_set),
    "Random": income_cat_proportions(test_set),
}).sort_index()
compare_props["Rand. %error"] = 100 * compare_props["Random"] / compare_props["Overall"] - 100
compare_props["Strat. %error"] = 100 * compare_props["Stratified"] / compare_props["Overall"] - 100
compare_props
# income_cat was only needed for stratification; drop it again.
for set_ in (strat_train_set, strat_test_set):
    set_.drop("income_cat", axis=1, inplace=True)
# Now onto the Visualization of the data
housing = strat_train_set.copy()
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.1)
housing.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4,
    s=housing["population"]/100, label="population", figsize=(10,7),
    c="median_house_value", cmap=plt.get_cmap("jet"), colorbar=True,
    sharex=False)
plt.legend()
# From here we can tell that a lot of valuable houses are clustered together.
# This will help us later identify the proper model.
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# from pandas.tools.plotting import scatter_matrix # For older versions of Pandas
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms",
    "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# Median Income and Median House Value seem to have a decent positive correlation
housing.plot(kind="scatter", x="median_income", y="median_house_value",
    alpha=0.1)
plt.axis([0, 16, 0, 550000])
# Fiddling around with combinations to see if any of the combined attributes have greater correlation
housing["rooms_per_household"] = housing["total_rooms"]/housing["households"]
housing["bedrooms_per_room"] = housing["total_bedrooms"]/housing["total_rooms"]
housing["population_per_household"]=housing["population"]/housing["households"]
corr_matrix = housing.corr()
corr_matrix["median_house_value"].sort_values(ascending=False)
# Let's take a closer look
housing.plot(kind="scatter", x="rooms_per_household", y="median_house_value",
    alpha=0.2)
plt.axis([0, 5, 0, 520000])
plt.show()
# It's not a bad correlation! Let's check out the data
housing.describe()
# Alright, with our categories figured out, now its time to start prepping the data
housing = strat_train_set.drop("median_house_value", axis=1) # drop labels for training set
housing_labels = strat_train_set["median_house_value"].copy()
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
sample_incomplete_rows
# We need to change all the values to numerical for data processing
sample_incomplete_rows.dropna(subset=["total_bedrooms"])
median = housing["total_bedrooms"].median()
sample_incomplete_rows["total_bedrooms"].fillna(median, inplace=True) # option 3
sample_incomplete_rows
# Ah yes, the Goo Lagoon.
# Good for you for getting this deep.
# Now we're gunna input numerical values for ocean proximity
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; modern code should use sklearn.impute.SimpleImputer.
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median")
housing_num = housing.drop('ocean_proximity', axis=1)
# alternatively: housing_num = housing.select_dtypes(include=[np.number])
imputer.fit(housing_num)
imputer.statistics_
housing_num.median().values
# And beautiful, they match! the imputer and median values are identical.
# Now we transform the dataset
X = imputer.transform(housing_num)
housing_tr = pd.DataFrame(X, columns=housing_num.columns,
    index = list(housing.index.values))
housing_tr.loc[sample_incomplete_rows.index.values]
housing_tr = pd.DataFrame(X, columns=housing_num.columns)
housing_tr.head()
# Now we're dealing with ocean proximity being a non-number
housing_cat = housing[['ocean_proximity']]
housing_cat.head(10)
# We are going to use A OneHotEncoder for our non-number friend here
from sklearn.preprocessing import OneHotEncoder
housing_cat = housing["ocean_proximity"]
housing_cat_encoded, housing_categories = housing_cat.factorize()
encoder = OneHotEncoder()
housing_cat_1hot = encoder.fit_transform(housing_cat_encoded.reshape(-1,1))
housing_cat_1hot
# Sparse by default; materialize to a dense array for inspection.
housing_cat_1hot.toarray()
# Let's create a custom transformer to add extra attributes:
from sklearn.base import BaseEstimator, TransformerMixin
# column index
# Positions of the source columns in the numpy array given to transform()
# -- presumably the column order of housing_num; TODO confirm.
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Append engineered ratio features to the housing feature matrix.

    Always adds rooms_per_household and population_per_household; also
    appends bedrooms_per_room when add_bedrooms_per_room is True.
    """
    def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
        self.add_bedrooms_per_room = add_bedrooms_per_room
    def fit(self, X, y=None):
        # Stateless transformer -- nothing to learn.
        return self
    def transform(self, X, y=None):
        rooms = X[:, rooms_ix]
        households = X[:, household_ix]
        rooms_per_household = rooms / households
        population_per_household = X[:, population_ix] / households
        if not self.add_bedrooms_per_room:
            return np.c_[X, rooms_per_household, population_per_household]
        bedrooms_per_room = X[:, bedrooms_ix] / rooms
        return np.c_[X, rooms_per_household, population_per_household,
                     bedrooms_per_room]
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
housing_extra_attribs = pd.DataFrame(
    housing_extra_attribs,
    columns=list(housing.columns)+["rooms_per_household", "population_per_household"])
housing_extra_attribs.head()
#Now we can build a pipeline for preprocessing the numerical attributes
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Impute -> engineer ratio features -> standardize, as one reusable object.
num_pipeline = Pipeline([
    ('imputer', Imputer(strategy="median")),
    ('attribs_adder', CombinedAttributesAdder()),
    ('std_scaler', StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
housing_num_tr
from sklearn.base import BaseEstimator, TransformerMixin
# Create a class to select numerical or categorical columns
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Select a fixed set of DataFrame columns and expose them as a numpy array.

    Lets a pandas DataFrame feed scikit-learn pipeline stages that expect
    plain arrays.
    """
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        # Nothing to learn; the selection is fixed at construction time.
        return self
    def transform(self, X):
        selected = X[self.attribute_names]
        return selected.values
# This will prepare the two pipelines
from sklearn.preprocessing import OneHotEncoder
# NOTE(review): future_encoders is a third-party shim from the book's repo,
# providing OrdinalEncoder before it landed in scikit-learn proper.
from future_encoders import OrdinalEncoder
num_attribs = list(housing_num)
cat_attribs = ["ocean_proximity"]
num_pipeline = Pipeline([
    ('selector', DataFrameSelector(num_attribs)),
    ('imputer', Imputer(strategy="median")),
    ('attribs_adder', CombinedAttributesAdder()),
    ('std_scaler', StandardScaler())
])
cat_pipeline = Pipeline([
    ('selector', DataFrameSelector(cat_attribs)),
    ('cat_encoder', OrdinalEncoder())
])
#Now we are unioning them
from sklearn.pipeline import FeatureUnion
full_pipeline = FeatureUnion(transformer_list=[
    ("num_pipeline", num_pipeline),
    ("cat_pipeline", cat_pipeline)
])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
# And after much trial and error, I have got the encoder working properly, and the dataset is ready!
# Time to begin selecting the model
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
# Let's try the full preprocessing pipeline on a few training instances
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print("Predictions:", lin_reg.predict(some_data_prepared))
#Compared with the actual prices
print("Labels:", list(some_labels))
# Let's check out our RMSE
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# Not too bad on the first try, but we don't want to be off by 70,000$ everytime.
# Mean Absolute Error
from sklearn.metrics import mean_absolute_error
lin_mae = mean_absolute_error(housing_labels, housing_predictions)
lin_mae
# The linear regression wasn't too bad, but lets try DecisionTreeRegressor so it
# fits the data better.
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
# NOTE(review): this RMSE is computed on the training data, so a near-zero
# value indicates overfitting rather than accuracy.
tree_rmse
# Now to begin fine-tuning the model
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
    scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
# These are the scores for the tree regression
def display_scores(scores):
    """Print a cross-validation score array with its mean and std deviation."""
    for label, value in (("Scores:", scores),
                         ("Mean:", scores.mean()),
                         ("Standard deviation:", scores.std())):
        print(label, value)
display_scores(tree_rmse_scores)
# These are the scores for linear regression
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
    scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# Let's try a Random Forest Regressor algorithm
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor(random_state=42)
forest_reg.fit(housing_prepared, housing_labels)
housing_predictions = forest_reg.predict(housing_prepared)
forest_mse = mean_squared_error(housing_labels, housing_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
from sklearn.model_selection import cross_val_score
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels,
    scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
# From 70,000 -> 53,000, a HUGE improvement. This seems to fit the data well, now we need to be
# specific about our data to fit the problem best
# Let's look back at the old scores.
scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
pd.Series(np.sqrt(-scores)).describe()
#Let's try out a few more algorithms before narrowing it down
from sklearn.svm import SVR
svm_reg = SVR(kernel="linear")
svm_reg.fit(housing_prepared, housing_labels)
housing_predictions = svm_reg.predict(housing_prepared)
svm_mse = mean_squared_error(housing_labels, housing_predictions)
svm_rmse = np.sqrt(svm_mse)
svm_rmse
# Now with Forest Regressor and Grid Search find the optimal number of features
from sklearn.model_selection import GridSearchCV
param_grid = [
    # try 12 (3×4) combinations of hyperparameters
    {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
    # then try 6 (2×3) combinations with bootstrap set as False
    {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]},
]
forest_reg = RandomForestRegressor(random_state=42)
# train across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
    scoring='neg_mean_squared_error', return_train_score=True)
grid_search.fit(housing_prepared, housing_labels)
grid_search.best_params_
# The recommended features is 4
grid_search.best_estimator_
# And we have the best model as well
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
# These are the lists of scores we received for feature optimization
pd.DataFrame(grid_search.cv_results_)
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import randint
param_distribs = {
    'n_estimators': randint(low=1, high=200),
    'max_features': randint(low=1, high=8),
}
forest_reg = RandomForestRegressor(random_state=42)
rnd_search = RandomizedSearchCV(forest_reg, param_distributions=param_distribs,
    n_iter=10, cv=5, scoring='neg_mean_squared_error', random_state=42)
rnd_search.fit(housing_prepared, housing_labels)
cvres = rnd_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
    print(np.sqrt(-mean_score), params)
feature_importances = grid_search.best_estimator_.feature_importances_
feature_importances
# Now that the hard part is out of the way, we can setup a system
final_model = grid_search.best_estimator_
X_test = strat_test_set.drop("median_house_value", axis=1)
y_test = strat_test_set["median_house_value"].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_rmse
# And there it is! after much research and data science, we have created a system
# that can predict house values within 50,000$! Let's throw a couple tests at it
# just to be sure
from scipy import stats
# Test with a 95% confidence
confidence = 0.95
squared_errors = (final_predictions - y_test) ** 2
mean = squared_errors.mean()
m = len(squared_errors)
# 95% CI for the RMSE via a t-interval on the squared errors.
np.sqrt(stats.t.interval(confidence, m - 1,
    loc=np.mean(squared_errors),
    scale=stats.sem(squared_errors)))
# Or manually like this
tscore = stats.t.ppf((1 + confidence) / 2, df=m - 1)
tmargin = tscore * squared_errors.std(ddof=1) / np.sqrt(m)
np.sqrt(mean - tmargin), np.sqrt(mean + tmargin)
# or even using the z scores
zscore = stats.norm.ppf((1 + confidence) / 2)
zmargin = zscore * squared_errors.std(ddof=1) / np.sqrt(m)
np.sqrt(mean - zmargin), np.sqrt(mean + zmargin)
# Thanks for getting to the end! This was a lot of fun to do, and hopefully there are more coming.
```
| github_jupyter |
---
_You are currently looking at **version 1.2** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-social-network-analysis/resources/yPcBs) course resource._
---
# Assignment 4
```
import networkx as nx
import pandas as pd
import numpy as np
import pickle
```
---
## Part 1 - Random Graph Identification
For the first part of this assignment you will analyze randomly generated graphs and determine which algorithm created them.
```
# Load the five pre-generated graphs.
# BUG FIX: pickle.load(open(...)) leaked the file handle; a context manager
# closes it deterministically.
with open('A4_graphs', 'rb') as f:
    P1_Graphs = pickle.load(f)
P1_Graphs
```
<br>
`P1_Graphs` is a list containing 5 networkx graphs. Each of these graphs were generated by one of three possible algorithms:
* Preferential Attachment (`'PA'`)
* Small World with low probability of rewiring (`'SW_L'`)
* Small World with high probability of rewiring (`'SW_H'`)
Analyze each of the 5 graphs and determine which of the three algorithms generated the graph.
*The `graph_identification` function should return a list of length 5 where each element in the list is either `'PA'`, `'SW_L'`, or `'SW_H'`.*
```
# from matplotlib import pyplot as plt
# import seaborn as sns
# %matplotlib notebook
# P1_stats = []
# for G in P1_Graphs:
# degrees = G.degree()
# degree_v = sorted(set(degrees.values()))
# #print(degree_v)
# histogram = [list(degrees[i]/float(nx.number_of_nodes(G)) for i in degree_v)]
# ac = nx.average_clustering(G)
# al = nx.average_shortest_path_length(G)
# P1_stats.append((ac,al,degree_v, histogram))
# for s in P1_stats:
# print(s[:2])
# this_G = P1_stats[4]
# plt.plot(this_G[2],this_G[3][0],'o')
# plt.xlabel('Degree')
# plt.ylabel('Fraction of Nodes')
# plt.xscale('log')
# plt.yscale('log')
def graph_identification():
    """Classify each graph in P1_Graphs by its generating algorithm.

    The labels were determined offline from degree distributions, average
    clustering, and average shortest path length (see the commented
    analysis in the cell above).
    """
    labels = ['PA', 'SW_L', 'SW_L', 'PA', 'SW_H']
    return labels
graph_identification()
```
---
## Part 2 - Company Emails
For the second part of this assignment you will be working with a company's email network where each node corresponds to a person at the company, and each edge indicates that at least one email has been sent between two people.
The network also contains the node attributes `Department` and `ManagementSalary`.
`Department` indicates the department in the company which the person belongs to, and `ManagementSalary` indicates whether that person is receiving a management position salary.
```
# Load the company email graph; nodes carry the Department and
# ManagementSalary attributes described above.
G = nx.read_gpickle('email_prediction.txt')
print(nx.info(G))
```
### Part 2A - Salary Prediction
Using network `G`, identify the people in the network with missing values for the node attribute `ManagementSalary` and predict whether or not these individuals are receiving a management position salary.
To accomplish this, you will need to create a matrix of node features using networkx, train a sklearn classifier on nodes that have `ManagementSalary` data, and predict a probability of the node receiving a management salary for nodes where `ManagementSalary` is missing.
Your predictions will need to be given as the probability that the corresponding employee is receiving a management position salary.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.88 or higher will receive full points, and one with an AUC of 0.82 or higher will pass (get 80% of the full points).
Using your trained classifier, return a series of length 252 with the data being the probability of receiving management salary, and the index being the node id.
Example:
1 1.0
2 0.0
5 0.8
8 1.0
...
996 0.7
1000 0.5
1001 0.0
Length: 252, dtype: float64
```
# Assemble one row of graph-derived features per employee/node.
df = pd.DataFrame(index=G.nodes())
df['Department'] = pd.Series(nx.get_node_attributes(G, 'Department'))
df['ManagementSalary'] = pd.Series(nx.get_node_attributes(G, 'ManagementSalary'))
df['clustering'] = pd.Series(nx.clustering(G))
df['degree'] = pd.Series(G.degree())
df['degree_cent'] = pd.Series(nx.degree_centrality(G))
df['between_cent'] = pd.Series(nx.betweenness_centrality(G))
h, a = nx.hits(G)
df['hub_score'] = pd.Series(h)
df['auth_score'] = pd.Series(a)
df.head()
# Nodes with a known ManagementSalary form the training set; the NaN rows
# are the ones we must predict.
df_train = df[~np.isnan(df['ManagementSalary'])]
df_test = df[np.isnan(df['ManagementSalary'])]
X_train = df_train.drop(labels='ManagementSalary', axis=1)
X_test = df_test.drop(labels='ManagementSalary', axis=1)
y_train = df_train['ManagementSalary']
y_test = df_test['ManagementSalary']
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
scaler = StandardScaler()
scaler.fit(X_train)
X_train_new = scaler.transform(X_train)
X_test_new = scaler.transform(X_test)
# Grid-search penalty type and regularization strength, scored by ROC AUC.
param_dict = {'penalty':('l1','l2'),
    'C':np.power(10.0,(np.arange(-2,2,1)))}
lr = LogisticRegression(max_iter=500)
clf = GridSearchCV(lr, param_dict, scoring='roc_auc')
clf.fit(X_train_new, y_train)
y_pred = clf.predict_proba(X_test_new)[:,1]
clf.best_score_
def salary_predictions():
    """Return P(management salary) for each unlabeled node.

    Wraps the probabilities produced by the grid-searched logistic
    regression in the cells above; the index is the node id of each
    unlabeled row.
    """
    return pd.Series(data=y_pred, index=y_test.index)
salary_predictions()
```
### Part 2B - New Connections Prediction
For the last part of this assignment, you will predict future connections between employees of the network. The future connections information has been loaded into the variable `future_connections`. The index is a tuple indicating a pair of nodes that currently do not have a connection, and the `Future Connection` column indicates if an edge between those two nodes will exist in the future, where a value of 1.0 indicates a future connection.
```
# Index column holds "(u, v)" strings parsed back into tuples via eval.
# NOTE(review): eval on CSV content is unsafe for untrusted files; fine for
# this fixed course dataset.
future_connections = pd.read_csv('Future_Connections.csv', index_col=0, converters={0: eval})
future_connections.head(10)
```
Using network `G` and `future_connections`, identify the edges in `future_connections` with missing values and predict whether or not these edges will have a future connection.
To accomplish this, you will need to create a matrix of features for the edges found in `future_connections` using networkx, train a sklearn classifier on those edges in `future_connections` that have `Future Connection` data, and predict a probability of the edge being a future connection for those edges in `future_connections` where `Future Connection` is missing.
Your predictions will need to be given as the probability of the corresponding edge being a future connection.
The evaluation metric for this assignment is the Area Under the ROC Curve (AUC).
Your grade will be based on the AUC score computed for your classifier. A model with an AUC of 0.88 or higher will receive full points, and one with an AUC of 0.82 or higher will pass (get 80% of the full points).
Using your trained classifier, return a series of length 122112 with the data being the probability of the edge being a future connection, and the index being the edge as represented by a tuple of nodes.
Example:
(107, 348) 0.35
(542, 751) 0.40
(20, 426) 0.55
(50, 989) 0.35
...
(939, 940) 0.15
(555, 905) 0.35
(75, 101) 0.65
Length: 122112, dtype: float64
```
# Each networkx link-prediction generator yields (u, v, score); keep the score.
future_connections['pref att'] = [i[2] for i in nx.preferential_attachment(G, future_connections.index)]
future_connections['jaccard'] = [i[2] for i in nx.jaccard_coefficient(G, future_connections.index)]
future_connections['RA'] = [i[2] for i in nx.resource_allocation_index(G, future_connections.index)]
future_connections['num_CN'] = future_connections.index.map(
    lambda friend: len(list(nx.common_neighbors(G, friend[0], friend[1]))))
def get_sound_ra(nodes):
    """Soundarajan-Hopcroft resource-allocation index for one node pair.

    *nodes* is a (u, v) tuple; community membership is taken from the
    'Department' attribute of the global graph G.
    """
    u, v = nodes
    generated = nx.ra_index_soundarajan_hopcroft(G, [(u, v)], community='Department')
    # The generator yields exactly one (u, v, score) triple for one pair.
    for _, _, score in generated:
        ra_score = score
    return ra_score
future_connections['sound_RA'] = future_connections.index.map(get_sound_ra)
def get_sound_cn(nodes):
    """Soundarajan-Hopcroft common-neighbor score for one node pair.

    *nodes* is a (u, v) tuple; community membership is taken from the
    'Department' attribute of the global graph G.
    """
    u, v = nodes
    generated = nx.cn_soundarajan_hopcroft(G, [(u, v)], community='Department')
    # The generator yields exactly one (u, v, score) triple for one pair.
    for _, _, score in generated:
        cn_score = score
    return cn_score
future_connections['sound_CN'] = future_connections.index.map(get_sound_cn)
future_connections.head(20)
# Same train/test split pattern as Part 2A, keyed on NaNs in the label column.
fc_train = future_connections[~np.isnan(future_connections['Future Connection'])]
fc_test = future_connections[np.isnan(future_connections['Future Connection'])]
X_train2 = fc_train.drop(labels='Future Connection', axis=1)
X_test2 = fc_test.drop(labels='Future Connection', axis=1)
y_train2 = fc_train['Future Connection']
y_test2 = fc_test['Future Connection']
scaler2 = StandardScaler()
scaler2.fit(X_train2)
X_train_new2 = scaler2.transform(X_train2)
X_test_new2 = scaler2.transform(X_test2)
# Tune only the regularization strength this time, scored by ROC AUC.
param_dict = {'C':np.power(10.0,(np.arange(-2,2,1)))}
lr2 = LogisticRegression()
clf2 = GridSearchCV(lr2, param_dict, scoring='roc_auc')
clf2.fit(X_train_new2, y_train2)
clf2.best_score_
y_pred2 = clf2.predict_proba(X_test_new2)[:,1]
def new_connections_predictions():
    """Return P(future connection) for each unlabeled node pair.

    Wraps the probabilities produced by the grid-searched logistic
    regression in the cells above; the index is the (u, v) node pair of
    each unlabeled row.
    """
    return pd.Series(data=y_pred2, index=y_test2.index)
new_connections_predictions()
```
| github_jupyter |
```
import numpy
from pandas import read_csv
from xgboost import XGBClassifier
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
# First XGBoost model for Pima Indians dataset
from numpy import loadtxt
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
# load data
df = pd.read_csv('data_with_weather_1.csv')
df.dtypes
# Mark the discrete columns as pandas categoricals.
df['hour_min'] = df['hour_min'].astype('category')
df['Conditions'] = df['Conditions'].astype('category')
df['Day_of_week'] = df['Day_of_week'].astype('category')
df['isholiday'] = df['isholiday'].astype('category')
df_cat = df[['hour_min','Conditions','Day_of_week','isholiday']]
# Target variable: number of pickups, flattened to a 1-D array.
df_yvar = df[['pickups']]
df_yvar = df_yvar.values
df_yvar.shape
df_yvar = numpy.ravel(df_yvar)
df_yvar.shape
df_numeric = df[['Temp.','Visibility','Precip','one_week_lag_pickups']]
encoded_x = None
df_cat = df_cat.values
df_cat.shape[1]
# Label-encode then one-hot encode each categorical column, concatenating
# the resulting indicator columns side by side.
for i in range(0,df_cat.shape[1]):
    label_encoder = LabelEncoder()
    feature = label_encoder.fit_transform(df_cat[:,i])
    feature = feature.reshape(df_cat.shape[0], 1)
    onehot_encoder = OneHotEncoder(sparse=False)
    feature = onehot_encoder.fit_transform(feature)
    if encoded_x is None:
        encoded_x = feature
    else:
        encoded_x = numpy.concatenate((encoded_x, feature), axis=1)
encoded_x
encoded_x.shape
# Append the numeric columns after the one-hot block.
df_numeric = df_numeric.values
encoded_x = numpy.concatenate((encoded_x, df_numeric), axis=1)
encoded_x.shape
df_yvar.shape
# Contiguous split: rows 0-7727 train, 7728-8063 test (presumably
# chronological order -- TODO confirm the CSV is time-sorted).
X_train = encoded_x[0:7728,:]
X_test = encoded_x[7728:8064,:]
y_train = df_yvar[0:7728]
y_test = df_yvar[7728:8064]
y_test.shape
X_train.shape
# Fit regression model
# BUG FIX: this cell referenced undefined names X and y (NameError) and used
# SVR before it was imported; import it here and train on the prepared split.
from sklearn.svm import SVR
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X_train, y_train).predict(X_train)
y_lin = svr_lin.fit(X_train, y_train).predict(X_train)
y_poly = svr_poly.fit(X_train, y_train).predict(X_train)
from sklearn.svm import SVR
import matplotlib.pyplot as plt
svr_rbf = SVR(kernel='poly', C=1e3,degree = 2)
y_rbf = svr_rbf.fit(X_train,y_train).predict(X_test)
y_rbf.shape
y_rbf = y_rbf.reshape(-1,1)
# NOTE(review): DataFrame(data, index) -- the second positional argument
# becomes the index, so the predictions end up as the index here; pass a
# dict of columns if two side-by-side columns were intended.
df_output = pd.DataFrame(numpy.ravel(y_test),numpy.ravel(y_rbf))
df_output.to_csv("svr_ts_1.csv")
# Let's try XGboost algorithm to see if we can get better results
xgb = XGBRegressor(n_estimators=100, learning_rate=0.08, gamma=0, subsample=0.75,
    colsample_bytree=1, max_depth=7)
xgb.fit(X_train,y_train)
from sklearn.metrics import explained_variance_score
predictions = xgb.predict(X_test)
# BUG FIX: explained_variance_score's signature is (y_true, y_pred); the
# arguments were swapped, which skews the reported score.
print(explained_variance_score(y_test, predictions))
# BUG FIX: these lines were pasted from XGBRegressor's internals and used
# `self`, `np` (only `numpy` is imported here), an undefined `model`, and a
# top-level `return` -- a SyntaxError. Wrap the logic in a reusable helper.
def get_feature_importances(model):
    """Return normalized per-feature importance weights of a fitted XGBoost model.

    Features absent from the booster's fscore dict get weight 0.
    """
    booster = model.get_booster()
    fscore = booster.get_fscore()
    weights = [fscore.get(name, 0.) for name in booster.feature_names]
    weights = numpy.array(weights, dtype=numpy.float32)
    return weights / weights.sum()
# Raw (unnormalized) weight counts for the fitted model; `.booster()` is the
# deprecated spelling of `.get_booster()`.
xgb.get_booster().get_score(importance_type='weight')
get_feature_importances(xgb)
def mape_vectorized_v2(a, b):
    """Mean absolute percentage error between actuals *a* and predictions *b*.

    Rows where the actual value is zero are excluded to avoid division by
    zero. The result is expressed in percent.
    """
    # BUG FIX: `<>` is Python 2 syntax and a SyntaxError in Python 3; use `!=`.
    mask = a != 0
    return (numpy.fabs(a - b)/a)[mask].mean() * 100
# Reshape to column vectors so the elementwise MAPE math lines up.
predictions = predictions.reshape(-1,1)
y_test = y_test.reshape(-1,1)
y_test.shape
print(mape_vectorized_v2(y_test,predictions))
y_test.shape
# NOTE(review): DataFrame(data, index) -- the second positional argument
# becomes the index, so predictions end up as the index here; pass a dict
# of columns if two side-by-side columns were intended.
df_output = pd.DataFrame(numpy.ravel(y_test),numpy.ravel(predictions))
df_output
df_output.to_csv("output_xgboost_ts_4.csv")
```
| github_jupyter |
```
#ResNet50_bn4b_branch2a
import keras
import os
#from params import get_params
from sklearn import preprocessing
import sklearn.preprocessing
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50
from keras.applications.resnet50 import preprocess_input
from PIL import Image, ImageOps
import pickle
descriptors_val = pickle.load(open("ResNet50_bn4b_branch2a_val.p", "rb"))
descriptors_train = pickle.load(open("ResNet50_bn4b_branch2a_train.p", "rb"))
imagen = pickle.load(open("save_img.p", "rb"))
imagen3 = pickle.load(open("save_img3.p", "rb"))
dic_val = pickle.load(open("save_dic_val.p", "rb"))
dic_train = pickle.load(open("save_dic_train.p", "rb"))
x_val_l = []
for o in range(len(descriptors_val)):
x_val_l.append(np.ndarray.flatten(descriptors_val[o]))
x_val = np.asarray(x_val_l)
x_train_l = []
for o1 in range(len(descriptors_train)):
x_train_l.append(np.ndarray.flatten(descriptors_train[o1]))
x_train = np.asarray(x_train_l)
x_val = sklearn.preprocessing.normalize(x_val, norm='l2', axis=1, copy=True, return_norm=False)
x_train = sklearn.preprocessing.normalize(x_train, norm='l2', axis=1, copy=True, return_norm=False)
descriptors_traint = x_train.transpose()
similarities=np.matmul(x_val,descriptors_traint)
ranks = np.argsort(similarities, axis=1)[:,::-1]
# get the original images for visualization
x_val_images = []
x_train_images = []
v = 0
for v in range(477):
x_val_images.append(np.array(imagen[v]))
b = 0
for b in range(1194):
x_train_images.append(np.array(imagen3[b]))
h,w = (224, 224)
new_image= Image.new('RGB', (h*6,w*13))
# Visualize ranks of the 10 queries
offset = 0 # it will show results from query #'offset' to #offset+10
relnotrel = []
for q in range(477):
ranks_q = ranks[q*(offset+1),:]
relevant = dic_val[q*(offset+1)]
rel_help = []
for i in range(1194):
if relevant == dic_train[ranks_q[i]] and relevant != "desconegut":
new_image.paste(ImageOps.expand(Image.fromarray(x_train_images[ranks_q[i]]), border=10, fill='green'), (h*(1+i),w*q))
rel_help.append(1)
else:
new_image.paste(ImageOps.expand(Image.fromarray(x_train_images[ranks_q[i]]), border=10, fill='red'), (h*(1+i),w*q))
rel_help.append(0)
relnotrel.append(np.asarray(rel_help))
# visualize query
ima_q = Image.fromarray(x_val_images[q*(offset+1)])
ima_q = ImageOps.expand(ima_q, border=10, fill='blue')
new_image.paste(ima_q, (0,w*q))
plt.imshow(new_image)
plt.axis('off')
plt.show()
new_image.save('ResNet50_bn4b_branch2a_img.png')
guarda = []
accu_array =[]
AP = []
numRel_tot = []
for t in range(len(relnotrel)):
numRel = 0
accu = 0
graphic = []
for k in range(len(relnotrel[t])):
# If the value is 1
if relnotrel[t][k] == 1:
# We add 1 to the number of correct instances
numRel = numRel + 1
# We calculate the precision at k (+1 because we start at 0)
# and we accumulate it
accu += float(numRel)/ float(k+1)
graphic.append(float( numRel )/ float(k+1))
if numRel != 0:
numRel_tot.append(numRel)
accu_array.append(accu)
guarda.append(np.asarray(graphic))
AP = []
for h in range(len(numRel_tot)):
AP.append(float(accu_array[h])/float(numRel_tot[h]))
MAP = np.sum(AP)/len(numRel_tot)
MAP * 100
plt.plot(guarda[0])
plt.plot(guarda[10])
guarda2 = []
for t2 in range(len(relnotrel)):
graphic2 = []
len_final = 0
numRel2 = 0
for k2 in range(len(relnotrel[t2])):
# If the value is 1
if relnotrel[t2][k2] == 1:
# We add 1 to the number of correct instances
numRel2 = numRel2 + 1
graphic2.append(float( numRel2 )/ float(numRel))
guarda2.append(np.asarray(graphic2))
plt.plot(guarda2[0])
plt.plot(guarda2[10])
p = 0
guarda[p][0]= 1.0
guarda2[p][0]= 0.0
plt.plot(guarda2[p],guarda[p])
plt.title('Precision-Recall')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.show()
```
| github_jupyter |
```
# Important libraries for complete program execution
import json
import pandas as pd
import numpy as np
```
## Data Wrangling
The dataset we have is in an unstructured format (a .txt file). Therefore, we first need to structure it and convert it into a format that is easier to work with.
```
with open("Datasets/Digital Nomad Travel Logs.txt") as file:
data = file.readlines()
# Showing a sample of current data
data[0][:1000]
```
## String Dictionary to Dictionary
As we can see the data currently is a list with only 1 element which is a string dictionary. Our task for now is to convert that string dictionary into a dictionary. For this we will use the json package of Python
```
data_dict = json.loads(data[0])
# Showing dictionary sample in key value pair format
list(data_dict.items())[:2]
# Deleting 'README' key from the dictionary
del data_dict['README']
# Showing dictionary sample in key value pair format
list(data_dict.items())[:2]
```
## Structuring the data
Now, we have a dictionary in which key of every element is the place from where the digital nomads travelled and the value contains a dictionary which shows the cities they travelled to and the number of times they travelled.
Now we have to convert this data into 3 python lists with each having values, the city from where the digital nomads travelled, the city where they travelled to, and the number of times they travelled respectively
```
from_cities = []
to_cities = []
number_of_trips = []
```
When we iterate over the dictionary, we can see that its format is something like this:
The key of the key value pair represents the city from which the travel was made. The value on the other hand is again a dictionary where key represents the city to which the travel was made and value represents the number of times the travel was made.
```
# iterating over the dictionary to separate all its key value pairs and form 3 lists with required information
for key,value in data_dict.items():
if type(value) == list:
continue
for k,v in value.items():
from_cities.append(key)
to_cities.append(k)
number_of_trips.append(v)
df = pd.DataFrame(list(zip(from_cities, from_cities, to_cities, to_cities, number_of_trips)),
columns = ['From City', 'From Country', 'To City', 'To Country', 'Number of Trips'])
df.head()
```
## Separating Country and City in 'From City' and 'From Country' Column
```
cities = []
countries = []
# checklist of countries which require cutting 3 words from the end
checklist_3words = ['united-arab-emirates', 'papua-new-guinea']
# checklist of countries which require cutting 2 words from the end
checklist_2words = ['new-zealand', 'south-korea', 'cote-divoire', 'cook-islands', 'south-africa', 'saudi-arabia',
'sierra-leone', 'hong-kong', 'solomon-islands', 'dr-congo', 'united-kingdom', 'new-caledonia']
for city in df['From City']:
country = ''
# country and city both are of one word only
if city.count('-') == 1:
country = city.split('-')[1]
city = city.split('-')[0]
# country is United States
elif 'united-states' in city:
country = city.split('-')[-2:]
country = ' '.join(country)
city = city.split('-')[:-3]
city = ' '.join(city)
# country is of 3 words or requires cutting 3 words from the end
elif any(c in city for c in checklist_3words):
country = city.split('-')[-3:]
country = ' '.join(country)
city = city.split('-')[:-3]
city = ' '.join(city)
# country is of 2 words or requires cutting 2 words from the end
elif any(c in city for c in checklist_2words):
country = city.split('-')[-2:]
country = ' '.join(country)
city = city.split('-')[:-2]
city = ' '.join(city)
# country is of 1 word but city is more than 1 word long
else:
country = city.split('-')[-1]
city = city.split('-')[:-1]
city = ' '.join(city)
cities.append(city)
countries.append(country)
df['From City'] = cities
df['From Country'] = countries
df.head()
```
## Separating Country and City in 'To City' and 'To Country' Column
```
cities = []
countries = []
# checklist of countries which require cutting 3 words from the end
checklist_3words = ['united-arab-emirates', 'papua-new-guinea']
# checklist of countries which require cutting 2 words from the end
checklist_2words = ['new-zealand', 'south-korea', 'cote-divoire', 'cook-islands', 'south-africa', 'saudi-arabia',
'sierra-leone', 'hong-kong', 'solomon-islands', 'dr-congo', 'united-kingdom', 'new-caledonia']
for city in df['To City']:
country = ''
# country and city both are of one word only
if city.count('-') == 1:
country = city.split('-')[1]
city = city.split('-')[0]
# country is United States
elif 'united-states' in city:
country = city.split('-')[-2:]
country = ' '.join(country)
city = city.split('-')[:-3]
city = ' '.join(city)
# country is of 3 words or requires cutting 3 words from the end
elif any(c in city for c in checklist_3words):
country = city.split('-')[-3:]
country = ' '.join(country)
city = city.split('-')[:-3]
city = ' '.join(city)
# country is of 2 words or requires cutting 2 words from the end
elif any(c in city for c in checklist_2words):
country = city.split('-')[-2:]
country = ' '.join(country)
city = city.split('-')[:-2]
city = ' '.join(city)
# country is of 1 word but city is more than 1 word long
else:
country = city.split('-')[-1]
city = city.split('-')[:-1]
city = ' '.join(city)
cities.append(city)
countries.append(country)
df['To City'] = cities
df['To Country'] = countries
df.head()
```
## Capitalizing city and country names
```
df['From City'] = df['From City'].str.title()
df['From Country'] = df['From Country'].str.title()
df['To City'] = df['To City'].str.title()
df['To Country'] = df['To Country'].str.title()
df.head()
```
## Saving resultant dataframe as a CSV file
```
df.to_csv('Datasets/Digital Nomad Travel Logs.csv')
```
| github_jupyter |
```
%load_ext autoreload
import os, sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
BASE_PATH = os.path.join(os.getcwd(), "..", "..", "..")
MODULES_PATH = os.path.join(BASE_PATH, "modules")
DATASET_PATH = os.path.join(BASE_PATH, "datasets")
sys.path.append(MODULES_PATH)
from active_learning import Config, AcquisitionFunction, Pool
from wrapper import McDropout, MomentPropagation
from models import setup_growth, fchollet_cnn
from data import BenchmarkData, DataSetType
from mp.MomentPropagation import MP
mnist_path = os.path.join(DATASET_PATH, "mnist")
dataset = BenchmarkData(DataSetType.MNIST, mnist_path)
setup_growth()
base_model = fchollet_cnn(output=10)
seed = 90231
np.random.seed(seed)
tf.random.set_seed(seed)
```
### Split data
```
x_train, x_test, y_train, y_test = train_test_split(dataset.inputs, dataset.targets, test_size=10000)
len(x_train)
pool = Pool(x_train, y_train)
```
### Define Models
```
%autoreload 2
fit_params = {"epochs": 2, "batch_size": 10}
compile_params = {"optimizer": "adam", "loss": "sparse_categorical_crossentropy", "metrics": [keras.metrics.SparseCategoricalAccuracy()]}
# Define MC Dropout model
mc_model = McDropout(base_model, config=Config(
fit=fit_params,
query={"sample_size": 25},
evaluate={"sample_size": 25}
))
mc_model.compile(**compile_params)
# Define Moment Propagation model
mp_model = MomentPropagation(base_model, config=Config(
fit=fit_params
))
mp_model.compile(**compile_params)
mc_model.fit(x_train, y_train)
mp_model.fit(x_train, y_train)
mc_model.evaluate(x_test[:10], y_test[:10])
mp_model.evaluate(x_test[:10], y_test[:10])
```
### Try BALD acquisition
```
mc_bald = mc_model.get_query_fn("bald")
mc_bald(x_train[:100], sample_size=100)
mp_bald = mp_model.get_query_fn("bald")
mp_bald(x_train[:100], num_samples=100)
```
## Difference Monte Carlo approximation and analytical
```
# Define Moment Propagation model
fit_params = {"epochs": 10, "batch_size": 500}
mp_model = MomentPropagation(base_model, config=Config(
fit=fit_params
))
mp_model.compile(**compile_params)
mp_model.fit(x_train, y_train)
mp_model.evaluate(x_test, y_test)
exp, var = mp_model(x_train[:100])
exp_s, var_s = MP.Gaussian_Softmax(exp, var)
def evaluate(prob, real_targets):
    """
    Evaluate accuracy of predictions.

    Parameters:
        prob (numpy.ndarray): Probabilities given by estimator, either
            shape (num_points, num_classes) or, with a leading sample
            dimension, (num_samples, num_points, num_classes).
        real_targets (numpy.ndarray): The real targets.

    Returns:
        (float) the accuracy of the estimator
    """
    if len(prob.shape) == 2:
        # Plain predictions: argmax over classes, compare to targets.
        return np.mean(np.argmax(prob, axis=-1) == real_targets, axis=0)
    # Sampled predictions: average over the sample dimension first.
    # Bug fix: this branch previously averaged the *global* variable
    # `sampled_datapoints` instead of the `prob` argument.
    return np.mean(np.argmax(np.mean(prob, axis=0), axis=1) == real_targets)
from scipy.stats import norm
exp_shape = list(exp.shape)
sample_sizes = np.arange(1, 102, 10)
acc = []
std = []
mean = []
print(sample_sizes)
for size in sample_sizes:
final_shape = tuple([size] + exp_shape)
sampled_datapoints = norm(exp, np.sqrt(var)).rvs(size=final_shape)
softmax_output = tf.keras.activations.softmax(tf.convert_to_tensor(sampled_datapoints))
sample_acc = evaluate(softmax_output, y_train[:100])
mean.append(np.mean(softmax_output, axis=0))
std.append(np.std(softmax_output, axis=0))
acc.append(sample_acc)
real_acc = evaluate(exp_s, y_train[:100])
fig = plt.figure()
plt.plot(sample_sizes, acc, label="Analytical acc.")
plt.plot(sample_sizes, [real_acc]*len(sample_sizes), label="Monte Carlo acc.")
plt.legend()
plt.grid()
plt.xlabel("Number of Samples")
plt.ylabel("Accuracy")
plt.title("Estimator Accuracy")
plt.show()
mean_scalar = list(map(lambda x: np.diag(np.dot(exp_s, x.T)), mean))
plt.plot(sample_sizes, mean_scalar, label="Analytical acc.")
#plt.plot(sample_sizes, [real_acc]*len(sample_sizes), label="Monte Carlo acc.")
plt.grid()
plt.xlabel("Number of Samples")
plt.ylabel("Accuracy")
plt.title("Estimator Accuracy")
plt.show()
approx_entropy = np.sum(exp_s*np.log(exp_s+.001), axis=1)
approx_entropy
final_shape = tuple([100] + exp_shape)
sampled_datapoints = norm(exp, var).rvs(size=final_shape)
softmax_output = tf.keras.activations.softmax(tf.convert_to_tensor(sampled_datapoints))
sample_acc = evaluate(softmax_output, y_train[:100])
sampled_datapoints.shape
sample_probs = tf.keras.activations.softmax(tf.convert_to_tensor(sampled_datapoints))
# mean_probs = np.mean(sample_probs, axis=0)
# sample_entropy = np.sum(mean_probs*np.log(mean_probs+.001), axis=-1)
sample_entropy = np.sum(sample_probs*np.log(sample_probs+.001), axis=-1)
sample_entropy = np.sum(sample_entropy, axis=0)/len(sample_entropy)
sample_entropy.shape
plt.plot(sample_entropy, label="Sample entropy")
plt.plot(approx_entropy, label="Approx. entropy")
plt.legend()
plt.plot()
```
## Confidence interval
```
# https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data
# https://blog.finxter.com/how-to-plot-the-confidence-interval-in-python/
import scipy.stats as st
sample_probs = tf.keras.activations.softmax(tf.convert_to_tensor(sampled_datapoints))
res = st.t.interval(0.95, len(sample_probs)-1, loc=np.mean(sample_probs, axis=0), scale=st.sem(sample_probs))
```
| github_jupyter |
```
import os
import glob
import sys
import numpy as np
import pickle
import tensorflow as tf
import PIL
import ipywidgets
import io
""" make sure this notebook is running from root directory """
while os.path.basename(os.getcwd()) in ('notebooks', 'src'):
os.chdir('..')
assert ('README.md' in os.listdir('./')), 'Can not find project root, please cd to project root before running the following code'
import src.tl_gan.generate_image as generate_image
import src.tl_gan.feature_axis as feature_axis
import src.tl_gan.feature_celeba_organize as feature_celeba_organize
""" load feature directions """
path_feature_direction = './asset_results/pg_gan_celeba_feature_direction_40'
pathfile_feature_direction = glob.glob(os.path.join(path_feature_direction, 'feature_direction_*.pkl'))[-1]
with open(pathfile_feature_direction, 'rb') as f:
feature_direction_name = pickle.load(f)
feature_direction = feature_direction_name['direction']
feature_name = feature_direction_name['name']
num_feature = feature_direction.shape[1]
import importlib
importlib.reload(feature_celeba_organize)
feature_name = feature_celeba_organize.feature_name_celeba_rename
feature_direction = feature_direction_name['direction']* feature_celeba_organize.feature_reverse[None, :]
""" start tf session and load GAN model """
# path to model code and weight
path_pg_gan_code = './src/model/pggan'
path_model = './asset_model/karras2018iclr-celebahq-1024x1024.pkl'
sys.path.append(path_pg_gan_code)
""" create tf session """
yn_CPU_only = False
if yn_CPU_only:
config = tf.ConfigProto(device_count = {'GPU': 0}, allow_soft_placement=True)
else:
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=config)
try:
with open(path_model, 'rb') as file:
G, D, Gs = pickle.load(file)
except FileNotFoundError:
print('before running the code, download pre-trained model to project_root/asset_model/')
raise
len_z = Gs.input_shapes[0][1]
z_sample = np.random.randn(len_z)
x_sample = generate_image.gen_single_img(z_sample, Gs=Gs)
def img_to_bytes(x_sample):
    """Encode an image array as PNG and return the raw bytes.

    Used to feed generated images into the ipywidgets Image widget,
    which expects encoded bytes rather than a numpy array.
    """
    buffer = io.BytesIO()
    PIL.Image.fromarray(x_sample).save(buffer, format='PNG')
    return buffer.getvalue()
z_sample = np.random.randn(len_z)
x_sample = generate_image.gen_single_img(Gs=Gs)
w_img = ipywidgets.widgets.Image(value=img_to_bytes(x_sample), format='png', width=512, height=512)
class GuiCallback(object):
    """GUI state holder: the current GAN latent vector plus per-feature locks.

    Button callbacks mutate ``self.latents`` and re-render the global image
    widget ``w_img``. Depends on module-level globals ``z_sample``,
    ``feature_direction``, ``num_feature``, ``len_z``, ``Gs`` and ``w_img``.
    """
    counter = 0

    def __init__(self):
        self.latents = z_sample
        self.feature_direction = feature_direction
        self.feature_lock_status = np.zeros(num_feature).astype('bool')
        # NOTE: the attribute name carries a historical typo ("directoion");
        # kept because the methods below reference it.
        self.feature_directoion_disentangled = self._disentangle()

    def _disentangle(self):
        """Recompute feature axes, using the locked features as the base set."""
        locked_idx = np.flatnonzero(self.feature_lock_status)
        return feature_axis.disentangle_feature_axis_by_idx(
            self.feature_direction, idx_base=locked_idx)

    def random_gen(self, event):
        """Draw a fresh random latent vector and refresh the displayed image."""
        self.latents = np.random.randn(len_z)
        self.update_img()

    def modify_along_feature(self, event, idx_feature, step_size=0.01):
        """Nudge the latent vector along one disentangled feature axis."""
        delta = self.feature_directoion_disentangled[:, idx_feature]
        self.latents += delta * step_size
        self.update_img()

    def set_feature_lock(self, event, idx_feature, set_to=None):
        """Toggle (or explicitly set) a feature's lock, then re-disentangle."""
        if set_to is None:
            current = self.feature_lock_status[idx_feature]
            self.feature_lock_status[idx_feature] = np.logical_not(current)
        else:
            self.feature_lock_status[idx_feature] = set_to
        self.feature_directoion_disentangled = self._disentangle()

    def update_img(self):
        """Render the current latents through the GAN and push PNG bytes to ``w_img``."""
        x_sample = generate_image.gen_single_img(z=self.latents, Gs=Gs)
        w_img.value = img_to_bytes(x_sample)
guicallback = GuiCallback()
step_size = 0.4
def create_button(idx_feature, width=96, height=40):
    """Build the widget group for one feature: a lock toggle over -/+ buttons.

    Wires the widgets to the global ``guicallback`` instance; ``step_size``
    and ``feature_name`` are read from module scope.
    """
    half_h = '{:.0f}px'.format(height / 2)
    full_w = '{:.0f}px'.format(width)
    half_w = '{:.0f}px'.format(width / 2)

    # Toggle button: pressing it down locks this feature.
    w_name_toggle = ipywidgets.widgets.ToggleButton(
        value=False,
        description=feature_name[idx_feature],
        tooltip='{}, Press down to lock this feature'.format(feature_name[idx_feature]),
        layout=ipywidgets.Layout(height=half_h, width=full_w,
                                 margin='2px 2px 2px 2px'))

    def _small_layout():
        # Each button gets its own Layout object (ipywidgets layouts are
        # per-widget models, so they must not be shared).
        return ipywidgets.Layout(height=half_h, width=half_w,
                                 margin='1px 1px 5px 1px')

    w_neg = ipywidgets.widgets.Button(description='-', layout=_small_layout())
    w_pos = ipywidgets.widgets.Button(description='+', layout=_small_layout())

    w_name_toggle.observe(
        lambda event: guicallback.set_feature_lock(event, idx_feature))
    w_neg.on_click(
        lambda event: guicallback.modify_along_feature(
            event, idx_feature, step_size=-1 * step_size))
    w_pos.on_click(
        lambda event: guicallback.modify_along_feature(
            event, idx_feature, step_size=+1 * step_size))

    return ipywidgets.VBox([w_name_toggle, ipywidgets.HBox([w_neg, w_pos])],
                           layout=ipywidgets.Layout(border='1px solid gray'))
list_buttons = []
for idx_feature in range(num_feature):
list_buttons.append(create_button(idx_feature))
yn_button_select = True
def arrange_buttons(list_buttons, yn_button_select=True, ncol=4):
    """Lay the per-feature button groups out in rows.

    With ``yn_button_select`` True, rows follow the hand-curated index
    layout from ``feature_celeba_organize``; otherwise the buttons are
    chunked into rows of ``ncol``.
    """
    if yn_button_select:
        rows = [
            ipywidgets.HBox([list_buttons[item] for item in row])
            for row in feature_celeba_organize.feature_celeba_layout
        ]
    else:
        num = len(list_buttons)
        n_rows = num // ncol + int(num % ncol > 0)
        rows = [
            ipywidgets.HBox(list_buttons[i * ncol:(i + 1) * ncol])
            for i in range(n_rows)
        ]
    return ipywidgets.VBox(rows)
# w_button.on_click(on_button_clicked)
guicallback.update_img()
w_button_random = ipywidgets.widgets.Button(description='random face', button_style='success',
layout=ipywidgets.Layout(height='40px',
width='128px',
margin='1px 1px 5px 1px'))
w_button_random.on_click(guicallback.random_gen)
w_box = ipywidgets.HBox([w_img,
ipywidgets.VBox([w_button_random,
arrange_buttons(list_buttons, yn_button_select=True)])
], layout=ipywidgets.Layout(height='1024}px', width='1024px')
)
print('press +/- to adjust feature, toggle feature name to lock the feature')
display(w_box)
```
| github_jupyter |
# Comparison of initialization methods for Gaussian mixtures
## Goal
Solution quality for the standard coordinate ascent algorithms like EM depend heavily on initialization quality. Here, we'll see how bnpy can be used to run an experiment comparing two initialization methods, one smarter than the other.
```
import bnpy
import os
%pylab inline
from bnpy.viz.PlotUtil import ExportInfo
bnpy.viz.PlotUtil.ConfigPylabDefaults(pylab)
```
## Toy dataset : `AsteriskK8`
We'll use a simple dataset of 2D points, drawn from 8 well-separated Gaussian clusters.
```
import AsteriskK8
Data = AsteriskK8.get_data()
```
We can visualize this dataset as follows:
```
pylab.plot(Data.X[:,0], Data.X[:,1], 'k.');
pylab.axis('image');
pylab.xlim([-1.75, 1.75]); pylab.xticks([-1, 0, 1]);
pylab.ylim([-1.75, 1.75]); pylab.yticks([-1, 0, 1]);
```
## Initialization Methods
Our intended task is to train a Gaussian mixture model using expectation maximization (EM) with a maximum likelihood criterion.
We'll consider two methods here to initialize the global parameters (means and covariances) of the Gaussian mixture model.
For more background on possible initializations, see the [Initialization documentation TODO](../Code/Initialization.md).
### Naive initialization: "select examples uniformly at random"
To initialize K clusters, we select K items uniformly at random from all N data items, and initialize the model as if each item was the only member of its corresponding component.
This procedure is called `randexamples` in **bnpy**. Note: this is the default initialization.
### Smart initialization: "select examples at random, biased by Euclidean distance"
One problem with the naive method is that it doesn't account for distances between selected points. When using Gaussian observation models, it can be beneficial for initialized clusters to be spread out so a diverse set of points are likely to be represented.
Concretely, we could modify the above procedure to choose K items in a distance-biased way, instead of uniformly at random. We pick the first item at random from the data, and then for each successive component select an item n with probability proportional to its distance from the nearest chosen item among the $k$ previously chosen items.
This procedure is called `randexamplesbydist` in **bnpy**.
## Running the experiment with **bnpy**
We'll do 25 separate runs for each of the two initialization methods. Each run gets at most 50 laps through the data, and uses 10 clusters.
The **initname** argument specifies which initialization method to use, while the **jobname** is a human-readable name for the experiment.
### 25 runs from naive initialization: `randexamples`
```
# ExpectedRunTime=130sec
bnpy.run('AsteriskK8', 'FiniteMixtureModel', 'Gauss', 'EM',
K=8, initname='randexamples', jobname='compareinit-K=8-randexamples',
nLap=100, minLaps=50, nTask=25, printEvery=100);
```
### 25 runs from smart initialization: `randexamplesbydist`
```
# ExpectedRunTime=130sec
bnpy.run('AsteriskK8', 'FiniteMixtureModel', 'Gauss', 'EM',
K=8, initname='randexamplesbydist', jobname='compareinit-K=8-randexamplesbydist',
nLap=100, minLaps=50, nTask=25, printEvery=100);
```
## Performance comparison: training objective as more data is seen
Using **bnpy**'s built-in visualization tools, we can easily make a plot comparing the two methods' performance at recovering the ideal set of 8 clusters.
This plot shows that across many runs, the `randexamplesbydist` procedure often reaches better objective function values than the simpler, more naive baseline. Of course, poor luck in the random initialization can still cause both methods to reach very poor objective values, which correspond to clusterings that group several real clusters together. However, this happens much less frequently with a good initialization.
```
bnpy.viz.PlotELBO.plotJobsThatMatchKeywords('AsteriskK8/compareinit-K=8-*')
pylab.ylim([-1, 0.2]);
pylab.xlim([1, 50]);
pylab.legend(loc='lower right');
pylab.xlabel('num pass thru data');
pylab.ylabel('train objective');
```
## Discovered clusters: naive initialization
Here we show the discovered clusters for each of the 25 runs. The plot shows the runs in ranked order, from highest to lowest final objective function value.
Clearly, the best runs with this method do find all 8 true clusters. In fact, 6 of the 25 runs do. But, this means that **19 of the 25 runs did not find the ideal clustering**.
```
figH, axH = pylab.subplots(nrows=5, ncols=5, figsize=(15,15))
for plotID, rank in enumerate(range(1,26)):
pylab.subplot(5,5, plotID+1)
taskidstr = '.rank%d' % (rank)
bnpy.viz.PlotComps.plotCompsForJob('AsteriskK8/compareinit-K=8-randexamples/', taskids=[taskidstr], figH=figH);
ELBOpath = os.path.expandvars('$BNPYOUTDIR/AsteriskK8/compareinit-K=8-randexamples/%s/evidence.txt' % (taskidstr))
finalELBOval = np.loadtxt(ELBOpath)[-1]
pylab.axis('image'); pylab.xlim([-1.75, 1.75]); pylab.xticks([-1, 0, 1]); pylab.ylim([-1.75, 1.75]); pylab.yticks([-1, 0, 1]);
pylab.title('Rank %d/25 : %.2f' % (rank, finalELBOval))
pylab.tight_layout()
# Ignore this block. Only needed for auto-generation of documentation.
if ExportInfo['doExport']:
W_in, H_in = pylab.gcf().get_size_inches()
figpath100 = '../docs/source/_static/GaussianToyData_FiniteMixtureModel_EM_CompareInitialization_%dx%d.png' % (100, 100)
pylab.savefig(figpath100, bbox_inches=0, pad_inches=0, dpi=ExportInfo['dpi']/W_in);
```
## Discovered clusters: smart initialization
Here, we show the same plots for the smarter, initialize-by-distance runs.
Many more of the runs have discovered the ideal set of 8 clusters. However, still only 14 of the 25 runs find all 8 clusters. Clearly, smarter initialization helps, but we still need to take the best of many runs to get ideal performance.
```
figH, axH = pylab.subplots(nrows=5, ncols=5, figsize=(15,15))
for plotID, rank in enumerate(range(1,26)):
pylab.subplot(5,5, plotID+1)
taskidstr = '.rank%d' % (rank)
bnpy.viz.PlotComps.plotCompsForJob('AsteriskK8/compareinit-K=8-randexamplesbydist/', taskids=[taskidstr], figH=figH);
ELBOpath = os.path.expandvars('$BNPYOUTDIR/AsteriskK8/compareinit-K=8-randexamplesbydist/%s/evidence.txt' % (taskidstr))
finalELBOval = np.loadtxt(ELBOpath)[-1]
pylab.axis('image'); pylab.xlim([-1.75, 1.75]); pylab.xticks([-1, 0, 1]); pylab.ylim([-1.75, 1.75]); pylab.yticks([-1, 0, 1]);
pylab.title('Rank %d/25 : %.2f' % (rank, finalELBOval))
pylab.tight_layout()
```
| github_jupyter |
# Similarity Functions
This notebook describes about the similarity functions that can be used to measure the similarity between two sets.
Firstly we import the shingling functions and other helpful functions.
```
from src.shingle import *
from math import ceil, floor
import numpy as np
```
We will then count how frequently each shingle occurs in the document. For this, I have calculated the frequencies over the document called `data/portuguese/two_ends.txt`. Here we are using a Portuguese corpus.
Then we create a dictionary called `frequencies`, which maps each word to its frequency.
```
# Initialize counts
frequencies = {}
text = open("data/portuguese/two_ends.txt", "r+")
for line in text:
word = line.strip().split(' ')
frequencies[word[0]] = float(word[1])
```
## TF - IDF
TF-IDF (Term-frequency and Inverse Document Frequency) measures similarity using this:
<img src="utils/tfidf.png" alt="tfidf" width="400px"/>
Firstly, we define `tf` using this, which is just the frequency counts in the intersection.
```
def tf(intersection, query):
    '''Term frequency: how often each intersecting shingle occurs in the query.'''
    counts = []
    for shingle in intersection:
        counts.append(query.count(shingle))
    return np.array(counts)
```
Afterwards, we compute `idf`, which is inverse document frequency. Here we will make use of the dictionary that we created earlier in order to compute document frequencies.
```
def idf(intersection, document, N):
    '''Smoothed inverse document frequency for each intersecting shingle.

    Document frequencies come from the module-level `frequencies` dict;
    N is the total number of documents in the collection.
    '''
    doc_freqs = np.array([frequencies[word] for word in intersection])
    return np.log10((N + 1) / (doc_freqs + 0.5))
```
Finally we simulate the function `tf_idf` which takes the dot product of `tf` and `idf` arrays.
```
def tf_idf(query, document, N):
    '''TF-IDF similarity score between a query and a document (shingle lists).'''
    shared = [word for word in document if word in query]
    return np.dot(tf(shared, query), idf(shared, document, N))
```
We can then run the similarity function in the following manner:
```
query = two_ends("pizza", 2)
document = two_ends("pizza", 2)
tf_idf(query, document, 50000) # number of documents are around 50000
```
## BM25
The formula of BM25 is given like this:
<img src="utils/bm25.png" alt="tfidf" width="800px"/>
Here we define the `bm25_tf` in the following manner:
```
def bm25_tf(intersection, query, document, k1, b, avgdl, N):
    '''BM25-saturated term frequency for each intersecting shingle.

    k1 controls saturation, b controls length normalization, and avgdl
    is the average document length in the collection.
    '''
    raw_tf = tf(intersection, document)
    length_norm = k1 * (1.0 - b + b * (len(query) / avgdl))
    return (raw_tf * (k1 + 1.0)) / (raw_tf + length_norm)
```
Finally we will take the dot product of `bm25_tf` and `idf` to get this:
```
def bm25(query, document, k1 = 1.2, b = 0.75, avgdl = 8.3, N = 50000):
    '''Okapi BM25 similarity score between a query and a document.'''
    shared = [word for word in document if word in query]
    saturated_tf = bm25_tf(shared, query, document, k1, b, avgdl, N)
    weights = idf(shared, document, N)
    return np.dot(saturated_tf, weights)
```
We can run the function in the following manner:
```
query = two_ends("pizza", 2)
document = two_ends("pizza", 2)
bm25(query, document)
```
## Dirichlet
The formula of Dirichlet is given like this:
<img src="utils/dir.png" alt="tfidf" width="800px"/>
Firstly, we will compute the sum dependent function here in the form of `smooth`.
```
# Total number of shingles in the collection (normalizes collection frequencies).
shingles = 470751
def smooth(intersection, document, mu):
    '''Dirichlet-smoothed log term for each intersecting shingle.

    mu is the Dirichlet prior weight; collection probabilities come from
    the module-level `frequencies` dict.
    '''
    values = []
    for word in intersection:
        collection_mass = mu * frequencies[word] / shingles
        values.append(np.log10(1.0 + document.count(word) / collection_mass))
    return np.array(values)
```
We will add the sum independent function to `smooth` and take the dot product to `tf`.
```
def dirichlet(query, document, mu = 100.0):
    '''Dirichlet-smoothed language-model similarity score.'''
    shared = [word for word in document if word in query]
    # Length-dependent constant term of the Dirichlet scoring formula.
    length_term = len(query) * np.log10(mu / (mu + len(document)))
    return np.dot(tf(shared, query), smooth(shared, document, mu)) + length_term
```
We can run this function in the following manner:
```
query = two_ends("pizzzza", 2)
document = two_ends("pizzza", 2)
print(dirichlet(query, document))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
from tqdm import tqdm
import pickle
import re
import nltk
nltk.download('stopwords');
```
# 1. Read in Raw Data
```
train = pd.read_csv('../data/raw/train.csv')
test = pd.read_csv('../data/raw/valid.csv')
train.shape[0], test.shape[0]
train.head(2)
# train.loc[0].CreationDate
# all_data = pd.concat([train, test]).reset_index(drop = True)
# all_data.shape
# train.query('Y == "HQ"').shape[0], test.query('Y == "HQ"').shape[0]
# train.query('Y == "LQ_CLOSE"').shape[0], test.query('Y == "LQ_CLOSE"').shape[0]
# train.query('Y == "LQ_EDIT"').shape[0], test.query('Y == "LQ_CLOSE"').shape[0]
```
# 2. Data Cleaning
## 2.1 Check NaN
```
train.isna().sum()
test.isna().sum()
```
## 2.2 Clean Label
```
label_dict = {
'LQ_CLOSE': 0,
'LQ_EDIT': 1,
'HQ': 2
}
train['cleaned_y'] = train.Y.apply(lambda x: label_dict[x])
test['cleaned_y'] = test.Y.apply(lambda x: label_dict[x])
```
## 2.3 Clean Title
```
# title
train.Title = train.Title.apply(lambda x: x.lower())
test.Title = test.Title.apply(lambda x: x.lower())
from nltk import word_tokenize
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
def remove_stopwords(string):
    """Lowercase *string* and drop English stopwords.

    Bug fix: the previous implementation called ``word_list.remove(word)``
    while iterating over ``word_list``, which silently skips the element
    following every removed stopword (so e.g. back-to-back stopwords
    survived). Filtering into a new list avoids the mutation-during-
    iteration bug; using a set makes membership tests O(1) instead of
    scanning the stopword list for every word.
    """
    stopword_set = set(stopwords.words("english"))
    kept = [word.lower() for word in string.split()
            if word.lower() not in stopword_set]
    return ' '.join(kept)
def clean_text(text):
    '''
    Normalise a title/body string: strip markup noise, expand common
    contractions, mask digits as "digit", and drop English stopwords.

    Acknowledgement: this function is inspired by the following post:
    https://www.kaggle.com/anmolkumar/stack-overflow-eda-bert-model-accuracy-87-6
    '''
    text = re.sub('\\n', ' ', text)
    text = re.sub('\W', ' ', text)  # every non-word char becomes a space
    # NOTE(review): these two URL patterns use \s (whitespace) where \S was
    # probably intended, so they rarely match a real URL — confirm intent
    # before changing; left as-is to preserve current behavior.
    text = re.sub(r'https\s+|www.\s+', r'', text)
    text = re.sub(r'http\s+|www.\s+',r'', text)
    text = re.sub(r'\s+[a-zA-Z]\s+',' ', text)  # drop lone single letters
    text = re.sub(r'\^[a-zA-Z]\s+',' ', text)
    text = text.lower()
    text = re.sub(r"\’", "\'", text)  # curly -> straight apostrophe
    text = re.sub(r"won\'t", "will not", text)
    text = re.sub(r"can\'t", "can not", text)
    text = re.sub(r"don\'t", "do not", text)
    text = re.sub(r"dont", "do not", text)
    text = re.sub(r"n\’t", " not", text)
    text = re.sub(r"n\'t", " not", text)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'s", " is", text)
    text = re.sub(r"\’d", " would", text)
    # Bug fix: this line used r"\d" and therefore replaced every digit with
    # " would", defeating the digit masking below. The pattern in this
    # contraction series is clearly the straight-quote form of "'d".
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"\'t", " not", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"\'m", " am", text)
    text = re.sub(r"\n", "", text)
    text = re.sub(r"\r", "", text)
    text = re.sub(r"[0-9]", "digit", text)  # mask numbers as a single token
    text = re.sub(r"\'", "", text)
    text = re.sub(r"\"", "", text)
    text = re.sub(r'[?|!|\'|"|#]',r'', text)
    text = re.sub(r'[.|,|)|(|\|/]',r' ', text)
    text = remove_stopwords(text)
    return text
train.Title = train.Title.apply(lambda x: clean_text(x))
test.Title = test.Title.apply(lambda x: clean_text(x))
```
## 2.4 Clean Body
```
import re
train['body_notag'] = train.Body.apply(lambda s: re.sub('<[^>]+>', '', s))
test['body_notag'] = test.Body.apply(lambda s: re.sub('<[^>]+>', '', s))
train.body_notag = train.body_notag.apply(lambda x: clean_text(x))
test.body_notag = test.body_notag.apply(lambda x: clean_text(x))
```
## 2.5 Combine Body and Title
```
train['all_text'] = train.Title + ' ' + train.body_notag
test['all_text'] = test.Title + ' ' + test.body_notag
```
# 3. Feature Engineering
## 3.1. Length of the Title & Question
```
train['title_length'] = train.Title.apply(lambda x: len(x.split()))
test['title_length'] = test.Title.apply(lambda x: len(x.split()))
train['body_length'] = train.body_notag.apply(lambda x: len(x.split()))
test['body_length'] = test.body_notag.apply(lambda x: len(x.split()))
```
# 4. Train-Val-Test Split
```
from sklearn.model_selection import train_test_split
# 45000 * 0.22222222
train_df, val_df, _, _ = train_test_split(train, train['Y'], test_size=0.22222222, random_state=42)
train_df.shape, val_df.shape
pickle.dump(train_df, open('../data/processed/train_df.pkl', 'wb'))
pickle.dump(val_df, open('../data/processed/val_df.pkl', 'wb'))
pickle.dump(test, open('../data/processed/test_df.pkl', 'wb'))
```
| github_jupyter |
This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
**If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
This notebook was generated for TensorFlow 2.6.
[](https://colab.research.google.com/github/achimr/deep-learning-with-python-notebooks/blob/master/chapter11_part04_sequence-to-sequence-learning.ipynb)
## Beyond text classification: Sequence-to-sequence learning
### A machine translation example
```
!wget http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip
!unzip -q spa-eng.zip
text_file = "spa-eng/spa.txt"
with open(text_file) as f:
lines = f.read().split("\n")[:-1]
text_pairs = []
for line in lines:
english, spanish = line.split("\t")
spanish = "[start] " + spanish + " [end]"
text_pairs.append((english, spanish))
import random
print(random.choice(text_pairs))
import random
random.shuffle(text_pairs)
num_val_samples = int(0.15 * len(text_pairs))
num_train_samples = len(text_pairs) - 2 * num_val_samples
train_pairs = text_pairs[:num_train_samples]
val_pairs = text_pairs[num_train_samples:num_train_samples + num_val_samples]
test_pairs = text_pairs[num_train_samples + num_val_samples:]
```
**Vectorizing the English and Spanish text pairs**
```
import tensorflow as tf
from tensorflow.keras import layers
import string
import re
strip_chars = string.punctuation + "¿"
strip_chars = strip_chars.replace("[", "")
strip_chars = strip_chars.replace("]", "")
def custom_standardization(input_string):
    """Lowercase the text and strip punctuation (plus the Spanish '¿'),
    while keeping the [start]/[end] bracket tokens intact."""
    pattern = f"[{re.escape(strip_chars)}]"
    lowered = tf.strings.lower(input_string)
    return tf.strings.regex_replace(lowered, pattern, "")
vocab_size = 15000
sequence_length = 20
source_vectorization = layers.TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=sequence_length,
)
target_vectorization = layers.TextVectorization(
max_tokens=vocab_size,
output_mode="int",
output_sequence_length=sequence_length + 1,
standardize=custom_standardization,
)
train_english_texts = [pair[0] for pair in train_pairs]
train_spanish_texts = [pair[1] for pair in train_pairs]
source_vectorization.adapt(train_english_texts)
target_vectorization.adapt(train_spanish_texts)
```
`adapt` computes a vocabulary of string terms from tokens in a dataset.
**Preparing datasets for the translation task**
```
batch_size = 64
def format_dataset(eng, spa):
    """Vectorize one batch of sentence pairs into (inputs dict, targets).

    The decoder is fed the Spanish sequence shifted right by one step
    ([:, :-1]) and is trained to predict the next token ([:, 1:]).
    """
    eng_ids = source_vectorization(eng)
    spa_ids = target_vectorization(spa)
    inputs = {"english": eng_ids, "spanish": spa_ids[:, :-1]}
    targets = spa_ids[:, 1:]
    return (inputs, targets)
def make_dataset(pairs):
    # Build a batched, vectorized tf.data pipeline from a list of
    # (english, spanish) sentence-pair tuples.
    eng_texts, spa_texts = zip(*pairs)
    eng_texts = list(eng_texts)
    spa_texts = list(spa_texts)
    dataset = tf.data.Dataset.from_tensor_slices((eng_texts, spa_texts))
    dataset = dataset.batch(batch_size)
    dataset = dataset.map(format_dataset, num_parallel_calls=4)
    # NOTE(review): cache() is applied *after* shuffle(), so the first
    # epoch's shuffled order is cached and replayed in later epochs —
    # caching before shuffling is the usual recommendation; confirm this
    # ordering is intentional.
    return dataset.shuffle(2048).prefetch(16).cache()
train_ds = make_dataset(train_pairs)
val_ds = make_dataset(val_pairs)
```
[from_tensor_slices](https://www.tensorflow.org/api_docs/python/tf/data/Dataset#from_tensor_slices) documentation
```
for inputs, targets in train_ds.take(1):
print(f"inputs['english'].shape: {inputs['english'].shape}")
print(f"inputs['spanish'].shape: {inputs['spanish'].shape}")
print(f"targets.shape: {targets.shape}")
```
### Sequence-to-sequence learning with RNNs
**GRU-based encoder**
```
from tensorflow import keras
from tensorflow.keras import layers
embed_dim = 256
latent_dim = 1024
source = keras.Input(shape=(None,), dtype="int64", name="english")
x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(source)
encoded_source = layers.Bidirectional(
layers.GRU(latent_dim), merge_mode="sum")(x)
```
**GRU-based decoder and the end-to-end model**
```
past_target = keras.Input(shape=(None,), dtype="int64", name="spanish")
x = layers.Embedding(vocab_size, embed_dim, mask_zero=True)(past_target)
decoder_gru = layers.GRU(latent_dim, return_sequences=True)
x = decoder_gru(x, initial_state=encoded_source)
x = layers.Dropout(0.5)(x)
target_next_step = layers.Dense(vocab_size, activation="softmax")(x)
seq2seq_rnn = keras.Model([source, past_target], target_next_step)
```
Note that the decoder is **not** bidirectional.
**Training our recurrent sequence-to-sequence model**
```
seq2seq_rnn.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
seq2seq_rnn.fit(train_ds, epochs=15, validation_data=val_ds)
```
**Translating new sentences with our RNN encoder and decoder**
```
import numpy as np
spa_vocab = target_vectorization.get_vocabulary()
spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
max_decoded_sentence_length = 20
def decode_sequence(input_sentence):
    # Greedy autoregressive decoding: start from "[start]" and repeatedly
    # feed the growing target sentence back through the RNN seq2seq model,
    # appending the arg-max token at position i each step, until "[end]"
    # or the length cap is reached.
    tokenized_input_sentence = source_vectorization([input_sentence])
    decoded_sentence = "[start]"
    for i in range(max_decoded_sentence_length):
        tokenized_target_sentence = target_vectorization([decoded_sentence])
        next_token_predictions = seq2seq_rnn.predict(
            [tokenized_input_sentence, tokenized_target_sentence])
        # Position i of the output holds the prediction for the next token.
        sampled_token_index = np.argmax(next_token_predictions[0, i, :])
        sampled_token = spa_index_lookup[sampled_token_index]
        decoded_sentence += " " + sampled_token
        if sampled_token == "[end]":
            break
    return decoded_sentence
test_eng_texts = [pair[0] for pair in test_pairs]
for _ in range(20):
input_sentence = random.choice(test_eng_texts)
print("-")
print(input_sentence)
print(decode_sequence(input_sentence))
```
### Sequence-to-sequence learning with Transformer
**Transformer encoder implemented as a subclassed `Layer`**
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class TransformerEncoder(layers.Layer):
    """Transformer encoder block.

    Multi-head self-attention followed by a position-wise dense
    projection, each wrapped in a residual connection plus
    LayerNormalization (post-norm arrangement).
    """
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim    # size of the input token vectors
        self.dense_dim = dense_dim    # hidden width of the inner Dense layer
        self.num_heads = num_heads    # number of attention heads
        self.attention = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation="relu"),
             layers.Dense(embed_dim),]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
    def call(self, inputs, mask=None):
        if mask is not None:
            # The Keras padding mask is (batch, seq); MultiHeadAttention
            # expects a broadcastable (batch, 1, seq) shape.
            mask = mask[:, tf.newaxis, :]
        attention_output = self.attention(
            inputs, inputs, attention_mask=mask)
        proj_input = self.layernorm_1(inputs + attention_output)
        proj_output = self.dense_proj(proj_input)
        return self.layernorm_2(proj_input + proj_output)
    def get_config(self):
        # Expose constructor args so the layer can be serialized/reloaded.
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "dense_dim": self.dense_dim,
        })
        return config
```
#### The Transformer decoder
**The `TransformerDecoder`**
```
class TransformerDecoder(layers.Layer):
    """Transformer decoder block.

    Causal self-attention over the target sequence, cross-attention over
    the encoder output, then a position-wise projection — each wrapped in
    a residual connection plus LayerNormalization.
    """
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim    # size of the input token vectors
        self.dense_dim = dense_dim    # hidden width of the inner Dense layer
        self.num_heads = num_heads    # number of attention heads
        self.attention_1 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.attention_2 = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim)
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation="relu"),
             layers.Dense(embed_dim),]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()
        self.layernorm_3 = layers.LayerNormalization()
        # Propagate the Keras mask produced by the embedding layer.
        self.supports_masking = True
    def get_config(self):
        # Expose constructor args so the layer can be serialized/reloaded.
        config = super().get_config()
        config.update({
            "embed_dim": self.embed_dim,
            "num_heads": self.num_heads,
            "dense_dim": self.dense_dim,
        })
        return config
    def get_causal_attention_mask(self, inputs):
        # Lower-triangular (1, seq, seq) mask tiled to the batch size:
        # position i may only attend to positions j <= i.
        input_shape = tf.shape(inputs)
        batch_size, sequence_length = input_shape[0], input_shape[1]
        i = tf.range(sequence_length)[:, tf.newaxis]
        j = tf.range(sequence_length)
        mask = tf.cast(i >= j, dtype="int32")
        mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
        mult = tf.concat(
            [tf.expand_dims(batch_size, -1),
             tf.constant([1, 1], dtype=tf.int32)], axis=0)
        return tf.tile(mask, mult)
    def call(self, inputs, encoder_outputs, mask=None):
        causal_mask = self.get_causal_attention_mask(inputs)
        # Bug fix: padding_mask used to be assigned only inside the
        # "mask is not None" branch but was referenced unconditionally in
        # the cross-attention call below, raising NameError whenever the
        # layer was called without a mask. None is a valid attention_mask
        # for MultiHeadAttention (meaning "no masking").
        padding_mask = None
        if mask is not None:
            padding_mask = tf.cast(
                mask[:, tf.newaxis, :], dtype="int32")
            # Combine the padding and causal constraints elementwise.
            padding_mask = tf.minimum(padding_mask, causal_mask)
        attention_output_1 = self.attention_1(
            query=inputs,
            value=inputs,
            key=inputs,
            attention_mask=causal_mask)
        attention_output_1 = self.layernorm_1(inputs + attention_output_1)
        attention_output_2 = self.attention_2(
            query=attention_output_1,
            value=encoder_outputs,
            key=encoder_outputs,
            attention_mask=padding_mask,
        )
        attention_output_2 = self.layernorm_2(
            attention_output_1 + attention_output_2)
        proj_output = self.dense_proj(attention_output_2)
        return self.layernorm_3(attention_output_2 + proj_output)
```
#### Putting it all together: A Transformer for machine translation
**PositionalEmbedding layer**
```
class PositionalEmbedding(layers.Layer):
    """Sum of a learned token embedding and a learned position embedding.

    Also produces a padding mask for downstream layers via compute_mask
    (token id 0 is treated as padding).
    """
    def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.token_embeddings = layers.Embedding(
            input_dim=input_dim, output_dim=output_dim)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim)
        self.sequence_length = sequence_length   # max positions embeddable
        self.input_dim = input_dim               # vocabulary size
        self.output_dim = output_dim             # embedding width
    def call(self, inputs):
        length = tf.shape(inputs)[-1]
        # Positions 0..length-1, shared across the batch.
        positions = tf.range(start=0, limit=length, delta=1)
        embedded_tokens = self.token_embeddings(inputs)
        embedded_positions = self.position_embeddings(positions)
        return embedded_tokens + embedded_positions
    def compute_mask(self, inputs, mask=None):
        # Token id 0 is reserved for padding by TextVectorization.
        return tf.math.not_equal(inputs, 0)
    def get_config(self):
        # Expose constructor args so the layer can be serialized/reloaded.
        config = super(PositionalEmbedding, self).get_config()
        config.update({
            "output_dim": self.output_dim,
            "sequence_length": self.sequence_length,
            "input_dim": self.input_dim,
        })
        return config
```
**End-to-end Transformer**
```
embed_dim = 256
dense_dim = 2048
num_heads = 8
encoder_inputs = keras.Input(shape=(None,), dtype="int64", name="english")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(encoder_inputs)
encoder_outputs = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
decoder_inputs = keras.Input(shape=(None,), dtype="int64", name="spanish")
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(decoder_inputs)
x = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs)
x = layers.Dropout(0.5)(x)
decoder_outputs = layers.Dense(vocab_size, activation="softmax")(x)
transformer = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
```
**Training the sequence-to-sequence Transformer**
```
transformer.compile(
optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
transformer.fit(train_ds, epochs=30, validation_data=val_ds)
```
**Translating new sentences with our Transformer model**
```
import numpy as np
spa_vocab = target_vectorization.get_vocabulary()
spa_index_lookup = dict(zip(range(len(spa_vocab)), spa_vocab))
max_decoded_sentence_length = 20
def decode_sequence(input_sentence):
    """Greedy-decode a Spanish translation for *input_sentence* with the
    Transformer: grow the target one arg-max token at a time until
    "[end]" or the length cap is hit."""
    encoded_source = source_vectorization([input_sentence])
    sentence = "[start]"
    for step in range(max_decoded_sentence_length):
        target_ids = target_vectorization([sentence])[:, :-1]
        predictions = transformer(
            [encoded_source, target_ids])
        next_id = np.argmax(predictions[0, step, :])
        next_word = spa_index_lookup[next_id]
        sentence = sentence + " " + next_word
        if next_word == "[end]":
            break
    return sentence
test_eng_texts = [pair[0] for pair in test_pairs]
for _ in range(20):
input_sentence = random.choice(test_eng_texts)
print("-")
print(input_sentence)
print(decode_sequence(input_sentence))
```
Back to slides ↩
## Summary
| github_jupyter |
<h2><center>Predicting the probability of citation </center></h2>
In this section, we predict the probability of receiving a citation for a particular violation. We use the random forest model to make the prediction.
```
# Import necessary modules
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from joblib import dump, load
# Read the accident data from csv
df_traffic = pd.read_csv('data_wrangled.csv',
index_col = 'Date Of Stop_Time Of Stop',
parse_dates = True,
infer_datetime_format = True)
df_traffic['V_sage'] = (df_traffic.V_Age - df_traffic.V_Age.min())/\
(df_traffic.V_Age.max() - df_traffic.V_Age.min())
des_col = df_traffic.Description
not_relevant = ['Latitude', 'Longitude', 'Year', 'Description',
'Make', 'Model', 'Color', 'datetime', 'V_Age']
df_traffic.drop(labels = not_relevant, axis = 1, inplace = True)
# Get X and y from data
y = df_traffic['Violation Type_Warning']
df_X1 = df_traffic.drop('Violation Type_Warning', axis = 1)
X = df_X1.values
# now, do a Logistic regression to the data.
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.5, random_state=77)
# The description column is full of text. We vectorize the test so that we
#can fit it into the model.
corpus = des_col
vectorizer = CountVectorizer(stop_words = 'english',
strip_accents = 'ascii',
min_df = 0.025,
max_features = 100,
token_pattern='[a-z]+',
max_df = 0.25,
binary = True)
X = vectorizer.fit_transform(corpus)
"""This translates text in each column as a vector array of
following basis (keywords)"""
print(vectorizer.get_feature_names())
```
To calculate citation probability, we first copy the entire row from a random violation and then replace the description column of our choice. Then we make a prediction some of the unique violations.
Now, let us take some violations of our choice and make some prediction.
```
plt.style.use('seaborn-dark')
random_driver = df_X1.iloc[777777, :]
some_violations = np.array(['DRIVING UNDER THE INFLUENCE OF ALCOHOL',
'SUSPENDED LICENSE AND PRIVILEGE',
'FAILURE TO CONTROL VEHICLE TO AVOID COLLISION',
'HOLDER OF LEARNERS PERMIT DRIVING W/O REQ. SUPERVISION',
'NEGLIGENT DRIVING ENDANGERING PROPERTY, LIFE AND PERSON',
'OPERATOR NOT RESTRAINED BY SEATBELT',
'KNOWINGLY DRIVING UNINSURED VEHICLE',
'DRIVER CHANGING LANES WHEN UNSAFE',
'FAILURE TO STOP AT STOP SIGN',
'FAILURE TO DISPLAY TWO LIGHTED FRONT LAMPS '
])
vec_array = vectorizer.transform(some_violations).toarray()
driver = len(some_violations)*[random_driver]
driver = np.vstack(driver)
driver[:,20:83] = vec_array
random_forest = load('RANDOM_FOREST.joblib')
prob = random_forest.predict_proba(driver)[:,0]
df_prob = pd.DataFrame(data = prob, index = some_violations, columns=['Prob'])
df_prob.sort_values(by = 'Prob', ascending = False, inplace = True)
ax = df_prob.plot(kind = 'barh', rot = 0, legend = False, figsize = (5,5))
plt.xlabel('Citation Probability', fontsize = 15, fontweight = 'bold')
for tick in ax.xaxis.get_major_ticks():
tick.label1.set_fontsize(15)
tick.label1.set_fontweight('bold')
for tick in ax.yaxis.get_major_ticks():
tick.label1.set_fontsize(15)
tick.label1.set_fontweight('bold')
plt.savefig('prob.png', dpi = 100, bbox_inches = 'tight')
```
| github_jupyter |
# Ford GoBike Bike Sharing Exploratory Analysis
### Import Data
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from scipy import stats
from requests import get
from os import path, getcwd, makedirs, listdir
from io import BytesIO
from zipfile import ZipFile
import warnings
warnings.filterwarnings('ignore')
folder_name_of_csvs = 'trip_data_files'
for month in range(1, 13):
month_string = str(month)
month_leading_zero = month_string.zfill(2) #zfill() 返回指定长度的字符串,原字符串右对齐,前面填充0
bike_data_url = 'https://s3.amazonaws.com/fordgobike-data/2018' + month_leading_zero + '-fordgobike-tripdata.csv.zip'
response = get(bike_data_url) #get(): return the target value, if none, return defalut value
# code below opens zip file; BytesIO returns a readable and writeable view of the contents;
unzipped_file = ZipFile(BytesIO(response.content))
# puts extracted zip file into folder trip_data_files
unzipped_file.extractall(folder_name_of_csvs)
list_csvs = []
for file_name in listdir(folder_name_of_csvs):
list_csvs.append(pd.read_csv(folder_name_of_csvs+'/'+file_name))
df_18 = pd.concat(list_csvs)
df_18.to_csv('data.csv')
df_18 = pd.read_csv('data.csv')
```
### Assess Data
```
df_18.head()
df_18.start_time.sort_values().head(1), df_18.start_time.sort_values().tail(1)
df_18.info()
df_18.nunique()
```
**Question:
According to the above information, there are 11,771 null values of start/end station ids and names, so I further check which stations they are to try to find the reason and to check whether they will make the analysis inaccurate.**
```
df_18.count()
df_18[df_18['start_station_id'].isnull() == True].head()
df_18[df_18['start_station_id'].isnull() == True].start_station_latitude.unique()
```
**Answer:
It appears that the missing value of station id and station names are all in San Jose. Despite no value of station id and name, there are still longtitude and latitude to distinguish the location. So I am not going to remove the null values.**
## Wrangle Data
#### 1. Convert member birthday into the age format
```
#Code
df_18['member_age'] = 2019 - df_18['member_birth_year'].fillna(0).astype(int)
#Test
df_18.head(1)
```
**Note: But there are null value of birth year in the data, so we need to ignore the value when making plot.**
```
#Code
#Step1: Process birth year with NaN value, which is 110718 in total
df_18[df_18.member_birth_year.isnull()].bike_id.count()
```
#### 2. Convert time data into the formats of time, day, and month
```
#Code
from datetime import datetime, timedelta
df_18['start_time'] = pd.to_datetime(df_18['start_time'])
df_18['time'] = df_18['start_time'].apply(lambda t: t.strftime('%H'))
df_18['day'] = df_18['start_time'].apply(lambda t: t.strftime('%A'))
df_18['month'] = df_18['start_time'].apply(lambda t: t.strftime('%B'))
#Test
df_18.head(2)
```
## Data Visualization
### Questions
1: What is the number of customer and subscriber?
2: What is the number of gender of all members?
3: What is the number of age of all members?
4: When are most popular bike time for 20-40-year-old members in terms of month of a year?
5: When are most trips taken by user types in terms of time of day, day of the week, and month of a year?
6: What is the duration distribution of overall / customer / subscriber?
7: How long does the average trip take in overall / by customer and subscriber?
8: What is the number of overtime by subscriber and customer?
9: How many people return their bikes at the same station they start their trip?
10: How many stations in total and which stations are the most popular?
#### Q1. What is the number of customer and subscriber?
```
df_18.user_type.value_counts()
df_18.user_type.value_counts().plot(kind = 'pie', radius = 1.2)
plt.title('The Number of Customer and Subscriber')
plt.savefig('user_type01.png');
```
**Finding:<br>
Among 1,863,721 members, there are 1,583,554 subscribers (85%) and 280,167 customers (15%).**
#### Q2: What is the number of gender of members?¶
```
s_gender = df_18.query('user_type == "Subscriber"').groupby('member_gender').bike_id.count()
c_gender = df_18.query('user_type == "Customer"').groupby('member_gender').bike_id.count()
labels = ['Female', 'Male', 'Other']
def absolute_value(val):
    # Map a pie-wedge percentage back to the subscriber headcount.
    return int(np.round(val / 100 * s_gender.sum(), 0))
def absolute_value_c(val):
    # Map a pie-wedge percentage back to the customer headcount.
    return int(np.round(val / 100 * c_gender.sum(), 0))
plt.figure(0)
plt.title('The Count by Gender of Subscriber')
plt.pie(s_gender, labels = labels, autopct = absolute_value, radius = 1, colors=['yellow', 'orange', 'pink'])
plt.figure(1)
plt.title('The Count by Gender of Customer')
plt.pie(c_gender, labels = labels, autopct = absolute_value_c, radius = 1, colors=['yellow', 'orange', 'pink'])
plt.savefig('gender_01.png');
```
**Finding:<br>
In terms of the age, the males users are more than the female users in two user types. For customers, they have female (64,047) with the higher in-group ratio (31.7%) than the female of subscribers (374141, 24.1%).**
#### Q3 What is the number of members' ages?
```
df_18.member_age.describe()
df_18.member_age.sort_values().tail()
plt.figure(figsize = [10,4])
plt.hist(df_18.member_age, label = 'gender count')
plt.legend(['age'])
plt.xlabel("Age")
plt.ylabel("Count")
plt.title('The Ages of All Members');
```
**Note: Several age values which are 2019 should be removal in the plot.**
```
age = df_18.query('member_age < 2019')
x = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150]
plt.figure(figsize = [10,4])
plt.hist(age.member_age, bins = 40)
plt.hist(age.query('user_type == "Subscriber"').member_age, bins = 40, alpha = .5)
plt.hist(age.query('user_type == "Customer"').member_age, bins = 40, alpha = .7)
plt.legend(['All', 'Subscriber', 'Customer'])
plt.xticks(x)
plt.xlabel('Age')
plt.ylabel('Count')
plt.title('The Age Distribution of Members')
plt.savefig('age01.png');
```
**Finding:<br>
According to the above age distribution, most of members are 20-40 years old. For the subscriber, most of people are 25-30 and 32-35 years old.**
#### Q4: When are most popular bike time for 20-40-year-old members in terms of month of a year?
```
# Restrict to the 20-40 age band called out in the question.
# Bug fix: the second condition was "member_age >= 40", which selected
# riders aged 40 and over instead of capping the band at 40.
df_age_new = df_18.query('member_age >= 20 & member_age <= 40')
plt.figure(figsize = [10, 4])
# NOTE(review): `order` is never used — the bars are sorted by count
# below, not by calendar order; confirm whether calendar ordering was
# intended.
order = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
         'September','October', 'November', 'December']
df_age_new.groupby('month').bike_id.count().sort_values(ascending = False).plot(kind = 'bar', color = 'g')
plt.legend(['count'])
plt.xlabel('Month')
plt.ylabel('Count')
plt.title('Biking Month of 20-40-year-old Members')
plt.savefig('Month_of_A_Year02.png');
```
**Finding:<br>
Delving into the 20-40-year-old group, they prefer to take bike trips during summer, from May to August.**
#### Q5: When are most popular bike time for two user types in terms of time of day, day of the week, and month of a year?
**Time of a Day**
```
def plot_user_time(df, x, y, z):
    """Bar-plot counts of column *z* grouped by *x*, split by hue *y*."""
    counts = (df.groupby([x, y])[z]
                .count()
                .reset_index()
                .sort_values(by = z))
    plt.figure(figsize = [10, 4])
    sns.barplot(data = counts, x = x, y = z, hue = y);
plot_user_time(df_18, 'time', 'user_type', 'bike_id')
plt.title('The Popular Time for Trips by Customer / Subscriber')
plt.savefig('Time_of_A_Day01.png')
```
**Finding:<br>
Looking to user behavior through the time of a day, for subscribers, there are two prefer time segments they ride bikes, which are 7:00 - 9:00 and 16:00 - 18:00. While for customers, they have more consistent using time, and the peak is around 17:00.**
**Day of a Month**
```
plot_user_time(df_18, 'day', 'user_type', 'bike_id')
plt.title('The Popular Day for Trips by Customer / Subscriber')
plt.savefig('Day_of_A_Month01.png')
```
**Finding:<br>
In terms of the day of a month, subscribers often ride bikes during weekday, from Monday to Friday, while customers prefer to rider bike on weekends.**
**Month of a Year**
```
plot_user_time(df_18, 'month', 'user_type', 'bike_id')
plt.title('The Popular Month for Trips by Customer / Subscriber')
plt.savefig('Month_of_A_Year01.png')
```
**Finding:<br>
In terms of the month of a year, subscribers tend to ride bikes from summer to fall, especially on October and customers also use bike on summer more frequently.**
#### Q6: What is the duration distribution of overall / customer / subscriber?
```
all_duration = df_18['duration_sec']
S_Duration = df_18.query('user_type == "Subscriber"').duration_sec
C_Duration = df_18.query('user_type == "Customer"').duration_sec
plt.figure(figsize = [10,4])
plt.hist(all_duration, bins = 50)
plt.hist(S_Duration, bins = 50, alpha = .7)
plt.hist(C_Duration, bins = 50, alpha = .7)
plt.title('The Trip Duration of Members')
plt.legend(['All', 'Subscriber', 'Customer'])
plt.xlabel('Duration')
plt.ylabel('Count')
plt.savefig('Durtion01.png');
```
**Finding:<br>
The majority of trip durations fall below 3000 seconds (50 min), probably because users who ride a bike for over 30 minutes are charged an additional fee.**
```
plt.figure(figsize = [10,4])
plt.hist(df_18[df_18['duration_sec'] < 3000].duration_sec, bins = 50)
plt.hist(S_Duration[S_Duration < 3000], bins = 50, alpha = .5)
plt.hist(C_Duration[C_Duration < 3000], bins = 50, alpha = .7)
plt.axvline(1800, color = 'g')
plt.title('The Major Trip Duration of Members')
plt.legend(['Overtime Line','All', 'Subscriber', 'Customer'])
plt.xlabel('Duration')
plt.ylabel('Count')
plt.savefig('Durtion02.png');
```
**Finding:<br>
So let's look deeper into those who ride bikes under 3000 sec and plot the 30 min line to see how many people ride overtime. According to the above plot, most of people use bike within 5-10 minutes. They might only use bikes for the short-distance trip.**
#### Q7: How long does the average trip take in overall / by customer and subscriber?
```
print('The average time of trip in overall: ' + repr(df_18.duration_sec.mean()))
print()
print('Plot those whose duration is less than 3000 sec:')
plt.figure(figsize = [10,4])
sns.boxplot(df_18[df_18.duration_sec <3000].duration_sec)
plt.title('The Average Time of Trip')
plt.ylabel('All Member')
plt.savefig('AvgTime01.png');
print('The average time of trip for customers: ' + repr(C_Duration.mean()))
print('The average time of trip for subscribers: ' + repr(S_Duration.mean()))
print()
print('Plot those whose duration is less than 3000 sec:')
plt.figure(figsize = [10,4])
sns.boxplot( x = df_18[df_18.duration_sec <3000].duration_sec, y = df_18.user_type)
plt.title('The Average Time of Trip by Customer / Subscriber')
plt.savefig('AvgTime02.png');
```
**Finding: <br>
The average time of trip for customers: 1923 sec (32 min). The average time of trip for subscribers: 669 sec (11 min). So apparently, as opposed to subscribers, more customers have overtime rides, partly due to the different charging mechanism for this group of users. Subscribers joined the membership program of GoBike, so they pay less if they ride overtime.**
#### Q8: What is the number of overtime by subscriber and customer?
```
print('Overall:')
print('The number of those who use bike within 30 minutes: ' + repr(all_duration[all_duration <= 1800].count()))
print('The number of those who use bike over 30 minutes: ' + repr(all_duration[all_duration > 1800].count()))
print()
print('Subscriber:')
print('The number of subscribers use bike within 30 minutes: ' + repr(S_Duration[S_Duration <= 1800].count()))
print('The number of subscribers use bike over 30 minutes: ' + repr(S_Duration[S_Duration > 1800].count()))
print()
print('Customer:')
print('The number of customers use bike within 30 minutes: ' + repr(C_Duration[C_Duration <= 1800].count()))
print('The number of customers use bike over 30 minutes: ' + repr(C_Duration[C_Duration > 1800].count()))
df1 = df_18[['duration_sec', 'user_type']]
df3 = df1[df1['duration_sec'] <= 1800].groupby('user_type').count()
df3.rename(columns = {'duration_sec': 'within 30 mins'}, inplace = True);
df4 = df1[df1['duration_sec'] > 1800].groupby('user_type').count()
df4.rename(columns = {'duration_sec': 'over 30 mins'}, inplace = True)
df5 = df4.join(df3)
df5['total'] = df5['within 30 mins'] + df5['over 30 mins']
df5.loc['Total'] = df5.sum()
df5
df5.plot(kind = 'bar', figsize = [10,4])
plt.title('The Number of Overtime Users')
plt.ylabel('count')
plt.savefig('Overtime01.png');
```
**Overtime Percentage by User Type**
```
df6 = df5.copy()
df6.drop('total', axis = 1, inplace = True)
df6.iloc[0,0] = df5.iloc[0,0] / df5.iloc[2,0]
df6.iloc[1,0] = df5.iloc[1,0] / df5.iloc[2,0]
df6.iloc[2,0] = df5.iloc[2,0] / df5.iloc[2,0]
df6.iloc[0,1] = df5.iloc[0,1] / df5.iloc[2,1]
df6.iloc[1,1] = df5.iloc[1,1] / df5.iloc[2,1]
df6.iloc[2,1] = df5.iloc[2,1] / df5.iloc[2,1]
df6
df6.plot(kind = 'bar', figsize = [10,4])
plt.title('The Percentage of Overtime Users')
plt.ylabel('percentage of overtime users')
plt.savefig('Overtime02.png');
```
**Finding:<br>
Nearly 56.7% customers ride bikes over 30 minutes. So consistent to the findings in the previous plots, it is more likely for customers to rider overtime due to their membership.**
#### Q9: How many people return their bikes at the same station they start their trip?
```
print('The number of users who return bike at the same station: ' + repr(df_18.query('start_station_name == end_station_name').start_station_id .count()))
print('The number of users who return bike at different station: ' + repr(df_18.query('start_station_name != end_station_name').start_station_id .count()))
print()
print('The number of customers who return bike at the same station: ' + repr(df_18.query('start_station_name == end_station_name & user_type == "Customer"').start_station_id .count()))
print('The number of customers who return bike at different station: ' + repr(df_18.query('start_station_name != end_station_name & user_type == "Customer"').start_station_id .count()))
print()
print('The number of subscribers who return bike at the same station: ' + repr(df_18.query('start_station_name == end_station_name & user_type == "Subscriber"').start_station_id .count()))
print('The number of subscribers who return bike at different station: ' + repr(df_18.query('start_station_name != end_station_name & user_type == "Subscriber"').start_station_id .count()))
df_12 = df_18.query('start_station_name == end_station_name')[['user_type','bike_id']]
df_13 = df_18.query('start_station_name != end_station_name')[['user_type','bike_id']]
#one-way: pickup and dropoff at the same place
df_12.groupby('user_type').count()
#pickup and dropoff at the different place
df_13.groupby('user_type').count()
df_12.groupby('user_type').count().plot(kind = 'bar', figsize = [10,4], legend = False, title = 'One-way Bike Rental')
plt.ylabel('count')
df_13.groupby('user_type').count().plot(kind = 'bar', figsize = [10,4], legend = False, title = 'Non-One-way Bike Rental')
plt.ylabel('count')
plt.savefig('OneWay01.png');
```
**Finding:<br>
Most people choose to return bikes at the same station where they rented them, especially subscribers. Yet looking only at one-way rentals, customers are more likely to return their bikes at a different station from where they rented them. This may be because customers tend to take long-distance trips, so returning at a different station is more convenient for them.**
#### Q10: How many stations in total? Which station is the most popular one?
```
print('The total station number: ' + repr(df_18['start_station_id'].nunique()))
plt.figure(figsize = [10,6])
plt.scatter(df_18['start_station_latitude'], df_18['start_station_longitude'])
plt.title('The Locations of Bike Stations')
plt.legend(['Bike Stations'])
plt.xlabel('station latitude')
plt.ylabel('station longitude')
plt.savefig('Station01.png');
```
**Filter out the docks in San Francisco Bay Area**
```
max_longitude_sf = -122.3597
min_longitude_sf = -122.5147
max_latitude_sf = 37.8121
min_latitude_sf = 37.7092
start_lat = (df_18['start_station_latitude']>=min_latitude_sf) & (df_18['start_station_latitude']<=max_latitude_sf)
end_lat = (df_18['end_station_latitude']>=min_latitude_sf) & (df_18['end_station_latitude']<=max_latitude_sf)
start_lon = (df_18['start_station_longitude']>=min_longitude_sf) & (df_18['start_station_longitude']<=max_longitude_sf)
end_lon = (df_18['end_station_longitude']>=min_longitude_sf) & (df_18['end_station_longitude']<=max_longitude_sf)
df_sf = df_18[start_lat & end_lat & start_lon & end_lon]
df_sf.start_station_latitude.max(), df_sf.start_station_latitude.min(), df_sf.start_station_longitude.max(), df_sf.start_station_longitude.min()
df_sf.head(1)
print('The total station number in San Francisco: ' + repr(df_sf['start_station_id'].nunique()))
plt.figure(figsize = [20,10])
plt.scatter(df_sf['start_station_latitude'], df_sf['start_station_longitude'], color = 'navy')
plt.title('The Total Stations in San Francisco')
plt.legend(['Bike Stations'])
plt.xlabel('station latitude')
plt.ylabel('station longitude');
df_sf_top = pd.DataFrame(df_18['start_station_name'].value_counts())
df_sf_top = df_sf_top.reset_index().rename(columns = {'index':'start_station_name', 'start_station_name':'count'})
df_sf_top.head().start_station_name
```
**Top 5 most popular stations in SF**
```
df_top5 = df_18.query('start_station_name == ["San Francisco Ferry Building (Harry Bridges Plaza)" ,"San Francisco Caltrain Station 2 (Townsend St at 4th St)", "San Francisco Caltrain (Townsend St at 4th St)", "Market St at 10th St", "Berry St at 4th St"]')
name = [[df_top5['start_station_latitude'], df_top5['start_station_longitude']]]
plt.figure(figsize = [10,6])
plt.scatter(df_top5['start_station_latitude'], df_top5['start_station_longitude'])
plt.title('The Locations of Top 5 Stations in San Francisco')
plt.legend(['Bike Stations'])
plt.xlabel('station latitude')
plt.ylabel('station longitude')
plt.savefig('Station03.png');
#find top 5 stations among all sf stations
print('The total station number in San Francisco: ' + repr(df_sf['start_station_id'].nunique()))
plt.figure(figsize = [20,10])
plt.scatter(df_sf['start_station_latitude'], df_sf['start_station_longitude'], color = 'c')
#highlight top 5 stations
plt.scatter(df_top5['start_station_latitude'], df_top5['start_station_longitude'], color = 'r')
plt.title('The Top 5 Popular Stations in San Francisco')
plt.title('The Total Stations in San Francisco')
plt.legend(['Bike Stations'])
plt.xlabel('station latitude')
plt.ylabel('station longitude')
plt.savefig('Station02.png');
top_20 = df_18['start_station_name'].value_counts().head(20)
plt.figure(figsize = [10,6])
top_20.plot(kind = 'bar', color = 'deepskyblue')
plt.title('Top 20 Popular Stations')
plt.legend(['Usage Count'])
plt.xlabel('station name')
plt.ylabel('count')
plt.savefig('Station04.png');
```
**Finding:
Over half of the bike stations are located in San Francisco city, which is 156 stations in total. The top 5 popular docks are all in San Francisco as well and three of them are near to each other.**
| github_jupyter |
# RED WINE QUALITY_XGBRegresion
## 01| Dataset Information
__About Dataset:__
Dataset Kualitas Anggur berisi informasi tentang berbagai sifat fisikokimia anggur. Seluruh dataset dikelompokkan menjadi dua kategori: anggur merah dan anggur putih. Setiap anggur memiliki label kualitas yang terkait dengannya. Label berada dalam kisaran 0 hingga 10. Namun pada dataset ini, kita hanya menggunakan varian anggur merah.
__Features Description:__
* __Fixed acidity:__ Menunjukkan jumlah asam tartarat dalam anggur dan diukur dalam g/dm3
* __Volatile acidity:__ Menunjukkan jumlah asam asetat dalam anggur. Diukur dalam g/dm3.
* __Citric acid:__ Menunjukkan jumlah asam sitrat dalam anggur. Itu juga diukur dalam g/dm3.
* __Residual sugar:__ Menunjukkan jumlah gula yang tersisa dalam anggur setelah proses fermentasi selesai. Itu juga diukur dalam g/dm3.
* __Free sulfur dioxide:__ Mengukur jumlah belerang dioksida (SO2) dalam bentuk bebas. Itu juga diukur dalam g/dm3.
* __Total sulfur dioxide:__ Mengukur jumlah total SO2 dalam anggur. Bahan kimia ini bekerja sebagai agen antioksidan dan antimikroba.
* __Density:__ Menunjukkan kepadatan anggur dan diukur dalam g/dm3.
* __PH:__ Menunjukkan nilai pH anggur. Kisaran nilainya antara 0 hingga 14. Nilai 0 menunjukkan keasaman sangat tinggi, dan nilai 14 menunjukkan keasaman basa.
* __Sulphates:__ Menunjukkan jumlah kalium sulfat dalam anggur. Itu juga diukur dalam g/dm3.
* __Alcohol:__ Menunjukkan kandungan alkohol dalam anggur.
* __Quality:__ Menunjukkan kualitas anggur, yang berkisar dari 1 hingga 10. Di sini, semakin tinggi nilainya, semakin baik anggurnya.
## 02| Import Library
```
# Library math and visual
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('seaborn-colorblind')
import seaborn as sns
# Library sklearn
from sklearn.model_selection import train_test_split, RandomizedSearchCV
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
# Library jcopml
from jcopml.tuning import random_search_params as rsp
from jcopml.pipeline import num_pipe,cat_pipe
from jcopml.plot import plot_missing_value
from jcopml.feature_importance import mean_score_decrease
from warnings import filterwarnings
filterwarnings('ignore')
```
## Load Dataset
```
df = pd.read_csv('winequality-red.csv')
df.head()
# informasi dataset
df.info()
```
Jika dilihat type data dari setiap atribut sudah sesuai
```
#melihat statistik deskriptif
df.describe()
```
Kualitas wine yg diproduksi kebanyakan memiliki taraf nilai 5 dari 10 (normal). Dari kandungan gulanya, jenis wine termasuk dalam golongan Extra Sec (Very Dry: 1.5%-3%) dan tergolong dalam light bodied wine (alkohol < 12%).
Sumber: https://cellar.asia dan https://www.pelajaricaranya.com
```
# mendeteksi missing value
plot_missing_value(df, return_df=True)
```
Dari dataset yg kita punya, semua data terisi lengkap atau tidak ada missing value
```
#Melihat data duplikat.
df.duplicated().sum()
```
Sebanyak 240 data duplikat. Pada kasus ini data duplikat tidak akan dihandling, karena suatu hal lumrah jika data wine memiliki kualitas yang sama. Bahkan menjadi suatu hal yang bagus jika bisa mengontrol dan membuat semuanya sama.
## 03| Exploratory Data Analisys (EDA)
#### Hal2 umum dalam melihat kualitas wine:
Sebelum jauh melakukan EDA, perlu untuk mengetahui jenis parameter umum dalam menentukan kualitas wine khususnya red wine.
* __Kadar alkohol__
1. Light bodied wine: < 12,5%
2. Medium bodied wine: 12,5% – 13,5%
3. Full bodied wine: 13,5%
* __Kandungan gula__
1. very2 dry = 0.5 %– 1,5 %
2. very dry = 1,5% – 3%
3. dry = 3% – 5 %
4. sweet = 5% - 7%
5. very sweet = > 7%
* __Kekentalan__
```
# visualisasi kolom quality
plt.figure(figsize=(5,3))
sns.countplot(df.quality, palette='Set2')
plt.title('Fitur data target')
```
Data target terdiri dari beberapa kualitas dari range 3-8. Karena dataset ini berasal dari satu jenis wine yang sama yaitu red wine, maka kita akan membuatnya menjadi 2 kategori saja. Untuk kategori Baik '1' (6-10), sedangkan Buruk '0' (1-5).
```
# mapping data target
df.quality = [0 if i <= 5 else 1 for i in df.quality]
df.quality.value_counts()
```
Setelah dimapping, kolom quality hanya terdiri dari 2 kategori.
```
# Dataframe berdasarkan hal umum untuk melihat kualitas wine
data = df[['residual sugar','density','alcohol','quality']]
# Bining data berdasarkan kandungan gula dan alkohol
data['sugar level'] = pd.cut(data['residual sugar'], bins=[0,1.5,3,5,7,20], labels=['very2 dry', 'very dry', 'dry','sweet','very sweet'])
data['alcohol level'] = pd.cut(data['alcohol'], bins=[0,12.5,13.5,20], labels=['light bodied','medium bodied','full bodied'])
# Menampilkan 5 data teratas
data.head()
```
#### Taraf Alcohol
```
# Visualisasi kategori kadar gula
plt.figure(figsize=(5,3))
sns.barplot(data['quality'], data['alcohol'], palette='Set2')
plt.title('Kadar alkohol berdasarkan kualitas')
plt.show()
# Visualisasi kategori kadar alkohol
plt.figure(figsize=(5,3))
sns.countplot(data['alcohol level'], color='mediumaquamarine')
plt.show()
```
#### Taraf Gula
```
# Visualisasi kategori kadar gula
plt.figure(figsize=(5,3))
sns.barplot(data['quality'], data['residual sugar'], palette='Set2')
plt.show()
# Visualisasi kategori kadar gula
plt.figure(figsize=(5,3))
sns.countplot(data['sugar level'], color='mediumaquamarine')
plt.show()
```
#### Taraf kekentalan
```
# Visualisasi kategori kadar gula
plt.figure(figsize=(5,3))
sns.barplot(data['quality'], data['density'], palette='Set2')
plt.show()
```
Dari visualisasi diatas dapat disimpulkan bahwa kualitas wine yg diproduksi memiliki kekentalan yg sama. Wine juga memiliki kandungan gula yg sama dan cenderung berjenis Extra Sec (Very Dry: 1.5%-3%). Kandungan alkohol juga tergolong dalam light bodied wine (alkohol < 12%). Make sense saja karena dataset kita merupakan satu jenis yg sama yaitu red wine (anggur merah).
```
# visualisasi scatter plot
plt.figure(figsize=(10,5))
sns.scatterplot(data=data, x='density', y='residual sugar', hue='alcohol level', palette='Dark2', )
plt.title('Density vs Residual Sugar', fontsize=15)
plt.tight_layout()
plt.show()
```
* Wine full bodied lebih cair dan memiliki kandungan gula yg rendah.
* Wine medium bodied tergolong sedang untuk kekentalan dan kandungan gula
* Wine light bodied tergolong kental dan tinggi kandungan gula.
```
#Copy dataset
df_new = df.copy()
# Menambahkan range kualitas wine (low, mid, high)
df_new['level quality'] = df_new.quality.map({0:'low quality', 1:'high quality'})
df_new.head()
# Group dataset berdasarkan level quality
df_new.drop(columns='quality').groupby('level quality').mean()
# Visualisasi dengan distplot untuk melihat distribusi data
plt.figure(figsize=(15,10))
for i in enumerate(df.describe().columns):
plt.subplot(3,4, i[0]+1)
sns.distplot(df[i[1]], color='deeppink', kde=True)
plt.tight_layout()
plt.show()
# Melihat korelasi dari setiap fitur dataset
quality_corr = df.corr()['quality'].sort_values(ascending=False)
print(quality_corr)
# Visualisasi korelasi dengan heatmap
plt.figure(figsize=(15,15))
sns.heatmap(df.corr(),
annot=True,
linewidths=.2,
cbar=True,
cmap="cool",
center=0)
plt.title('Korelasi Features', fontsize=20, pad=15)
plt.show()
# Melihat outlier
fig, ax = plt.subplots(ncols=4, nrows=3, figsize=(20,15))
index = 0
ax = ax.flatten()
for col, value in df.items():
sns.boxplot(x=col, data=df, color='lightseagreen', ax=ax[index])
index += 1
plt.tight_layout(pad=0.5, w_pad=0.7, h_pad=5.0)
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3-Q1
print(IQR)
df_clear_iqr = df[~((df < (Q1-1.5*IQR)) | (df > (Q3+1.5*IQR))).any(axis=1)]
df_clear_iqr.shape[0]
df.shape[0]
```
## 04| Splitting Data
```
X = df_clear_iqr.drop(columns='quality')
y = df_clear_iqr['quality']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```
## 05| Building and Evaluation Model
```
preprocessor = ColumnTransformer([
('numeric', num_pipe(), X_train.columns)
])
pipeline = Pipeline([
('prep', preprocessor),
('algo', RandomForestClassifier(random_state=42))
])
model = RandomizedSearchCV(pipeline, rsp.rf_params, cv=3, n_iter=50, n_jobs=-1, verbose=1, random_state=42)
model.fit(X_train, y_train)
print(model.best_params_)
print(model.score(X_train, y_train), model.best_score_, model.score(X_test, y_test))
#Prediksi dan assign ke y_pred
y_pred = model.predict(X_test)
#Evaluasi model
print('Nilai confusion matrix: \n')
print(confusion_matrix(y_test, y_pred))
print('\nNilai classification report: \n')
print(classification_report(y_test, y_pred))
# Visualisasi feature importance
df_imp = mean_score_decrease(X_train, y_train, model, plot=True)
```
Dari visualisasi tersebut, terlihat bahwa semua fitur sangat berpengaruh dalam pemodelan algoritma machine learning
## 06| Prediksi menggunakan model
Oke kita akan coba dan cocokkan hasil prediksi menggunakan model yang sudah kita buat. Data prediksi yang kita pakai menggunakan sample data dari dataset secara acak.
```
df_acak = df.sample(frac=0.005, random_state=42)
df_acak
# Mencoba data baru dari dataset
data_baru = df_acak.drop(columns='quality')
data_baru
# Prediksi dan menampilkan data hasil prediksi
file = df_acak.copy()
file['prediksi'] = pd.DataFrame(model.predict(data_baru)).values
# Menampilkan Data
file
```
Dari data diatas, prediksi menunjukkan benar semua. Namun akan ada kemungkinan salah, karena performa atau tingkat keakuratan dari model yang kita buat hanya sebesar 85%.
By muhamadsahri08@gmail.com
| github_jupyter |
# Numerical solution to the 1-dimensional Time Independent Schroedinger Equation
Based on the paper "Matrix Numerov method for solving Schroedinger's equation" by Mohandas Pillai, Joshua Goglio, and Thad G. Walker, _American Journal of Physics_ **80** (11), 1017 (2012). [doi:10.1119/1.4748813](http://dx.doi.org/10.1119/1.4748813)
```
# import some needed libraries
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
%matplotlib inline
autoscale = False # set this equal to true to use Pillai's recommended step sizes
# values of constants
hbar = 1.0
mass = 1.0 # changing the mass will also change the energy scale
omega = 1.0
L = 1.0 # width of SW
# bounds (These are overwritten if autoscale=True)
xmin = -L # lower bound of position
xmax = 5.0 # upper bound of position
n = 100 # number of steps (may be overwritten if autoscale == True)
dx = (xmax-xmin)/(n-1)
# the function V is the potential energy function
def V(x):
    """Potential energy function V(x) for the Numerov solver.

    Must be "vectorizable": it is called with a whole numpy array of grid
    points and must return an array of the same shape.  It must also be
    finite on the grid (no division by zero).

    Returns the half harmonic oscillator: V = 0.5*m*omega^2*x^2 for x > 0
    and V = 0 for x <= 0 (the hard wall is supplied by the grid boundary).

    Bug fixed: the original multiplied by 0.5*(x + |x|), which equals
    max(x, 0) rather than a unit step, so the potential was cubic
    (0.5*m*omega^2*x^3) for x > 0 instead of the quadratic potential the
    comment announced.  The boolean factor (x > 0) restores the intended
    half harmonic oscillator.
    """
    # Replace the expression below with a different potential if desired.
    return 0.5*mass*omega**2*x*x*(x > 0)  # half harmonic oscillator
if (autoscale):
    # Emax is the maximum energy for which to check for eigenvalues.
    Emax = 20.0
    # The next lines make some reasonable choices for the position grid
    # size and spacing, following Pillai et al. (2012).
    xt = opt.brentq(lambda x: V(x)-Emax ,0,5*Emax) #classical turning point
    dx = 1.0/np.sqrt(2*Emax) #step size
    # bounds and number of steps; np.int was removed in NumPy 1.24, so use
    # the builtin int for the truncation.
    n = int(0.5+2*(xt/dx + 4.0*np.pi)) #number of steps
    xmin = -dx*(n+1)/2
    xmax = dx*(n+1)/2
xmin, xmax, n #show the limits and number of steps
#define the x coordinates
x = np.linspace(xmin,xmax,n)
#define the numerov matrices
B = np.matrix((np.eye(n,k=-1)+10.0*np.eye(n,k=0)+np.eye(n,k=1))/12.0)
A = np.matrix((np.eye(n,k=-1)-2.0*np.eye(n,k=0)+np.eye(n,k=1))/(dx**2))
#calculate kinetic energy operator using Numerov's approximation
KE = -0.5*hbar**2/mass*B.I*A
#calculate hamiltonian operator approximation
H = KE + np.diag(V(x))
#Calculate eigenvalues and eigenvectors of H
energies, wavefunctions = np.linalg.eigh(H) # "wavefunctions" is a matrix with one eigenvector in each column.
energies[0:5] #display the lowest four energies
# extract color settings to help plotting
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
plt.figure(figsize=(6,8))
number = [0,1,2,3,4,5] #which wavefunctions to plot, starting counting from zero
zoom = -3.0 # zoom factor for wavefunctions to make them more visible
plt.plot(x,V(x),'-k',label="V(x)") # plot the potential
plt.vlines(-1,0,15,color="black")
plt.vlines(0,0,15,color="black",lw=0.5)
for num in number:
plt.plot(x,zoom*wavefunctions[:,num]+energies[num],label="n={}".format(num)) #plot the num-th wavefunction
plt.hlines(energies[num],-1,5,lw=0.5, color=colors[num])
plt.ylim(-1,15); # set limits of vertical axis for plot
plt.legend();
plt.xlabel("x");
plt.ylabel("Energy or ϕ(x)");
```
| github_jupyter |
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
from basic_artificial_market import ArtificialMarket
import numpy as np
import matplotlib.pyplot as plt
import math
class ArtificialMarket():
    """Simple agent-based artificial market simulation.

    Each of ``num_player`` agents forms an expected log-return as a
    weighted mix of a fundamentalist term (log10 of fundamental over last
    price), a chartist term (historical log10 return over its own look-back
    horizon) and a private noise term, then submits an order around its
    expected price; orders either hit the opposite limit-order book
    (executing as a market order) or join the book as a limit order.

    NOTE(review): the ``fdmtl`` and ``ganma`` constructor arguments are
    accepted but never stored; the fundamental price and the per-agent
    horizons are instead supplied to the individual methods.
    """
    def __init__(self, num_player=1000, fdmtl=10000.0, ganma=1000, sigma=0.06, P_sigma=30):
        # sigma: std-dev of each agent's private noise term e_t
        # P_sigma: std-dev of the submitted order price around the expected price
        self.num_player = num_player
        self.random_state = np.random.RandomState()
        self.sigma = sigma
        self.P_sigma = P_sigma
    def weight(self, w_1_max=1, w_2_max=10, w_3_max=1):
        """Draw per-agent component weights, uniform on [0, max).

        Returns a list [w_1, w_2, w_3] of arrays of length num_player:
        w_1 = fundamentalist weight, w_2 = chartist weight, w_3 = noise weight.
        """
        num_player = self.num_player
        weight_1 = np.zeros(num_player)
        weight_2 = np.zeros(num_player)
        weight_3 = np.zeros(num_player)
        random_state = self.random_state
        for i in range(num_player):
            weight_1[i] = random_state.uniform()*w_1_max
            weight_2[i] = random_state.uniform()*w_2_max
            weight_3[i] = random_state.uniform()*w_3_max
        weight = [weight_1, weight_2, weight_3]
        return weight
    def ganma(self, ganma_max=10000, num_player=None):
        """Draw each agent's chartist look-back horizon ("ganma").

        NOTE(review): ``random_state.uniform(ganma_max)`` passes ganma_max
        as *low* while high keeps its default of 1.0, so numpy samples
        between 1.0 and ganma_max rather than the presumably intended
        uniform(0, ganma_max) — confirm.  Also note the values are stored
        in a float array even though they are truncated with int().
        """
        random_state = self.random_state
        if num_player is None:
            num_player = self.num_player
        ganma = np.array([])
        for i in range(num_player):
            ganma = np.append(ganma, int(random_state.uniform(ganma_max)))
        return ganma
    def P_t(self, past_data, delta_l, delta_t):
        """Return the price observed ``delta_l`` time units in the past.

        NOTE(review): the while loop starts at j = 0, so the first term
        added is delta_t[-0] == delta_t[0] (the *oldest* inter-arrival
        time, not the newest); the loop can also walk past the start of
        delta_t for large delta_l — verify before relying on this method
        (it is currently only referenced from commented-out code).
        """
        if np.sum(delta_t) <= delta_l:
            P_t = past_data[-1]
        else:
            d = 0
            j = 0
            while d <= delta_l:
                d += delta_t[-j]
                j += 1
            P_t = past_data[-j]
        return P_t
    def r_t_h(self, past_data, P_t, ganma):
        """Historical log10 return of P_t over the agent's horizon.

        Falls back to the very first recorded price when fewer than
        ``ganma`` prices exist yet.

        NOTE(review): ``ganma`` arrives as a numpy float (see ganma());
        using it as an index in ``past_data[-ganma]`` requires an integer
        on modern NumPy — confirm against the NumPy version in use.
        """
        if len(past_data) < ganma:
            r_t_h = np.log10(P_t/past_data[0])
        else:
            past_data_ganma = past_data[-ganma]
            r_t_h = np.log10(P_t/past_data_ganma)
        return r_t_h
    def one_market_model(self, delta_l, w, ganma, fdmtl=10000.0, b_limit=None, s_limit=None, t=0, delta_t=None, past_data=None, delta=1, market_order=0):
        """Run one round in which every agent submits a single order.

        Parameters mirror the running market state: ``b_limit``/``s_limit``
        are the buy/sell limit-order books, ``past_data`` the price
        history, ``delta_t`` the inter-arrival times (exponential with
        scale ``delta``), ``t`` the simulated clock and ``market_order``
        the cumulative count of executed market orders.  Fresh books and
        history seeded around the fundamental price are created when the
        corresponding argument is None.

        Returns the updated (t, delta_t, past_data, b_limit, s_limit,
        market_order) tuple.
        """
        num_player = self.num_player
        sigma = self.sigma
        P_sigma = self.P_sigma
        P_f = fdmtl
        if past_data is None:
            past_data = np.array([P_f])
        if b_limit is None:
            b_limit = np.array([np.random.normal(P_f, 1) for i in range(5)])
        if s_limit is None:
            s_limit = np.array([np.random.normal(P_f, 1) for i in range(5)])
        if delta_t is None:
            delta_t = np.array([])
        w_1 = w[0]
        w_2 = w[1]
        w_3 = w[2]
        r_t_e = np.zeros(num_player)
        for i in range(num_player):
            # Last traded price; the time-lagged variant via P_t() is disabled.
            P_t_1 = past_data[-1]
            #P_t_1 = self.P_t(past_data, delta_l, delta_t)
            r_t_h = self.r_t_h(past_data, P_t_1, ganma[i])
            e_t = np.random.normal(0, sigma)
            # Agent i's expected log-return: weighted mean of the
            # fundamentalist, chartist and noise components.
            r_t_e[i] = (w_1[i]*np.log10(P_f/P_t_1) + w_2[i]*r_t_h + w_3[i]*e_t)/(w_1[i] + w_2[i] + w_3[i])
            P_e = P_t_1*math.exp(r_t_e[i])
            # Order price scattered around the expected price.
            P_o = np.random.normal(P_e, P_sigma)
            #print w_1[i]*np.log10(P_f/P_t_1), w_2[i]*r_t_h, r_t_e[i]
            if P_e > P_o:
                # Expected price above the order price: the agent buys.
                P_o = round(P_o, 1)
                if len(s_limit) > 100 and np.min(s_limit) < P_o:
                    # Cross the spread: take the best (lowest) sell limit order.
                    P_t = np.min(s_limit)
                    s_limit = np.delete(s_limit, np.argmin(s_limit))
                    market_order += 1
                else:
                    # Otherwise rest in the buy book; price is unchanged.
                    b_limit = np.append(b_limit, P_o)
                    P_t = P_t_1
            else:
                # Expected price at or below the order price: the agent sells
                # (one tick of 0.1 is added after rounding).
                P_o = round(P_o, 1)+0.1
                if len(b_limit) > 100 and np.max(b_limit) > P_o:
                    # Cross the spread: take the best (highest) buy limit order.
                    P_t = np.max(b_limit)
                    b_limit = np.delete(b_limit, np.argmax(b_limit))
                    market_order += 1
                else:
                    # Otherwise rest in the sell book; price is unchanged.
                    s_limit = np.append(s_limit, P_o)
                    P_t = P_t_1
            # Record the (possibly unchanged) price and advance the clock
            # by an exponentially distributed inter-arrival time.
            past_data = np.append(past_data, P_t)
            delta_t = np.append(delta_t, np.random.exponential(delta))
            t += delta_t[-1]
        return t, delta_t, past_data, b_limit, s_limit, market_order
    def one_market_simulation(self, delta_l, o_max=10000, fdmtl=10000):
        """Run ``o_max`` + 2 rounds of the market and return the results.

        The first call seeds the order books (its price history is
        discarded); the second restarts the clock with those books.

        NOTE(review): inside the loop ``market_order`` is passed as the
        10th positional argument, which binds to the ``delta`` parameter
        of one_market_model (the exponential scale), not to
        ``market_order`` — this looks like a bug; confirm the intended
        call signature.
        """
        o = 0
        w = self.weight()
        ganma = self.ganma()
        t0, delta_t0, past_data0, b_limit, s_limit, market_order = self.one_market_model(delta_l, w, ganma, fdmtl)
        t, delta_t, past_data, b_limit, s_limit, market_order = self.one_market_model(delta_l, w, ganma, fdmtl, b_limit, s_limit)
        while o < o_max:
            t, delta_t, past_data, b_limit, s_limit, market_order = self.one_market_model(delta_l, w, ganma, fdmtl, b_limit, s_limit, t, delta_t, past_data, market_order)
            o += 1
        return past_data, t, market_order
    def rad(self, past_data, fdmtl=10000):
        """Relative average deviation of the price path from the fundamental."""
        p = 0
        for i in range(len(past_data)):
            p += math.fabs(float(past_data[i]) - fdmtl)/fdmtl
        m = 1.0/float(len(past_data))*p
        return m
    def agreed_rate(self, past_data, market_order):
        """Fraction of recorded prices that resulted from executed market orders."""
        agreed_rate = float(market_order)/float(len(past_data))
        return agreed_rate
AM = ArtificialMarket()
k, t, market_order= AM.one_market_simulation(10, 100)
plt.plot(k)
AM.agreed_rate(k, market_order)
AM = ArtificialMarket()
AM.agreed_rate(k, market_order)
k[-100:]
AM = ArtificialMarket()
k, t= AM.one_market_simulation(10, 5)
plt.plot(k)
round(AM.rad(k), 8)
j = np.where(k == min(k))
j
k[20:1000]
for i in range:
np.random.exponential(1)
k = [1, 3, 4, 5, 1, 5]
k = np.delete(k, np.max(k))
k
if past_data[-1] > past_data[-2] + 100:
print past_data[-1], past_data[-2], i, P_t_1, P_e, P_o, r_t_e[i], math.exp(r_t_e[i])
elif past_data[-1] < past_data[-2] - 100:
print past_data[-1], past_data[-2], i, P_t_1, P_e, P_o, r_t_e[i], math.exp(r_t_e[i]
random_state = np.random.RandomState()
random_state.uniform()
i = 0
for i in range(100):
i += random_state.uniform()
i/100
```
| github_jupyter |
```
import numpy as np
import pandas as pd
from datetime import datetime
from IPython.display import display
import cv2
import os
import time
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization
from tensorflow.keras.layers import Activation, Concatenate, GlobalMaxPooling2D
from tensorflow.keras.layers import GlobalAveragePooling2D, Reshape, Permute, multiply
#from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from tensorflow.keras.applications.xception import Xception, preprocess_input
from tensorflow.keras.utils import Sequence
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, TensorBoard, LambdaCallback
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
import imgaug as ia
from imgaug import augmenters as iaa
```
## Create image augmenter
```
def create_augmenter(train=True):
    """Build an imgaug augmentation pipeline.

    Based on the examples from https://github.com/aleju/imgaug.

    Parameters
    ----------
    train : bool
        When True, return the training pipeline (random crop/pad plus
        affine scale/rotate/shear jitter).  When False, return an empty
        identity pipeline.

    Returns
    -------
    iaa.Sequential
        The augmentation sequence to apply to a batch of images.

    Bug fixed: the original ``else`` branch was ``pass``, so calling with
    ``train=False`` fell through to ``return seq`` with ``seq`` undefined
    and raised ``NameError``.
    """
    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)
    if train:
        # Augmentation steps applied (each with 50% probability) to every image.
        seq = iaa.Sequential(
            [
                # crop/pad images by -5% to +5% of their height/width
                sometimes(iaa.CropAndPad(
                    percent=(-0.05, 0.05),
                    pad_mode=ia.ALL,  # a random pad mode is sampled per image
                    pad_cval=(0, 255)  # constant/end value used by the sampled pad mode
                )),
                sometimes(iaa.Affine(
                    scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},  # scale to 80-120% per axis
                    rotate=(-5, 5),  # rotate by -5 to +5 degrees
                    shear=(-5, 5),  # shear by -5 to +5 degrees
                    cval=(0, 255),  # fill value when mode is constant
                    mode=ia.ALL  # any of scikit-image's warping modes
                )),
            ],
        )
    else:
        # Identity pipeline: no augmentation at evaluation/inference time.
        seq = iaa.Sequential([])
    return seq
```
## MultiOutputDataGenerator is for Keras multiple output model¶
```
# reference: https://www.kaggle.com/mpalermo/keras-pipeline-custom-generator-imgaug
class BaseDataGenerator(Sequence):
    '''
    Generates batches of (optionally augmented) image data for Keras.

    Exactly one of ``images`` (in-memory arrays) or ``images_paths``
    (file paths loaded lazily with cv2) must be provided.
    Adapted from https://www.kaggle.com/mpalermo/keras-pipeline-custom-generator-imgaug.
    '''
    def __init__(self, images=None, images_paths=None, labels=None, batch_size=64, image_dimensions = (512, 512, 3),
                 shuffle=False, augmenter=None, preprocessor=None,
                 return_label=True, total_classes=None):
        # Validate before touching either container: the original code called
        # len(self.images_paths) first, so passing neither argument raised a
        # TypeError instead of the intended error message below.
        if images is None and images_paths is None:
            raise Exception("Must give images or images_paths")
        self.labels = labels              # array of integer class labels
        self.images = images              # in-memory images, or None
        self.images_paths = images_paths  # array of image file paths, or None
        self.dim = image_dimensions       # target (H, W, C) of each image
        self.batch_size = batch_size
        self.shuffle = shuffle            # reshuffle sample order at epoch end
        self.augmenter = augmenter        # imgaug augmenter, or None
        self.preprocessor = preprocessor  # model-specific preprocess fn, or None
        self.return_label = return_label  # whether __getitem__ yields labels too
        self.total_classes = total_classes  # number of classes for one-hot encoding
        if self.images is None:
            self.total_len = len(self.images_paths)
        else:
            self.total_len = len(self.images)
        self.on_epoch_end()

    def __len__(self):
        '''
        Denotes the number of batches per epoch (last batch may be partial).
        '''
        return int(np.ceil(self.total_len / self.batch_size))

    def on_epoch_end(self):
        '''
        Updates (and optionally shuffles) sample indexes after each epoch.
        '''
        self.indexes = np.arange(self.total_len)
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def gather_batch_item(self, index):
        '''
        Generate one batch of data.

        Returns (images, one_hot_labels) when return_label is True,
        otherwise just the preprocessed images array.
        '''
        # selects indices of data for next batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # load images, lazily from disk or directly from memory
        if self.images is None:
            images = [cv2.imread(self.images_paths[k]) for k in indexes]
        else:
            images = [self.images[k] for k in indexes]
        # augment on raw pixels first, then resize and preprocess
        if self.augmenter:
            images = self.augmenter.augment_images(images)
        images = np.array([self.preprocess_image(cv2.resize(img, self.dim[:2])) for img in images])
        if self.return_label:
            labels = np.array([self.labels[k] for k in indexes])
            labels = to_categorical(labels, num_classes=self.total_classes)
            return images, labels
        return images

    def __getitem__(self, index):
        return self.gather_batch_item(index)

    def preprocess_image(self, images):
        '''
        Apply the configured preprocessor, defaulting to [0, 1] scaling.
        '''
        if self.preprocessor is None:
            images = images / 255.
        else:
            images = self.preprocessor(images)
        return images
class MultiOutputDataGenerator(BaseDataGenerator):
    '''
    Generates batches for a Keras model with multiple output heads.

    Duplicates each batch's labels under every name in ``output_names`` so
    a multi-head model (e.g. 'original_out' and 'se_out') is trained on
    the same target.  Optionally applies test-time augmentation (TTA).
    '''
    def __init__(self, images, images_paths, labels, batch_size=64, image_dimensions = (512, 512, 3),
                 shuffle=False, augmenter=None, preprocessor=None,
                 return_label=True, total_classes=None, output_names=None, tta_augmentors=None):
        # Initialise the shared machinery in the parent class.
        super().__init__(images, images_paths,
                         labels, batch_size, image_dimensions,
                         shuffle, augmenter, preprocessor,
                         return_label, total_classes)
        self.output_names = output_names      # names of the model's output layers
        self.tta_augmentors = tta_augmentors  # optional list of TTA augmenters

    def __getitem__(self, index):
        '''
        Generate one batch of data for a multiple-output model.
        '''
        if self.return_label:
            images, labels = self.gather_batch_item(index)
            # Feed the same labels to every named output head.
            output_dict = {name: labels for name in self.output_names}
            if self.tta_augmentors is not None:  # `is not None` instead of `!= None`
                images = self.get_tta_images(images)
            return images, output_dict
        images = self.gather_batch_item(index)
        if self.tta_augmentors is not None:
            images = self.get_tta_images(images)
        return images

    def get_tta_images(self, images):
        '''
        Apply test-time augmentation: return a list containing the original
        batch followed by one augmented copy per configured augmenter.
        '''
        aug_images = [images]  # keep the unaugmented batch first
        for augmentor in self.tta_augmentors:
            aug_images.append(augmentor.augment_images(images))
        return aug_images
```
## Process data
```
all_unicodes = pd.read_csv('input/unicode_translation.csv')
all_unicodes.head()
all_unicodes = all_unicodes.sort_values('Unicode')
unicode_to_word = {}
word_to_unicode = {}
unicode_to_encode = {}
start_index = 0
for unicode, word in all_unicodes.values:
unicode_to_encode[unicode] = start_index
unicode_to_word[unicode] = word
word_to_unicode[word] = unicode
start_index += 1
# pretrain model input size
image_shape = (96, 96, 3)
total_classes = len(all_unicodes)
batch_size = 48
all_img_path = 'input/chars'
def create_path_labels(all_path, img_dir=None, encoder=None):
    '''
    Build a DataFrame of image paths, unicode labels and integer encodings.

    Each file name is expected to start with the character's unicode code
    point followed by an underscore, e.g. ``U+304B_12.png``.

    Parameters
    ----------
    all_path : iterable of str
        Image file names.
    img_dir : str, optional
        Directory prepended to each file name.  Defaults to the
        module-level ``all_img_path`` (backward compatible).
    encoder : mapping, optional
        Maps a unicode string to its integer class id.  Defaults to the
        module-level ``unicode_to_encode`` (backward compatible).

    Returns
    -------
    pandas.DataFrame
        Columns: ``img_path``, ``label`` (unicode string), ``encode`` (int).
    '''
    if img_dir is None:
        img_dir = all_img_path
    if encoder is None:
        encoder = unicode_to_encode
    img_paths, label_list, encode_list = [], [], []
    for img_name in all_path:
        word_unicode = img_name.split('_')[0]  # unicode code point prefix
        label_list.append(word_unicode)
        encode_list.append(encoder[word_unicode])
        img_paths.append(img_dir + '/' + img_name)
    return pd.DataFrame({'img_path': img_paths, 'label': label_list, 'encode': encode_list})
# Create csv, only need to run this cell at the first time
char_img_names = list(os.listdir(all_img_path))
df_all = create_path_labels(char_img_names)
df_all.shape
df_all.to_csv("input/all_chars.csv", index=False)
df_all = pd.read_csv("input/all_chars.csv")
df_train, df_val = train_test_split(df_all, test_size=0.1, random_state=42)
df_train.shape
df_val.shape
df_train.head()
train_datagen = MultiOutputDataGenerator(images=None, images_paths=df_train['img_path'].values, labels=df_train['encode'].values,
batch_size=batch_size, image_dimensions=image_shape, shuffle=True,
augmenter=create_augmenter(train=True), preprocessor=preprocess_input,
return_label=True, total_classes=total_classes, output_names=['original_out', 'se_out'])
val_datagen = MultiOutputDataGenerator(images=None, images_paths=df_val['img_path'].values, labels=df_val['encode'].values,
batch_size=20, image_dimensions=image_shape, shuffle=True,
augmenter=None,
preprocessor=preprocess_input,
return_label=True, total_classes=total_classes, output_names=['original_out', 'se_out'])
print(len(train_datagen))
print(len(val_datagen))
```
## Create Model
```
def squeeze_excite_block(tensor, ratio=16):
    """Squeeze-and-Excitation block: channel-wise feature recalibration.

    Adapted from https://github.com/titu1994/keras-squeeze-excite-network.
    Global-average-pools the input to one scalar per channel, passes the
    result through a two-layer bottleneck (reduction factor ``ratio``)
    with relu/sigmoid activations, and rescales the input's channels by
    the resulting gates.
    """
    channels_first = K.image_data_format() == "channels_first"
    n_filters = K.int_shape(tensor)[1 if channels_first else -1]

    # Squeeze: one scalar per channel, reshaped for broadcasting.
    gates = GlobalAveragePooling2D()(tensor)
    gates = Reshape((1, 1, n_filters))(gates)
    # Excite: bottleneck MLP producing per-channel weights in (0, 1).
    gates = Dense(n_filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(gates)
    gates = Dense(n_filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(gates)

    if channels_first:
        gates = Permute((3, 1, 2))(gates)

    return multiply([tensor, gates])
pretrained = Xception(include_top=False, weights='imagenet', input_shape=image_shape, pooling=None)
x = pretrained.output
# Original branch
gavg = GlobalAveragePooling2D()(x)
gmax = GlobalMaxPooling2D()(x)
original_concat = Concatenate(axis=-1)([gavg, gmax,])
original_concat = Dropout(0.5)(original_concat)
original_final = Dense(total_classes, activation='softmax', name='original_out')(original_concat)
# SE branch
se_out = squeeze_excite_block(x)
se_gavg = GlobalAveragePooling2D()(se_out)
se_gmax = GlobalMaxPooling2D()(se_out)
se_concat = Concatenate(axis=-1)([se_gavg, se_gmax,])
se_concat = Dropout(0.5)(se_concat)
se_final = Dense(total_classes, activation='softmax', name='se_out')(se_concat)
model = Model(inputs=pretrained.input, outputs=[original_final, se_final])
model.summary()
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping

# Save the weights whenever validation loss improves.
checkpointer = ModelCheckpoint(filepath='models/classification/weights.best.Xception_best.hdf5',
                               verbose=1, save_best_only=True)

# BUG FIX: the original built the path as f".\logs\warmup" — an f-string with
# no placeholders whose literal backslashes only work on Windows. Build it
# portably instead.
logdir = os.path.join('.', 'logs', 'warmup')
# Create the log directory (and any missing parents) if it doesn't exist.
os.makedirs(logdir, exist_ok=True)
tensorboard_callback = TensorBoard(log_dir=logdir)

# Stop after 15 epochs without val_loss improvement and roll back to the
# best weights seen so far.
early_stop = EarlyStopping(monitor="val_loss",
                           mode="min",
                           patience=15,
                           restore_best_weights=True)
model.compile(optimizer='adam',
loss={'original_out': 'categorical_crossentropy', 'se_out': 'categorical_crossentropy'},
loss_weights={'original_out': 1., 'se_out': 1.}, metrics=['accuracy'])
history = model.fit_generator(generator=train_datagen,
validation_data=val_datagen,
epochs=10,
callbacks=[tensorboard_callback, early_stop, checkpointer],
verbose=1,
)
```
| github_jupyter |
<img src="../images/aeropython_logo.png" alt="AeroPython" style="width: 300px;"/>
# Introducción a SymPy

__SymPy es una biblioteca de Python para matemática simbólica__. Apunta a convertirse en un sistema de álgebra computacional (__CAS__) con todas sus prestaciones manteniendo el código tan simple como sea posible para mantenerlo comprensible y fácilmente extensible. SymPy está __escrito totalmente en Python y no requiere bibliotecas adicionales__. _Este proyecto comenzó en 2005, fue lanzado al público en 2007 y a él han contribuido durante estos años cientos de personas._
_ Otros CAS conocidos son Mathematica y Maple, sin embargo ambos son software privativo y de pago. [Aquí](https://github.com/sympy/sympy/wiki/SymPy-vs.-Maple) puedes encontrar una comparativa de SymPy con Maple. _
Hoy veremos cómo:
* Crear símbolos y expresiones.
* Manipular expresiones (simplificación, expansión...)
* Calcular derivadas e integrales.
* Límites y desarrollos en serie.
* Resolución de ecuaciones.
* Resolución de EDOs.
* Matrices
Sin embargo, SymPy no acaba aquí ni mucho menos...
## Documentación & SymPy Live Shell
```
from IPython.display import HTML
HTML('<iframe src="http://docs.sympy.org/latest/index.html" width="700" height="400"></iframe>')
```
## SymPy Gamma
```
HTML('<iframe src="http://www.sympygamma.com/input/?i=integrate%281+%2F+%281+%2B+x^2%29%29" width="700" height="400"></iframe>')
```
## Creación de símbolos
Lo primero, como siempre, es importar aquello que vayamos a necesitar. La manera usual de hacerlo con SymPy es importar la función `init_session`:
```
from sympy import init_session
init_session(use_latex=True)```
Esta función ya se encarga de importar todas las funciones básicas y preparar las salidas gráficas. Sin embargo, en este momento, esta función se encuentra en mantenimiento para su uso dentro de los notebooks por lo que activaremos la salida gráfica e importaremos las funciones de la manera usual. Puedes consultar el estado de la corrección en: https://github.com/sympy/sympy/pull/13300 y https://github.com/sympy/sympy/issues/13319 .
El comando `init_session` llevaría a cabo algunas acciones por nostros:
* Gracias a `use_latex=True` obtenemos la salida en $\LaTeX$.
* __Crea una serie de variables__ para que podamos ponernos a trabajar en el momento.
Estas capacidades volverán a estar disponibles cuando el problema se corrija.
```
from sympy import init_printing
init_printing()
# aeropython: preserve
from sympy import (symbols, pi, I, E, cos, sin, exp, tan, simplify, expand, factor, collect,
apart, cancel, expand_trig, diff, Derivative, Function, integrate, limit,
series, Eq, solve, dsolve, Matrix, N)
```
<div class="alert warning-info"><strong>Nota:</strong>
En Python, no se declaran las variables, sin embargo, no puedes usar una hasta que no le hayas asignado un valor. Si ahora intentamos crear una variable `a` que sea `a = 2 * b`, veamos qué ocurre:
</div>
```
# Intentamos usar un símbolo que no hemos creado
a = 2 * b
```
Como en `b` no había sido creada, Python no sabe qué es `b`.
Esto mismo nos ocurre con los símbolos de SymPy. __Antes de usar una variable, debo decir que es un símbolo y asignárselo:__
```
# Creamos el símbolo a
a = symbols('a')
a
# Número pi
(a + pi) ** 2
# Unidad imaginaria
a + 2 * I
# Número e
E
# Vemos qué tipo de variable es a
type(a)
```
Ahora ya podría crear `b = 2 * a`:
```
b = 2 * a
b
type(b)
```
¿Qué está ocurriendo? Python detecta que a es una variable de tipo `Symbol` y al multiplicarla por `2` devuelve una variable de Sympy.
Como Python permite que el tipo de una variable cambie, __si ahora le asigno a `a` un valor float deja de ser un símbolo.__
```
a = 2.26492
a
type(a)
```
---
__Las conclusiones son:__
* __Si quiero usar una variable como símbolo debo crearla previamente.__
* Las operaciones con símbolos devuelven símbolos.
* Si una variable que almacenaba un símbolo recibe otra asignación, cambia de tipo.
---
__Las variables de tipo `Symbol` actúan como contenedores en los que no sabemos qué hay (un real, un complejo, una lista...)__. Hay que tener en cuenta que: __una cosa es el nombre de la variable y otra el símbolo con el que se representa__.
```
#creación de símbolos
coef_traccion = symbols('c_T')
coef_traccion
```
Incluso puedo hacer cosas raras como:
```
# Diferencia entre variable y símbolo
a = symbols('b')
a
```
Además, se pueden crear varios símbolos a la vez:
```
x, y, z, t = symbols('x y z t')
```
y símbolos griegos:
```
w = symbols('omega')
W = symbols('Omega')
w, W
```

_Fuente: Documentación oficial de SymPy_
__Por defecto, SymPy entiende que los símbolos son números complejos__. Esto puede producir resultados inesperados ante determinadas operaciones como, por ejemplo, lo logaritmos. __Podemos indicar que la variable es real, entera... en el momento de la creación__:
```
# Creamos símbolos reales
x, y, z, t = symbols('x y z t', real=True)
# Podemos ver las asunciones de un símbolo
x.assumptions0
```
## Expresiones
Comencemos por crear una expresión como: $\cos(x)^2+\sin(x)^2$
```
expr = cos(x)**2 + sin(x)**2
expr
```
### `simplify()`
Podemos pedirle que simplifique la expresión anterior:
```
simplify(expr)
```
En este caso parece estar claro lo que quiere decir más simple, pero como en cualquier _CAS_ el comando `simplify` puede no devolvernos la expresión que nosotros queremos. Cuando esto ocurra necesitaremos usar otras instrucciones.
### `.subs()`
En algunas ocasiones necesitaremos sustituir una variable por otra, por otra expresión o por un valor.
```
expr
# Sustituimos x por y ** 2
expr.subs(x, y**2)
# ¡Pero la expresión no cambia!
expr
# Para que cambie
expr = expr.subs(x, y**2)
expr
```
Cambia el `sin(x)` por `exp(x)`
```
expr.subs(sin(x), exp(x))
```
Particulariza la expresión $sin(x) + 3 x $ en $x = \pi$
```
(sin(x) + 3 * x).subs(x, pi)
```
__Aunque si lo que queremos es obtener el valor numérico lo mejor es `.evalf()`__
```
(sin(x) + 3 * x).subs(x, pi).evalf(25)
#ver pi con 25 decimales
pi.evalf(25)
#el mismo resultado se obtiene ocn la función N()
N(pi,25)
```
# Simplificación
SymPy ofrece numerosas funciones para __simplificar y manipular expresiones__. Entre otras, destacan:
* `expand()`
* `factor()`
* `collect()`
* `apart()`
* `cancel()`
Puedes consultar en la documentación de SymPy lo que hace cada una y algunos ejemplos. __Existen también funciones específicas de simplificación para funciones trigonométricas, potencias y logaritmos.__ Abre [esta documentación](http://docs.sympy.org/latest/tutorial/simplification.html) si lo necesitas.
##### ¡Te toca!
Pasaremos rápidamente por esta parte, para hacer cosas "más interesantes". Te proponemos algunos ejemplos para que te familiarices con el manejo de expresiones:
__Crea las expresiones de la izquierda y averigua qué función te hace obtener la de la derecha:__
expresión 1| expresión 2
:------:|:------:
$\left(x^{3} + 3 y + 2\right)^{2}$ | $x^{6} + 6 x^{3} y + 4 x^{3} + 9 y^{2} + 12 y + 4$
$\frac{\left(3 x^{2} - 2 x + 1\right)}{\left(x - 1\right)^{2}} $ | $3 + \frac{4}{x - 1} + \frac{2}{\left(x - 1\right)^{2}}$
$x^{3} + 9 x^{2} + 27 x + 27$ | $\left(x + 3\right)^{3}$
$\sin(x+2y)$ | $\left(2 \cos^{2}{\left (y \right )} - 1\right) \sin{\left (x \right )} + 2 \sin{\left (y \right )} \cos{\left (x \right )} \cos{\left (y \right )}$
```
#1
expr1 = (x ** 3 + 3 * y + 2) ** 2
expr1
expr1_exp = expr1.expand()
expr1_exp
#2
expr2 = (3 * x ** 2 - 2 * x + 1) / (x - 1) ** 2
expr2
expr2.apart()
#3
expr3 = x ** 3 + 9 * x ** 2 + 27 * x + 27
expr3
expr3.factor()
#4
expr4 = sin(x + 2 * y)
expr4
expand(expr4)
expand_trig(expr4)
expand(expr4, trig=True)
```
# Derivadas e integrales
Puedes derivar una expresion usando el método `.diff()` y la función `dif()`
```
#creamos una expresión
expr = cos(x)
#obtenemos la derivada primera con funcion
diff(expr, x)
#utilizando método
expr.diff(x)
```
__¿derivada tercera?__
```
expr.diff(x, x, x)
expr.diff(x, 3)
```
__¿varias variables?__
```
expr_xy = y ** 3 * sin(x) ** 2 + x ** 2 * cos(y)
expr_xy
diff(expr_xy, x, 2, y, 2)
```
__Queremos que la deje indicada__, usamos `Derivative()`
```
Derivative(expr_xy, x, 2, y)
```
__¿Será capaz SymPy de aplicar la regla de la cadena?__
```
# Creamos una función F
F = Function('F')
F(x)
# Creamos una función G
G = Function('G')
G(x)
```
$$\frac{d}{d x} F{\left (G(x) \right )} $$
```
# Derivamos la función compuesta F(G(x))
F(G(x)).diff(x)
```
En un caso en el que conocemos las funciones:
```
# definimos una f
f = 2 * y * exp(x)
f
# definimos una g(f)
g = f **2 * cos(x) + f
g
#la derivamos
diff(g,x)
```
##### Te toca integrar
__Si te digo que se integra usando el método `.integrate()` o la función `integrate()`__. ¿Te atreves a integrar estas casi inmediatas...?:
$$\int{\cos(x)^2}dx$$
$$\int{\frac{dx}{\sin(x)}}$$
$$\int{\frac{dx}{(x^2+a^2)^2}}$$
```
int1 = cos(x) ** 2
integrate(int1)
int2 = 1 / sin(x)
integrate(int2)
x, a = symbols('x a', real=True)
int3 = 1 / (x**2 + a**2)**2
integrate(int3, x)
```
# Límites
Calculemos este límite sacado del libro _Cálculo: definiciones, teoremas y resultados_, de Juan de Burgos:
$$\lim_{x \to 0} \left(\frac{x}{\tan{\left (x \right )}}\right)^{\frac{1}{x^{2}}}$$
Primero creamos la expresión:
```
x = symbols('x', real=True)
expr = (x / tan(x)) ** (1 / x**2)
expr
```
Obtenemos el límite con la función `limit()` y si queremos dejarlo indicado, podemos usar `Limit()`:
```
limit(expr, x, 0)
```
# Series
Los desarrollos en serie se pueden llevar a cabo con el método `.series()` o la función `series()`
```
#creamos la expresión
expr = exp(x)
expr
#la desarrollamos en serie
series(expr)
```
Se puede especificar el número de términos pasándole un argumento `n=...`. El número que le pasemos será el primer término que desprecie.
```
# Indicando el número de términos
series(expr, n=10)
```
Si nos molesta el $\mathcal{O}(x^{10})$ lo podemos quitar con `removeO()`:
```
series(expr, n=10).removeO()
series(sin(x), n=8, x0=pi/3).removeO().subs(x, x-pi/3)
```
---
## Resolución de ecuaciones
Como se ha mencionado anteriormente las ecuaciones no se pueden crear con el `=`
```
# create the equation (SymPy equations are not created with `=`)
ecuacion = Eq(x ** 2 - x, 3)
ecuacion
# We can also create it by equating the expression to zero explicitly.
# FIX: calling Eq() with a single argument was deprecated and later removed
# in SymPy; the right-hand side must be supplied explicitly.
Eq(x ** 2 - x - 3, 0)
# solve the equation
solve(ecuacion)
```
Pero la gracia es resolver con símbolos, ¿no?
$$a e^{\frac{x}{t}} = C$$
```
# Creamos los símbolos y la ecuación
a, x, t, C = symbols('a, x, t, C', real=True)
ecuacion = Eq(a * exp(x/t), C)
ecuacion
# La resolvemos
solve(ecuacion ,x)
```
Si consultamos la ayuda, vemos que las posibilidades y el número de parámetros son muchos, no vamos a entrar ahora en ellos, pero ¿se ve la potencia?
## Ecuaciones diferenciales
Tratemos de resolver, por ejemplo:
$$y{\left (x \right )} + \frac{d}{d x} y{\left (x \right )} + \frac{d^{2}}{d x^{2}} y{\left (x \right )} = \cos{\left (x \right )}$$
```
x = symbols('x')
y = Function('y')
ecuacion_dif = Eq(y(x).diff(x, 2) + y(x).diff(x) + y(x), cos(x))
ecuacion_dif
# Solve the ODE for the unknown function y(x).
# BUG FIX: the original called dsolve(ecuacion_dif, f(x)), but `f` was
# rebound earlier in the notebook to the expression 2*y*exp(x), so f(x)
# raised a TypeError. The unknown function of this ODE is y(x).
dsolve(ecuacion_dif, y(x))
```
# Matrices
```
#creamos una matriz llena de símbolos
a, b, c, d = symbols('a b c d')
A = Matrix([
[a, b],
[c, d]
])
A
#sacamos autovalores
A.eigenvals()
#inversa
A.inv()
#elevamos al cuadrado la matriz
A ** 2
```
---
_ Esto ha sido un rápido recorrido por algunas de las posibilidades que ofrece SymPy . El cálculo simbólico es un terreno díficil y este joven paquete avanza a pasos agigantados gracias a un grupo de desarrolladores siempre dispuestos a mejorar y escuchar sugerencias. Sus posibilidades no acaban aquí. En la siguiente clase presentaremos el paquete `mechanics`, pero además cuenta con herramientas para geometría, mecánica cuántica, teoría de números, combinatoria... Puedes echar un ojo [aquí](http://docs.sympy.org/latest/modules/index.html). _
Si te ha gustado esta clase:
<a href="https://twitter.com/share" class="twitter-share-button" data-url="https://github.com/AeroPython/Curso_AeroPython" data-text="Aprendiendo Python con" data-via="pybonacci" data-size="large" data-hashtags="AeroPython">Tweet</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
---
#### <h4 align="right">¡Síguenos en Twitter!
###### <a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <a href="https://twitter.com/Alex__S12" class="twitter-follow-button" data-show-count="false" align="right";>Follow @Alex__S12</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <a href="https://twitter.com/newlawrence" class="twitter-follow-button" data-show-count="false" align="right";>Follow @newlawrence</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Licencia Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">Curso AeroPython</span> por <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez y Alejandro Sáez Mollejo</span> se distribuye bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Licencia Creative Commons Atribución 4.0 Internacional</a>.
##### <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/juanluiscanor" data-format="inline" data-related="false"></script> <script src="//platform.linkedin.com/in.js" type="text/javascript"></script> <script type="IN/MemberProfile" data-id="http://es.linkedin.com/in/alejandrosaezm" data-format="inline" data-related="false"></script>
---
_Las siguientes celdas contienen configuración del Notebook_
_Para visualizar y utlizar los enlaces a Twitter el notebook debe ejecutarse como [seguro](http://ipython.org/ipython-doc/dev/notebook/security.html)_
File > Trusted Notebook
```
%%html
<a href="https://twitter.com/Pybonacci" class="twitter-follow-button" data-show-count="false">Follow @Pybonacci</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
# Esta celda da el estilo al notebook
from IPython.core.display import HTML
css_file = '../styles/aeropython.css'
HTML(open(css_file, "r").read())
```
| github_jupyter |
```
import os
import rmgpy.species
import rmgpy.chemkin
import autotst.species
import autotst.reaction
import autotst.calculator.gaussian
import pandas as pd
import logging
from hotbit import Hotbit
import ase.calculators.lj
import ase.io
import glob
import job_manager
def get_label(rmg_rxn):
    """Build a 'reactants_products' SMILES label for an RMG reaction.

    Reactant SMILES are joined with '+', then an underscore, then the
    product SMILES joined with '+', e.g. 'CC+[OH]_C=C'.
    """
    reactant_part = '+'.join(species.smiles for species in rmg_rxn.reactants)
    product_part = '+'.join(species.smiles for species in rmg_rxn.products)
    return f'{reactant_part}_{product_part}'
def reaction2SMILES(reaction):
    """Build a 'reactants_products' SMILES label from an RMG reaction.

    Each side may contain rmgpy Species (the first resonance structure is
    used) or bare Molecule objects. SMILES on one side are joined with '+',
    and the two sides are joined with '_'. Objects of any other type are
    silently skipped, matching the original behavior.
    """
    def _to_smiles(obj):
        # Species wraps a list of resonance structures; Molecule is bare.
        if isinstance(obj, rmgpy.species.Species):
            return obj.molecule[0].to_smiles()
        elif isinstance(obj, rmgpy.molecule.Molecule):
            return obj.to_smiles()
        return None

    reactant_smiles = [_to_smiles(r) for r in reaction.reactants]
    product_smiles = [_to_smiles(p) for p in reaction.products]
    reactant_part = '+'.join(s for s in reactant_smiles if s is not None)
    product_part = '+'.join(s for s in product_smiles if s is not None)
    return f'{reactant_part}_{product_part}'
try:
DFT_DIR = os.environ['DFT_DIR']
except KeyError:
DFT_DIR = '/work/westgroup/harris.se/autoscience/autoscience_workflow/results/dft'
reaction_index = 1
# reaction_index = 168
# reaction_index = int(sys.argv[1])
# print(f'Reaction index is {reaction_index}')
# Load the species from the official species list
# scripts_dir = os.path.dirname(__file__)
scripts_dir = '/work/westgroup/harris.se/autoscience/autoscience_workflow/workflow/scripts'
reaction_csv = os.path.join(scripts_dir, '..', '..', 'resources', 'reaction_list.csv')
reaction_df = pd.read_csv(reaction_csv)
reaction_smiles = reaction_df.SMILES[reaction_index]
print(reaction_smiles)
reaction = autotst.reaction.Reaction(label=reaction_smiles)
print(reaction)
reaction.ts['forward'][0].get_molecules()
print(reaction.ts['forward'][0].get_xyz_block())
# reaction.ts_databases['H_Abstraction'].estimate_distances(reaction.rmg_reaction).distances
reaction.get_labeled_reaction()
print(reaction.ts['forward'][0].get_xyz_block())
print(reaction.ts['reverse'][0].get_xyz_block())
print(reaction.rmg_reaction)
print(reaction.rmg_reaction.family)
reaction.get_rmg_complexes()
# TODO handle multiple forward TS?
# TODO also calculate reverse TS?
# reaction.generate_conformers(ase_calculator=Hotbit())
reaction.generate_conformers(ase_calculator=ase.calculators.lj.LennardJones())
# These distances are way off. Why?
print(reaction.ts['forward'][0].get_xyz_block())
print(reaction.ts['reverse'][0].get_xyz_block())
reaction_base_dir = os.path.join(DFT_DIR, 'kinetics', f'reaction_{reaction_index:04}')
os.makedirs(reaction_base_dir, exist_ok=True)
shell_dir = os.path.join(reaction_base_dir, 'shell')
os.makedirs(shell_dir, exist_ok=True)
# Do the shell calculation
# write Gaussian input files
for i, ts in enumerate(reaction.ts['forward']):
gaussian = autotst.calculator.gaussian.Gaussian(conformer=ts)
calc = gaussian.get_shell_calc()
calc.label = f'fwd_ts_{i:04}'
calc.directory = shell_dir
calc.parameters.pop('scratch')
calc.parameters.pop('multiplicity')
calc.parameters['mult'] = ts.rmg_molecule.multiplicity
calc.write_input(ts.ase_molecule)
print("done")
# make the shell slurm script
slurm_run_file = os.path.join(shell_dir, 'run.sh')
slurm_settings = {
'--job-name': f'g16_shell_{reaction_index}',
'--error': 'error.log',
'--output': 'output.log',
'--nodes': 1,
'--partition': 'west,short',
'--exclude': 'c5003',
'--mem': '20Gb',
'--time': '24:00:00',
'--cpus-per-task': 16,
}
slurm_file_writer = job_manager.SlurmJobFile(
full_path=slurm_run_file,
)
slurm_file_writer.settings = slurm_settings
slurm_file_writer.content = [
'export GAUSS_SCRDIR=/scratch/harris.se/guassian_scratch\n',
'mkdir -p $GAUSS_SCRDIR\n',
'module load gaussian/g16\n',
'source /shared/centos7/gaussian/g16/bsd/g16.profile\n\n',
f'g16 fwd_ts_{i:04}.com\n',
]
slurm_file_writer.write_file()
import ase.io.gaussian
# once the shell calculation is done, load the geometry and run the overall TS optimization
shell_dir = '/work/westgroup/harris.se/autoscience/autoscience_workflow/results/dft/kinetics/reaction_0001/shell'
shell_opt = os.path.join(shell_dir, 'fwd_ts_0000.log')
print(reaction.ts['forward'][0].get_xyz_block())
with open(shell_opt, 'r') as f:
atoms = ase.io.gaussian.read_gaussian_out(f)
print(atoms.get_positions())
reaction.ts['forward'][0]._ase_molecule
from ase.visualize import view
import matplotlib
matplotlib.use('agg')
view(reaction.ts['forward'][0]._ase_molecule, viewer='ngl')
view(atoms, viewer='ngl')
# once the shell calculation is done, load the geometry and run the overall TS optimization
shell_dir = '/work/westgroup/harris.se/autoscience/autoscience_workflow/results/dft/kinetics/reaction_0001/shell'
shell_opt = os.path.join(shell_dir, 'shell_debug', 'fwd_ts_0000.log')
with open(shell_opt, 'r') as f:
atoms2 = ase.io.gaussian.read_gaussian_out(f)
print(atoms.get_positions())
view(atoms2, viewer='ngl')
# In theory, if all degrees of freedom haven't been frozen, then we read in the geometry
# but for this simplistic case, proceed to the overall calculation
overall_dir = os.path.join(reaction_base_dir, 'overall')
os.makedirs(overall_dir, exist_ok=True)
# Do the shell calculation
# write Gaussian input files
for i, ts in enumerate(reaction.ts['forward']):
gaussian = autotst.calculator.gaussian.Gaussian(conformer=ts)
calc = gaussian.get_overall_calc()
calc.label = f'fwd_ts_{i:04}'
calc.directory = overall_dir
calc.parameters.pop('scratch')
calc.parameters.pop('multiplicity')
calc.parameters['mult'] = ts.rmg_molecule.multiplicity
calc.write_input(ts.ase_molecule)
print("done")
# make the overall slurm script
slurm_run_file = os.path.join(overall_dir, 'run.sh')
slurm_settings = {
'--job-name': f'ts_{reaction_index}',
'--error': 'error.log',
'--output': 'output.log',
'--nodes': 1,
'--partition': 'short',
'--mem': '20Gb',
'--time': '24:00:00',
'--cpus-per-task': 48,
}
slurm_file_writer = job_manager.SlurmJobFile(
full_path=slurm_run_file,
)
slurm_file_writer.settings = slurm_settings
slurm_file_writer.content = [
'export GAUSS_SCRDIR=/scratch/harris.se/guassian_scratch\n',
'mkdir -p $GAUSS_SCRDIR\n',
'module load gaussian/g16\n',
'source /shared/centos7/gaussian/g16/bsd/g16.profile\n\n',
f'g16 fwd_ts_{i:04}.com\n',
]
slurm_file_writer.write_file()
spec = autotst.species.Species([species_smiles])
print(f"loaded species {species_smiles}")
thermo_base_dir = os.path.join(DFT_DIR, 'thermo')
species_base_dir = os.path.join(thermo_base_dir, f'species_{species_index:04}')
os.makedirs(species_base_dir, exist_ok=True)
# generate conformers
spec.generate_conformers(ase_calculator=Hotbit())
n_conformers = len(spec.conformers[species_smiles])
print(f'{n_conformers} found with Hotbit')
# load the model
chemkin_path = "/home/harris.se/rmg/rmg_tools/uncertainty/nheptane/chem_annotated.inp"
dictionary_path = "/home/harris.se/rmg/rmg_tools/uncertainty/nheptane/species_dictionary.txt"
transport_path = "/home/harris.se/rmg/rmg_tools/uncertainty/nheptane/tran.dat"
species_list, reaction_list = rmgpy.chemkin.load_chemkin_file(
chemkin_path,
dictionary_path=dictionary_path,
transport_path=transport_path
)
print(f"Loaded model with {len(species_list)} species and {len(reaction_list)} reactions")
rxn_idx = 186 # R recombo
rxn_idx = 194
rxn_idx = 274 # first H abstraction
# rxn_idx = 236 another H abstraction
rmg_rxn = reaction_list[rxn_idx]
print(rmg_rxn)
print(rmg_rxn.family)
kinetics_base_dir = '/work/westgroup/harris.se/autoscience/autoscience_workflow/results/dft/kinetics'
reaction_base_dir = os.path.join(kinetics_base_dir, f'reaction_{rxn_idx:04}')
os.makedirs(kinetics_base_dir, exist_ok=True)
# TODO fix reaction index
label = get_label(rmg_rxn)
print("label", label)
reaction = autotst.reaction.Reaction(label=label, rmg_reaction=rmg_rxn)
print("reaction", reaction)
reaction.get_labeled_reaction()
print("Got labeled reaction")
transition_states = reaction.ts["reverse"]
print("ts0", transition_states[0])
print("rxn.ts", reaction.ts)
print("About to start hotbit")
logging.warning("Danger zone 0")
# reaction.generate_conformers_all(ase_calculator=Hotbit())
reaction.generate_conformers(ase_calculator=Hotbit())
print("ran hotbit")
for ts in reaction.ts['forward']:
print(ts)
# overall calc
ts_dir = os.path.join(reaction_base_dir, 'ts')
# write Gaussian input files
for i, ts in enumerate(reaction.ts['forward']):
gaussian = autotst.calculator.gaussian.Gaussian(conformer=ts)
calc = gaussian.get_overall_calc()
calc.label = f'fwd_ts_{i:04}'
calc.directory = ts_dir
calc.parameters.pop('scratch')
calc.parameters.pop('multiplicity')
calc.parameters['mult'] = ts.rmg_molecule.multiplicity
calc.write_input(ts.ase_molecule)
print("done")
n_ts = len(reaction.ts['forward'])
print(f'{n_ts} found with Hotbit')
# Make a file to run Gaussian
slurm_run_file = os.path.join(ts_dir, 'run.sh')
slurm_settings = {
    '--job-name': f'g16_ts_{rxn_idx}',
    '--error': 'error.log',
    '--output': 'output.log',
    '--nodes': 1,
    '--partition': 'west,short',
    '--mem': '20Gb',
    '--time': '24:00:00',
    '--cpus-per-task': 16,
    # BUG FIX: the array must span the TS conformers found for *this*
    # reaction (n_ts, computed just above), not n_conformers, which belongs
    # to the earlier species-thermo cell. Slurm array indices are 0-based,
    # so the last task index is n_ts - 1.
    '--array': f'0-{n_ts - 1}%40',
}
slurm_file_writer = job_manager.SlurmJobFile(
full_path=slurm_run_file,
)
slurm_file_writer.settings = slurm_settings
slurm_file_writer.content = [
'export GAUSS_SCRDIR=/scratch/harris.se/guassian_scratch\n',
'mkdir -p $GAUSS_SCRDIR\n',
'module load gaussian/g16\n',
'source /shared/centos7/gaussian/g16/bsd/g16.profile\n\n',
'RUN_i=$(printf "%04.0f" $(($SLURM_ARRAY_TASK_ID)))\n',
'fname="fwd_ts_${RUN_i}.com"\n\n',
'g16 $fname\n',
]
slurm_file_writer.write_file()
# submit the job
gaussian_conformers_job = job_manager.SlurmJob()
slurm_cmd = f"sbatch {slurm_run_file}"
# BUG FIX: the original passed the undefined name `my_cmd` to submit(),
# which raised a NameError; the command built above is `slurm_cmd`.
gaussian_conformers_job.submit(slurm_cmd)
```
| github_jupyter |
> **How to run this notebook (command-line)?**
1. Install the `ReinventCommunity` environment:
`conda env create -f environment.yml`
2. Activate the environment:
`conda activate ReinventCommunity`
3. Execute `jupyter`:
`jupyter notebook`
4. Copy the link to a browser
# `REINVENT 3.0`: transfer learning mode demo (teachers forcing)
#### The *transfer learning* mode can be used for either
1. Initial training of the Agent - where a newly built agent is trained from scratch while iterating through sufficiently large datasets over many epochs
2. Focusing of pre-trained Agent - where an already pre-trained agent is introduced to a small dataset for a small number of epochs.
In this notebook we are going to illustrate the first scenario. The provided dataset is processed by the workflow illustrated in the `Data Preparation` example. The required input is an empty model and the dataset with which this empty model is created.
The Prior can be used afterwards for *reinforcement learning*, *tranfer learning* or just *sampling*.
## This is a rather slow process that depends on the number of epochs and dataset size
```
# load dependencies
import os
import re
import json
import tempfile
# --------- change these path variables as required
reinvent_dir = os.path.expanduser("~/PycharmProjects/github_repositories/reinvent/")
reinvent_env = os.path.expanduser("~/miniconda3/envs/reinvent_shared.v2.1")
output_dir = os.path.expanduser("~/Desktop/REINVENT_transfer_learning_demo")
# --------- do not change
# get the notebook's root path
try: ipynb_path
except NameError: ipynb_path = os.getcwd()
# if required, generate a folder to store the results
try:
os.mkdir(output_dir)
except FileExistsError:
pass
```
## Setting up the configuration
`REINVENT` has an entry point that loads a specified `JSON` file on startup. `JSON` is a low-level data format that allows to specify a fairly large number of parameters in a cascading fashion very quickly. The parameters are structured into *blocks* which can in turn contain blocks or simple values, such as *True* or *False*, strings and numbers. In this tutorial, we will go through the different blocks step-by-step, explaining their purpose and potential values for given parameters. Note, that while we will write out the configuration as a `JSON` file in the end, in `python` we handle the same information as a simple `dict`.
```
# initialize the dictionary
configuration = {
"version": 3, # we are going to use REINVENT's newest release
"run_type": "transfer_learning" # other run types: "scoring", "validation",
# "transfer_learning",
# "reinforcement_learning" and
# "create_model"
}
# add block to specify whether to run locally or not and
# where to store the results and logging
configuration["logging"] = {
"sender": "http://127.0.0.1", # only relevant if "recipient" is set to "remote"
"recipient": "local", # either to local logging or use a remote REST-interface
"logging_path": os.path.join(output_dir, "progress.log"), # where the run's output is stored
"job_name": "Transfer Learning demo", # set an arbitrary job name for identification
"job_id": "demo" # only relevant if "recipient" is set to "remote"
}
```
We will need to specify a path to an agent (parameter `model_path`), which can be a prior or trained agent. For the purpose of this notebook, we will use a prior shipped with the `REINVENT 3.0` repository.
The code block below will define the settings for `adaptive_lr_config` property of the configuration. These parameters are defining the behavior of the learning rate. Note that the mode is set to `"adaptive"`. We recommend adhering to the default values.
```
adaptive_lr_config = {
"mode": "adaptive", # other modes: "exponential", "adaptive", "constant"
"gamma": 0.8,
"step": 1,
"start": 5E-4, # initial learning rate
"min": 1E-5,
"threshold": 1E-4,
"average_steps": 4,
"patience": 8, # patience is the lower bound of how frequently the learning rate should change
"restart_value": 1E-5,
"sample_size": 10000, # this is relevant for stats and decision on how to update the learning rate
"restart_times": 0
}
input_SMILES_path = os.path.join(ipynb_path, "data/chembl.filtered.smi")
input_model_path = os.path.join(ipynb_path, "models/empty_model.ckpt")
output_model_path = os.path.join(output_dir, "chembl.prior")
# The final focused agent will be named "chembl.prior"
# The intermediate steps will be named "chembl.prior.1", "chembl.prior.2", "chembl.prior.3" and etc.
# add the "parameters" block
configuration["parameters"] = {
"input_model_path": input_model_path, # path to prior or empty model
"output_model_path": output_model_path, # location to store the chembl prior
"input_smiles_path": input_SMILES_path, # path to input smiles
"save_every_n_epochs": 1, # how often to save the Prior. Here its stored after each epoch
"batch_size": 128, # batch size the input data
"num_epochs": 200, # number of epochs to train Prior. NOTE: this may take days to train!
# in reality even only 20 epochs could result in a sufficiently good Prior
"standardize": False, # we assume all SMILES strings have been pre-processed
"randomize": True, # this triggers data augmentation and will slow down the training a bit.
"adaptive_lr_config": adaptive_lr_config # setting the learning rate behavior
}
# write the configuration file to the disc
configuration_JSON_path = os.path.join(output_dir, "transfer_learning_config.json")
with open(configuration_JSON_path, 'w') as f:
json.dump(configuration, f, indent=4, sort_keys=True)
```
## Run `REINVENT`
Now it is time to execute `REINVENT` locally.
This training might take days with the suggested dataset and the number of epochs.
It is best to run just a couple of epochs to obtain a realistic estimate.
The command-line execution looks like this:
```
# activate envionment
conda activate reinvent.v3.0
# execute REINVENT
python <your_path>/input.py <config>.json
```
```
%%capture captured_err_stream --no-stderr
# execute REINVENT from the command-line
!python {reinvent_dir}/input.py {configuration_JSON_path}
# print the output to a file, just to have it for documentation
with open(os.path.join(output_dir, "run.err"), 'w') as file:
file.write(captured_err_stream.stdout)
```
## Analyse the results
In order to analyze the run in a more intuitive way, we can use `tensorboard`:
```
# go to the root folder of the output
cd <your_path>/REINVENT_transfer_learning_demo
# make sure, you have activated the proper environment
conda activate reinvent.v3.0
# start tensorboard
tensorboard --logdir progress.log
```
| github_jupyter |
## Mini-batch Gradient Descent
* if you have a huge dataset, it may slow down your training step; just use mini-batch gradient descent
Three types of gradient descent:
#### Batch Gradient Descent
* Vectorization allows Batch GD to process all M examples relatively quickly. However, if M is very large, BGD will still be very slow
* You need to process the entire training set before gradient descent can take a single step
* BGD is a special case of mini-batch where the <div class="text-danger">size of the mini-batch = the entire training set</div>
#### Mini-batch Gradient Descent
* The intuition here is to let gradient descent start to make progress even before you finish processing your entire training set.
* Mini-batch t: $X^{\{t\}}, Y^{\{t\}}$
#### Stochastic Gradient Descent
* SGD is a special case of mini-batch where the <span class="text-danger">size of each mini-batch = one training example</span>
* on average, SGD will go to a good direction, but sometimes it'll head in the wrong direction as well. As stochastic gradient descent won't ever converge, it'll always just kind of oscillate and wander around the region of the minimum.
epoch is a single pass through the entire training set
* a big downside is that you lose the vectorization
<span style="float:right">𝔐𝔬𝔥𝔞𝔪𝔢𝔡 𝔈𝔩𝔡𝔢𝔰𝔬𝔲𝔨𝔦</span>
### Mini-batch Gradient Descent Implementation
```python
# Pseudocode: outer loop over epochs, inner loop over the T mini-batches.
for e in range(epochs):
    for t in range(T):
        normal_gradient_descent(X[t], Y[t])

def normal_gradient_descent(X, Y):
    # vectorized implementation over one mini-batch
    Y_hat = forward_propagation(X)
    cost = compute_cost(Y_hat)
    dW, db = compute_gradient_using_backpropagation(cost)  # fixed typo in name
    W = W - alpha * dW
    b = b - alpha * db

def compute_cost(Y_hat):
    # mean loss over the mini-batch, plus an optional regularization term
    return (1 / number_of_examples_in_the_batch) * sum(loss(each_training_example))  # + regularization term
```
### Guidelines for choosing Mini-batch size
* If you have a small training set just use the batch gradient descent (< 2000)
* because of the way computer memory is layed out and accessed, sometimes your code runs faster if your mini-batch size is a power of 2.
* make sure that the mini-batch fits in your CPU/GPU memory. Which depends in your application and how large is a single example in your training set.
* Try a few different powers of two and then see if you can pick one that makes your gradient descent optimization algorithm as efficient as possible.
## Exponentially Weighted (Moving) Averages - EWMA
* $V_t = \beta V_{t-1} + (1-\beta) \theta_t $
* You can think of $V_t$ as approximately averaging over the last $ \frac{1}{1-\beta}$ days
* As $\beta$ gets larger the curve gets smoother, because we are averaging over more days. However, $V_t$ adapts more slowly when the underlying values change.
* Use bias correction to help up your EWMA in cold starts
* bias correction = $ \frac{1}{1-\beta^t} $
```
import random
import matplotlib.pyplot as plt
import numpy as np

# Demo: exponentially weighted moving average (EWMA) with bias correction
# applied to a noisy, linearly increasing sequence.
m = 10
n = 200
beta = 0.90

# Noisy observations: each point is within +/-5 of the trend line.
theta = list()
the_list = list(range(m, n))
for i in the_list:
    theta.append(random.randint(i - 5, i + 5))

V = 0
ewma = list()
for t in range(len(the_list)):
    # Bias correction (1 - beta^(t+1)) compensates the zero initialisation
    # of V during the first iterations ("cold start").
    bias_correction = 1 - beta ** (t + 1)
    V = beta * V + (1 - beta) * theta[t]
    ewma.append(V / bias_correction)

plt.plot(list(the_list), theta, 'ro')
plt.plot(list(the_list), ewma)
plt.show()
```
### Gradient Descent with Momentum
* The momentum algorithm is almost always faster than the standard gradient descent algorithm.
* The momentum algorithm is just apply the EWMA to the gradient descent algorithm so it relies on the previous gradient steps.
* The most used value of $\beta$ is $\beta_1 = 0.9$ which means it depends on the last 10 gradient steps.
* In practice, bias correction is omitted, since after a few iterations the algorithm is warmed up.
```python
# Pseudocode: gradient descent with momentum (an EWMA of the gradients).
# Initialise the moving averages BEFORE training starts.
VdW = np.zeros_like(dW)
Vdb = np.zeros_like(db)
for e in range(epochs):
    for t in range(T):
        normal_gradient_descent(X[t], Y[t])

def normal_gradient_descent(X, Y):
    # vectorized implementation
    Y_hat = forward_propagation(X)
    cost = compute_cost(Y_hat)
    dW, db = compute_gradient_using_backpropagation(cost)
    # EWMA of the gradients; bias correction is usually omitted here.
    VdW = beta * VdW + (1 - beta) * dW
    Vdb = beta * Vdb + (1 - beta) * db
    W = W - alpha * VdW
    b = b - alpha * Vdb

def compute_cost(Y_hat):
    return (1 / number_of_examples_in_the_batch) * sum(loss(each_training_example))  # + regularization term
```
### Root Mean Squared Prop (RMSProp)
- use $\epsilon$ so you don't divide by zero
- first proposed in a coursera course by Hinton
```python
# Pseudocode: RMSProp keeps an EWMA of the *squared* gradients.
SdW = np.zeros_like(dW)
Sdb = np.zeros_like(db)
for e in range(epochs):
    for t in range(T):
        normal_gradient_descent(X[t], Y[t])

def normal_gradient_descent(X, Y):
    # vectorized implementation
    Y_hat = forward_propagation(X)
    cost = compute_cost(Y_hat)
    dW, db = compute_gradient_using_backpropagation(cost)
    # Elementwise square: '.*' is MATLAB syntax, in Python/NumPy it is '*'.
    SdW = beta_2 * SdW + (1 - beta_2) * dW * dW
    Sdb = beta_2 * Sdb + (1 - beta_2) * db * db
    # epsilon avoids division by zero
    W = W - alpha * dW / np.sqrt(SdW + epsilon)
    b = b - alpha * db / np.sqrt(Sdb + epsilon)

def compute_cost(Y_hat):
    return (1 / number_of_examples_in_the_batch) * sum(loss(each_training_example))  # + regularization term
```
### Adam Optimization Algorithm (AKA ADAptive Moment Estimation)
- Adam algorithm is one of those rare algorithms that has really stood up, and has been shown to work well across a wide range of deep learning architectures.
- In the typical implementation of Adam algorithm you take into consideration the bias correction
- The most used value of $\beta_2$ is $\beta_2 = 0.999$ which means it depends on the last 1000 gradient steps.
- The most used value of $\epsilon$ is $\epsilon = 10^{-8} $
```python
# Pseudocode: Adam = momentum (1st moment) + RMSProp (2nd moment),
# both bias-corrected.
VdW = np.zeros_like(dW)
Vdb = np.zeros_like(db)
SdW = np.zeros_like(dW)
Sdb = np.zeros_like(db)
iteration = 0
for e in range(epochs):
    for t in range(T):
        iteration += 1  # was '=+ 1', which reset the counter to 1 every step
        normal_gradient_descent(X[t], Y[t], iteration)

def normal_gradient_descent(X, Y, t):
    # vectorized implementation
    Y_hat = forward_propagation(X)
    cost = compute_cost(Y_hat)
    dW, db = compute_gradient_using_backpropagation(cost)
    VdW = beta_1 * VdW + (1 - beta_1) * dW  # was lowercase 'dw' (undefined)
    Vdb = beta_1 * Vdb + (1 - beta_1) * db
    SdW = beta_2 * SdW + (1 - beta_2) * dW * dW  # '*' not MATLAB '.*'
    Sdb = beta_2 * Sdb + (1 - beta_2) * db * db
    # Bias correction compensates the zero initialisation of both moments.
    VdW_correction = VdW / (1 - beta_1 ** t)
    Vdb_correction = Vdb / (1 - beta_1 ** t)
    SdW_correction = SdW / (1 - beta_2 ** t)
    Sdb_correction = Sdb / (1 - beta_2 ** t)
    W = W - alpha * VdW_correction / (np.sqrt(SdW_correction) + epsilon)
    b = b - alpha * Vdb_correction / (np.sqrt(Sdb_correction) + epsilon)

def compute_cost(Y_hat):
    return (1 / number_of_examples_in_the_batch) * sum(loss(each_training_example))  # + regularization term
```
### Learning Rate Decay
- One of the things that might help speed up your learning algorithm, is to slowly reduce your learning rate over time. We call this learning rate decay.
```python
# Pseudocode: Adam with epoch-based learning-rate decay.
VdW = np.zeros_like(dW)
Vdb = np.zeros_like(db)
SdW = np.zeros_like(dW)
Sdb = np.zeros_like(db)
iteration = 0
__alpha__ = 0.2  # initial learning rate
decay_rate = 1
for e in range(epochs):
    for t in range(T):
        iteration += 1  # was '=+ 1', which reset the counter to 1 every step
        normal_gradient_descent(X[t], Y[t], iteration, e)

def normal_gradient_descent(X, Y, t, e):
    # vectorized implementation
    Y_hat = forward_propagation(X)
    cost = compute_cost(Y_hat)
    dW, db = compute_gradient_using_backpropagation(cost)
    VdW = beta_1 * VdW + (1 - beta_1) * dW  # was lowercase 'dw' (undefined)
    Vdb = beta_1 * Vdb + (1 - beta_1) * db
    SdW = beta_2 * SdW + (1 - beta_2) * dW * dW  # '*' not MATLAB '.*'
    Sdb = beta_2 * Sdb + (1 - beta_2) * db * db
    VdW_correction = VdW / (1 - beta_1 ** t)
    Vdb_correction = Vdb / (1 - beta_1 ** t)
    SdW_correction = SdW / (1 - beta_2 ** t)
    Sdb_correction = Sdb / (1 - beta_2 ** t)
    # Shrink the learning rate as epochs advance.
    alpha = __alpha__ * 1 / (1 + decay_rate * e)
    W = W - alpha * VdW_correction / (np.sqrt(SdW_correction) + epsilon)
    b = b - alpha * Vdb_correction / (np.sqrt(Sdb_correction) + epsilon)

def compute_cost(Y_hat):
    return (1 / number_of_examples_in_the_batch) * sum(loss(each_training_example))  # + regularization term
```
| github_jupyter |
```
%pip install mmh3 numpy
```
## The Bloom embeddings algorithm
In a normal embedding table, each word-string is mapped to a distinct ID.
Usually these IDs will be sequential, so if you have a vocabulary of 100 words,
your words will be mapped to numbers `range(100)`. The sequential IDs can then
be used as indices into an embedding table: if you have 100 words in your
vocabulary, you have 100 rows in the table, and each word receives its own
vector.
However, there's no limit to the number of unique words that might occur in a
sample of text, while we definitely want a limited number of rows in our
embedding table. Some of the rows in our table will therefore need to be shared
between multiple words in our vocabulary. One obvious solution is to set aside a
single vector in the table. Words 0-98 will each receive their own vector, while
all other words are assigned to vector 99.
However, this asks vector 99 to do a lot of work. What if we gave more vectors
to the unknown words?
```
def get_row(word_id, number_vector=100, number_oov=10):
    """Map a word ID to a row of a `number_vector`-row embedding table.

    The first `number_vector - number_oov` IDs each get their own row;
    every other ID is folded into one of the last `number_oov` rows.

    The original returned `number_vector + (word_id % number_oov)` for
    out-of-vocabulary IDs, which indexes past the end of the table.
    """
    n_known = number_vector - number_oov
    if word_id < n_known:
        return word_id
    else:
        # Share the final `number_oov` rows among all remaining IDs.
        return n_known + (word_id % number_oov)
```
This gives the model a little more resolution for the unknown words. If all
out-of-vocabulary words are assigned the same vector, then they'll all look
identical to the model. Even if the training data actually includes information
that shows two different out-of-vocabulary words have important, different
implications -- for instance, if one word is a strong indicator of positive
sentiment, while the other is a strong indicator of negative sentiment -- the
model won't be able to tell them apart. However, if we have 10 buckets for the
unknown words, we might get lucky, and assign these words to different buckets.
If so, the model would be able to learn that one of the unknown-word vectors
makes positive sentiment more likely, while the other vector makes negative
sentiment more likely.
If this is good, then why not do more of it? Bloom embeddings are like an
extreme version, where _every_ word is handled like the unknown words above:
there are 100 vectors for the "unknown" portion, and 0 for the "known" portion.
So far, this approach seems weird, but not necessarily good. The part that makes
it unfairly effective is the next step: by simply doing the same thing multiple
times, we can greatly improve the resolution, and have unique representations
for far more words than we have vectors. The code in full:
```
import numpy
import mmh3
def allocate(n_vectors, n_dimensions):
    """Create an (n_vectors, n_dimensions) float32 embedding table,
    initialised uniformly in [-0.1, 0.1)."""
    table = numpy.zeros((n_vectors, n_dimensions), dtype='f')
    noise = numpy.random.uniform(-0.1, 0.1, table.size)
    table += noise.reshape(table.shape)
    return table
def get_vector(table, word):
    """Return the Bloom embedding of *word*: the sum of the two table rows
    selected by two independent hash functions (seeds 0 and 1)."""
    n_rows = table.shape[0]
    rows = [mmh3.hash(word, seed=s) % n_rows for s in (0, 1)]
    return table[rows[0]] + table[rows[1]]
def update_vector(table, word, d_vector):
    """Gradient step: subtract the scaled gradient from both rows that
    make up *word*'s Bloom embedding (same seeds as `get_vector`)."""
    n_rows = table.shape[0]
    for seed in (0, 1):
        table[mmh3.hash(word, seed=seed) % n_rows] -= 0.001 * d_vector
```
In this example, we've used two keys, assigned from two random hash functions.
It's unlikely that two words will collide on both keys, so by simply summing the
vectors together, we'll assign most words a unique representation.
For the sake of illustration, let's step through a very small example,
explicitly.
Let's say we have this vocabulary of 20 words:
```
vocab = ['apple', 'strawberry', 'orange', 'juice', 'drink', 'smoothie',
'eat', 'fruit', 'health', 'wellness', 'steak', 'fries', 'ketchup',
'burger', 'chips', 'lobster', 'caviar', 'service', 'waiter', 'chef']
```
We'll embed these into two dimensions. Normally this would give us a table of
`(20, 2)` floats, which we would randomly initialise. With the hashing trick, we
can make the table smaller. Let's give it 15 vectors:
```
normal_embed = numpy.random.uniform(-0.1, 0.1, (20, 2))
hashed_embed = numpy.random.uniform(-0.1, 0.1, (15, 2))
```
In the normal table, we want to map each word in our vocabulary to its own
vector:
```
# Sequential word -> row-index mapping, grown lazily on first sight.
word2id = {}

def get_normal_vector(word, table):
    """Return *word*'s own row of *table*, assigning the next free
    sequential ID the first time the word is seen."""
    if word not in word2id:  # dict membership test; `.keys()` was redundant
        word2id[word] = len(word2id)
    return table[word2id[word]]
```
The hashed table only has 15 rows, so some words will have to share. We'll
handle this by mapping the word into an arbitrary integer – called a "hash
value". The hash function will return an arbitrary integer, which we'll mod into
the range `(0, 15)`. Importantly, we need to be able to compute _multiple,
distinct_ hash values for each key – so Python's built-in hash function is
inconvenient. We'll therefore use MurmurHash.
Let's see what keys we get for our 20 vocabulary items, using MurmurHash:
```
hashes1 = [mmh3.hash(w, 1) % 15 for w in vocab]
assert hashes1 == [3, 6, 4, 13, 8, 3, 13, 1, 9, 12, 11, 4, 2, 13, 5, 10, 0, 2, 10, 13]
```
As you can see, some keys are shared between multiple words, while 2/15 keys are
unoccupied. This is far from ideal! If multiple words have the same key,
they'll map to the same vector – as far as the model is concerned, "apple"
and "smoothie" (which both hash to key 3 above) will be indistinguishable. It
won't be clear which word was used – they have the same representation.
To address this, we simply hash the words again, this time using a different
seed – so that we get a different set of arbitrary keys:
```
from collections import Counter
hashes2 = [mmh3.hash(w, 2) % 15 for w in vocab]
assert len(Counter(hashes2).most_common()) == 12
```
This one's even worse – 3 keys unoccupied! But our strategy is not to keep drawing until we get a favorable seed. Instead, consider this:
```
assert len(Counter(zip(hashes1, hashes2))) == 20
```
By combining the results from the two hashes, our 20 words distribute perfectly,
into 20 unique combinations. This makes sense: we expect to have some words
overlapping on one of the keys, but we'd have to be very unlucky for a pair of
words to overlap on _both_ keys.
This means that if we simply add the two vectors together, each word once more
has a unique representation:
```
for word in vocab:
key1 = mmh3.hash(word, 0) % 15
key2 = mmh3.hash(word, 1) % 15
vector = hashed_embed[key1] + hashed_embed[key2]
print(word, '%.3f %.3f' % tuple(vector))
```
We now have a function that maps our 20 words to 20 unique vectors – but we're
storing weights for only 15 vectors in memory. Now the question is: will we be
able to find values for these weights that let us actually map words to useful
vectors?
Let's do a quick experiment to see how this works. We'll assign "true" values
for our little vocabulary, and see how well we can approximate them with our
compressed table. To get the "true" values, we _could_ put the "science" in data
science, and drag the words around into reasonable-looking clusters. But for our
purposes, the actual "true" values don't matter. We'll therefore just do a
simulation: we'll assign random vectors as the "true" state, and see if we can
learn values for the hash embeddings that match them.
The learning procedure will be a simple stochastic gradient descent:
```
import numpy
import numpy.random as random
import mmh3

# Simulation: can a table with fewer rows (nr_hash_vector) than words learn
# to reproduce a unique "true" vector for every word in the vocabulary?
random.seed(0)
nb_epoch = 500
learn_rate = 0.001
nr_hash_vector = 1000  # rows in the compressed table; vocabulary has 2000 words
words = [str(i) for i in range(2000)]
# The "true" targets are random -- their actual values don't matter here.
true_vectors = numpy.random.uniform(-0.1, 0.1, (len(words), 10))
hash_vectors = numpy.random.uniform(-0.1, 0.1, (nr_hash_vector, 10))
examples = list(zip(words, true_vectors))
for epoch in range(nb_epoch):
    random.shuffle(examples)
    loss=0.
    for word, truth in examples:
        # Two independent hash keys select the rows summed into the word's vector.
        key1 = mmh3.hash(word, 0) % nr_hash_vector
        key2 = mmh3.hash(word, 1) % nr_hash_vector
        hash_vector = hash_vectors[key1] + hash_vectors[key2]
        # Squared-error gradient step applied to both contributing rows.
        diff = hash_vector - truth
        hash_vectors[key1] -= learn_rate * diff
        hash_vectors[key2] -= learn_rate * diff
        loss += (diff**2).sum()
    print(epoch, loss)
```
It's worth taking some time to play with this simulation. You can start by doing
some sanity checks:
- How does the loss change with `nr_hash_vector`?
- If you remove `key2`, does the loss go up?
- What happens if you add more hash keys?
- What happens as the vocabulary size increases?
- What happens when more dimensions are added?
- How sensitive are the hash embeddings to the initial conditions? If we change the random seed, do we ever get unlucky?
If you play with the simulation for a while, you'll start to get a good feel for
the dynamics, and hopefully you'll have a clear idea of why the technique works.
## Bonus Section
To make it easier for folks to try out a whole bunch of settings we'd added a little bit of code below that makes it easier to get relevant visuals.
```
%pip install altair pandas
from functools import reduce
def calc_losses(epochs=500, seed=0, learn_rate=0.001, nr_hash_vector=1000, n_hash=3, n_words=1000, size_vector=10):
    """Run the hashed-embedding SGD simulation and return per-epoch losses.

    Parameters mirror the simulation above: `n_hash` hash keys per word,
    `nr_hash_vector` rows in the compressed table, `n_words` vocabulary
    entries of dimension `size_vector`.

    The original re-bound several parameters to identically-valued locals
    (`nb_epoch = epochs`, ...), which was dead code; it also used
    `reduce(lambda a, b: a + b, ...)` where `sum` suffices.
    """
    random.seed(seed)
    words = [str(i) for i in range(n_words)]
    true_vectors = numpy.random.uniform(-0.1, 0.1, (len(words), size_vector))
    hash_vectors = numpy.random.uniform(-0.1, 0.1, (nr_hash_vector, size_vector))
    examples = list(zip(words, true_vectors))

    losses = []
    for epoch in range(epochs):
        random.shuffle(examples)
        loss = 0.
        for word, truth in examples:
            keys = [mmh3.hash(word, k) % nr_hash_vector for k in range(n_hash)]
            # Sum of the selected rows is the word's current embedding.
            hash_vector = sum(hash_vectors[k] for k in keys)
            diff = hash_vector - truth
            for key in keys:
                hash_vectors[key] -= learn_rate * diff
            loss += (diff ** 2).sum()
        losses.append(loss)
    return losses
data = []
for n_hash in [1, 2, 3, 4, 5]:
    # Record the same nr_hash_vector that is passed to calc_losses; the
    # original read a stale module-level variable (1000) while the run
    # actually used 2000, mislabelling the chart data.
    nr_hash_vector = 2_000
    losses = calc_losses(nr_hash_vector=nr_hash_vector, n_words=10_000, n_hash=n_hash, epochs=150)
    data = data + [{"loss": l, "nr_hash_vector": nr_hash_vector, "n_hash": str(n_hash), "epoch": e} for e, l in enumerate(losses)]
import pandas as pd
import altair as alt
source = pd.DataFrame(data)
(alt.Chart(source)
.mark_line()
.encode(x='epoch', y='loss', color='n_hash')
.properties(width=600, height=250)
.interactive())
```
| github_jupyter |
<a href="https://colab.research.google.com/github/MinCiencia/Datos-COVID19/blob/master/DataObservatory_ex3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<p><img alt="Data Observatory logo" height="150px" src="http://dataobservatory.io/wp-content/themes/data-observatory-01/img/do-logo-web-01.png" align="left" hspace="10px" vspace="0px"></p>
<h1 align="center">Bienvenido al jupyter notebook: ejemplos para ayudar a usar los datos publicados</h1>
<h4 align="center">Una contribución del Data Observatory</h4>
## **Para empezar**
Este documento te permite interactuar con los datos que se encuentran en el [repositorio](https://github.com/MinCiencia/Datos-COVID19) del [Ministerio de Ciencia, Tecnología, Conocimiento e Innovación](http://www.minciencia.gob.cl/COVID19). Estos datos son recopilados de las cifras oficiales publicadas por el [Ministerio de Salud](https://www.gob.cl/coronavirus/cifrasoficiales/#informes) sobre la pandemia del COVID-19 en Chile.
Este notebook es un ejemplo y puedes usarlo como base para generar tus propios gráficos y/o productos. Los datos publicados están en https://colab.research.google.com/github/MinCiencia/output/blob/master
# <h2>Para trabajar con los productos</h2>
Este notebook está escrito utilizando el lenguaje de programación [Python](https://www.python.org/about/) versión [3.x](https://www.python.org/download/releases/3.0/), cuya lectura se facilita a programadores no expertos. Cada bloque de ejecución está separado en distintas celdas, es necesario "ejecutar" cada una en secuencia haciendo click en botón "play" que aparece al posicionar el mouse sobre el recuadro [ ] al inicio de cada celda. Una vez que la celda se ejecuta, aparece un número que indica el órden en que se ha ejecutado.
Recomendamos ver los notebooks anteriores para utilizar algunas funciones útiles de python y las bibliotecas que hemos ido utilizando
Una manera comun de manipular los datos, es usando [pandas](https://pandas.pydata.org/). Para cargar uno de los archivos en un dataframe. También es necesario utilizar [numpy](https://numpy.org/) para los distintos cálculos
```
import numpy as np
import pandas as pd
```
**Nota:** Los datos están almacenados como tablas en formato csv. Algunas tablas están almacenadas por día. Es importante mantener el formato 'año-mes-día' (las comillas simples o dobles indican que es texto y no se ejecutará como una operación matemática)
# <h4>Graficar datos por región de varios días</h4>
En los notebook DataObservatory-ex1 se leyeron los datos totales del país en los que está el desgloce por región para una fecha dada. Ahora queremos ver el acumulado por cada región por un rango de fechas.
Primero establecemos la fecha de interés en la variable 'date' y la primera fecha ('first_date') es cuando se reportó el primer contagio en el país.
```
# el símbolo # al inicio de la línea nos indica que esto es un comentario, no se ejecuta con el código
# date indica la fecha de los datos que queremos utilizar
date = '2020-05-05'
first_date = '2020-03-03'
```
Contamos el rango de días
```
# contamos los días considerados con la siguiente instrucción
total_days = (pd.to_datetime(date)-pd.to_datetime(first_date)).days
total_days
```
Ahora leemos el archivo (tabla) dado en el producto 4 correpondiente a la fecha 'date'
```
dataTotalRegion = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/" + date + "-CasosConfirmados-totalRegional.csv",index_col=0)
```
Revisamos cuales son las columnas en este archivo
```
columnas = list(dataTotalRegion.columns.values)
columnas
```
El nombre de las columnas ha variado durante el curso de las publicaciones del Ministerio de Salud debido a la complejidad de la enfermedad. Esto puede agregar complejidad para utilizar los datos. Aquí tratamos de cubrir una variedad de bases para que se pueda trabajar con los datos sin mayores inconvenientes.
Por ejemplo, revisamos las columnas del archivo csv del primer día:
```
dataTotalRegion = pd.read_csv("https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/2020-03-03-CasosConfirmados-totalRegional.csv",index_col=0)
columnas = list(dataTotalRegion.columns.values)
columnas
```
Se nota que las tablas ahora entregan mucha más información que las primeras.
Para converger ambos estilos, optamos por utilizar la secuencia de instrucciones:
1. Declaramos nuevos dataFrames donde se asignaran las series de tiempo
```
# dataNew corresponde a los nuevos casos por día
# dataCum a los datos acumulados al día
dataNew = pd.DataFrame()
dataCum = pd.DataFrame()
```
2. Utilizamos reglas para clasificar los datos que siguen la misma serie de tiempo. Por ejemplo, los 'Casos nuevos' a partir del 29 de abril se llaman 'Casos nuevos totales'. Y los 'Casos totales' se llaman 'Casos totales acumulados' a partir de esa misma fecha.
También consideramos ciertos "typos" en los nombres, algunos tienen espacios intercalados. Por ello, la asignación en los nuevos dataFrames la planteamos de la siguiente manera:
```
for i in np.arange(total_days+1):
    date = (pd.to_datetime(first_date)+pd.DateOffset(i)).strftime('%Y-%m-%d')
    s = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto4/" + date + "-CasosConfirmados-totalRegional.csv"
    dataTotalRegion_by_date = pd.read_csv(s,index_col=0)
    # The column names changed over time and sometimes carry a leading
    # space, so try each known variant in order. The original chained
    # if/elif branches whose conditions differed only in quote style
    # (dead duplicates), and assigned an unused `columnas` from the wrong
    # dataframe; both removed.
    # Later names ("... totales"/"... acumulados") overwrite earlier ones,
    # matching the original's separate if-groups.
    for target, variants in (
            (dataNew, ("Casos nuevos", " Casos nuevos")),
            (dataNew, ("Casos nuevos totales", " Casos nuevos totales")),
            (dataCum, ("Casos totales", " Casos totales")),
            (dataCum, ("Casos totales acumulados",)),
    ):
        for col in variants:
            if col in dataTotalRegion_by_date.columns:
                target[date] = dataTotalRegion_by_date[col].values
                break
```
Antes de hacer el gráfico, preparamos las etiquetas a usar en la leyenda para que nos brinde mayor información. Queremos visualizar las regiones y el total de casos acumulados a la fecha escogida en la leyenda del gráfico
```
# Legend labels: "<region name> <cumulative cases at `date`>".
cum_totals = dataCum[date].values.tolist()
region_names = dataTotalRegion.index.tolist()
label_region = [region_names[i] + ' ' + str(cum_totals[i])
                for i in range(len(cum_totals))]
```
Para hacer gráficos utilizamos matplotlib, con el estilo (opcional) 'fivethirtyeight'
```
import matplotlib.pyplot as plt

plt.style.use('fivethirtyeight')
# Define the line styles explicitly to avoid duplicated colour/line-style
# combinations across the 16 regions.
filename = date+'-COVID-19-Chile-TotalConfirmados-Region.png'
fig, ax = plt.subplots(tight_layout=True,figsize=(14,7))
lsRegion = ['-','--','-.',':',':','-.','--','-','-','--','-.',':',':','-.','--','-']
for i in np.arange(len(dataCum.index)-1):
    dataCum.iloc[i].plot(ax=ax,ls=lsRegion[i])
plt.legend(fontsize='medium', labels=label_region, handlelength=3.0, title='Region - total confirmados')
ax.set_yscale('log')
ax.set_title(f'COVID-19 en Chile: Número total confirmados por Región '+date, fontsize='large')
ax.set_xlabel(f'fecha (año-mes-día)', fontsize='medium')
ax.set_ylabel(f'total confirmados por Region', fontsize='medium')
plt.annotate('Source:\nhttps://github.com/MinCiencia/Datos-COVID19', (0,0), (-80,-20), fontsize='medium', xycoords='axes fraction', textcoords='offset points', va='top')
# `dpi` is the keyword matplotlib's savefig understands; the original
# passed a `dvi=700` typo, so the resolution setting was silently ignored.
plt.savefig(filename, bbox_inches='tight', format='png', dpi=700)

# Run this cell only if you want to download the figure (Colab only).
from google.colab import files
files.download(filename)
```
Estas son figuras básicas para trabajar con los productos del repositorio. La idea es que generes tus propios gráficos modificando las columnas, estilos, colores, etc. En los próximos notebooks encontrarás ejemplos para utilizar los datos que se encuentran disponibles en [https://github.com/MinCiencia/Datos-COVID19/tree/master/output](https://github.com/MinCiencia/Datos-COVID19/tree/master/output)
| github_jupyter |
# ELAIS-S1 - Merging HELP data products
This notebook merges the various HELP data products on ELAIS-S1.
It is first used to create a catalogue that will be used for SED fitting by CIGALE by merging the optical master list, the photo-z and the XID+ far infrared fluxes. Then, this notebook is used to incorporate the CIGALE physical parameter estimations and generate the final HELP data product on the field.
```
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
import datetime
print("This notebook was executed on: \n{}".format(datetime.datetime.now()))
import numpy as np
from astropy.table import Column, MaskedColumn, Table, join, vstack
from herschelhelp.filters import get_filter_meta_table
from herschelhelp_internal.utils import add_column_meta
# Map each HELP filter_id to its mean wavelength; used later to sort the
# photometric bands into wavelength order.
filter_mean_lambda = {
    item['filter_id']: item['mean_wavelength'] for item in
    get_filter_meta_table()
}
# Set this to true to produce only the catalogue for CIGALE and to false
# to continue and merge the CIGALE results too.
MAKE_CIGALE_CAT = False
MAKE_FINAL_CAT = True
# Date tag used in input/output file names.
SUFFIX = '20180416'
```
# Reading the masterlist, XID+, and photo-z catalogues
```
# Master list
# Optical/NIR master catalogue for ELAIS-S1 (dmu1); drop the FITS metadata
# so later joins don't conflict on headers.
ml = Table.read(
    "../../dmu1/dmu1_ml_ELAIS-S1/data/master_catalogue_elais-s1_{}.fits".format(SUFFIX))
ml.meta = None
# XID+ MIPS24
xid_mips24 = Table.read("../../dmu26/dmu26_XID+MIPS_ELAIS-S1/data/"
                        "dmu26_XID+MIPS_ELAIS-S1_20180215.fits")
xid_mips24.meta = None
# Adding the error column
# XID+ reports asymmetric upper/lower flux bounds; keep the larger of the
# two offsets as a single symmetric error.
xid_mips24.add_column(Column(
    data=np.max([xid_mips24['FErr_MIPS_24_u'] - xid_mips24['F_MIPS_24'],
                 xid_mips24['F_MIPS_24'] - xid_mips24['FErr_MIPS_24_l']],
                axis=0),
    name="ferr_mips_24"
))
xid_mips24['F_MIPS_24'].name = "f_mips_24"
# Keep only the columns needed for the merged catalogue.
xid_mips24 = xid_mips24['help_id', 'f_mips_24', 'ferr_mips_24', 'flag_mips_24']
# XID+ PACS
xid_pacs = Table.read("../../dmu26/dmu26_XID+PACS_ELAIS-S1/data/"
                      "dmu26_XID+PACS_ELAIS-S1_cat_20180416.fits")
xid_pacs.meta = None
# Convert from mJy to μJy
for col in ["F_PACS_100", "FErr_PACS_100_u", "FErr_PACS_100_l",
            "F_PACS_160", "FErr_PACS_160_u", "FErr_PACS_160_l"]:
    xid_pacs[col] *= 1000
# Symmetric error for PACS 100 μm ("green"): the larger of the upper and
# lower offsets around the flux.
xid_pacs.add_column(Column(
    data=np.max([xid_pacs['FErr_PACS_100_u'] - xid_pacs['F_PACS_100'],
                 xid_pacs['F_PACS_100'] - xid_pacs['FErr_PACS_100_l']],
                axis=0),
    name="ferr_pacs_green"
))
xid_pacs['F_PACS_100'].name = "f_pacs_green"
xid_pacs['flag_PACS_100'].name = "flag_pacs_green"
# Same for PACS 160 μm ("red").
xid_pacs.add_column(Column(
    data=np.max([xid_pacs['FErr_PACS_160_u'] - xid_pacs['F_PACS_160'],
                 xid_pacs['F_PACS_160'] - xid_pacs['FErr_PACS_160_l']],
                axis=0),
    name="ferr_pacs_red"
))
xid_pacs['F_PACS_160'].name = "f_pacs_red"
xid_pacs['flag_PACS_160'].name = "flag_pacs_red"
# Keep only the columns needed for the merged catalogue.
xid_pacs = xid_pacs['help_id', 'f_pacs_green', 'ferr_pacs_green',
                    'flag_pacs_green', 'f_pacs_red', 'ferr_pacs_red',
                    'flag_pacs_red']
# # XID+ SPIRE
xid_spire = Table.read("../../dmu26/dmu26_XID+SPIRE_ELAIS-S1/data/"
                       "dmu26_XID+SPIRE_ELAIS-S1_20180327.fits")
xid_spire.meta = None
# Standardise the ID column name to match the other tables.
xid_spire['HELP_ID'].name = "help_id"
# Convert from mJy to μJy
for col in ["F_SPIRE_250", "FErr_SPIRE_250_u", "FErr_SPIRE_250_l",
            "F_SPIRE_350", "FErr_SPIRE_350_u", "FErr_SPIRE_350_l",
            "F_SPIRE_500", "FErr_SPIRE_500_u", "FErr_SPIRE_500_l"]:
    xid_spire[col] *= 1000
# For each of the three SPIRE bands (250/350/500 μm), collapse the
# asymmetric upper/lower bounds into one symmetric error: the larger of
# the two offsets around the flux.
xid_spire.add_column(Column(
    data=np.max([xid_spire['FErr_SPIRE_250_u'] - xid_spire['F_SPIRE_250'],
                 xid_spire['F_SPIRE_250'] - xid_spire['FErr_SPIRE_250_l']],
                axis=0),
    name="ferr_spire_250"
))
xid_spire['F_SPIRE_250'].name = "f_spire_250"
xid_spire.add_column(Column(
    data=np.max([xid_spire['FErr_SPIRE_350_u'] - xid_spire['F_SPIRE_350'],
                 xid_spire['F_SPIRE_350'] - xid_spire['FErr_SPIRE_350_l']],
                axis=0),
    name="ferr_spire_350"
))
xid_spire['F_SPIRE_350'].name = "f_spire_350"
xid_spire.add_column(Column(
    data=np.max([xid_spire['FErr_SPIRE_500_u'] - xid_spire['F_SPIRE_500'],
                 xid_spire['F_SPIRE_500'] - xid_spire['FErr_SPIRE_500_l']],
                axis=0),
    name="ferr_spire_500"
))
xid_spire['F_SPIRE_500'].name = "f_spire_500"
# Keep only the columns needed for the merged catalogue.
xid_spire = xid_spire['help_id',
                      'f_spire_250', 'ferr_spire_250', 'flag_spire_250',
                      'f_spire_350', 'ferr_spire_350', 'flag_spire_350',
                      'f_spire_500', 'ferr_spire_500', 'flag_spire_500']
# Photo-z
# Photometric redshifts (dmu24); keep only the source ID and the median
# of the first redshift solution, renamed to the generic 'redshift'.
photoz = Table.read("../../dmu24/dmu24_ELAIS-S1/data/master_catalogue_elais-s1_20180221_photoz_20180412.fits")
photoz.meta = None
photoz = photoz['help_id', 'z1_median']
photoz['z1_median'].name = 'redshift'
photoz['redshift'][photoz['redshift'] < 0] = np.nan  # -99 used for missing values
# Spec-z remove -99 (Could do this in masterlist stage)
ml['zspec'][ml['zspec'] < 0] = np.nan  # -99 used for missing values
# Flags
# Per-object quality flags produced by the dmu6 validation step.
flags = Table.read("../../dmu6/dmu6_v_ELAIS-S1/data/elais-s1_20180416_flags.fits")
```
# Merging
```
def _join_left_filled(left, right):
    """Left-join *right* onto *left*, then fill the mask created for
    unmatched rows: NaN for flux/error columns, False for flag columns.

    Extracted because the original repeated this join+fill sequence
    verbatim for MIPS, PACS and SPIRE.
    """
    merged = join(left, right, join_type='left')
    for col in right.colnames:
        if col.startswith("f_") or col.startswith("ferr_"):
            merged[col].fill_value = np.nan
        elif col.startswith("flag_"):
            merged[col].fill_value = False
    return merged.filled()

# Join the far-infrared XID+ fluxes onto the optical master list.
merged_table = _join_left_filled(ml, xid_mips24)
merged_table = _join_left_filled(merged_table, xid_pacs)
merged_table = _join_left_filled(merged_table, xid_spire)

# Photometric redshifts: sources without a photo-z get NaN.
merged_table = join(merged_table, photoz, join_type='left')
merged_table['redshift'].fill_value = np.nan
merged_table = merged_table.filled()

# The dmu6 flag columns supersede any flag columns already present, so
# drop the old ones before joining the new table.
for col in flags.colnames:
    if 'flag' in col:
        try:
            merged_table.remove_column(col)
        except KeyError:
            print("Column: {} not in masterlist.".format(col))
merged_table = join(merged_table, flags, join_type='left')
# Unmatched rows get False for every flag column.
for col in merged_table.colnames:
    if 'flag' in col:
        merged_table[col].fill_value = False
merged_table = merged_table.filled()
```
# Saving the catalogue for CIGALE (first run)
```
if MAKE_CIGALE_CAT:
    # Sorting the columns
    # Collect every band that has either a total ('f_<band>') or an
    # aperture ('f_ap_<band>') flux column.
    bands_tot = [col[2:] for col in merged_table.colnames
                 if col.startswith('f_') and not col.startswith('f_ap')]
    bands_ap = [col[5:] for col in merged_table.colnames
                if col.startswith('f_ap_') ]
    bands = list(set(bands_tot) | set(bands_ap))
    # Order the bands by the mean wavelength of their filter.
    bands.sort(key=lambda x: filter_mean_lambda[x])
    columns = ['help_id', 'field', 'ra', 'dec', 'hp_idx', 'ebv', 'redshift',
               'zspec']
    # For each band, keep its flux/magnitude/flag columns in a fixed order
    # (only those actually present in the merged table).
    for band in bands:
        for col_tpl in ['f_{}', 'ferr_{}', 'f_ap_{}', 'ferr_ap_{}',
                        'm_{}', 'merr_{}', 'm_ap_{}', 'merr_ap_{}',
                        'flag_{}']:
            colname = col_tpl.format(band)
            if colname in merged_table.colnames:
                columns.append(colname)
    columns += ['stellarity', 'stellarity_origin', 'flag_cleaned',
                'flag_merged', 'flag_gaia', 'flag_optnir_obs',
                'flag_optnir_det', 'zspec_qual', 'zspec_association_flag']
    # Check that we did not forget any column
    # assert set(columns) == set(merged_table.colnames)
    print(set(columns) - set(merged_table.colnames))
    merged_table = add_column_meta(merged_table, '../columns.yml')
    merged_table[columns].write("data/ELAIS-S1_{}_cigale.fits".format(SUFFIX), overwrite=True)
```
# Merging CIGALE outputs
We merge the CIGALE outputs to the main catalogue. The CIGALE products provides several χ² with associated thresholds. For simplicity, we convert these two values to flags.
```
if MAKE_FINAL_CAT:
    # Cigale outputs: best-fit SED products for every HELP source.
    cigale = Table.read("../../dmu28/dmu28_ELAIS-S1/data/best/HELP_final_results.fits")
    cigale['id'].name = "help_id"
    # We convert the various Chi2 and threshold to flags: a fit is flagged
    # good (True) when its chi-square is at or below the associated threshold.
    flag_cigale_opt = cigale["UVoptIR_OPTchi2"] <= cigale["UVoptIR_OPTchi2_threshold"]
    flag_cigale_ir = cigale["UVoptIR_IRchi2"] <= cigale["UVoptIR_IRchi2_threshold"]
    # The overall flag additionally requires the reduced chi-square to pass.
    flag_cigale = (
        (cigale["UVoptIR_best.reduced_chi_square"]
         <= cigale["UVoptIR_best.reduced_chi_square_threshold"]) &
        flag_cigale_opt & flag_cigale_ir)
    flag_cigale_ironly = cigale["IRonly_IRchi2"] <= cigale["IRonly_IRchi2_threshold"]
    # Store the flags as masked int columns so missing fits fill to -1
    # (distinguishable from both 0/failed and 1/passed) after the join.
    cigale.add_columns([
        MaskedColumn(flag_cigale, "flag_cigale",
                     dtype=int, fill_value=-1),
        MaskedColumn(flag_cigale_opt, "flag_cigale_opt",
                     dtype=int, fill_value=-1),
        MaskedColumn(flag_cigale_ir, "flag_cigale_ir",
                     dtype=int, fill_value=-1),
        MaskedColumn(flag_cigale_ironly, "flag_cigale_ironly",
                     dtype=int, fill_value=-1)
    ])
    # Rename the CIGALE physical-parameter columns to the HELP convention.
    cigale['UVoptIR_bayes.stellar.m_star'].name = "cigale_mstar"
    cigale['UVoptIR_bayes.stellar.m_star_err'].name = "cigale_mstar_err"
    cigale['UVoptIR_bayes.sfh.sfr10Myrs'].name = "cigale_sfr"
    cigale['UVoptIR_bayes.sfh.sfr10Myrs_err'].name = "cigale_sfr_err"
    cigale['UVoptIR_bayes.dust.luminosity'].name = "cigale_dustlumin"
    cigale['UVoptIR_bayes.dust.luminosity_err'].name = "cigale_dustlumin_err"
    cigale['IR_bayes.dust.luminosity'].name = "cigale_dustlumin_ironly"
    cigale['IR_bayes.dust.luminosity_err'].name = "cigale_dustlumin_ironly_err"
    # Keep only the renamed products and the quality flags.
    cigale = cigale['help_id', 'cigale_mstar', 'cigale_mstar_err', 'cigale_sfr',
                    'cigale_sfr_err', 'cigale_dustlumin', 'cigale_dustlumin_err',
                    'cigale_dustlumin_ironly', 'cigale_dustlumin_ironly_err',
                    'flag_cigale', 'flag_cigale_opt', 'flag_cigale_ir',
                    'flag_cigale_ironly']

if MAKE_FINAL_CAT:
    merged_table = join(merged_table, cigale, join_type='left')
    # Fill values: sources with no CIGALE fit get NaN parameters and -1 flags.
    for col in cigale.colnames:
        if col.startswith("cigale_"):
            merged_table[col].fill_value = np.nan
        elif col.startswith("flag_"):
            merged_table[col].fill_value = -1
    merged_table = merged_table.filled()
```
# Sorting columns
We sort the columns by increasing band wavelength.
```
if MAKE_FINAL_CAT:
    # Bands present in the catalogue, ordered by mean filter wavelength.
    bands = sorted(
        (col[2:] for col in merged_table.colnames
         if col.startswith('f_') and not col.startswith('f_ap')),
        key=lambda band: filter_mean_lambda[band])

    # Identification and position columns come first.
    columns = ['help_id', 'field', 'ra', 'dec', 'hp_idx', 'ebv', 'redshift', 'zspec']

    # Then, per band, every flux/magnitude/flag column that exists.
    for band in bands:
        for template in ('f_{}', 'ferr_{}', 'f_ap_{}', 'ferr_ap_{}',
                         'm_{}', 'merr_{}', 'm_ap_{}', 'merr_ap_{}',
                         'flag_{}'):
            candidate = template.format(band)
            if candidate in merged_table.colnames:
                columns.append(candidate)

    # Finally the CIGALE products and the catalogue-level flags.
    columns += ['cigale_mstar', 'cigale_mstar_err', 'cigale_sfr', 'cigale_sfr_err',
                'cigale_dustlumin', 'cigale_dustlumin_err', 'cigale_dustlumin_ironly',
                'cigale_dustlumin_ironly_err', 'flag_cigale', 'flag_cigale_opt',
                'flag_cigale_ir', 'flag_cigale_ironly', 'stellarity',
                'stellarity_origin', 'flag_cleaned', 'flag_merged', 'flag_gaia',
                'flag_optnir_obs', 'flag_optnir_det', 'zspec_qual',
                'zspec_association_flag']

    # Check that we did not forget any column
    print(set(columns) - set(merged_table.colnames))
```
# Saving
```
if MAKE_FINAL_CAT:
    # Attach per-column metadata (from columns.yml) and write the final
    # catalogue, keeping only the wavelength-sorted `columns` selection.
    merged_table = add_column_meta(merged_table, '../columns.yml')
    merged_table[columns].write("data/ELAIS-S1_{}.fits".format(SUFFIX), overwrite=True)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/rs-delve/tti-explorer/blob/master/notebooks/tti-experiment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# TTI Explorer
#### `tti_explorer` is a library for simulating infection spread. This library is built to explore the impact of various test-trace-isolate strategies and social distancing measures on the spread of COVID-19 in the UK.
This notebook is an introduction to the functionality offered by `tti-explorer`.
```
%pwd
%cd ~/Desktop/College\ Work/Fourth\ Year/L48/L48Project/tti-explorer
%pip install -q .
import os
import numpy as np
import pandas as pd
from tqdm.notebook import trange
from tqdm import tqdm
from tti_explorer import config, utils
from tti_explorer.case import simulate_case, CaseFactors
from tti_explorer.contacts import EmpiricalContactsSimulator
from tti_explorer.strategies import TTIFlowModel, RETURN_KEYS
def print_doc(func):
    """Print the docstring of *func* to stdout."""
    docstring = func.__doc__
    print(docstring)
```
Before we do anything, let's make a random state
```
rng = np.random.RandomState(0)
```
We will first do a short tour of the functionality, then show how this is put together to generate simulation results.
## Generate a case
The function we use for this is `simulate_case` in `case.py`
```
print_doc(simulate_case)
```
We store our config values in `config.py`. You can retrieve them as follows
```
case_config = config.get_case_config("delve")
case_config
```
We use these parameters to simulate a case
```
primary_case = simulate_case(rng, **case_config)
print_doc(primary_case)
```
Returned is a `case.Case` with stochastically generated attributes.
### Deeper: Case attributes
Let's go through the simulated attributes of a `case.Case`. The attributes `.under18`, `.covid` and `.symptomatic` are `bool` types indicating whether the generated `case.Case` is under 18, COVID positive and symptomatic respectively. All possible values of these attributes are possible apart from the combination `.covid = False` and `.symptomatic = False` (a configuration irrelevant for the purpose of simulating infection spread). The primary case we just simulated has the following attributes:
```
print(f'Under 18: {primary_case.under18}.')
print(f'COVID positive: {primary_case.covid}.')
print(f'Symptomatic: {primary_case.symptomatic}.')
```
Each `case.Case` also has an attribute `.day_noticed_symptoms` of type `int`, indicating the number of days from start of infectiousness until the `case.Case` noticed the symptoms. If a `case.Case` is asymptomatic, the attribute `.day_noticed_symptoms` is set to `-1`.
```
print(f'primary_case noticed symptoms {primary_case.day_noticed_symptoms} days after start of infectiousness.')
```
Finally, the attribute `.inf_profile` is a `list` describing the relative infectiousness of the case for each day of the infectious period. If `.covid = False` for a `case.Case`, this is `0` throughout.
```
print(f'inf_profile is: {primary_case.inf_profile}')
```
As mentioned above, the configuration for simulating these attributes are stored in `config.py`. This includes the distributions used for sampling attributes. For instance, the attribute `.under18` is sampled from a Bernoulli distribution with probability `0.21`:
```
print(f'Probability of case being under 18: {case_config["p_under18"]}')
```
As another example, if `case.Case` is symptomatic, the attribute `.days_noticed_symptoms` is sampled from a categorical distribution over the set {0, 1, ..., 9} (since we model an infection period of ten days in this configuration) with probabilities:
```
print(f'Probability distribution of .day_noticed_symptoms: {case_config["p_day_noticed_symptoms"]}')
```
## Generate contacts
Social contacts are represented by `Contacts` and defined in `contacts.py`.
To simulate social contacts, we use the BBC Pandemic Dataset. This is stratified as over/under 18 to give different patterns of social contact depending on the age of the case.
```
def load_csv(pth):
    """Load an integer CSV table from *pth*, skipping the single header row."""
    with open(pth) as handle:
        return np.loadtxt(handle, dtype=int, skiprows=1, delimiter=",")
path_to_bbc_data = os.path.join("..", "data", "bbc-pandemic")
over18 = load_csv(os.path.join(path_to_bbc_data, "contact_distributions_o18.csv"))
under18 = load_csv(os.path.join(path_to_bbc_data, "contact_distributions_u18.csv"))
```
Now that we have the data loaded, we use `EmpiricalContactsSimulator` to sample these tables for contacts of the primary case, then simulate their infection under a no measures scenario (i.e. no government intervention)
```
print_doc(EmpiricalContactsSimulator.__init__)
simulate_contacts = EmpiricalContactsSimulator(over18, under18, rng)
```
We can now use the callable `simulate_contacts` to simulate social contacts of the primary case
```
print_doc(simulate_contacts.__call__)
```
To do this we need some more parameters, which we also load from `config.py`. The user can, of course, specify this themselves if they would like.
```
contacts_config = config.get_contacts_config("delve")
contacts_config.keys()
```
We now do the same as we did with when simulating a primary case.
```
social_contacts = simulate_contacts(primary_case, **contacts_config)
print_doc(social_contacts)
```
### Deeper: Contacts attributes
Let's examine the attributes of `social_contacts`, which is an instance of `contacts.Contacts`. Note that `social_contacts` is simulated above by calling `simulate_contacts` which takes `primary_case` as an argument, so contact generation of course depends on the case simulated first.
The first attribute to note is `.n_daily`, which is a `dict` containing the average number of daily contacts (split into three categories) of the case. This is simulated by sampling one row of the tables `over18` or `under18` depending on the value of `primary_case.under18`. In the case of `primary_case`, we can look at `social_contacts.n_daily`:
```
print(f'Average number of daily contacts for primary_case:')
print(f'Home: {social_contacts.n_daily["home"]}')
print(f'Work: {social_contacts.n_daily["work"]}')
print(f'Other: {social_contacts.n_daily["other"]}')
```
The three remaining attributes `.home`, `.work` and `.other` are arrays containing information about each contact made by the case, with one row per contact. More specifically, for each contact, the row contains the first day (always measured relative to the start of infectiousness) of encounter between the case and contact and, if transmission occurred, then the day of transmission.
Also, recall that home contacts are assumed to repeat every day of the infectious period, whereas work/other contacts are new for each day. This means the lengths of the arrays `.work` and `.other` are `10 * .n_daily['work']` and `10 * .n_daily['other']` respectively (recalling the infection period is assumed to last ten days, a parameter set in `contacts_config['period']`). Whereas, the length of the `.home` array is just `.n_daily['home']`.
```
print(f'Lengths of .home, .work and .other attributes:')
print(f'Home: {len(social_contacts.home)}')
print(f'Work: {len(social_contacts.work)}')
print(f'Other: {len(social_contacts.other)}')
```
Digging further into the array, each row contains two integers. The first integer indicates the day of transmission, which is set to `-1` if no transmission occurred. The second integer contains the day of first encounter. So for instance, looking at one of the home contacts, we see transmission didn't occur and the day of first encounter is `0`, i.e. the first day of the infection period:
```
print(social_contacts.home[0])
```
Looking at the first six work contacts, we see none of them were infected either. This is consistent with the fact that `primary_case.covid = False` so no transmission can occur in this case.
```
print(social_contacts.work[:6])
```
In simulations where `case.Case` is COVID positive, each contact may get infected and the probability of getting infected depends on parameters such as the secondary attack rates (SARs), all of which are set in `contacts_config`. For details on the precise simulation procedure used to generate `contacts.Contacts`, see either Appendix A of the report or the `__call__` method of `EmpiricalContactsSimulator`.
## TTI Strategies
All of the information about the primary case's infection and how they infect their social contacts (under no government intervention) is now contained in `primary_case` and `social_contacts`.
Now we run a simulation, which works as follows. We start by generating a large number of cases, each with associated contacts. Given a particular strategy (e.g. test-based TTI with NPI of stringency level S3), each case is passed through the strategy, which computes various metrics for the case. For example, it computes the number of secondary cases due to primary case (reproduction number) and the number of tests required. We then collect the results for each case and average them, returning the final evaluation of the strategy.
## Running a Simulation
```
from tti_explorer.strategies import TTIFlowModel
```
We will analyse the `S3_test_based_TTI` strategy from our report. For clarity, we will show the whole process.
First get the configurations:
```
# Strategy to evaluate: stringency level S3 with test-based TTI.
name = 'S3_test_based_TTI'

case_config = config.get_case_config("delve")
print(case_config)
contacts_config = config.get_contacts_config("delve")
policy_config = config.get_strategy_configs("delve", name)[name]
# Split the policy parameters into those consumed when simulating
# CaseFactors and those consumed by the TTI strategy itself.
factor_config = utils.get_sub_dictionary(policy_config, config.DELVE_CASE_FACTOR_KEYS)
strategy_config = utils.get_sub_dictionary(policy_config, config.DELVE_STRATEGY_FACTOR_KEYS)
```
Set a random state:
```
rng = np.random.RandomState(42)
```
Make contact simulator:
```
simulate_contacts = EmpiricalContactsSimulator(over18, under18, rng)
```
Make the TTI Model:
```
tti_model = TTIFlowModel(rng, **strategy_config)
```
Generate cases, contacts and run simulation:
```
# Number of primary cases to simulate; results are averaged over them.
n_cases = 10000
outputs = list()
for i in tqdm(range(n_cases)):
    # One primary case, its behavioural factors, and its social contacts.
    case = simulate_case(rng, **case_config)
    case_factors = CaseFactors.simulate_from(rng, case, **factor_config)
    contacts = simulate_contacts(case, **contacts_config)
    # Run the TTI strategy on this case and record the per-case metrics.
    res = tti_model(case, contacts, case_factors)
    outputs.append(res)
```
Collate and average results across the cases simulated:
```
# This cell is mostly just formatting results...
# Metrics to report, keyed by the strategy's standard return keys.
to_show = [
    RETURN_KEYS.base_r,
    RETURN_KEYS.reduced_r,
    RETURN_KEYS.man_trace,
    RETURN_KEYS.app_trace,
    RETURN_KEYS.tests
]
# scale factor to turn simulation numbers into UK population numbers
nppl = case_config['infection_proportions']['nppl']
# R numbers are left unscaled; trace/test counts are scaled to population.
scales = [1, 1, nppl, nppl, nppl]
# Average the per-case outputs, keep the selected metrics, scale them,
# and label count-like rows (those starting with "#") as "k per day".
results = pd.DataFrame(
    outputs
).mean(
    0
).loc[
    to_show
].mul(
    scales
).to_frame(
    name=f"Simulation results: {name.replace('_', ' ')}"
).rename(
    index=lambda x: x + " (k per day)" if x.startswith("#") else x
)
results.round(1)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Image Classification using tf.keras
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l05c04_exercise_flowers_with_data_augmentation_solution.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In this Colab you will classify images of flowers. You will build an image classifier using `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.
# Importing Packages
Let's start by importing required packages. **os** package is used to read files and directory structure, **numpy** is used to convert python list to numpy array and to perform required matrix operations and **matplotlib.pyplot** is used to plot the graph and display images in our training and validation data.
```
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
import glob
import shutil
import matplotlib.pyplot as plt
```
### TODO: Import TensorFlow and Keras Layers
In the cell below, import Tensorflow and the Keras layers and models you will use to build your CNN. Also, import the `ImageDataGenerator` from Keras so that you can perform image augmentation.
```
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
```
# Data Loading
In order to build our image classifier, we can begin by downloading the flowers dataset. We first need to download the archive version of the dataset and after the download we are storing it to "/tmp/" directory.
After downloading the dataset, we need to extract its contents.
```
_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
zip_file = tf.keras.utils.get_file(origin=_URL,
fname="flower_photos.tgz",
extract=True)
base_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')
```
The dataset we downloaded contains images of 5 types of flowers:
1. Rose
2. Daisy
3. Dandelion
4. Sunflowers
5. Tulips
So, let's create the labels for these 5 classes:
```
classes = ['roses', 'daisy', 'dandelion', 'sunflowers', 'tulips']
```
Also, The dataset we have downloaded has following directory structure.
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>flower_photos</b>
|__ <b>daisy</b>
|__ <b>dandelion</b>
|__ <b>roses</b>
|__ <b>sunflowers</b>
|__ <b>tulips</b>
</pre>
As you can see there are no folders containing training and validation data. Therefore, we will have to create our own training and validation set. Let's write some code that will do this.
The code below creates a `train` and a `val` folder each containing 5 folders (one for each type of flower). It then moves the images from the original folders to these new folders such that 80% of the images go to the training set and 20% of the images go into the validation set. In the end our directory will have the following structure:
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>flower_photos</b>
|__ <b>daisy</b>
|__ <b>dandelion</b>
|__ <b>roses</b>
|__ <b>sunflowers</b>
|__ <b>tulips</b>
|__ <b>train</b>
|______ <b>daisy</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>dandelion</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>roses</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>sunflowers</b>: [1.jpg, 2.jpg, 3.jpg ....]
|______ <b>tulips</b>: [1.jpg, 2.jpg, 3.jpg ....]
|__ <b>val</b>
|______ <b>daisy</b>: [507.jpg, 508.jpg, 509.jpg ....]
|______ <b>dandelion</b>: [719.jpg, 720.jpg, 721.jpg ....]
|______ <b>roses</b>: [514.jpg, 515.jpg, 516.jpg ....]
|______ <b>sunflowers</b>: [560.jpg, 561.jpg, 562.jpg .....]
|______ <b>tulips</b>: [640.jpg, 641.jpg, 642.jpg ....]
</pre>
Since we don't delete the original folders, they will still be in our `flower_photos` directory, but they will be empty. The code below also prints the total number of flower images we have for each type of flower.
```
# Split each flower class into train/val subsets (80/20) and move the
# image files into flower_photos/train/<class> and flower_photos/val/<class>.
for cl in classes:
    img_path = os.path.join(base_dir, cl)
    images = glob.glob(img_path + '/*.jpg')
    print("{}: {} Images".format(cl, len(images)))

    # 80% of the images go to training, the remainder to validation.
    n_train = round(len(images) * 0.8)
    train, val = images[:n_train], images[n_train:]

    # Create each destination directory once (rather than re-checking
    # os.path.exists on every single file move) and move the files in.
    for subset, subset_images in (('train', train), ('val', val)):
        dest = os.path.join(base_dir, subset, cl)
        os.makedirs(dest, exist_ok=True)
        for image in subset_images:
            shutil.move(image, dest)
```
For convenience, let us set up the path for the training and validation sets
```
train_dir = os.path.join(base_dir, 'train')
val_dir = os.path.join(base_dir, 'val')
```
# Data Augmentation
Overfitting generally occurs when we have small number of training examples. One way to fix this problem is to augment our dataset so that it has sufficient number of training examples. Data augmentation takes the approach of generating more training data from existing training samples, by augmenting the samples via a number of random transformations that yield believable-looking images. The goal is that at training time, your model will never see the exact same picture twice. This helps expose the model to more aspects of the data and generalize better.
In **tf.keras** we can implement this using the same **ImageDataGenerator** class we used before. We can simply pass different transformations we would want to our dataset as a form of arguments and it will take care of applying it to the dataset during our training process.
## Experiment with Various Image Transformations
In this section you will get some practice doing some basic image transformations. Before we begin making transformations let's define the our `batch_size` and our image size. Remember that the input to our CNN are images of the same size. We therefore have to resize the images in our dataset to the same size.
### TODO: Set Batch and Image Size
In the cell below, create a `batch_size` of 100 images and set a value to `IMG_SHAPE` such that our training data consists of images with width of 150 pixels and height of 150 pixels.
```
batch_size = 100
IMG_SHAPE = 150
```
### TODO: Apply Random Horizontal Flip
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random horizontal flip. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
train_data_gen = image_gen.flow_from_directory(
batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE,IMG_SHAPE)
)
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
    """Display the given images side by side in a single row of five axes."""
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    for image, axis in zip(images_arr, axes.flatten()):
        axis.imshow(image)
    plt.tight_layout()
    plt.show()
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Apply Random Rotation
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random 45 degree rotation. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen = ImageDataGenerator(rescale=1./255, rotation_range=45)
train_data_gen = image_gen.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE, IMG_SHAPE))
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Apply Random Zoom
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and then applies a random zoom of up to 50%. Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images, and to shuffle the images.
```
image_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.5)
train_data_gen = image_gen.flow_from_directory(
batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE, IMG_SHAPE)
)
```
Let's take 1 sample image from our training examples and repeat it 5 times so that the augmentation can be applied to the same image 5 times over randomly, to see the augmentation in action.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Put It All Together
In the cell below, use ImageDataGenerator to create a transformation that rescales the images by 255 and applies:
- random 45 degree rotation
- random zoom of up to 50%
- random horizontal flip
- width shift of 0.15
- height shift of 0.15
Then use the `.flow_from_directory` method to apply the above transformation to the images in our training set. Make sure you indicate the batch size, the path to the directory of the training images, the target size for the images,to shuffle the images, and to set the class mode to `sparse`.
```
# Training pipeline: rescale pixel values to [0, 1] and apply the full set
# of random augmentations (rotation, width/height shifts, flip, zoom).
image_gen_train = ImageDataGenerator(
    rescale=1./255,
    rotation_range=45,
    width_shift_range=.15,
    height_shift_range=.15,
    horizontal_flip=True,
    zoom_range=0.5
)

# class_mode='sparse' yields integer labels, matching the
# sparse_categorical_crossentropy loss used when compiling the model.
train_data_gen = image_gen_train.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_SHAPE,IMG_SHAPE),
    class_mode='sparse'
)
```
Let's visualize how a single image would look like 5 different times, when we pass these augmentations randomly to our dataset.
```
augmented_images = [train_data_gen[0][0][0] for i in range(5)]
plotImages(augmented_images)
```
### TODO: Create a Data Generator for the Validation Set
Generally, we only apply data augmentation to our training examples. So, in the cell below, use ImageDataGenerator to create a transformation that only rescales the images by 255. Then use the `.flow_from_directory` method to apply the above transformation to the images in our validation set. Make sure you indicate the batch size, the path to the directory of the validation images, the target size for the images, and to set the class mode to `sparse`. Remember that it is not necessary to shuffle the images in the validation set.
```
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
directory=val_dir,
target_size=(IMG_SHAPE, IMG_SHAPE),
class_mode='sparse')
```
# TODO: Create the CNN
In the cell below, create a convolutional neural network that consists of 3 convolution blocks. Each convolutional block contains a `Conv2D` layer followed by a max pool layer. The first convolutional block should have 16 filters, the second one should have 32 filters, and the third one should have 64 filters. All convolutional filters should be 3 x 3. All max pool layers should have a `pool_size` of `(2, 2)` .
After the 3 convolutional blocks you should have a flatten layer followed by a fully connected layer with 512 units. The CNN should output class probabilities based on 5 classes which is done by the **softmax** activation function. All other layers should use a **relu** activation function. You should also add Dropout layers with a probability of 20%, where appropriate.
```
# CNN: three conv/max-pool blocks (16 -> 32 -> 64 filters, all 3x3 kernels)
# followed by a 512-unit dense layer and a 5-way softmax output.
model = Sequential()

model.add(Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_SHAPE,IMG_SHAPE, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
# 20% dropout before and after the dense layer to limit overfitting.
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
# Five output classes: roses, daisy, dandelion, sunflowers, tulips.
model.add(Dense(5, activation='softmax'))
```
# TODO: Compile the Model
In the cell below, compile your model using the ADAM optimizer, the sparse cross entropy function as a loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so make sure you also pass the metrics argument.
```
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
```
# TODO: Train the Model
In the cell below, train your model using the **fit_generator** function instead of the usual **fit** function. We have to use the `fit_generator` function because we are using the **ImageDataGenerator** class to generate batches of training and validation data for our model. Train the model for 80 epochs and make sure you use the proper parameters in the `fit_generator` function .
```
epochs = 80

# fit_generator consumes the ImageDataGenerator iterators directly;
# steps_per_epoch / validation_steps cover every sample once per epoch.
# NOTE(review): fit_generator is deprecated in newer TF releases in favour
# of model.fit, which accepts generators directly — confirm the TF version.
history = model.fit_generator(
    train_data_gen,
    steps_per_epoch=int(np.ceil(train_data_gen.n / float(batch_size))),
    epochs=epochs,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(val_data_gen.n / float(batch_size)))
)
```
# TODO: Plot Training and Validation Graphs.
In the cell below, plot the training and validation accuracy/loss graphs.
```
# Keras >= 2.3 / TF 2.x records the metric under 'accuracy'; older
# versions used 'acc'. Pick whichever key is present so the cell works
# with either, instead of raising KeyError on modern TF.
acc_key = 'accuracy' if 'accuracy' in history.history else 'acc'
acc = history.history[acc_key]
val_acc = history.history['val_' + acc_key]

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

# Accuracy and loss curves side by side, training vs validation.
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
```
| github_jupyter |
<center>
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%202/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# Area Plots, Histograms, and Bar Plots
Estimated time needed: **30** minutes
## Objectives
After completing this lab you will be able to:
* Create additional labs namely area plots, histogram and bar charts
## Table of Contents
<div class="alert alert-block alert-info" style="margin-top: 20px">
1. [Exploring Datasets with *pandas*](#0)<br>
2. [Downloading and Prepping Data](#2)<br>
3. [Visualizing Data using Matplotlib](#4) <br>
4. [Area Plots](#6) <br>
5. [Histograms](#8) <br>
6. [Bar Charts](#10) <br>
</div>
# Exploring Datasets with *pandas* and Matplotlib<a id="0"></a>
Toolkits: The course heavily relies on [**pandas**](http://pandas.pydata.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) and [**Numpy**](http://www.numpy.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) for data wrangling, analysis, and visualization. The primary plotting library that we are exploring in the course is [Matplotlib](http://matplotlib.org/?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01).
Dataset: Immigration to Canada from 1980 to 2013 - [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml?utm_medium=Exinfluencer\&utm_source=Exinfluencer\&utm_content=000026UJ\&utm_term=10006555\&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkDV0101ENSkillsNetwork20297740-2021-01-01) from United Nation's website.
The dataset contains annual data on the flows of international migrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. For this lesson, we will focus on the Canadian Immigration data.
# Downloading and Prepping Data <a id="2"></a>
Import Primary Modules. The first thing we'll do is import two key data analysis modules: `pandas` and `numpy`.
```
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
```
Let's download and import our primary Canadian Immigration dataset using *pandas*'s `read_excel()` method. Normally, before we can do that, we would need to download a module that *pandas* requires for reading in Excel files. This module is **openpyxl** (formerly **xlrd**). For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **openpyxl** module:
```
! pip3 install openpyxl
```
Download the dataset and read it into a *pandas* dataframe.
```
# Load the UN Canadian-immigration workbook straight from the course's
# cloud-object-storage bucket into a dataframe.
# skiprows: the first 20 rows of the sheet are report headings, not data.
# skipfooter: the last 2 rows are footnotes, not data.
df_can = pd.read_excel(
'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/Data%20Files/Canada.xlsx',
sheet_name='Canada by Citizenship',
skiprows=range(20),
skipfooter=2)
print('Data downloaded and read into a dataframe!')
```
Let's take a look at the first five items in our dataset.
```
df_can.head()
```
Let's find out how many entries there are in our dataset.
```
# print the dimensions of the dataframe
print(df_can.shape)
```
Clean up data. We will make some modifications to the original dataset to make it easier to create our visualizations. Refer to the `Introduction to Matplotlib and Line Plots` lab for the rationale and a detailed description of the changes.
#### 1. Clean up the dataset to remove columns that are not informative to us for visualization (eg. Type, AREA, REG).
```
df_can.drop(['AREA', 'REG', 'DEV', 'Type', 'Coverage'], axis=1, inplace=True)
# let's view the first five elements and see how the dataframe was changed
df_can.head()
```
Notice how the columns Type, Coverage, AREA, REG, and DEV got removed from the dataframe.
#### 2. Rename some of the columns so that they make sense.
```
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent','RegName':'Region'}, inplace=True)
# let's view the first five elements and see how the dataframe was changed
df_can.head()
```
Notice how the column names now make much more sense, even to an outsider.
#### 3. For consistency, ensure that all column labels of type string.
```
# let's examine the types of the column labels
all(isinstance(column, str) for column in df_can.columns)
```
Notice how the above line of code returned *False* when we tested if all the column labels are of type **string**. So let's change them all to **string** type.
```
df_can.columns = list(map(str, df_can.columns))
# let's check the column labels types now
all(isinstance(column, str) for column in df_can.columns)
```
#### 4. Set the country name as index - useful for quickly looking up countries using .loc method.
```
df_can.set_index('Country', inplace=True)
# Let's view the first five elements and see how the dataframe was changed
df_can.head()
```
Notice now the country names now serve as indices.
#### 5. Add total column.
```
df_can['Total'] = df_can.sum(axis=1)
# let's view the first five elements and see how the dataframe was changed
df_can.head()
```
Now the dataframe has an extra column that presents the total number of immigrants from each country in the dataset from 1980 - 2013. So if we print the dimension of the data, we get:
```
print('data dimensions:', df_can.shape)
```
So now our dataframe has 38 columns instead of 37 columns that we had before.
```
# finally, let's create a list of years from 1980 - 2013
# this will come in handy when we start plotting the data
years = list(map(str, range(1980, 2014)))
years
```
# Visualizing Data using Matplotlib<a id="4"></a>
Import the `matplotlib` library.
```
# use the inline backend to generate the plots within the browser
# % matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('ggplot') # optional: for ggplot-like style
# check for latest version of Matplotlib
print('Matplotlib version: ', mpl.__version__) # >= 2.0.0
%matplotlib inline
```
# Area Plots<a id="6"></a>
In the last module, we created a line plot that visualized the top 5 countries that contributed the most immigrants to Canada from 1980 to 2013. With a little modification to the code, we can visualize this plot as a cumulative plot, also known as a **Stacked Line Plot** or **Area plot**.
```
df_can.sort_values(['Total'], ascending=False, axis=0, inplace=True)
# get the top 5 entries
df_top5 = df_can.head()
# transpose the dataframe
df_top5 = df_top5[years].transpose()
df_top5.head()
```
Area plots are stacked by default. And to produce a stacked area plot, each column must be either all positive or all negative values (any `NaN`, i.e. not a number, values will default to 0). To produce an unstacked plot, set parameter `stacked` to value `False`.
```
# let's change the index values of df_top5 to type integer for plotting
df_top5.index = df_top5.index.map(int)
df_top5.plot(kind='area',
stacked=False,
figsize=(20, 10)) # pass a tuple (x, y) size
plt.title('Immigration Trend of Top 5 Countries')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.show()
```
The unstacked plot has a default transparency (alpha value) at 0.5. We can modify this value by passing in the `alpha` parameter.
```
df_top5.plot(kind='area',
alpha=0.25, # 0 - 1, default value alpha = 0.5
stacked=False,
figsize=(20, 10))
plt.title('Immigration Trend of Top 5 Countries')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.show()
```
### Two types of plotting
As we discussed in the video lectures, there are two styles/options of plotting with `matplotlib`, plotting using the Artist layer and plotting using the scripting layer.
**Option 1: Scripting layer (procedural method) - using matplotlib.pyplot as 'plt'**
You can use `plt` i.e. `matplotlib.pyplot` and add more elements by calling different methods procedurally; for example, `plt.title(...)` to add title or `plt.xlabel(...)` to add label to the x-axis.
```python
# Option 1: This is what we have been using so far
df_top5.plot(kind='area', alpha=0.35, figsize=(20, 10))
plt.title('Immigration trend of top 5 countries')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
```
**Option 2: Artist layer (Object oriented method) - using an `Axes` instance from Matplotlib (preferred)**
You can use an `Axes` instance of your current plot and store it in a variable (eg. `ax`). You can add more elements by calling methods with a little change in syntax (by adding "`set_`" to the previous methods). For example, use `ax.set_title()` instead of `plt.title()` to add title, or `ax.set_xlabel()` instead of `plt.xlabel()` to add label to the x-axis.
This option sometimes is more transparent and flexible to use for advanced plots (in particular when having multiple plots, as you will see later).
In this course, we will stick to the **scripting layer**, except for some advanced visualizations where we will need to use the **artist layer** to manipulate advanced aspects of the plots.
```
# option 2: preferred option with more flexibility
ax = df_top5.plot(kind='area', alpha=0.35, figsize=(20, 10))
ax.set_title('Immigration Trend of Top 5 Countries')
ax.set_ylabel('Number of Immigrants')
ax.set_xlabel('Years')
```
**Question**: Use the scripting layer to create a stacked area plot of the 5 countries that contributed the least to immigration to Canada **from** 1980 to 2013. Use a transparency value of 0.45.
```
### type your answer here
#The correct answer is:
# get the 5 countries with the least contribution
df_least5 = df_can.tail(5)
# transpose the dataframe
df_least5 = df_least5[years].transpose()
df_least5.head()
df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting
df_least5.plot(kind='area', alpha=0.45, figsize=(20, 10))
plt.title('Immigration Trend of 5 Countries with Least Contribution to Immigration')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# get the 5 countries with the least contribution
df_least5 = df_can.tail(5)
# transpose the dataframe
df_least5 = df_least5[years].transpose()
df_least5.head()
df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting
df_least5.plot(kind='area', alpha=0.45, figsize=(20, 10))
plt.title('Immigration Trend of 5 Countries with Least Contribution to Immigration')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.show()
```
</details>
**Question**: Use the artist layer to create an unstacked area plot of the 5 countries that contributed the least to immigration to Canada **from** 1980 to 2013. Use a transparency value of 0.55.
```
### type your answer here
#The correct answer is:
# get the 5 countries with the least contribution
df_least5 = df_can.tail(5)
# transpose the dataframe
df_least5 = df_least5[years].transpose()
df_least5.head()
df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting
ax = df_least5.plot(kind='area', alpha=0.55, stacked=False, figsize=(20, 10))
ax.set_title('Immigration Trend of 5 Countries with Least Contribution to Immigration')
ax.set_ylabel('Number of Immigrants')
ax.set_xlabel('Years')
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# get the 5 countries with the least contribution
df_least5 = df_can.tail(5)
# transpose the dataframe
df_least5 = df_least5[years].transpose()
df_least5.head()
df_least5.index = df_least5.index.map(int) # let's change the index values of df_least5 to type integer for plotting
ax = df_least5.plot(kind='area', alpha=0.55, stacked=False, figsize=(20, 10))
ax.set_title('Immigration Trend of 5 Countries with Least Contribution to Immigration')
ax.set_ylabel('Number of Immigrants')
ax.set_xlabel('Years')
```
</details>
# Histograms<a id="8"></a>
A histogram is a way of representing the *frequency* distribution of numeric dataset. The way it works is it partitions the x-axis into *bins*, assigns each data point in our dataset to a bin, and then counts the number of data points that have been assigned to each bin. So the y-axis is the frequency or the number of data points in each bin. Note that we can change the bin size and usually one needs to tweak it so that the distribution is displayed nicely.
**Question:** What is the frequency distribution of the number (population) of new immigrants from the various countries to Canada in 2013?
Before we proceed with creating the histogram plot, let's first examine the data split into intervals. To do this, we will use **Numpy**'s `histogram` method to get the bin ranges and frequency counts as follows:
```
# let's quickly view the 2013 data
df_can['2013'].head()
# np.histogram returns 2 values
count, bin_edges = np.histogram(df_can['2013'])
print(count) # frequency count
print(bin_edges) # bin ranges, default = 10 bins
```
By default, the `histogram` method breaks up the dataset into 10 bins. The figure below summarizes the bin ranges and the frequency distribution of immigration in 2013. We can see that in 2013:
* 178 countries contributed between 0 to 3412.9 immigrants
* 11 countries contributed between 3412.9 to 6825.8 immigrants
* 1 country contributed between 6825.8 to 10238.7 immigrants, and so on..
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DV0101EN-SkillsNetwork/labs/Module%202/images/Mod2Fig1-Histogram.JPG" align="center" width=800>
We can easily graph this distribution by passing `kind=hist` to `plot()`.
```
df_can['2013'].plot(kind='hist', figsize=(8, 5))
# add a title to the histogram
plt.title('Histogram of Immigration from 195 Countries in 2013')
# add y-label
plt.ylabel('Number of Countries')
# add x-label
plt.xlabel('Number of Immigrants')
plt.show()
```
In the above plot, the x-axis represents the population range of immigrants in intervals of 3412.9. The y-axis represents the number of countries that contributed to the aforementioned population.
Notice that the x-axis labels do not match with the bin size. This can be fixed by passing in a `xticks` keyword that contains the list of the bin sizes, as follows:
```
# 'bin_edges' is a list of bin intervals
count, bin_edges = np.histogram(df_can['2013'])
df_can['2013'].plot(kind='hist', figsize=(8, 5), xticks=bin_edges)
plt.title('Histogram of Immigration from 195 countries in 2013') # add a title to the histogram
plt.ylabel('Number of Countries') # add y-label
plt.xlabel('Number of Immigrants') # add x-label
plt.show()
```
*Side Note:* We could use `df_can['2013'].plot.hist()`, instead. In fact, throughout this lesson, using `some_data.plot(kind='type_plot', ...)` is equivalent to `some_data.plot.type_plot(...)`. That is, passing the type of the plot as argument or method behaves the same.
See the *pandas* documentation for more info http://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.plot.html.
We can also plot multiple histograms on the same plot. For example, let's try to answer the following questions using a histogram.
**Question**: What is the immigration distribution for Denmark, Norway, and Sweden for years 1980 - 2013?
```
# let's quickly view the dataset
df_can.loc[['Denmark', 'Norway', 'Sweden'], years]
# generate histogram
df_can.loc[['Denmark', 'Norway', 'Sweden'], years].plot.hist()
```
That does not look right!
Don't worry, you'll often come across situations like this when creating plots. The solution often lies in how the underlying dataset is structured.
Instead of plotting the population frequency distribution of the population for the 3 countries, *pandas* instead plotted the population frequency distribution for the `years`.
This can be easily fixed by first transposing the dataset, and then plotting as shown below.
```
# transpose dataframe
df_t = df_can.loc[['Denmark', 'Norway', 'Sweden'], years].transpose()
df_t.head()
# generate histogram
df_t.plot(kind='hist', figsize=(10, 6))
plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013')
plt.ylabel('Number of Years')
plt.xlabel('Number of Immigrants')
plt.show()
```
Let's make a few modifications to improve the impact and aesthetics of the previous plot:
* increase the bin size to 15 by passing in `bins` parameter;
* set transparency to 60% by passing in `alpha` parameter;
* label the x-axis by passing in `x-label` parameter;
* change the colors of the plots by passing in `color` parameter.
```
# let's get the x-tick values
count, bin_edges = np.histogram(df_t, 15)
# un-stacked histogram
df_t.plot(kind ='hist',
figsize=(10, 6),
bins=15,
alpha=0.6,
xticks=bin_edges,
color=['coral', 'darkslateblue', 'mediumseagreen']
)
plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013')
plt.ylabel('Number of Years')
plt.xlabel('Number of Immigrants')
plt.show()
```
Tip:
For a full listing of colors available in Matplotlib, run the following code in your python shell:
```python
import matplotlib
for name, hex in matplotlib.colors.cnames.items():
print(name, hex)
```
If we do not want the plots to overlap each other, we can stack them using the `stacked` parameter. Let's also adjust the min and max x-axis labels to remove the extra gap on the edges of the plot. We can pass a tuple (min,max) using the `xlim` parameter, as shown below.
```
count, bin_edges = np.histogram(df_t, 15)
xmin = bin_edges[0] - 10 # first bin value is 31.0, adding buffer of 10 for aesthetic purposes
xmax = bin_edges[-1] + 10 # last bin value is 308.0, adding buffer of 10 for aesthetic purposes
# stacked Histogram
df_t.plot(kind='hist',
figsize=(10, 6),
bins=15,
xticks=bin_edges,
color=['coral', 'darkslateblue', 'mediumseagreen'],
stacked=True,
xlim=(xmin, xmax)
)
plt.title('Histogram of Immigration from Denmark, Norway, and Sweden from 1980 - 2013')
plt.ylabel('Number of Years')
plt.xlabel('Number of Immigrants')
plt.show()
```
**Question**: Use the scripting layer to display the immigration distribution for Greece, Albania, and Bulgaria for years 1980 - 2013? Use an overlapping plot with 15 bins and a transparency value of 0.35.
```
### type your answer here
#The correct answer is:
# create a dataframe of the countries of interest (cof)
df_cof = df_can.loc[['Greece', 'Albania', 'Bulgaria'], years]
# transpose the dataframe
df_cof = df_cof.transpose()
# let's get the x-tick values
count, bin_edges = np.histogram(df_cof, 15)
# Un-stacked Histogram
df_cof.plot(kind ='hist',
figsize=(10, 6),
bins=15,
alpha=0.35,
xticks=bin_edges,
color=['coral', 'darkslateblue', 'mediumseagreen']
)
plt.title('Histogram of Immigration from Greece, Albania, and Bulgaria from 1980 - 2013')
plt.ylabel('Number of Years')
plt.xlabel('Number of Immigrants')
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# create a dataframe of the countries of interest (cof)
df_cof = df_can.loc[['Greece', 'Albania', 'Bulgaria'], years]
# transpose the dataframe
df_cof = df_cof.transpose()
# let's get the x-tick values
count, bin_edges = np.histogram(df_cof, 15)
# Un-stacked Histogram
df_cof.plot(kind ='hist',
figsize=(10, 6),
bins=15,
alpha=0.35,
xticks=bin_edges,
color=['coral', 'darkslateblue', 'mediumseagreen']
)
plt.title('Histogram of Immigration from Greece, Albania, and Bulgaria from 1980 - 2013')
plt.ylabel('Number of Years')
plt.xlabel('Number of Immigrants')
plt.show()
```
</details>
# Bar Charts (Dataframe) <a id="10"></a>
A bar plot is a way of representing data where the *length* of the bars represents the magnitude/size of the feature/variable. Bar graphs usually represent numerical and categorical variables grouped in intervals.
To create a bar plot, we can pass one of two arguments via `kind` parameter in `plot()`:
* `kind=bar` creates a *vertical* bar plot
* `kind=barh` creates a *horizontal* bar plot
**Vertical bar plot**
In vertical bar graphs, the x-axis is used for labelling, and the length of bars on the y-axis corresponds to the magnitude of the variable being measured. Vertical bar graphs are particularly useful in analyzing time series data. One disadvantage is that they lack space for text labelling at the foot of each bar.
**Let's start off by analyzing the effect of Iceland's Financial Crisis:**
The 2008 - 2011 Icelandic Financial Crisis was a major economic and political event in Iceland. Relative to the size of its economy, Iceland's systemic banking collapse was the largest experienced by any country in economic history. The crisis led to a severe economic depression in 2008 - 2011 and significant political unrest.
**Question:** Let's compare the number of Icelandic immigrants (country = 'Iceland') to Canada from year 1980 to 2013.
```
# step 1: get the data
df_iceland = df_can.loc['Iceland', years]
df_iceland.head()
# step 2: plot data
df_iceland.plot(kind='bar', figsize=(10, 6))
plt.xlabel('Year') # add to x-label to the plot
plt.ylabel('Number of immigrants') # add y-label to the plot
plt.title('Icelandic immigrants to Canada from 1980 to 2013') # add title to the plot
plt.show()
```
The bar plot above shows the total number of immigrants broken down by each year. We can clearly see the impact of the financial crisis; the number of immigrants to Canada started increasing rapidly after 2008.
Let's annotate this on the plot using the `annotate` method of the **scripting layer** or the **pyplot interface**. We will pass in the following parameters:
* `s`: str, the text of annotation.
* `xy`: Tuple specifying the (x,y) point to annotate (in this case, end point of arrow).
* `xytext`: Tuple specifying the (x,y) point to place the text (in this case, start point of arrow).
* `xycoords`: The coordinate system that xy is given in - 'data' uses the coordinate system of the object being annotated (default).
* `arrowprops`: Takes a dictionary of properties to draw the arrow:
* `arrowstyle`: Specifies the arrow style, `'->'` is standard arrow.
* `connectionstyle`: Specifies the connection type. `arc3` is a straight line.
* `color`: Specifies color of arrow.
* `lw`: Specifies the line width.
I encourage you to read the Matplotlib documentation for more details on annotations:
http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.annotate.
```
df_iceland.plot(kind='bar', figsize=(10, 6), rot=90) # rotate the xticks(labelled points on x-axis) by 90 degrees
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
plt.title('Icelandic Immigrants to Canada from 1980 to 2013')
# Annotate arrow
plt.annotate('', # s: str. Will leave it blank for no text
xy=(32, 70), # place head of the arrow at point (year 2012 , pop 70)
xytext=(28, 20), # place base of the arrow at point (year 2008 , pop 20)
xycoords='data', # will use the coordinate system of the object being annotated
arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='blue', lw=2)
)
plt.show()
```
Let's also annotate a text to go over the arrow. We will pass in the following additional parameters:
* `rotation`: rotation angle of text in degrees (counter clockwise)
* `va`: vertical alignment of text \[‘center’ | ‘top’ | ‘bottom’ | ‘baseline’]
* `ha`: horizontal alignment of text \[‘center’ | ‘right’ | ‘left’]
```
df_iceland.plot(kind='bar', figsize=(10, 6), rot=90)
plt.xlabel('Year')
plt.ylabel('Number of Immigrants')
plt.title('Icelandic Immigrants to Canada from 1980 to 2013')
# Annotate arrow
plt.annotate('', # s: str. will leave it blank for no text
xy=(32, 70), # place head of the arrow at point (year 2012 , pop 70)
xytext=(28, 20), # place base of the arrow at point (year 2008 , pop 20)
xycoords='data', # will use the coordinate system of the object being annotated
arrowprops=dict(arrowstyle='->', connectionstyle='arc3', color='blue', lw=2)
)
# Annotate Text
plt.annotate('2008 - 2011 Financial Crisis', # text to display
xy=(28, 30), # start the text at point (year 2008 , pop 30)
rotation=72.5, # based on trial and error to match the arrow
va='bottom', # want the text to be vertically 'bottom' aligned
ha='left', # want the text to be horizontally 'left' aligned.
)
plt.show()
```
**Horizontal Bar Plot**
Sometimes it is more practical to represent the data horizontally, especially if you need more room for labelling the bars. In horizontal bar graphs, the y-axis is used for labelling, and the length of bars on the x-axis corresponds to the magnitude of the variable being measured. As you will see, there is more room on the y-axis to label categorical variables.
**Question:** Using the scripting layer and the `df_can` dataset, create a *horizontal* bar plot showing the *total* number of immigrants to Canada from the top 15 countries, for the period 1980 - 2013. Label each country with the total immigrant count.
Step 1: Get the data pertaining to the top 15 countries.
```
### type your answer here
# sort dataframe on 'Total' column (ascending, so that tail(15) yields the 15 largest totals)
df_can.sort_values(by='Total', ascending=True, inplace=True)
# get top 15 countries
df_top15 = df_can['Total'].tail(15)
df_top15
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# sort dataframe on 'Total' column (ascending, so that tail(15) yields the 15 largest totals)
df_can.sort_values(by='Total', ascending=True, inplace=True)
# get top 15 countries
df_top15 = df_can['Total'].tail(15)
df_top15
```
</details>
Step 2: Plot data:
1. Use `kind='barh'` to generate a bar chart with horizontal bars.
2. Make sure to choose a good size for the plot and to label your axes and to give the plot a title.
3. Loop through the countries and annotate the immigrant population using the annotate function of the scripting interface.
```
### type your answer here
# generate plot
df_top15.plot(kind='barh', figsize=(12, 12), color='steelblue')
plt.xlabel('Number of Immigrants')
# FIX: corrected the 'Conuntries' typo in the displayed chart title.
plt.title('Top 15 Countries Contributing to the Immigration to Canada between 1980 - 2013')
# annotate value labels to each country
for index, value in enumerate(df_top15):
    label = format(int(value), ',')  # format int with commas
    # place text at the end of bar (subtracting 47000 from x, and 0.1 from y to make it fit within the bar)
    plt.annotate(label, xy=(value - 47000, index - 0.10), color='white')
plt.show()
```
<details><summary>Click here for a sample python solution</summary>
```python
#The correct answer is:
# generate plot
df_top15.plot(kind='barh', figsize=(12, 12), color='steelblue')
plt.xlabel('Number of Immigrants')
# FIX: corrected the 'Conuntries' typo in the displayed chart title.
plt.title('Top 15 Countries Contributing to the Immigration to Canada between 1980 - 2013')
# annotate value labels to each country
for index, value in enumerate(df_top15):
    label = format(int(value), ',')  # format int with commas
    # place text at the end of bar (subtracting 47000 from x, and 0.1 from y to make it fit within the bar)
    plt.annotate(label, xy=(value - 47000, index - 0.10), color='white')
plt.show()
```
</details>
| github_jupyter |
```
import cv2
import tensorflow as tf
import numpy as np
import math
import time
def paa_skin(image, image1):
    """Return *image1* unchanged together with a normalized distance map of *image*.

    *image* is converted to grayscale, a Euclidean (L2) distance transform
    with a 5x5 mask is computed on it, and the result is min-max normalized
    in place to the range [0, 1].
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    distance_map = cv2.distanceTransform(gray, cv2.DIST_L2, 5)
    cv2.normalize(distance_map, distance_map, 0, 1.0, cv2.NORM_MINMAX)
    return image1, distance_map
def callback(x):
    """No-op callback required by cv2.createTrackbar; the trackbar positions
    are polled explicitly with cv2.getTrackbarPos inside the capture loop."""
    pass
# --- Stage 1: interactive HSV calibration --------------------------------
# Show the live webcam feed with six trackbars so the user can dial in an
# HSV range that isolates the region to track.  The loop exits when the
# user presses Enter (key code 13), keeping the last trackbar readings
# (ilowH1 ... ihighV1) for the tracking stage below.
cv2.namedWindow('image1')
cap=cv2.VideoCapture(0)
# Initial trackbar positions (widest possible range).
# NOTE(review): the hue sliders are created with max=179 but initialised
# from ihighH=255, which exceeds that maximum -- confirm OpenCV clamping
# is the intended behavior here.
ilowH = 0
ihighH = 255
ilowS = 0
ihighS = 255
ilowV = 0
ihighV = 255
cv2.createTrackbar('lowH1', 'image1', ilowH, 179, callback)
cv2.createTrackbar('highH1', 'image1', ihighH, 179, callback)
cv2.createTrackbar('lowS1', 'image1', ilowS, 255, callback)
cv2.createTrackbar('highS1', 'image1', ihighS, 255, callback)
cv2.createTrackbar('lowV1', 'image1', ilowV, 255, callback)
cv2.createTrackbar('highV1', 'image1', ihighV, 255, callback)
while True:
try:
ret,frame=cap.read()
# Poll the current slider positions every frame.
ilowH1 = cv2.getTrackbarPos('lowH1', 'image1')
ihighH1 = cv2.getTrackbarPos('highH1', 'image1')
ilowS1 = cv2.getTrackbarPos('lowS1', 'image1')
ihighS1 = cv2.getTrackbarPos('highS1', 'image1')
ilowV1 = cv2.getTrackbarPos('lowV1', 'image1')
ihighV1 = cv2.getTrackbarPos('highV1', 'image1')
# Threshold the frame in HSV space and keep only the in-range pixels.
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
low1=np.array([ilowH1,ilowS1,ilowV1])
high1=np.array([ihighH1,ihighS1,ihighV1])
img_mask1=cv2.inRange(hsv,low1,high1)
output1=cv2.bitwise_and(frame,frame,mask=img_mask1)
# Distance transform of the masked image: brightest deep inside the
# selected region; its image moments give a robust centroid (cX, cY).
new1 = cv2.cvtColor(output1, cv2.COLOR_BGR2GRAY)
dist = cv2.distanceTransform(new1, cv2.DIST_L2, 5)
cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
M=cv2.moments(dist)
cX = (int(M["m10"] / M["m00"]))
cY = (int(M["m01"] / M["m00"]))
cv2.circle(frame, (cX, cY), 5, (0, 0, 255), -1)
cv2.imshow("image1",frame)
cv2.imshow('dist',dist)
cv2.imshow("image",output1)
except ZeroDivisionError:
# m00 == 0 when the mask is empty (nothing in range); skip the frame.
pass
if (cv2.waitKey(1) == 13):
break
cv2.destroyAllWindows()
cap.release()
# --- Stage 2: centroid tracking and path recording -----------------------
# Re-open the camera and, using the HSV range calibrated above, track the
# region's centroid.  The path is drawn onto a black canvas; every 10
# seconds the canvas is mirrored, written to disk as <i>.jpeg, and reset.
cap=cv2.VideoCapture(0)
start=time.time()
black=np.zeros((output1.shape[0],output1.shape[1]))
i=0
j=1
while True:
try:
ret,frame=cap.read()
# Same mask -> distance transform -> centroid pipeline as stage 1.
hsv=cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
low1=np.array([ilowH1,ilowS1,ilowV1])
high1=np.array([ihighH1,ihighS1,ihighV1])
img_mask1=cv2.inRange(hsv,low1,high1)
output1=cv2.bitwise_and(frame,frame,mask=img_mask1)
new1 = cv2.cvtColor(output1, cv2.COLOR_BGR2GRAY)
dist = cv2.distanceTransform(new1, cv2.DIST_L2, 5)
cv2.normalize(dist, dist, 0, 1.0, cv2.NORM_MINMAX)
M=cv2.moments(dist)
cX = (int(M["m10"] / M["m00"]))
cY = (int(M["m01"] / M["m00"]))
cv2.circle(frame, (cX, cY), 5, (255, 252, 255), -1)
cv2.circle(black, (cX, cY), 5, (255, 255, 255), -1)
# j == 1 marks the first frame of a new path segment.
if(j==1):
cv2.putText(black,"start",(cX,cY),cv2.FONT_HERSHEY_SIMPLEX,1.0,(255,0,0),lineType=cv2.LINE_AA)
if(j!=1):
cv2.line(black,(sX,sY),(cX,cY),(255,255,255),5)
cv2.imshow("image1",frame)
cv2.imshow("image",output1)
cv2.imshow("path",black)
j=j+1
# Remember this centroid so the next frame can draw a connecting line.
sX=cX
sY=cY
end=time.time()
diff=end-start
# Every 10 seconds: close the segment, save the (mirrored) canvas, reset.
if (diff>10):
cv2.putText(black,"end",(cX,cY),cv2.FONT_HERSHEY_SIMPLEX,1.0,(255,0,0),lineType=cv2.LINE_AA)
black=cv2.flip(black,1)
cv2.imwrite("/home/paa/1/"+str(i)+".jpeg",black)
black=np.zeros((output1.shape[0],output1.shape[1]))
i=i+1
j=1
start=time.time()
print("hi")
except ZeroDivisionError:
# Empty mask -> zero moments; skip this frame.
pass
if (cv2.waitKey(1) == 13):
break
cv2.destroyAllWindows()
cap.release()
```
| github_jupyter |
# Automated ML with azureml
The dependencies are imported
```
import os
import pandas as pd
from azureml.core import Dataset, Datastore, Workspace, Experiment
# from azureml.train.automl import AutoMLConfig
from azureml.widgets import RunDetails
```
## Dataset
### Overview
We will try to predict the rating of modified version of the **Kaggle Trip advisor dataset**.
The Dataset contains a Trip Advisor hotel review text column as well as a Rating column with Ratings from 0 - 5 stars.
> The Tripadvisor Hotel Review Dataset file, is derived from the publication:
>
>_Alam, M. H., Ryu, W.-J., Lee, S., 2016. Joint multi-grain topic sentiment: modeling semantic aspects for online >reviews. Information Sciences 339, 206–223._
>
> You can download the Dataset with the link:
> [trip-advisor-hotel-reviews](https://www.kaggle.com/andrewmvd/trip-advisor-hotel-reviews)
In the original Dataset the target **Rating** column contains the values 0* - 5*.
In a modified version of the dataset we will try to predict the **norm_rating** column based on the **Review** text column as a **classification task** with:
* class 0 - Negative reviews (1* & 2* rating)
* class 1 - Neutral reviews (3* rating)
* class 2 - Positive reviews (4* & 5* rating)
## Initialize the Workspace and create an Experiment
```
# Connect to the Azure ML workspace described by the local config.json.
ws = Workspace.from_config()
# choose a name for experiment
experiment_name = 'automl_review_classifier'
experiment=Experiment(ws, experiment_name)
experiment
# Echo the workspace coordinates for a quick sanity check.
print(f"subscription key {ws.subscription_id}")
print(f"resource group {ws.resource_group}")
print(f"workspace name {ws.name}")
```
## Load the Dataset and perform a train test split
```
import pandas as pd
from sklearn.model_selection import train_test_split
filepath_2_dataset = r"hotel_reviews_featurized_roberta.csv"
# Read the Dataset as a pandas dataframe
hotel_review_dataset = pd.read_csv(filepath_2_dataset)
print(f"Dataset Shape: {hotel_review_dataset.shape}")
hotel_review_dataset.describe()
```
### First the same train test split is performed for the Dataset to make it available to both AutoML and Hyperdrive
```
# Get hotel review text and normalized rating
X = hotel_review_dataset.drop(columns=['norm_rating'])
y = list(hotel_review_dataset.norm_rating)
# FIX: split the feature matrix X (the original passed the full dataframe,
# leaving X unused and carrying the label column inside the features).
# random_state fixes the split so AutoML and HyperDrive see the same data.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(f"X_train: {X_train.shape}\nX_test: {X_test.shape}\ny_train: {len(y_train)}\ny_test: {len(y_test)}")
```
### The training set and test sets will be registered separately to ensure strict separation
```
# Re-attach the labels so each registered dataset carries its own
# 'norm_rating' target column alongside the features.
X_train['norm_rating'] = y_train
X_test['norm_rating'] = y_test
print(X_train.shape)
print(X_test.shape)
```
### The AutoML train/testsets should contain just the text column and norm rating column (no feature engineering)
#### Upload the different train/test sets
```
# AutoML variant: keep only the raw review text plus the target
# (no engineered features, per the heading above).
X_train_automl = X_train.loc[:, ['text', 'norm_rating']]
X_test_automl = X_test.loc[:, ['text', 'norm_rating']]
os.makedirs("data", exist_ok=True)
# Upload the training/test data in the default datastore
train_dataset_path_automl = "data/train_set_automl_clean.csv"
X_train_automl.to_csv(train_dataset_path_automl, index=False)
test_dataset_path_automl = "data/test_set_automl_clean.csv"
X_test_automl.to_csv(test_dataset_path_automl, index=False)
# HyperDrive variant: keep the featurized columns, drop the raw text.
X_train_hyper = X_train.drop(columns =["text"])
X_test_hyper = X_test.drop(columns = ["text"])
train_dataset_path = "data/train_set_hyper_clean.csv"
X_train_hyper.to_csv(train_dataset_path, index=False)
test_dataset_path = "data/test_set_hyper_clean.csv"
X_test_hyper.to_csv(test_dataset_path, index=False)
# Push the local ./data folder to the workspace's default datastore.
datastore = ws.get_default_datastore()
datastore.upload(src_dir="data", target_path="data")
```
### Load the training and test Datasets and register them
```
# Create tabular datasets from the uploaded CSVs and register them in the
# workspace so both AutoML and later runs can retrieve them by name.
dataset_training = Dataset.Tabular.from_delimited_files(path = [(datastore, ("data/train_set_automl_clean.csv"))])
dataset_training = dataset_training.register(workspace=ws, name="auto-ml-training-data", description="Hotel Review AutoML Training Data")
dataset_test = Dataset.Tabular.from_delimited_files(path = [(datastore, ("data/test_set_automl_clean.csv"))])
# BUG FIX: register the test dataset itself -- the original called
# dataset_training.register(...) here, publishing the TRAINING data
# under the "auto-ml-test-data" name.
dataset_test = dataset_test.register(workspace=ws, name="auto-ml-test-data", description="Hotel Review AutoML Test Data")
pandas_df = dataset_test.to_pandas_dataframe()
```
### Hotel Review example
>outstanding cleanliness value location wanted stay central location london
luna simone hotels fell search area checked reviews trip advisor decided book certainly
not dissapointed location ideal short walk transit important sight seeing
locations absolutely cleanest hotels stayed breakfast good served
management gives chance talk owners management staff outstanding friendly assist
site seeing plans no additional charge fact probably save money got london bus tour
tickets hotel vendor rate thinking trip uk europe luna simone london hotel staying going
london reccomend luna simone'
## Define a Compute Target for AutoML
```
## Define a Compute Target for AutoML
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cpu_cluster_name = "cpu-cluster-1"
try:
# Reuse the cluster if it is already provisioned in this workspace.
compute_target = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print("Found existing Compute Target")
except ComputeTargetException:
# Otherwise provision a CPU cluster of up to 4 nodes (matching the
# max_concurrent_iterations used in the AutoML settings below).
compute_config = AmlCompute.provisioning_configuration(vm_size = "Standard_D2_V2", max_nodes=4)
compute_target = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
```
## AutoML Configuration
* _experiment_timeout_minutes_: was set to prevent the experiment from running for long timer periods with high cost
* _max_concurrent_iterations_: was set to 4 since only 4 compute target nodes are available for parallel child runs
* _primary_metric_: was set to accuracy, i.e. the share of correctly classified reviews across all classes (note: the code sets "accuracy", not AUC_weighted)
* _n_cross_validations_: 5 crossvalidations were selected, since this results in a more robust mean/std estimation for each model
* _enable_early_stopping_: to prevent unproductive runs which lead to no improvement and costs
* _compute_target_: needs to be defined to specify where the AutoML computations run
* _task_: needs to be classification since the label column is defining separate classes
* _training_data_: corresponds to the training set
* _label_column_: corresponds to the target/label column defining the separate classes
* _debug_log_: defined to enable detailed logging of automl errors
```
from azureml.train.automl.automlconfig import AutoMLConfig

## Define key AutoML Settings
# BUG FIX: a stray duplicated "automl_settings = {" line made this cell a
# syntax error; the settings dict is now opened exactly once.
automl_settings = {
    "experiment_timeout_minutes": 20,   # cap run time to limit cost
    "max_concurrent_iterations": 4,     # matches the 4-node cluster above
    "primary_metric": "accuracy",       # share of correctly classified reviews
    "n_cross_validations": 5            # 5-fold CV for robust model scoring
}

## Setup an AutoMLConfig object
automl_config = AutoMLConfig(
    compute_target=compute_target,
    task="classification",
    training_data=dataset_training,
    label_column_name="norm_rating",
    enable_early_stopping=True,
    debug_log="automl_errors.log",
    **automl_settings
)

# The Experiment needs to be submitted in order to execute the AutoML run
automl_run = experiment.submit(automl_config)
```
## Run Details
Write about the different models trained and their performance. Why do you think some models did better than others?
```
from azureml.widgets import RunDetails
# Show the live run-monitoring widget, then block until the AutoML run finishes.
RunDetails(automl_run ).show()
automl_run.wait_for_completion(show_output=True)
```
## Performance metrics and Best Model
TODO: In the cell below, get the best model from the automl experiments and display all the properties of the model.
### Get the best model and the best run
```
# Retrieve the best-scoring child run of the AutoML experiment and
# register its serialized model in the workspace model registry.
best_child = automl_run.get_best_child()
print(best_child.get_file_names())
best_model = best_child.register_model(model_name="best-automl-model", model_path="outputs/model.pkl")
```
## Model Deployment
In the cell below, register the model, create an inference config and deploy the model as a web service.
```
from azureml.core.environment import Environment
from azureml.core.model import Model
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.model import InferenceConfig
from azureml.core.webservice import AciWebservice

# Create the environment the scoring container will run in
myenv = Environment(name="myenv")
conda_dep = CondaDependencies()
# Define the packages needed by the model and scripts
conda_dep.add_conda_package("pandas")
conda_dep.add_conda_package("numpy")
conda_dep.add_conda_package("scikit-learn")
conda_dep.add_conda_package("xgboost")
# You must list azureml-defaults as a pip dependency
conda_dep.add_pip_package("azureml-defaults")
# Adds dependencies to PythonSection of myenv
myenv.python.conda_dependencies=conda_dep

# Pair the entry script with the environment, then deploy the registered
# model to an Azure Container Instance (1 CPU core, 1 GB RAM).
inference_config = InferenceConfig(entry_script="automl_score.py",
                                   environment=myenv)
service_name = 'automl-review-classification'
aci_config = AciWebservice.deploy_configuration(cpu_cores=1, memory_gb=1)
service = Model.deploy(workspace=ws,
                       name=service_name,
                       models=[best_model],
                       inference_config=inference_config,
                       deployment_config=aci_config,
                       overwrite=True)
# Block until the container is up, then show its scoring endpoint
service.wait_for_deployment(show_output=True)
print("scoring URI: " + service.scoring_uri)
```
### Safely enter the API key via getpass so it is not shown in plain text
```
import getpass
# Prompt for the endpoint key so it never appears in the notebook output
key = getpass.getpass("Enter the API Key of the endpoint")
import requests
import json
# Get an example text from the test set pandas dataframe and create a HTTP request payload
example_text = pandas_df.iloc[0, 0]
data = json.dumps({"data": [{'text': example_text}]})
input_data = bytes(data, encoding="utf-8")
# Set the content type
headers = {'Content-Type': 'application/json'}
# authentication is enabled, so we set the authorization header
headers['Authorization'] = f'Bearer {key}'
# NOTE(review): hard-coded endpoint URI; using service.scoring_uri from the
# deployment cell would avoid drift when the service is re-deployed — confirm.
scoring_uri = "http://824c9ffc-835d-4d97-990f-692ecc24aae0.southcentralus.azurecontainer.io/score"
# Maps the model's integer class labels to human-readable sentiment names
mapping_dict = {0: "Negative", 1: "Neutral", 2: "Positive"}
# Make the request and display the classification results
response = requests.post(scoring_uri, input_data, headers=headers)
print(f"Prediction for hotel review: \n\n{example_text}\n")
print(f"It is a: {mapping_dict[json.loads(response.json())['result'][0]]} hotel review!")
```
### Print the logs of the webservice
```
# Dump the ACI webservice logs (useful for debugging scoring failures).
print(service.get_logs())
```
### Delete the webservice
```
# Tear down the ACI webservice so it stops incurring cost.
service.delete()
```
| github_jupyter |
<a href="https://colab.research.google.com/drive/1F22gG4PqDIuM0R4zbzEKu1DlGbnHeNxM?usp=sharing" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
By [Ibrahim Sobh](https://www.linkedin.com/in/ibrahim-sobh-phd-8681757/)
## In this code, we are going to implement a basic image classifier:
- Load the dataset (MNIST hand written digits)
- Design a deep learning model and inspect its learnable parameters
- Train the model on the training data and inspect learning curves
- Evaluate the trained model on the never seen testing data
- Save the model for later use
- Load and use the model
```
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import plot_model
from PIL import Image
from keras import backend as K
import matplotlib.pyplot as plt
# Training hyperparameters
batch_size = 128
num_classes = 10  # digits 0-9
epochs = 10 #50
# input image dimensions
img_rows, img_cols = 28, 28
```
## Load the data

```
# load data, split into train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Reshape to NCHW or NHWC depending on the backend's preferred layout
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

# Scale pixel intensities from [0, 255] to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# small data: keep only the first 10k samples to speed up training
data_size = 10000
x_train = x_train[:data_size]
y_train = y_train[:data_size]
x_test = x_test[:data_size]
y_test = y_test[:data_size]
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot labels)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
```
## Build the DNN model
```
# Small conv net: two conv layers -> max-pool -> dropout -> dense head
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))   # regularization after pooling
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))    # heavier dropout before the classifier
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.Adadelta(),
              metrics=['accuracy'])
model.summary()
# Render the architecture diagram and display it inline
plot_model(model, to_file="mnistcnn.png", show_shapes=True)
img = Image.open('./mnistcnn.png')
img
```
## Train the model
```
# Train the network; `history` keeps per-epoch loss/accuracy for plotting.
# NOTE(review): the test set is used as validation_data here — fine for a
# demo, but a proper held-out validation split is preferable; confirm intent.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
```
## Evaluate the model
```
# Final evaluation on the test set; score = [loss, accuracy]
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Plot train vs. test loss per epoch
plt.figure(figsize=(10, 7))
plt.plot(history.history['loss'], label='Train')
plt.plot(history.history['val_loss'], label='Test')
plt.title('Learning curve')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend()
```
## Save and load the trained model
```
from keras.models import load_model
# save the model
model.save('my_model.h5') # creates a HDF5 file 'my_model.h5'
del model # deletes the existing model
!ls -l
# load the saved model
myloadednewmodel = load_model('my_model.h5')
myloadednewmodel.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
```
| github_jupyter |
# Searching and sorting
Now we're getting more into the 'numerical methods' part of the course!
Today, we will delve into the following:
* how to write **pseudo code**
* **computational complexity** (big-O notion).
* **search algorithms** (sequential, binary)
* **sort algorithms** (bubble, insertion, quick)
**Search** and **sort** algos are at the heart of computer science.
Understanding these is the first thing you get into at DIKU or DTU, so we are also going to get a taste of them.
**Links to further material:**
If you feel inspired by the material here, you can try your hand at solving algorithmic challenges at [Project Euler](https://projecteuler.net).
(there are both easy and harder exercises to choose from)
```
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import time
import string
import random
import sys
from IPython.display import Image
```
# Algorithms - what are they even?
**Technically:** An unambiguous specification of how to solve a class of problems.
**In a nut shell:** *An algo is a recipe.*
Even a simple cooking recipe is an algorithm..
1. Preheat the oven
2. Mix flour, sugar and eggs
3. Pour into a baking pan
etc.
**Properties of an algorithm:**
1. Unambiguous termination criteria
1. Pre-defined inputs
2. Pre-defined ouputs
3. Guaranteed finite runtime
4. Correct result
## Simple example: $\max\{ \ell\}$
**Problem:** Given a list of positive numbers, return the largest number in the list.
**Inputs:** A list `L` of positive numbers.
**Outputs:** A number.
**Algorithm:** `find_max()`
1. Set `maxL` to 0.
2. For each `x` in the list `L`, compare it to `maxL`. If `x` is larger, set `maxL` to `x`.
3. `maxL` is now set to the largest number in the list.
> **Note:** The above is called **pseudo-code** (understandable across programming languages).
**Implementation** in Python:
```
def find_max(L):
    """Return the largest number in the list L.

    Args:
        L (list): List of numbers.

    Returns:
        The maximum element, or 0 for an empty list (preserving the
        original contract for lists of positive numbers).

    FIX: the original initialized the running maximum to 0, which returns
    a wrong answer when L contains only negative numbers. Initializing
    from the first element makes the result correct for any numbers.
    """
    if not L:
        return 0
    maxL = L[0]
    for x in L[1:]:
        if x > maxL:
            maxL = x
    return maxL
```
**Question:** An error *might* occur if `L` is not restricted to contain strictly positive numbers. What could happen?
**Bonus info:** Python, and other modern languages, actually tries to **predict** the result of an `if` statement before it is reached and prepares the following set of instructions. This is called *branch prediction* and is a major source of computational improvement. If you have a lot of `if-statements` that are not predictable, eg. because of randomized data, it may be a drag on computation time.
## Algorithmic complexity
Algorithms can be characterized by the number of operations needed to perform them. This is called their complexity.
The `find_max()` algorithm has `n = len(L)` operations each making a *comparison* (`x > max`) and (perhaps) an *assignment* (`max = x`).
The number of operations increase linearily in the length of the input list (the order of the function is linear).
**Mathematically** we say that `find_max()` has linear complexity, \\(O(n)\\) where $n$ is the input size (length of L).
Other **common levels of complexity** are:
1. Constant, $O(1)$ (i.e. independent of input size)
2. Logarithmic, $O(\log n)$
3. Linear, $O(n)$
4. Log-linear, $O(n \log n)$
5. Quadratic, $O(n^2)$
6. Cubic, $O(n^3)$
7. Exponential, $O(2^n)$ (**curse of dimensionality**)
If the performance of an algorithm **depends on the exact values of the input** we differentiate between
1. **Best** case
2. **Average** case (across all possible inputs)
3. **Worst** case
Complexity is an **asymptotic** measure,
1. Only the number of operations matter (not their type or cost)
2. Only the highest order matter
<img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/bigO.png" alt="bigO" width=40% />
**In practice however:**
* The cost of each operation matters for fixed input size.
* The amount and flow of **memory** matter for speed (cache vs. RAM vs. disc).
* Therefore, it is **not guaranteed** that an algorithm of lower complexity executes faster than that of higher complexity for all cases.
Especially, there may be differences in the costs of memory allocation and deletion which are not counted into the measure of complexity. In the case above, we were not counting in the *deletion* of objects, that would necessarily follow.
## Example of a complexity calculation
```
def demo_algorithm(n):
    """Toy function used only to count operations for the complexity
    derivation below: T(n) = 3 + 6n^2 + 5n + 1, i.e. O(n^2).
    The computed values are intentionally unused.
    """
    # a. 3 assignments
    a = 5
    b = 6
    c = 10
    # b. 3*n^2 multiplications and 3*n^2 assignments
    for i in range(n):
        for j in range(n):
            x = i * i
            y = j * j
            z = i * j
    # c. n multiplications, additions, and assignments
    # + n multiplications and assignments
    for k in range(n):
        w = a*k + 45
        v = b*b
    # d. 1 assignment
    d = 33
```
The **total number of operations** are: $T(n) = 3 + 6n^2 + 5n + 1 = 6n^2 + 5n + 4$
Notice: this is an exposition of operations. There are of course also operations involved in multiplication itself, which means that the number above is not indicative of the *total* number of operations that the computer must handle.
**In big-O notation**: `demo_algorithm()` is $O(n^2)$, i.e. *quadratic complexity*
**$\large \color{purple}{Question}$:** What is the complexity of these two algorithms?
```
def algorithm_a(n):
    """Quiz function: determine its big-O complexity (answer not spoiled here)."""
    s = 0
    for i in range(n):
        for j in range(n):
            for k in range(n):
                s += 1

def algorithm_b(n):
    """Quiz function: three sequential loops — determine its big-O complexity."""
    s = 0
    for i in range(n):
        s *= 2
    for j in range(n):
        s *= 2
    for k in range(n):
        s *= 2
```
## The complexity of operations on data containers
### How are lists and dictionaries structured?
The fact that our data containers have a certain structure in memory matters *greatly* for the speed of the methods (read: algos) that we apply on them.
Let's have a look at how lists and dictionaries are organized.
**Lists:**
* A list is an ordered set of references to objects (eg. floats).
* Each reference *points* to an address in memory where values are stored.
* The reference variables of addresses (called pointers) of data in a list are ligned up next to each other in memory, such that they are increments of `1` apart. A bit like a train, if you will.
* Need therefore **only** to keep track of the reference to the address of the **first element**, `l[0]`, and the rest follows in line.
* If by $a$ we denote the address of the first element of `l`, then looking up element `l[i]` means accessing the $a+i$ address in memory using its reference variable.
* Therefore, the algorithmic complexity of looking up an element `l[i]` does **not depend** on the size of `l`. *Which is nice.*
```
# A demonstration of addresses of elements in a list
x = [5, 21, 30, 35]
x_ref = []
x_id = []
# The addresses of x's elements
for i in x:
    x_id.append(id(i))           # Each object has its own unique id
    x_ref.append(hex(x_id[-1]))  # CPython detail: the id doubles as the memory address (hex)
# The addresses printed below are NOT lined up next to each other in memory.
# Only the reference variables are lined up, but those we cannot see directly in Python.
print('Id of each element in x:')
for i in x_id:
    print(i)
print('\nMemory address of elements in x: ', x_ref)
```
### A quick overview of list operations
|Operation | Code | Complexity |
|:----------|:------------------|:--------------:|
|**Index:** | `l[i]` | $O(1)$ |
|**Store:** | `l[i] = 0` | $O(1)$ |
|**Length:** | `len(l)` | $O(1)$ |
|**Append:** | `l.append(n)` | $O(1)$ |
|**Slice:** | `l[a:b]` | $O(b-a)$ |
|**Pop last:** | `l.pop()` | $O(1)$ |
|**Pop i:** | `l.pop(i)` | $O(N)$ |
|**Clear:** | `l.clear()` | $O(N)$ |
|**check:** | `l1 == l2` | $O(N)$ |
|**Insert:** | `l[a:b] = ...` | $O(N)$ |
|**Delete:** | `del l[i]` | $O(N)$ |
|**Containment:** | x `in/not in l` | $O(N)$ |
|**Copy:** | `l.copy()` | $O(N)$ |
|**Sort:** | `l.sort()` | $O(N \log N)$ |
**A few notes:**
* Getting the length of a list is $O(1)$ because Python keeps track of a list's size as it created and expanded. The length is stored as an attribute to the list.
* Popping (getting the last element) is $O(1)$ because it only requires detaching the last reference in the "train" of references that comprises a list.
* Inserting an element into, or removing it from, the middle of a list requires moving around all the references in memory "behind" the inserted element and is therefore $O(N)$.
* Checking for containment of an element is $O(N)$ because all elements in the list may have to be visited.
### A beautiful solution
**Question:** how do you delete element `i` from list `l` in $O(1)$? (*even when it says above that `del` is an $O(N)$ operation*)
**Answer:**
`l[i] = l.pop()`
The `pop` operation will delete the last element of `l` while also using it to overwrite element `i` in `l`. Hence, last element is preserved while element `i` disappears.
**Note** this won't work if `i` is the last element. A full implementation needs to account for this, but it will still be $O(1)$.
**Dictionaries:**
* A dictionary is a set of *buckets* (think lists) which can store items.
* A dictionary with 1 element and 5 buckets: `[] - [] - [] - [<key,value>] - []`
* Contrary to lists, there is no explicit indexing of a dictionary. No `d[i]`, we can use a string instead, `d[str]`.
* However, the buckets of a dictionary are lined up just like a the references in a list.
* Python therefore needs to locate a bucket, when adding a `<key,value>` pair.
* Buckets are located using a **hash function** on the key of an element.
* This **hash function** converts the key to a integer number, which can then serve as an index.
* Obviously, a useful hash function must be very fast and work on strings as well as floats.
* A fast hash function enables $O(1)$ lookup in a dictionary.
* Hashing also implies that `key in dict.keys()` is $O(1)$, thus independent of dictionary size! (Very handy)
* When an empty dictionary is created, it contains 5 buckets. As a 6th element is added to the dictionary, it is rescaled to 10 buckets. At 11 elements, rescaled to 20 buckets and so on.
* Dictionaries thus **pre-allocate** memory to be efficient when adding the next element.
* *Taking up memory in favor of fast execution is a basic trade-off in algorithms!*
```
d = {'x': 1, 'z': 2}
print('size of md in bytes:', sys.getsizeof(d))
# Start adding elements to d and see how memory usage changes
# (keys are single random letters, so a repeated key overwrites instead of growing the dict)
for i in range(25):
    key = random.choice(string.ascii_letters)
    value = random.random()
    d[key] = value
    print(f"key: {key} value: {value: 1.3f} \t size: {i+1:2.0f} bytes: {sys.getsizeof(d)} \t hashed key: {hash(key)}")
# Notice that there may be collisions as some keys are similar, and therefore get same hash value.
# Python can handle such collisions, but they do create a drag on performance.
```
### A quick overview of dictionary operations
|Operation | Code | Complexity |
|:----------|:------------------|:--------------:|
|**Index:** | `d[k]` | $O(1)$ |
|**Store:** | `d[k] = v` | $O(1)$ |
|**Delete:** | `del d[k]` | $O(1)$ |
|**Length:** | `len(d)` | $O(1)$ |
|**Clear:** | `d.clear()` | $O(1)$ |
|**View:** | `d.keys()` | $O(1)$ |
Notice the difference in complexity for **deletions**. Faster in dictionaries because they are unordered.
You can checkout a [comprehensive table](https://www.ics.uci.edu/~pattis/ICS-33/lectures/complexitypython.txt) of Python operations' complexity.
## Multiplication and Karatsuba's algorithm
Ever wondered how Python multiplies two numbers? It actually depends on the size of those numbers!
**Small numbers:** 3rd grade algorithm. **Large numbers:** Karatsuba's algorithm.
### Demonstration
Consider the multiplication $2275 \times 5013 = 11,404,575$
**3rd grade algorithm**
(this one we all know - although it's been a while)
The 3rd grade algorithm is $O(n^2)$. To see this, think of the multiplication part as nested for-loops throughout the 10s, 100s, 1000s etc. Then there is the addition part, which is also $O(n^2)$.
```
# Display the worked 3rd-grade multiplication example
Image(filename = "ThirdGradeMultiplication.jpg", width = 230, height = 230)
```
**Karatsuba's algorithm**
It is not super intuitive what goes on here. But basically, it's splitting the numbers to be multiplied into multiples of 10s and then performs operations on those splits.
The algorithm is only $O(n^{\log_2 3}) \approx O(n^{1.585})$, so better than the 3rd grade algorithm for large $n$.
**Some preparation:**
$x = 2275$, $y = 5013$
Note the identities:
$x = 22 \times 10^2 + 75$
$y = 50 \times 10^2 + 13$
We denote:
$x_a = 22, \: x_b = 75$
$y_a = 50, \: y_b = 13$
**The algorithm**
*First compute:*
$A = x_a \times y_a$
$B = x_b \times y_b$
$C = (x_a + x_b) \times (y_a +y_b) - A - B$
*Then we have that*
$x \times y = A \times 10^4 + C\times 10^2 + B$
**In numbers**
$A = 22 \times 50 = 1100$
$B = 75 \times 13 = 975$
$C = (22 + 75)(50 + 13) - 1100 - 975 = 4036$
$x \times y = 1100 \times 10^4 + 4036\times 10^2 + 975 = 11,404,575$
## Linear search (also called sequential search)
**Problem:** Check whether element is in list. See the `containment` row in the list of complexity above.
**Inputs:** A list `L` and a potential element `x`.
**Outputs:** Boolean.
**Algorithm:** `linear_search()`
1. Set variable `found == False`
2. For each `y` in the list `L`, compare it to `x`. If `x == y` set `found = True` and break loop.
3. `found` now shows whether the element is in the list or not
```
L = [1, 2, 32, 8, 17, 19, 42, 13, 0] # test list
# Exercise stub: implement linear search yourself before peeking at the solution below.
def linear_search(L,x):
    pass
print('found 3:',linear_search(L,3))
print('found 13:',linear_search(L,13))
def linear_search(L,x):
    """Sequentially scan L for x (brute-force search).

    Args:
        L (list): List to search in.
        x (any): Element to search for.

    Returns:
        found (bool): Boolean for whether element is in list or not.
    """
    # walk the list front to back; stop at the first match
    for element in L:
        if element == x:
            return True
    # exhausted the list without a match
    return False
# Sanity checks: 3 is absent from the test list, 13 is present.
print('found 3:',linear_search(L,3))
print('found 13:',linear_search(L,13))
```
**Terminology:** The linear search algorithm is called a **brute force** algorithm (we solve the problem without any intermediate steps).
**Analysis:** Each operation consists of a *comparision* and an *incremenet*:
1. **Best case:** $O(1)$ (element present and first in list)
2. **Average case:**
* $O(\frac{n}{2})=O(n)$ (if element present), or
* $O(n)$ (if element *not* present)
3. **Worst case:** $O(n)$ (element not present or last in list)
**Note:** Much faster ($O(1)$) on a dictionary, because we just apply the hash function to `x`.
## Binary search ("the phonebook search")
**Problem:** You know that a list is sorted. Check whether an element is contained in it.
**Inputs:** A list `L` and a potential element `x`.
**Outputs:** Boolean.
**Algorithm:** `binary_search()`
1. Set `found` to `False`,
2. Locate the `midpoint` of the list part that remains to be searched.
2. Check whether the `midpoint` is the one we are searching for:
* If yes, set `found=True` and go to step 3.
* If no, and the `midpoint` is *larger*, restrict attention to the *left* part of the list and restart step 2 if not empty.
* If no, and the `midpoint` is *smaller*, restrict attention to the *right* part of the list and restart step 2 if not empty.
3. `found` now shows whether the element is in the list or not
**Middle element:** Define the midpoint between index `i` and index `j >= i` as `i + (j-i)/2`, rounded down if necessary.
```
# Demonstrate the midpoint formula for a few (i,j) pairs.
for i in [0,2,4]:
    for j in [4,5,9]:
        print(f'(i,j) = {i,j} -> midpoint = {i+((j-i)//2)}') # note integer division with //
L = [0, 1, 2, 8, 13, 17, 19, 32, 42] # test list (must be sorted for binary search)
# Exercise stub: implement binary search yourself before peeking at the solution below.
def binary_search(L,x):
    pass
print('found 3:',binary_search(L,3))
print('found 13:',binary_search(L,13))
def binary_search(L,x,do_print=False):
    """Search a sorted list for x by repeatedly halving the search window.

    Args:
        L (list): Sorted list to search in.
        x (any): Element to search for.
        do_print (bool): If True, print the remaining slice and the midpoint
            value at every iteration.

    Returns:
        found (bool): Boolean for whether element is in list or not.
    """
    # the window [lo, hi] (inclusive) still needs to be searched
    lo = 0
    hi = len(L) - 1
    while lo <= hi:
        mid = lo + (hi - lo) // 2  # integer midpoint of the window
        if do_print:
            print(L[lo:hi+1],L[mid])
        if L[mid] == x:
            return True
        # discard the half of the window that cannot contain x
        if L[mid] > x:
            hi = mid - 1
        else:
            lo = mid + 1
    return False
# Sanity checks plus a verbose run showing the shrinking search window.
print('found 3:',binary_search(L,3))
print('found 13:',binary_search(L,13))
binary_search(L,32,do_print=True)
```
**Terminology:** This is called a **divide-and-conquer** algorithm.
**Analysis:**
* After 1 comparison there is approximately $\frac{n}{2}$ elements left.
* After 2 comparisons there is approximately $\frac{n}{4}$ elements left.
* After 3 comparisons there is approximately $\frac{n}{8}$ elements left.
* ...
* After $j$ comparisons there is approximately $\frac{n}{2^j}$ number of elements left.
**When is there one element left?** $\frac{n}{2^j} = 1 \Leftrightarrow j = \frac{\log n}{\log 2}$
**Result:** The binary search algorithm is $O(\log n)$, i.e. logarithmic complexity.
# Recursion
**Problem:** Sum the elements in a list.
```
L = [1,3,5,7,9]  # demo list; its sum is 25
```
**Simple:** Just sum them:
```
def listsum(L):
    """Return the sum of all elements in L via explicit accumulation."""
    total = 0
    for value in L:
        total = total + value
    return total
# Expect 25
print(listsum(L))
```
**Recursion:** The sum of a list is the sum of the first element and the sum of the rest of the list:
```
def listsum_recursive(L):
    """Return the sum of L recursively: head plus the sum of the tail.

    Divide-and-conquer formulation that avoids an explicit loop.
    """
    head, tail = L[0], L[1:]
    return head if not tail else head + listsum_recursive(tail)
# Expect 25, same result as the iterative version
print(listsum_recursive(L))
```
This is also a divide-and-conquer strategy. Avoids loops.
## Fibonacci numbers
**Definition:**
$$
\begin{aligned}
F_0 &= 0 \\
F_1 &= 1 \\
F_n &= F_{n-1} + F_{n-2} \\
\end{aligned}
$$
**Implementation:**
```
def fibonacci(n):
    """Return the n-th Fibonacci number via naive double recursion.

    Exponential time by design (teaching example); memoize with
    functools.cache for real use, as noted in the text below.
    """
    # base cases F_0 = 0, F_1 = 1 collapse to returning n itself
    if n in (0, 1):
        return n
    return fibonacci(n - 1) + fibonacci(n - 2)
# Expect 5 (sequence: 0, 1, 1, 2, 3, 5)
fibonacci(5)
#for n in range(4):
#print(fibonacci(n))
```
### Caution!
This implementation is for demonstration purposes only. It can be greatly sped up by using the `@cache` decorator, which stores the previous return value of a function call.
If you ever want to use recursion, you must rely on **caching** of function values. Because ***recursion on itself is sloow***.
**Test approximate formula:**
```
def fibonacci_approx(n):
    """Approximate the n-th Fibonacci number with the closed-form
    (Binet) expression using the golden ratio and its conjugate."""
    sqrt5 = np.sqrt(5)
    phi = (1 + sqrt5) / 2
    psi = (1 - sqrt5) / 2
    return (phi**n - psi**n) / sqrt5
# Compare the exact recursion against the closed-form approximation.
for n in [5,10,15,20,25]:
    print(f'n = {n:3d}: true = {fibonacci(n):6d}, approximate = {fibonacci_approx(n):20.12f}')
```
## Advanced: Binary search with recursion
```
L = [0, 1, 2, 8, 13, 17, 19, 32, 42,] # test list (sorted)
# Exercise stub: implement recursive binary search before peeking at the solution below.
def binary_search_recursive(L,x):
    pass
print('found 3:',binary_search_recursive(L,3))
print('found 13:',binary_search_recursive(L,13))
def binary_search_recursive(L,x):
    """Recursive binary search on a sorted list.

    Args:
        L (list): Sorted list to search in.
        x (any): Element to search for.

    Returns:
        found (bool): Boolean for whether element is in list or not.
    """
    # empty slice: x cannot be here
    if not L:
        return False
    mid = len(L) // 2
    pivot = L[mid]
    if pivot == x:
        return True
    # recurse into whichever half can still contain x
    remaining = L[:mid] if pivot > x else L[mid+1:]
    return binary_search_recursive(remaining, x)
# Sanity checks: 3 is absent, 13 is present.
print('found 3:',binary_search_recursive(L,3))
print('found 13:',binary_search_recursive(L,13))
```
# Sorting
Sorting is a super central task of computing. IBM built its first computers in the 1930s to sort data.
Would be hard to keep track of data without sorting. Thus, many algorithms have been developed for this purpose.
We will look at a simple algorithm first, the bubble sort, which relies on swapping elements iteratively.
Function for **swapping** element `L[i]` with element `L[j]` in-place:
```
def swap(L,i,j):
    """Exchange L[i] and L[j] in place using Python's tuple-unpacking swap
    (no temporary variable needed)."""
    L[i], L[j] = L[j], L[i]
```
**Example:**
```
L = [1, 3, 4, 9, 13]
swap(L,i=0,j=1)
print('after swap',L)  # -> [3, 1, 4, 9, 13]
```
## Bubble sort
**Problem:** Sort a list of numbers in-place.
**Inputs:** List of numbers.
**Outputs:** None.
**Algorithm:** `bubble_sort()`
1. Loop through the first n-1 elements in list, swap with next element if current is larger.
2. Loop through the first n-2 elements in list, swap with next element if current is larger.
<br>
...
<br>
4. Loop through the first 3 elements in list, swap with next element if current is larger.
5. Swap the two first elements if the first is larger than the second
6. List is sorted
```
L = [54, 26, 93, 17, 77, 31, 44, 55, 20] # test list
# Exercise stub: implement bubble sort yourself before peeking at the solution below.
def bubble_sort(L):
    pass
bubble_sort(L)
print(L)
def bubble_sort(L):
    """Sort a list of numbers in place using bubble sort, O(n^2).

    Args:
        L (list): List of numbers; modified in place, nothing is returned.

    After pass p the largest p elements occupy their final positions, so
    each pass stops one element earlier than the previous one.

    IMPROVEMENT: uses Python's idiomatic tuple swap instead of calling the
    external swap() helper, making the function self-contained.
    """
    # k starts at len(L)-1 and is decreased by 1 until hitting 0
    for k in range(len(L)-1, 0, -1):
        for i in range(k):
            if L[i] > L[i+1]:
                L[i], L[i+1] = L[i+1], L[i]
L = [54, 26, 93, 17, 77, 31, 44, 55, 20]
bubble_sort(L)
print('sorted L:',L)
# Video walkthrough of bubble sort (starts 45s in)
from IPython.display import YouTubeVideo
YouTubeVideo('lyZQPjUT5B4', width=800, height=600, start=45)
```
**Another visualization of bubble sort**

**Illustration with printout:**
```
def bubble_sort_with_print(L):
    """Bubble sort that prints the list during each outer pass (teaching aid)."""
    for k in range(len(L)-1,0,-1):
        print(f'step = {len(L)-k}')
        for i in range(k):
            if L[i] > L[i+1]:
                swap(L,i,i+1)
            print(L)  # show the list after every comparison so the "bubbling" is visible
        print('')  # blank line separates the outer passes
L = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print('original',L,'\n')
bubble_sort_with_print(L)
```
**Analysis:** Bubble sort is $O(n^2)$ - do you have an intuition?
## Insertion sort
**Algorithm:** `insertion_sort()`
1. Consider the *second* element. Insert it correctly in the list of the numbers before the *second* element.
2. Consider the *third* element. Insert it correctly in the list of the numbers before the *third* element.
<br>
...
<br>
4. Consider the n'th element. Insert it correctly in the list of the numbers before the *n'th* element.
5. List is sorted
**Illustration:**
<img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/insertionsort.png" alt="insertionsort" width=50% />
```
L = [54, 26, 93, 17, 77, 31, 44, 55, 20] # test list
# Exercise stub: implement insertion sort yourself before peeking at the solution below.
def insertion_sort(L):
    pass
insertion_sort(L)
print(L)
def insertion_sort(L):
    """Sort a list of numbers in place using insertion sort.

    Args:
        L (list): List of numbers; modified in place.

    Grows a sorted prefix one element at a time: each new value is shifted
    left past every larger neighbour, then dropped into its slot.
    """
    for pos in range(1, len(L)):
        # a. the value to place, and the gap where it may go
        current = L[pos]
        slot = pos
        # b. shift larger elements one step right to open a gap for `current`
        while slot > 0 and L[slot - 1] > current:
            L[slot] = L[slot - 1]
            slot -= 1
        # c. insert the current value into the opened gap
        L[slot] = current
# Demonstrate insertion sort on the standard test list
L = [54, 26, 93, 17, 77, 31, 44, 55, 20]
insertion_sort(L)
print('sorted',L)
```
**Analysis:** Still $O(n^2)$..
**Benefits relative to bubble sort:**
1. Moves instead of swaps, 1 operation less.
2. Data is often **partially sorted** to begin with. Insertion sort benefits from that.
## Partition (+)
*Intermezzo: Solving the partition problem is useful for a so-called quicksort.*
**Problem:** Permute a list and return a splitpoint such that all elements before the point are smaller than or equal to the first element in the original list, and all elements after it are strictly larger.
**Input:** List of numbers.
**Output:** Integer.
**Algorithm:**
0. Let splitting point be first element of list.
1. From the *left* find the first element larger than split point (leftmark).
2. From the *right* find the first element smaller than split point (rightmark).
3. Swap these two elements.
4. Repeat 1-3 starting from previous leftmark and rightmark. Continue until leftmark is larger than rightmark.
5. Swap first and rightmark element.
6. Return the rightmark.
<img src="https://github.com/NumEconCopenhagen/lectures-2019/raw/master/08/quicksort.png" alt="quicksort" width=60% />
```
def partition(L,first,last):
    """partition

    Permute L[first:last+1] and return a splitpoint, such that all elements
    before the splitpoint are smaller than or equal to the first element of
    the original sub-list, and all elements afterwards are strictly larger.

    Args:
        L (list): List of numbers
        first (integer): Startpoint
        last (integer): Endpoint

    Returns:
        splitpoint (integer):
    """
    # a. initialize: pivot on the first element of the sub-list
    splitvalue = L[first]
    leftmark = first+1
    rightmark = last
    # b. find splitpoint
    done = False
    while not done:
        # i. advance leftmark past elements <= pivot
        while leftmark <= rightmark and L[leftmark] <= splitvalue:
            leftmark = leftmark + 1
        # ii. retreat rightmark past elements >= pivot
        while L[rightmark] >= splitvalue and rightmark >= leftmark:
            rightmark = rightmark -1
        # iii. check if done or swap left and right
        if rightmark < leftmark:
            done = True
        else:
            # NOTE(review): swap() is defined elsewhere in the notebook;
            # presumably it exchanges L[leftmark] and L[rightmark] - confirm.
            swap(L,leftmark,rightmark)
    # c. final swap: move the pivot to its final sorted position
    swap(L,first,rightmark)
    return rightmark

L = [54, 26, 93, 17, 77, 31, 44, 55, 20]
print('before',L)
splitpoint = partition(L,0,len(L)-1)
print('after',L)
print('split',L[:splitpoint+1],L[splitpoint+1:])
```
## Quicksort (+)
**Algorithm:** `quick_sort()`
1. Recursively partition the list and the sub-lists when splitting at the splitpoint.
2. The list is now sorted.
```
def quick_sort(L):
    """Sort the list L in place using quicksort."""
    _quick_sort(L, 0, len(L) - 1)

def _quick_sort(L, first, last):
    # Sub-lists of length 0 or 1 are already sorted.
    if first < last:
        cut = partition(L, first, last)
        _quick_sort(L, first, cut - 1)  # sort the left part
        _quick_sort(L, cut + 1, last)   # sort the right part

L = [54, 26, 93, 17, 77, 31, 44, 55, 20]
quick_sort(L)
print('sorted',L)
```
**Analysis:** $O(n \log n)$ on average, but still $O(n^2)$ in the worst case [we don't derive this, just trust me].
**Visualization of quicksort**

## Advanced: Comparison of performance
Let us compare the different sorting algorithms:
1. Bubble
2. Insertion
3. Quick
4. Quick (as implemented in Numpy)
```
# NOTE: relies on np, plt, time and the sorting functions defined earlier in the notebook.

# a. settings
n_vec = np.array([100,200,300,400,500,750,1000,1500,2000,4000,8000,16000]) # number of elements in list
K = 50 # number of repetitions when timing

# b. allocate vectors for results
bubble = np.empty(len(n_vec))
insertion = np.empty(len(n_vec))
quick = np.empty(len(n_vec))
quicknp = np.empty(len(n_vec))

# c. run time trials
np.random.seed(1999)
for i,n in enumerate(n_vec):

    # i. draw K random lists of length n (one copy per algorithm, so all sort identical data)
    L_bubble = []
    L_insertion = []
    L_quick = []
    L_quicknp = []
    for k in range(K):
        L = np.random.uniform(size=n)
        np.random.shuffle(L)
        L_bubble.append(L.copy())
        L_insertion.append(L.copy())
        L_quick.append(L.copy())
        L_quicknp.append(L.copy())

    # ii. bubble sort (skipped for large n - it would take too long)
    if n <= 500:
        t0 = time.time() # start timer
        for k in range(K):
            bubble_sort(L_bubble[k])
        bubble[i] = time.time()-t0 # calculate time since start
    else:
        bubble[i] = np.nan

    # iii. insertion sort (same cutoff as bubble sort)
    if n <= 500:
        t0 = time.time()
        for k in range(K):
            insertion_sort(L_insertion[k])
        insertion[i] = time.time()-t0
    else:
        insertion[i] = np.nan

    # iv. quicksort (pure-Python version, skipped beyond n = 2000)
    if n <= 2000:
        t0 = time.time()
        for k in range(K):
            quick_sort(L_quick[k])
        quick[i] = time.time()-t0
    else:
        quick[i] = np.nan

    # v. quicksort (numpy implementation) - timed for every n
    t0 = time.time()
    for k in range(K):
        L_quicknp[k].sort() # built-in numpy method
    quicknp[i] = time.time()-t0

    # vi. check that all sorted lists are the same (only where the algorithm ran)
    for k in range(K):
        if n <= 500:
            assert np.all(L_bubble[k] == L_quick[k])
            assert np.all(L_insertion[k] == L_quick[k])
        if n <= 2000:
            assert np.all(L_quicknp[k] == L_quick[k])

# d. figure: all four algorithms, restricted to n <= 2000
I = n_vec <= 2000
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(n_vec[I],bubble[I],label='bubble')
ax.plot(n_vec[I],insertion[I],label='insertion')
ax.plot(n_vec[I],quick[I],label='quick')
ax.plot(n_vec[I],quicknp[I],label='quick (numpy)')
ax.set_xlabel('number of elements')
ax.set_ylabel('seconds')
ax.legend(facecolor='white',frameon=True);

# e. figure: numpy quicksort alone, over the full range of n
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(n_vec,quicknp,label='quick (numpy)')
ax.set_xlabel('number of elements')
ax.set_ylabel('seconds')
ax.legend(facecolor='white',frameon=True);
```
**Take-aways:**
1. Complexity matters
2. Implementation matters (and the built-in functions and methods are hard to beat)
# Summary
**This lecture:**
1. Algorithms and their complexity (big-O notation)
2. Function recursion (functions calling themselves)
3. Searching algorithms (linear, binary)
4. Sorting algorithms (bubble, insertion, quick)
**Your work:** The problem set is closely related to the algorithms presented here.
**Next lecture:** Solving equations (single vs. system, linear vs. non-linear, numerically vs. symbolically)
| github_jupyter |
```
import pandas as pd
import numpy as np
from datetime import datetime

df = pd.read_csv('dr_loans.csv')
df.head()

# check distribution of the number of loans per profile
df.groupby('loan_num').size()

# date format used throughout the file
date_format = "%Y-%m-%d"

# example: mean begin/end date for loan number 20 and the span between them
var1 = df[(df['loan_num']==20)]['begin_date']
var2 = df[(df['loan_num']==20)]['end_date']
begin_mean = (np.array(var1, dtype='datetime64[s]')
              .view('i8')
              .mean()
              .astype('datetime64[s]'))
end_mean = (np.array(var2, dtype='datetime64[s]')
            .view('i8')
            .mean()
            .astype('datetime64[s]'))
a = datetime.strptime(str(begin_mean)[:10], date_format)
b = datetime.strptime(str(end_mean)[:10], date_format)
delta = b - a
print(delta.days)

# records to check algorithm
df[((df['loan_num']==20))]

# new columns to be filled below (numeric placeholder instead of '')
df['d_to_end'] = np.nan
df['d_from_prev'] = np.nan

# FIX: the original loop indexed with .iloc[i-1] (so i=0 touched the LAST row)
# and wrote through chained assignment df[col].iloc[...] = ..., which pandas
# may silently apply to a copy. Index with i directly and write via .loc.
for i in range(len(df)):
    # duration of the loan itself (end - begin, in days)
    begin = df['begin_date'].iloc[i]
    end = df['end_date'].iloc[i]
    converted_begin = datetime.strptime(begin, date_format)
    converted_end = datetime.strptime(end, date_format)
    df.loc[df.index[i], 'd_to_end'] = (converted_end - converted_begin).days

    # days since the same profile's previous loan (-1 for a first loan)
    if df['loan_num'].iloc[i] > 1:
        profile_id = df['profile_id'].iloc[i]
        num = df['loan_num'].iloc[i]
        current_date = df['begin_date'].iloc[i]
        previous_date = df[(df['profile_id'] == profile_id) & (df['loan_num'] == num - 1)]['begin_date'].values[0]
        converted_begin = datetime.strptime(previous_date, date_format)
        converted_end = datetime.strptime(current_date, date_format)
        df.loc[df.index[i], 'd_from_prev'] = (converted_end - converted_begin).days
    else:
        df.loc[df.index[i], 'd_from_prev'] = -1
df.head()

binning_df = pd.DataFrame.copy(df[['d_to_end', 'status']])

# bin loan duration into weekly buckets
cut_labels_4 = ['1 week','2 weeks','3+ weeks']
cut_bins = [0, 7, 14, 1000]
binning_df['d_to_end'] = pd.cut(binning_df['d_to_end'], bins=cut_bins, labels=cut_labels_4)
binning_df.head()

# two frames to compute the share of CLOSED loans per duration bucket
d1 = pd.DataFrame.copy(binning_df[binning_df['status']=='CLOSED'].groupby('d_to_end')
                       .agg(closed_persent=pd.NamedAgg(column='status', aggfunc='count'))
                       .reset_index()
                       )
d2 = pd.DataFrame.copy(binning_df.groupby('d_to_end')
                       .agg(closed_persent=pd.NamedAgg(column='status', aggfunc='count'))
                       .reset_index()
                       )
d1['closed_persent'] = 100/d2['closed_persent']*d1['closed_persent']
d1
```
| github_jupyter |
<a href="https://colab.research.google.com/github/chiwoongMOON/202111PythonGrammarStudy/blob/master/chapter14.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# chapter14 예외처리
## 14-1 예외가 발생하는 상황
```
# Each of the following expressions intentionally raises an exception:
lst = [1, 2, 3]
lst[3]        # IndexError: valid indices are 0..2
3 + "coffee"  # TypeError: int + str is not supported
3 / 0         # ZeroDivisionError
```
## 14-2 예외의 처리
- 사례를 서른을 입력해본다
```
# age.py
def main():
    """Ask for an age; non-numeric input makes int() raise an unhandled ValueError."""
    print("안녕하세요.")
    age = int(input("나이를 입력하세요: "))
    print("입력하신 나이는 다음과 같습니다:", age)
    print("만나서 반가웠습니다.")
main()
```
## 14-3 보다 적극적인 예외의 처리
- 서른을 입력해본다
```
# age_expt.py
def main():
    """Handle a malformed age input with a try/except around int()."""
    print("안녕하세요.")
    try:
        age = int(input("나이를 입력하세요: "))
        print("입력하신 나이는 다음과 같습니다:", age)
    except ValueError:
        print("입력이 잘못되었습니다.")
    print("만나서 반가웠습니다.")
main()

# age_expt_conti.py
def main():
    """Keep prompting until the input parses as an integer."""
    print("안녕하세요.")
    while True:
        try:
            age = int(input("나이를 입력하세요: "))
            print("입력하신 나이는 다음과 같습니다:", age, "입력이 정상적이므로 루프를 탈출합니다!")
            break # leave the while loop once the input is valid
        except ValueError:
            print("입력이 잘못되었습니다.")
    print("만나서 반가웠습니다.")
main()
```
## 14-4 둘 이상의 예외를 처리하기
- 다음 사례에서 예상되는 문제는 무엇이 있는지 논의해보자
```
# div.py
def main():
    """Divide ten loaves among people; 0 raises ZeroDivisionError, bad input ValueError."""
    bread = 10 # ten loaves of bread
    people = int(input("몇 명? "))
    print("1인당 빵의 수: ", bread / people)
    print("맛있게 드세요.")
main()
```
- 일차적인 해결 방안
```
# div_expt1.py
def main():
    """Handle both ValueError (bad input) and ZeroDivisionError (people == 0)."""
    bread = 10 # ten loaves of bread
    try:
        people = int(input("몇 명? "))
        print("1인당 빵의 수: ", bread / people)
    except ValueError:
        print("입력이 잘못되었습니다.")
    except ZeroDivisionError:
        print("0으로는 나눌 수 없습니다.")
    print("맛있게 드세요.")
main()
```
- 더 완벽한 해결 방안
```
# div_expt2.py
def main():
    """Retry until a valid, non-zero head count is entered."""
    bread = 10 # ten loaves of bread
    while True:
        try:
            people = int(input("몇 명? "))
            print("1인당 빵의 수: ", bread / people)
            print("맛있게 드세요.")
            break
        except ValueError:
            print("입력이 잘못되었습니다.")
        except ZeroDivisionError:
            print("0으로는 나눌 수 없습니다.")
main()
```
## 14-5 예외 메시지 출력하기와 finally
```
# div_expt3.py
def main():
    """Same as div_expt2, but also print the exception message (caught 'as msg')."""
    bread = 10 # ten loaves of bread
    while True:
        try:
            people = int(input("몇 명? "))
            print("1인당 빵의 수: ", bread / people)
            print("맛있게 드세요.")
            break
        except ValueError as msg:
            print("입력이 잘못되었습니다.")
            print(msg)
        except ZeroDivisionError as msg:
            print("0으로는 나눌 수 없습니다.")
            print(msg)
main()
```
- finally 구문을 활용한다
```
# div_expt4.py
def main():
    """Demonstrate finally: it runs whether or not an exception occurred.

    Note: 'break' sits in the finally block, so the loop ends after a single
    pass - invalid input is reported but not retried.
    """
    bread = 10 # ten loaves of bread
    while True:
        try:
            people = int(input("몇 명? "))
            print("1인당 빵의 수: ", bread / people)
            print("맛있게 드세요.")
        except ValueError:
            print("입력이 잘못되었습니다.")
        except ZeroDivisionError as msg:
            print("0으로는 나눌 수 없습니다.")
            print(msg)
        finally:
            print("어쨌든 프로그램은 종료합니다.")
            break
main()
```
## 14-6 모든 예외 그냥 무시하기
- 모든 예외를 다 처리하는 방법
```
# ignore_expt.py
def main():
    """Catch-all bare except: demonstrates why this hides the actual error type."""
    bread = 10 # ten loaves of bread
    while True:
        try:
            people = int(input("몇 명? "))
            print("1인당 빵의 수: ", bread / people)
            print("맛있게 드세요.")
            break;
        except:
            print("뭔지는 몰라도 예외가 발생했군요.")
main()
```
| github_jupyter |
```
# !pip install sklearn
# !pip install featuretools
# !pip install lightgbm
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeClassifier as DT
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import LeaveOneOut, cross_val_score, train_test_split
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.metrics import confusion_matrix # confusion matrix
from sklearn.decomposition import PCA # principal component analysis
from sklearn.linear_model import LogisticRegression # logistic regression
from sklearn.neighbors import KNeighborsClassifier # k-nearest neighbors
from sklearn.svm import SVC # support vector machine
from sklearn.tree import DecisionTreeClassifier # decision tree
from sklearn.ensemble import RandomForestClassifier # random forest
from sklearn.ensemble import AdaBoostClassifier # AdaBoost
from sklearn.naive_bayes import GaussianNB # naive Bayes
import lightgbm as lgb
from sklearn import metrics
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score

# load the competition data
train_df = pd.read_csv("train.csv")
test_X = pd.read_csv("test.csv")
train_df.head()
# class balance of the target
train_df.disease.value_counts()
```
# train_yに目的変数を代入
```
# target variable
train_y = train_df['disease']
test_X.head()
test_X.info()

# check that train and test cover the same age range before binning
print(test_X.Age.min(), test_X.Age.max())
print(train_df.Age.min(), train_df.Age.max())

# bin Age into decades (identical bin edges for train and test)
test_X.Age = pd.cut(test_X.Age, [0, 10, 20, 30,40,50,60,70,80])
test_X
train_df.Age = pd.cut(train_df.Age, [0, 10, 20, 30,40,50,60,70,80])
train_df

# explanatory variables, one-hot encoded
train_X = train_df.drop('disease', axis=1)
train_X = pd.get_dummies(train_X)
test_X = pd.get_dummies(test_X)
train_X.head()
test_X.head()

# train/test split for local evaluation
X_train, X_test, y_train, y_test = train_test_split(train_X, train_y, random_state=0)
```
# ロジスティック回帰
```
# FIX: the original line started with a stray 'p' before the comment
# ('p# lr = ...'), which evaluated the undefined name p -> NameError.
# lr = LinearRegression()
lr = LogisticRegression(max_iter=2000)  # logistic regression model
# X = boston_df.values  # explanatory variables (numpy array)
# Y = boston_df['MEDV'].values  # target (numpy array)

# cross-validated score (leave-one-out kept for reference; 5-fold used)
loo = LeaveOneOut()
# score = cross_val_score(lr, train_X, train_y, cv=loo)
score = cross_val_score(lr, train_X, train_y, cv=5)
score.mean()

lr.fit(train_X, train_y)
# predict on the evaluation data
pred_y_lr = lr.predict(test_X)
# show predictions
print(pred_y_lr)
names = ["Logistic Regression", "Nearest Neighbors",
         "Linear SVM", "Polynomial SVM", "RBF SVM", "Sigmoid SVM",
         "Decision Tree","Random Forest", "AdaBoost", "Naive Bayes"]
classifiers = [
    LogisticRegression(max_iter=3000),
    KNeighborsClassifier(),
    SVC(kernel="linear"),
    SVC(kernel="poly"),
    SVC(kernel="rbf"),
    SVC(kernel="sigmoid"),
    DecisionTreeClassifier(),
    RandomForestClassifier(),
    AdaBoostClassifier(),
    GaussianNB()]

# FIX: the original fit and scored on the full training data twice, so the
# 'test' column was identical to 'train'. Use the held-out split instead.
result = []
for name, clf in zip(names, classifiers):  # try each classifier in turn
    clf.fit(X_train, y_train)              # fit on the training split
    score1 = clf.score(X_train, y_train)   # accuracy (train)
    score2 = clf.score(X_test, y_test)     # accuracy (test)
    result.append([score1, score2])        # store the results

# sort by test accuracy, descending
df_result = pd.DataFrame(result, columns=['train', 'test'], index=names).sort_values('test', ascending=False)
df_result
# bar chart of the scores
df_result.plot(kind='bar', alpha=0.5, grid=True)

# repeat with 20 random splits to average out split noise
result = []
for trial in range(20):
    X_train, X_test, y_train, y_test = train_test_split(train_X, train_y, test_size=.4)
    for name, clf in zip(names, classifiers):
        clf.fit(X_train, y_train)
        score1 = clf.score(X_train, y_train)  # accuracy (train)
        score2 = clf.score(X_test, y_test)    # accuracy (test)
        result.append([name, score1, score2])
df_result = pd.DataFrame(result, columns=['classifier', 'train', 'test'])
df_result

# mean accuracy per classifier, sorted by test accuracy
df_result_mean = df_result.groupby('classifier').mean().sort_values('test', ascending=False)
df_result_mean
# standard deviations, used as error bars below
errors = df_result.groupby('classifier').std()
errors
# bar chart of the means with error bars
df_result_mean.plot(kind='bar', alpha=0.5, grid=True, yerr=errors)

# cross-validated scores for the two tree-based models
DTC = DecisionTreeClassifier()
score = cross_val_score(DTC, train_X, train_y, cv=5)
score.mean()
RFC = RandomForestClassifier()
score = cross_val_score(RFC, train_X, train_y, cv=5)
score.mean()

RFC.fit(train_X, train_y)
# predict on the evaluation data
pred_y = RFC.predict(test_X)
# show predictions
print(pred_y)
```
# LightGBM
```
# build LightGBM datasets
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# LightGBM hyperparameters
lgbm_params = {
    # binary classification
    'objective': 'binary',
    # maximize AUC
    'metric': 'auc',
    # only fatal messages
    'verbosity': -1,
}

# train with early stopping on the validation set
model = lgb.train(lgbm_params, lgb_train, valid_sets=lgb_eval,
                  verbose_eval=50,  # log every 50 iterations
                  num_boost_round=1000,  # max iterations
                  early_stopping_rounds=100
                  )

# save the model
model.save_model('model.txt')

# predicted probabilities on the test split
y_pred_proba = model.predict(X_test, num_iteration=model.best_iteration)
y_pred_proba

# FIX: the classification metrics below need hard labels - accuracy_score etc.
# fail on continuous probabilities. Threshold at 0.5 (the original left this
# line commented out and passed raw probabilities through).
y_pred = np.where(y_pred_proba > 0.5, 1, 0)
print('Accuracy score = \t {}'.format(accuracy_score(y_test, y_pred)))
print('Precision score = \t {}'.format(precision_score(y_test, y_pred)))
print('Recall score = \t {}'.format(recall_score(y_test, y_pred)))
print('F1 score = \t {}'.format(f1_score(y_test, y_pred)))

# to reuse the saved model:
#bst = lgb.Booster(model_file='model.txt')
#ypred = bst.predict(X_test, num_iteration=bst.best_iteration)

# AUC uses the raw probabilities, not the thresholded labels
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.auc(fpr, tpr)
print(auc)

# ROC curve
plt.plot(fpr, tpr, label='ROC curve (area = %.2f)'%auc)
plt.legend()
plt.title('ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.grid(True)
```
# 提出用データ作成
```
# RFC.fit(train_X, train_y)
# predict probabilities for the submission data
y_pred_proba = model.predict(test_X)
# pred_y = np.where(y_pred_proba > 0.5, 1, 0)
pred_y = y_pred_proba  # submit raw probabilities (AUC metric); uncomment above for hard labels
# show predictions
print(pred_y)
submit = pd.read_csv("sample_submit.csv", header=None)
submit[1] = pred_y
submit.to_csv("submit.csv", index=False, header=False)
```
# フォーラム投稿用検証
```
import re
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBRegressor

# XGBoost rejects feature names containing '[', ']' or '<';
# this cell replaces those characters with '_' before fitting.
regex = re.compile(r"\[|\]|<", re.IGNORECASE)

# test input data with string, int, and symbol-included columns
df = pd.DataFrame({'0': np.random.randint(0, 2, size=100),
                   '[test1]': np.random.uniform(0, 1, size=100),
                   'test2': np.random.uniform(0, 1, size=100),
                   3: np.random.uniform(0, 1, size=100)})
# sanitize only the column names that contain a forbidden character
df.columns = [regex.sub("_", col) if any(x in str(col) for x in set(('[', ']', '<'))) else col for col in df.columns.values]
target = df.iloc[:, 0]
predictors = df.iloc[:, 1:]
# basic xgb model
xgb0 = XGBRegressor(objective= 'reg:linear')
xgb0.fit(predictors, target)

import pandas as pd
import numpy as np
from xgboost.sklearn import XGBRegressor
# control case: the same data with clean column names, no renaming needed
df = pd.DataFrame({'0': np.random.randint(0, 2, size=100),
                   'test1': np.random.uniform(0, 1, size=100),
                   'test2': np.random.uniform(0, 1, size=100),
                   3: np.random.uniform(0, 1, size=100)})
target = df.iloc[:, 0]
predictors = df.iloc[:, 1:]
# basic xgb model
xgb0 = XGBRegressor(objective= 'reg:linear')
xgb0.fit(predictors, target)
```
| github_jupyter |
# Neural networks with PyTorch
Deep learning networks tend to be massive with dozens or hundreds of layers, that's where the term "deep" comes from. You can build one of these deep networks using only weight matrices as we did in the previous notebook, but in general it's very cumbersome and difficult to implement. PyTorch has a nice module `nn` that provides a nice way to efficiently build large neural networks.
```
# Import necessary packages
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import torch
import helper
import matplotlib.pyplot as plt
```
Now we're going to build a larger network that can solve a (formerly) difficult problem, identifying text in an image. Here we'll use the MNIST dataset which consists of greyscale handwritten digits. Each image is 28x28 pixels, you can see a sample below
<img src='assets/mnist.png'>
Our goal is to build a neural network that can take one of these images and predict the digit in the image.
First up, we need to get our dataset. This is provided through the `torchvision` package. The code below will download the MNIST dataset, then create training and test datasets for us. Don't worry too much about the details here, you'll learn more about this later.
```
### Run this cell
from torchvision import datasets, transforms
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,)),
])
# Download and load the training data
trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
```
We have the training data loaded into `trainloader` and we make that an iterator with `iter(trainloader)`. Later, we'll use this to loop through the dataset for training, like
```python
for image, label in trainloader:
## do things with images and labels
```
You'll notice I created the `trainloader` with a batch size of 64, and `shuffle=True`. The batch size is the number of images we get in one iteration from the data loader and pass through our network, often called a *batch*. And `shuffle=True` tells it to shuffle the dataset every time we start going through the data loader again. But here I'm just grabbing the first batch so we can check out the data. We can see below that `images` is just a tensor with size `(64, 1, 28, 28)`. So, 64 images per batch, 1 color channel, and 28x28 images.
```
# Grab one batch from the loader.
# FIX: iterator.next() was removed (Python 3 / newer torch); use builtin next().
dataiter = iter(trainloader)
images, labels = next(dataiter)

print(type(images))
print(images.shape)  # (64, 1, 28, 28): batch, channel, height, width
print(labels.shape)  # (64,)
```
This is what one of the images looks like.
```
# Show one example image (squeeze drops the channel dimension for imshow)
plt.imshow(images[1].numpy().squeeze(), cmap='Greys_r');
```
First, let's try to build a simple network for this dataset using weight matrices and matrix multiplications. Then, we'll see how to do it using PyTorch's `nn` module which provides a much more convenient and powerful method for defining network architectures.
The networks you've seen so far are called *fully-connected* or *dense* networks. Each unit in one layer is connected to each unit in the next layer. In fully-connected networks, the input to each layer must be a one-dimensional vector (which can be stacked into a 2D tensor as a batch of multiple examples). However, our images are 28x28 2D tensors, so we need to convert them into 1D vectors. Thinking about sizes, we need to convert the batch of images with shape `(64, 1, 28, 28)` to a have a shape of `(64, 784)`, 784 is 28 times 28. This is typically called *flattening*, we flattened the 2D images into 1D vectors.
Previously you built a network with one output unit. Here we need 10 output units, one for each digit. We want our network to predict the digit shown in an image, so what we'll do is calculate probabilities that the image is of any one digit or class. This ends up being a discrete probability distribution over the classes (digits) that tells us the most likely class for the image. That means we need 10 output units for the 10 classes (digits). We'll see how to convert the network output into a probability distribution next.
> **Exercise:** Flatten the batch of images `images`. Then build a multi-layer network with 784 input units, 256 hidden units, and 10 output units using random tensors for the weights and biases. For now, use a sigmoid activation for the hidden layer. Leave the output layer without an activation, we'll add one that gives us a probability distribution next.
```
## Your solution
def activation(x):
return 1 / (1 + torch.exp(-x))
n_input = 28 * 28
n_hidden = 256
n_output = 10
W1 = torch.randn(n_input, n_hidden)
W2 = torch.randn(n_hidden, n_output)
B1 = torch.randn(n_hidden)
B2 = torch.randn(n_output)
images = images.view(images.shape[0], -1)
h = activation(torch.mm(images, W1) + B1)
out = torch.mm(h, W2) + B2
```
Now we have 10 outputs for our network. We want to pass in an image to our network and get out a probability distribution over the classes that tells us the likely class(es) the image belongs to. Something that looks like this:
<img src='assets/image_distribution.png' width=500px>
Here we see that the probability for each class is roughly the same. This is representing an untrained network, it hasn't seen any data yet so it just returns a uniform distribution with equal probabilities for each class.
To calculate this probability distribution, we often use the [**softmax** function](https://en.wikipedia.org/wiki/Softmax_function). Mathematically this looks like
$$
\Large \sigma(x_i) = \cfrac{e^{x_i}}{\sum_k^K{e^{x_k}}}
$$
What this does is squish each input $x_i$ between 0 and 1 and normalizes the values to give you a proper probability distribution where the probabilites sum up to one.
> **Exercise:** Implement a function `softmax` that performs the softmax calculation and returns probability distributions for each example in the batch. Note that you'll need to pay attention to the shapes when doing this. If you have a tensor `a` with shape `(64, 10)` and a tensor `b` with shape `(64,)`, doing `a/b` will give you an error because PyTorch will try to do the division across the columns (called broadcasting) but you'll get a size mismatch. The way to think about this is for each of the 64 examples, you only want to divide by one value, the sum in the denominator. So you need `b` to have a shape of `(64, 1)`. This way PyTorch will divide the 10 values in each row of `a` by the one value in each row of `b`. Pay attention to how you take the sum as well. You'll need to define the `dim` keyword in `torch.sum`. Setting `dim=0` takes the sum across the rows while `dim=1` takes the sum across the columns.
```
def softmax(x):
    """Row-wise softmax for a 2D tensor: exp(x) normalized so each row sums to 1."""
    return torch.exp(x) / torch.sum(torch.exp(x), dim=1).view(-1, 1)

# Here, out should be the output of the network in the previous exercise with shape (64,10)
probabilities = softmax(out)

# Does it have the right shape? Should be (64, 10)
print(probabilities.shape)
# Does it sum to 1?
print(probabilities.sum(dim=1))
```
## Building networks with PyTorch
PyTorch provides a module `nn` that makes building networks much simpler. Here I'll show you how to build the same one as above with 784 inputs, 256 hidden units, 10 output units and a softmax output.
```
from torch import nn

class Network(nn.Module):
    """Fully-connected MNIST classifier: 784 -> 256 (sigmoid) -> 10 (softmax)."""

    def __init__(self):
        super().__init__()
        # Linear map from the flattened image to the hidden layer
        self.hidden = nn.Linear(784, 256)
        # Linear map from the hidden layer to the 10 digit classes
        self.output = nn.Linear(256, 10)
        # Activation modules used in forward()
        self.sigmoid = nn.Sigmoid()
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # hidden layer -> sigmoid -> output layer -> softmax over classes
        return self.softmax(self.output(self.sigmoid(self.hidden(x))))
```
Let's go through this bit by bit.
```python
class Network(nn.Module):
```
Here we're inheriting from `nn.Module`. Combined with `super().__init__()` this creates a class that tracks the architecture and provides a lot of useful methods and attributes. It is mandatory to inherit from `nn.Module` when you're creating a class for your network. The name of the class itself can be anything.
```python
self.hidden = nn.Linear(784, 256)
```
This line creates a module for a linear transformation, $x\mathbf{W} + b$, with 784 inputs and 256 outputs and assigns it to `self.hidden`. The module automatically creates the weight and bias tensors which we'll use in the `forward` method. You can access the weight and bias tensors once the network (`net`) is created with `net.hidden.weight` and `net.hidden.bias`.
```python
self.output = nn.Linear(256, 10)
```
Similarly, this creates another linear transformation with 256 inputs and 10 outputs.
```python
self.sigmoid = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
```
Here I defined operations for the sigmoid activation and softmax output. Setting `dim=1` in `nn.Softmax(dim=1)` calculates softmax across the columns.
```python
def forward(self, x):
```
PyTorch networks created with `nn.Module` must have a `forward` method defined. It takes in a tensor `x` and passes it through the operations you defined in the `__init__` method.
```python
x = self.hidden(x)
x = self.sigmoid(x)
x = self.output(x)
x = self.softmax(x)
```
Here the input tensor `x` is passed through each operation and reassigned to `x`. We can see that the input tensor goes through the hidden layer, then a sigmoid function, then the output layer, and finally the softmax function. It doesn't matter what you name the variables here, as long as the inputs and outputs of the operations match the network architecture you want to build. The order in which you define things in the `__init__` method doesn't matter, but you'll need to sequence the operations correctly in the `forward` method.
Now we can create a `Network` object.
```
# Create the network and look at it's text representation
model = Network()
model
```
You can define the network somewhat more concisely and clearly using the `torch.nn.functional` module. This is the most common way you'll see networks defined as many operations are simple element-wise functions. We normally import this module as `F`, `import torch.nn.functional as F`.
```
import torch.nn.functional as F

class Network(nn.Module):
    """Same 784 -> 256 -> 10 network, using torch.nn.functional for activations."""

    def __init__(self):
        super().__init__()
        # Inputs to hidden layer linear transformation
        self.hidden = nn.Linear(784, 256)
        # Output layer, 10 units - one for each digit
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        hidden_act = F.sigmoid(self.hidden(x))            # hidden layer, sigmoid activation
        return F.softmax(self.output(hidden_act), dim=1)  # class probabilities
```
### Activation functions
So far we've only been looking at the sigmoid activation function, but in general any function can be used as an activation function. The only requirement is that for a network to approximate a non-linear function, the activation functions must be non-linear. Here are a few more examples of common activation functions: Tanh (hyperbolic tangent), and ReLU (rectified linear unit).
<img src="assets/activation.png" width=700px>
In practice, the ReLU function is used almost exclusively as the activation function for hidden layers.
### Your Turn to Build a Network
<img src="assets/mlp_mnist.png" width=600px>
> **Exercise:** Create a network with 784 input units, a hidden layer with 128 units and a ReLU activation, then a hidden layer with 64 units and a ReLU activation, and finally an output layer with a softmax activation as shown above. You can use a ReLU activation with the `nn.ReLU` module or `F.relu` function.
It's good practice to name your layers by their type of network, for instance 'fc' to represent a fully-connected layer. As you code your solution, use `fc1`, `fc2`, and `fc3` as your layer names.
```
class Network(nn.Module):
    """MNIST classifier with two ReLU hidden layers: 784 -> 128 -> 64 -> 10 (softmax)."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(784, 128)  # input -> first hidden layer
        self.fc2 = nn.Linear(128, 64)   # first -> second hidden layer
        self.fc3 = nn.Linear(64, 10)    # second hidden layer -> class scores

    def forward(self, x):
        # Two ReLU hidden layers, then softmax over the 10 classes
        h1 = F.relu(self.fc1(x))
        h2 = F.relu(self.fc2(h1))
        return F.softmax(self.fc3(h2), dim=1)

model = Network()
model
```
### Initializing weights and biases
The weights and such are automatically initialized for you, but it's possible to customize how they are initialized. The weights and biases are tensors attached to the layer you defined, you can get them with `model.fc1.weight` for instance.
```
# Inspect the automatically initialized parameters of the first layer
print(model.fc1.weight)
print(model.fc1.bias)
```
For custom initialization, we want to modify these tensors in place. These are actually autograd *Variables*, so we need to get back the actual tensors with `model.fc1.weight.data`. Once we have the tensors, we can fill them with zeros (for biases) or random normal values.
```
# Set biases to all zeros (in-place on the parameter tensor)
model.fc1.bias.data.fill_(0)
# sample weights from a normal distribution with standard dev = 0.01
model.fc1.weight.data.normal_(std=0.01)
```
### Forward pass
Now that we have a network, let's see what happens when we pass in an image.
```
# Grab some data
# FIX: iterator.next() was removed (Python 3 / newer torch); use builtin next().
dataiter = iter(trainloader)
images, labels = next(dataiter)

# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size

# Forward pass through the network on a single image
img_idx = 0
ps = model.forward(images[img_idx,:])

img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
```
As you can see above, our network has basically no idea what this digit is. It's because we haven't trained it yet, all the weights are random!
### Using `nn.Sequential`
PyTorch provides a convenient way to build networks like this where a tensor is passed sequentially through operations, `nn.Sequential` ([documentation](https://pytorch.org/docs/master/nn.html#torch.nn.Sequential)). Using this to build the equivalent network:
```
# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10

# Build a feed-forward network: Linear/ReLU stack ending in a softmax layer
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[0], hidden_sizes[1]),
                      nn.ReLU(),
                      nn.Linear(hidden_sizes[1], output_size),
                      nn.Softmax(dim=1))
print(model)

# Forward pass through the network and display output for one image
images, labels = next(iter(trainloader))
images.resize_(images.shape[0], 1, 784)
ps = model.forward(images[0,:])
helper.view_classify(images[0].view(1, 28, 28), ps)
```
Here our model is the same as before: 784 input units, a hidden layer with 128 units, ReLU activation, 64 unit hidden layer, another ReLU, then the output layer with 10 units, and the softmax output.
The operations are available by passing in the appropriate index. For example, if you want to get first Linear operation and look at the weights, you'd use `model[0]`.
```
# Index into the Sequential to get the first Linear layer and its weights
print(model[0])
model[0].weight
```
You can also pass in an `OrderedDict` to name the individual layers and operations, instead of using incremental integers. Note that dictionary keys must be unique, so _each operation must have a different name_.
```
from collections import OrderedDict
model = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_size, hidden_sizes[0])),
('relu1', nn.ReLU()),
('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
('relu2', nn.ReLU()),
('output', nn.Linear(hidden_sizes[1], output_size)),
('softmax', nn.Softmax(dim=1))]))
model
```
Now you can access layers either by integer or the name
```
print(model[0])
print(model.fc1)
```
In the next notebook, we'll see how we can train a neural network to accurately predict the numbers appearing in the MNIST images.
| github_jupyter |
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge
import lightgbm as lgb
import xgboost as xgb
import warnings
warnings.filterwarnings("ignore")
import pickle
def cv_sklearn (model_name, fun_create_model, df, cols, score_column, early_stopping_rounds = None):
    """Train one model per fold (5-fold CV) and return out-of-fold predictions.

    Parameters
    ----------
    model_name : str
        Prefix used when pickling each fold's model under ../models/.
    fun_create_model : callable
        Zero-argument factory returning a fresh, unfitted regressor.
    df : pandas.DataFrame
        Must contain a 'kfold' column plus `cols` and `score_column`.
        NOTE(review): `pred[valid.index.values]` assumes a default
        RangeIndex on df — confirm upstream.
    cols : list of str
        Feature column names.
    score_column : str
        Target column name.
    early_stopping_rounds : int, optional
        If given, passed to fit() with an eval_set (LightGBM/XGBoost style).

    Returns
    -------
    numpy.ndarray
        Out-of-fold predictions aligned with df's rows.
    """
    pred = np.zeros(df.shape[0])
    for fold in range(5):
        train = df.query("kfold != @fold")
        valid = df.query("kfold == @fold")
        X_train = train[cols].values
        y_train = train[score_column].values
        X_valid = valid[cols].values
        y_valid = valid[score_column].values
        model = fun_create_model()
        # Idiom fix: `x is not None` instead of `not x is None`.
        if early_stopping_rounds is not None:
            model.fit(X_train, y_train,
                      early_stopping_rounds=early_stopping_rounds,
                      eval_set=[(X_train, y_train), (X_valid, y_valid)], verbose=0)
        else:
            model.fit(X_train, y_train)
        pred_train = model.predict(X_train)
        pred_valid = model.predict(X_valid)
        pred[valid.index.values] = pred_valid
        rmse_train = mean_squared_error(y_train, pred_train, squared=False)
        rmse_valid = mean_squared_error(y_valid, pred_valid, squared=False)
        print(f"fold:{fold} rmse_train:{rmse_train:.5f}, rmse_valid:{rmse_valid:.5f}")
        # Persist the fold model so sklearn_predict() can reload the ensemble.
        # Fix: use a context manager so the file handle is always closed.
        with open(f"../models/{model_name}_{fold}.pkl", 'wb') as fh:
            pickle.dump(model, fh)
    y_true = df[score_column]
    rmse_tot = mean_squared_error(y_true, pred, squared=False)
    print(f"tot rmse_tot:{rmse_tot:.5f}")
    return pred
def show_rmse (y_true, pred, model_name):
    # Scatter-plot predictions against ground truth, with the overall RMSE
    # shown in the title for a quick visual fit check.
    rmse_tot = mean_squared_error ( y_true, pred, squared = False )
    plt.figure (figsize = (6,6))
    plt.scatter (y_true, pred)
    plt.title(f"{model_name}: rmse_tot:{rmse_tot:.5f}")
    plt.show()
def create_validation_features ():
    # Load the validation comparison pairs plus three pre-computed feature
    # tables for the validation texts (Detoxify model scores and two TF-IDF
    # based score sets), concatenated column-wise into one feature frame.
    # NOTE(review): paths are relative to the notebook's working directory;
    # the three CSVs are assumed to be row-aligned — confirm upstream.
    df_valid_pair = pd.read_csv("../processed/validation.csv")
    df_valid_fe = pd.read_csv("../processed/valid_text_detoxify_fe.csv")
    df_valid_fe_2 = pd.read_csv("../processed/valid_text_jc_tfidf_fe.csv")
    df_valid_fe_2 = df_valid_fe_2[["jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"]]
    df_valid_fe_3 = pd.read_csv("../processed/valid_text_juc_tfidf_fe.csv")
    df_valid_fe_3 = df_valid_fe_3[['juc_tfidf_toxicity', 'juc_tfidf_severe_toxicity','juc_tfidf_obscene', 'juc_tfidf_sexual_explicit','juc_tfidf_identity_attack', 'juc_tfidf_insult', 'juc_tfidf_threat']]
    df_valid_fe = pd.concat ( [df_valid_fe,df_valid_fe_2, df_valid_fe_3], axis=1)
    # Returns (pairs frame, per-text feature frame).
    return df_valid_pair, df_valid_fe
def create_rud_features ():
    # Same layout as create_validation_features(), but for the "rud" dataset
    # (presumably Ruddit — TODO confirm): comparison pairs plus Detoxify and
    # two TF-IDF feature tables concatenated column-wise.
    df_valid_pair = pd.read_csv("../processed/rud_pair.csv")
    df_valid_fe = pd.read_csv("../processed/rud_text_detoxify_fe.csv")
    df_valid_fe_2 = pd.read_csv("../processed/rud_text_jc_tfidf_fe.csv")
    df_valid_fe_2 = df_valid_fe_2[["jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"]]
    df_valid_fe_3 = pd.read_csv("../processed/rud_text_juc_tfidf_fe.csv")
    df_valid_fe_3 = df_valid_fe_3[['juc_tfidf_toxicity', 'juc_tfidf_severe_toxicity','juc_tfidf_obscene', 'juc_tfidf_sexual_explicit','juc_tfidf_identity_attack', 'juc_tfidf_insult', 'juc_tfidf_threat']]
    df_valid_fe = pd.concat ( [df_valid_fe,df_valid_fe_2, df_valid_fe_3], axis=1)
    # Returns (pairs frame, per-text feature frame).
    return df_valid_pair, df_valid_fe
def validate(df_valid_pair, df_valid_text, pred):
    """Score how often `pred` ranks the more-toxic item of each pair higher.

    Two pair formats are supported: rud-style pairs referencing texts by
    comment id, and validation pairs referencing texts verbatim (the latter
    weighted by annotator agreement).
    """
    if "comment_id" in df_valid_text.columns:
        # rud-style pairs: look predictions up by comment id.
        score_by_id = dict(zip(df_valid_text["comment_id"].values, pred))
        less = df_valid_pair["less_toxic_id"].map(score_by_id.__getitem__)
        more = df_valid_pair["more_toxic_id"].map(score_by_id.__getitem__)
        return (less < more).mean()

    # Validation pairs: look predictions up by the raw text.
    score_by_text = dict(zip(df_valid_text["text"].values, pred))
    pairs = df_valid_pair.copy()
    pairs["less_toxic_score"] = pairs["less_toxic"].map(score_by_text.__getitem__)
    pairs["more_toxic_score"] = pairs["more_toxic"].map(score_by_text.__getitem__)
    # Agreement-weighted accuracy: unanimous correct pairs count their full
    # annotator count, non-unanimous correct pairs count 2, non-unanimous
    # incorrect pairs count 1.
    correct_unanimous = pairs.query("less_toxic_score < more_toxic_score and avg_agreement == 1.0")
    correct_partial = pairs.query("less_toxic_score < more_toxic_score and avg_agreement < 1.0")
    wrong_partial = pairs.query("less_toxic_score > more_toxic_score and avg_agreement < 1.0")
    numerator = correct_unanimous["count"].sum() + correct_partial.shape[0] * 2 + wrong_partial.shape[0]
    return numerator / pairs["count"].sum()
def avg_predict (df_valid_text, cols):
    """Rank-average ensemble: sum the ordinal ranks of each score column.

    Working in rank space makes the ensemble insensitive to the different
    scales of the individual score columns.  Returns an array of summed
    ranks (one value per row of `df_valid_text`).
    """
    # Bug fix: `rankdata` was referenced without being imported anywhere
    # in this cell, so the original raised NameError at call time.
    from scipy.stats import rankdata

    total = None
    for col in cols:
        ranks = rankdata(df_valid_text[col].values, method='ordinal')
        total = ranks if total is None else total + ranks
    return total
def sklearn_predict (df_valid_text, model_path, model_name, cols, folds = 5):
    """Sum the predictions of the pickled per-fold models for `model_name`.

    Parameters mirror cv_sklearn(); models are loaded from
    `{model_path}/{model_name}_{fold}.pkl`.

    Bug fix: the loop previously hard-coded range(5) and silently ignored
    the `folds` argument.  File handles are now closed via `with`.
    """
    pred = np.zeros(df_valid_text.shape[0])
    X = df_valid_text[cols].values
    for fold in range(folds):
        with open(f"{model_path}/{model_name}_{fold}.pkl", 'rb') as fh:
            model = pickle.load(fh)
        pred += model.predict(X)
    return pred
cols = ['original_toxicity', 'original_severe_toxicity', 'original_obscene',
'original_threat', 'original_insult', 'original_identity_attack',
'unbiased_toxicity', 'unbiased_severe_toxicity', 'unbiased_obscene',
'unbiased_identity_attack', 'unbiased_insult', 'unbiased_threat',
'unbiased_sexual_explicit', 'multilingual_toxicity',
'multilingual_severe_toxicity', 'multilingual_obscene',
'multilingual_identity_attack', 'multilingual_insult',
'multilingual_threat', 'multilingual_sexual_explicit',
'original-small_toxicity', 'original-small_severe_toxicity',
'original-small_obscene', 'original-small_threat',
'original-small_insult', 'original-small_identity_attack',
'unbiased-small_toxicity', 'unbiased-small_severe_toxicity',
'unbiased-small_obscene', 'unbiased-small_identity_attack',
'unbiased-small_insult', 'unbiased-small_threat',
'unbiased-small_sexual_explicit',
"jc_tfidf_toxic","jc_tfidf_severe_toxic","jc_tfidf_obscene","jc_tfidf_threat","jc_tfidf_insult","jc_tfidf_identity_hate"]
def lgb_model ():
    # Factory: fresh LightGBM regressor with a fixed seed for reproducible
    # CV runs; shallow trees (num_leaves=6) plus row/feature subsampling.
    return lgb.LGBMRegressor(random_state=2022,
                             learning_rate=0.1,
                             subsample=0.8, colsample_bytree=0.8,
                             num_leaves=6,
                             )
def xgb_model ():
    # Factory: fresh XGBoost regressor, seeded; aggressive subsampling and
    # L1 regularisation (reg_alpha) with depth capped at 4.
    return xgb.XGBRegressor(random_state=2022, learning_rate=0.1,
                            subsample=0.6, colsample_bytree=0.6, max_depth=4,
                            reg_alpha=1.0,
                            )
def rf_model():
    # Factory: fresh seeded random forest; max_features=3 decorrelates trees,
    # max_depth=8 limits individual tree complexity.
    return RandomForestRegressor(random_state=2022, max_features=3, max_depth=8)
def ridge_model():
    # Factory: fresh ridge regression with default L2 strength (alpha=1.0).
    return Ridge(alpha=1.0)
rud_pair, rud_text = create_rud_features ()
val_pair, val_text = create_validation_features ()
y_true = rud_text["offensiveness_score"]
model_name = "ridge_rud"
print(f"MODEL {model_name}")
pred_ridge = cv_sklearn ( model_name = model_name, fun_create_model = ridge_model, df = rud_text , cols = cols ,
score_column = "offensiveness_score")
show_rmse (y_true, pred_ridge, model_name)
pred_val_ridge = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols)
val_score = validate (val_pair,val_text, pred_val_ridge)
print(f"{model_name} VAL: {val_score:.5f}")
print()
print()
model_name = "lgb_rud"
print(f"MODEL {model_name}")
pred_lgb = cv_sklearn ( model_name = model_name, fun_create_model = lgb_model, df = rud_text , cols = cols ,
score_column = "offensiveness_score",
early_stopping_rounds = 50)
show_rmse (y_true, pred_lgb, model_name)
pred_val_lgb = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols)
val_score = validate (val_pair,val_text, pred_val_lgb)
print(f"{model_name} VAL: {val_score:.5f}")
print()
print()
model_name = "rf_rud"
print(f"MODEL {model_name}")
pred_rf = cv_sklearn ( model_name = model_name, fun_create_model = rf_model, df = rud_text , cols = cols , score_column = "offensiveness_score")
show_rmse (y_true, pred_rf, model_name)
pred_val_rf = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols)
val_score = validate (val_pair,val_text, pred_val_rf)
print(f"{model_name} VAL: {val_score:.5f}")
print()
print()
model_name = "xgb_rud"
print(f"MODEL {model_name}")
pred_xgb = cv_sklearn ( model_name = model_name, fun_create_model = xgb_model,
df = rud_text , cols = cols ,
score_column = "offensiveness_score",
early_stopping_rounds = 50)
show_rmse (y_true, pred_xgb, model_name)
pred_val_xgb = sklearn_predict (val_text, model_path = "../models/", model_name =model_name, cols=cols)
val_score = validate (val_pair,val_text, pred_val_xgb)
print(f"{model_name} VAL: {val_score:.5f}")
print()
print()
pred_val_ensemble = pred_val_ridge + pred_val_lgb + pred_val_rf + pred_val_xgb
val_score = validate (val_pair,val_text, pred_val_ensemble)
print(f"Ensemble VAL: {val_score:.5f}")
```
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/image_overview.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/image_overview.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/image_overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically installs its dependencies, including earthengine-api and folium.
```
import subprocess
import sys

# Install geehydro on first use (it pulls in earthengine-api and folium).
try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    # Fix: invoke pip via sys.executable so the package is installed into
    # the interpreter actually running this notebook; a bare "python" may
    # resolve to a different installation on PATH.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'geehydro'])
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
```
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
from src.config import config
from scipy.stats import pearsonr

# Load the merged CDR/DHS dataset and split it into two halves at the
# median of Z_Med (presumably an urbanisation/density proxy — TODO confirm).
cdr_dhs_other = pd.DataFrame(pd.read_csv('data/processed/civ/correlation/master_cdr_dhs_other.csv'))
urb = cdr_dhs_other[cdr_dhs_other['Z_Med'] >= np.median(cdr_dhs_other['Z_Med'])]
rur = cdr_dhs_other[cdr_dhs_other['Z_Med'] < np.median(cdr_dhs_other['Z_Med'])]

# For each Administrative level: aggregate CDR volumes (sum) and network
# statistics (median), merge with the DHS health indicators (mean), then
# correlate each CDR feature with each health indicator.
# Ported to Python 3: print statements -> print() calls; groupby column
# selection uses a list (tuple selection is removed in modern pandas).
for i in ['Adm_4']:
    cdr_sum_urb = urb.groupby(i)[['Vol', 'Vol_in', 'Vol_out',
                                  'Dur', 'Dur_in', 'Dur_out']].sum().reset_index()
    cdr_mean_urb = urb.groupby(i)[['Entropy', 'Med_degree',
                                   'Pagerank', 'Introversion']].median().reset_index()
    cdr_urb = cdr_sum_urb.merge(cdr_mean_urb, on=i)
    dhs_urb = urb.groupby(i)[['BloodPosRate', 'RapidPosRate', 'DeathRate',
                              'HIVPosRate', 'HealthAccessDifficulty']].mean().reset_index()
    data_urb = cdr_urb.merge(dhs_urb, on=i)

    cdr_sum_rur = rur.groupby(i)[['Vol', 'Vol_in', 'Vol_out',
                                  'Dur', 'Dur_in', 'Dur_out']].sum().reset_index()
    cdr_mean_rur = rur.groupby(i)[['Entropy', 'Med_degree',
                                   'Pagerank', 'Introversion']].median().reset_index()
    cdr_rur = cdr_sum_rur.merge(cdr_mean_rur, on=i)
    dhs_rur = rur.groupby(i)[['BloodPosRate', 'RapidPosRate', 'DeathRate',
                              'HIVPosRate', 'HealthAccessDifficulty']].mean().reset_index()
    data_rur = cdr_rur.merge(dhs_rur, on=i)

    for j in ['Vol', 'Vol_in', 'Vol_out', 'Entropy',
              'Med_degree', 'Pagerank', 'Introversion']:
        for k in ['BloodPosRate', 'RapidPosRate', 'DeathRate',
                  'HIVPosRate', 'HealthAccessDifficulty']:
            # Drop non-positive values from both series (treated as
            # outliers/missing), keeping the pairs aligned.
            a = np.array(data_urb[j])
            b = np.array(data_urb[k])
            outliers = np.where(a > 0)
            a = a[outliers]
            b = b[outliers]
            outliers2 = np.where(b > 0)
            a = a[outliers2]
            b = b[outliers2]
            c = np.array(data_rur[j])
            d = np.array(data_rur[k])
            outliers3 = np.where(c > 0)
            c = c[outliers3]
            d = d[outliers3]
            outliers4 = np.where(d > 0)
            c = c[outliers4]
            d = d[outliers4]
            print(i, j, k)
            print(pearsonr(a, b))
            plt.scatter(a, b)
            # plt.scatter(c, d, c='r')
            plt.show()
            print(pearsonr(a, b))
            plt.scatter(np.log(a), b)
            # plt.scatter(np.log(c), np.log(d), c='r')
            plt.show()
```
| github_jupyter |
# Curve-fit to estimate final dissipation
```
%run base.py
%run paths.py
from base import *
from paths import *
%matplotlib ipympl
import matplotlib.pyplot as plt
def get_teps(short_name):
    # Load the spatial-means output of one simulation and return the time
    # axis, the total dissipation (kinetic epsK + available potential epsA),
    # and the index of the dissipation maximum.
    path = paths_sim[short_name]
    d = SpatialMeansSW1L._load(path)
    t = d['t']
    eps = d['epsK'] + d ['epsA']
    # NOTE(review): assumes _index_where(arr, val) returns the first index
    # where arr equals val — confirm in base.py.
    idx = _index_where(eps, eps.max())
    return t, eps, idx
def plot_eps(short_name):
    # Plot one simulation's dissipation time series, annotating the curve at
    # its maximum with the simulation's short name (instead of a legend).
    t, eps, idx = get_teps(short_name)
    plt.plot(t, eps, label=short_name)
    plt.text(t[idx], eps[idx], short_name)
    # plt.legend()
# plt.legend()
## Some extreme cases
#plot_eps('noise_c400nh7680Buinf')
#plot_eps('vortex_grid_c100nh1920Bu2.0efr1.00e+00')
#plot_eps('noise_c20nh7680Buinf')
#plot_eps('vortex_grid_c100nh1920Bu1.0efr1.00e+00')
#plot_eps('noise_c40nh7680Buinf')
#plot_eps('vortex_grid_c100nh1920Buinfefr1.00e+02')
## Lognorm like
# plot_eps('vortex_grid_c20nh1920Buinfefr1.00e+01')
# plot_eps('vortex_grid_c100nh1920Buinfefr1.00e+02')
# plot_eps('vortex_grid_c100nh1920Bu4.0efr1.00e+02')
# plot_eps('vortex_grid_c400nh1920Buinfefr1.00e+02')
#for short in df_vort['short name']:
# plot_eps(short)
plot_eps('noise_c400nh3840Buinf')
# plot_eps('noise_c100nh3840Buinf')
# plot_eps('vortex_grid_c100nh1920Bu2.0efr1.00e+00')
%matplotlib ipympl
from scipy.signal import medfilt
from scipy.special import erf
t, eps, idx = get_teps("vortex_grid_c400nh1920Buinfefr1.00e+02")
# plt.figure()
# plt.plot(t, np.tanh(t/10))
# plt.plot(t, np.tanh((t/10)**4))
# plt.plot(t, erf((t/10)**4))
# plt.show()
eps_filt = medfilt(eps, 7)
plt.figure()
plt.plot(t, eps)
plt.plot(t, eps_filt)
```
# Curve fit
```
%matplotlib ipympl
from scipy.optimize import curve_fit
from scipy.signal import lti, step2
from scipy import stats
from matplotlib.pyplot import *
option = 2
# short = 'vortex_grid_c100nh1920Buinfefr1.00e-01'
# short = 'vortex_grid_c100nh1920Buinfefr1.00e+02'
# short = 'vortex_grid_c20nh1920Buinfefr1.00e+01'
# short = 'vortex_grid_c400nh1920Buinfefr1.00e+02'
# short = 'noise_c20nh7680Buinf'
# short = 'vortex_grid_c20nh960Buinfefr1.00e+00'
# short = 'noise_c20nh3840Buinf'
# short = 'noise_c400nh3840Buinf'
short = 'vortex_grid_c20nh1920Bu4.0efr1.00e-02'
short = 'vortex_grid_c20nh1920Bu20.0efr1.00e+00'
short = 'noise_c400nh1920Buinf'
# short = 'vortex_grid_c100nh1920Bu4.0efr1.00e+02'
# short = 'vortex_grid_c100nh1920Bu2.0efr1.00e+00'
t, eps,_ = get_teps(short)
if option == 1:
    # Simple saturating model: amplitude times a steep tanh ramp.
    # Consistency fix: this branch used `pl.` (pylab-style) names while the
    # rest of the script uses `np.`; numpy provides identical tanh/median.
    def f(x, amptan, ttan):
        return amptan * np.tanh(2 * (x / ttan)**4)

    guesses = [np.median(eps), t[eps == eps.max()]]
else:
    # tanh ramp plus a log-normal bump, capturing the transient
    # dissipation overshoot before the statistically steady state.
    # def f(x, amptan, ttan, amplog, tlog, sigma):
    def f(x, amptan, ttan, amplog, sigma):
        return (
            amptan * np.tanh(2 * (x/ttan)**4) +
            amplog * stats.lognorm.pdf(x, scale=np.exp(ttan), s=sigma)
        )

    guesses = {
        'amptan': np.median(eps),
        'ttan': t[eps == eps.max()],
        'amplog': eps.max(),
        # 'tlog': t[eps==eps.max()],
        'sigma': eps.std()
    }
    guesses = np.array(list(guesses.values()), dtype=float)
bounds = (0, guesses * 1.5)
# popt, pcov = curve_fit(f, t, eps)
# popt, pcov = curve_fit(f, t, eps, sigma=1./t)
popt, pcov = curve_fit(f, t, eps, guesses)
# popt, pcov = curve_fit(f, t, eps, guesses, bounds=bounds, method="trf")
plot(t, eps, label='original')
plot(t, f(t, *popt), label='curve fit')
plot(t, np.median(eps) * np.ones_like(eps), 'g', label='median_all')
plot(t, np.median(eps[t>40]) * np.ones_like(eps), 'r:', label='median')
plot(t, np.mean(eps[t>40]) * np.ones_like(eps), 'r--', label='mean')
# df = df_vort if 'vortex' in short else df_noise
# eps_chosen = get_row(df, 'short name', short)['$\epsilon$'].iloc[0]
# plot(t, eps_chosen * np.ones_like(eps), 'k', label='chosen')
# plot(t, popt[2] * stats.lognorm.pdf(t, *popt[-2:]), label='lognorm')
legend()
eps_fit = f(t, *popt)
dt = t[1]-t[0]
# dt = np.median(np.gradient(t))
deps_fit = np.gradient(eps_fit, dt)
ddeps_fit = np.gradient(deps_fit, dt)
curv = ddeps_fit / (1 + deps_fit) ** 1.5
# curv = curv*eps.max()/curv.max()
figure()
plot(t, eps_fit)
plot(t, curv)
# plot(t, deps_fit)
```
### Kneedle algorithm
```
def locate_knee(time, eps_fit, eps_stat):
    """Estimate when the fitted dissipation curve becomes stationary, using
    the Kneedle knee-detection algorithm (kneed package)."""
    from kneed import KneeLocator
    # KneeLocator needs a monotonically increasing x-axis: repeatedly drop
    # samples where time goes backwards (e.g. overlapping restart output).
    while not np.array_equal(time, np.sort(time)):
        idx_del = np.where(np.diff(time) < 0)[0] + 1
        time = np.delete(time, idx_del)
        eps_fit = np.delete(eps_fit, idx_del)
    if eps_fit.max() > 2 * eps_stat:
        # Pronounced overshoot (log-norm bump + tanh fit): look for the
        # knee on the decreasing side of the curve.
        knee = KneeLocator(time, eps_fit, direction='decreasing')
        idx = knee.knee_x
    else:
        knee = KneeLocator(time, eps_fit)
        idx = knee.knee_x
    if idx is None:
        # Non-stationary case: fall back to the last sample.
        idx = -1
    # NOTE(review): knee_x is used here as an index into `time`; kneed's
    # knee attributes usually hold the x *value* itself — this only agrees
    # when time is an integer range. Verify against the kneed version used.
    time_stat = time[idx]
    return time_stat
locate_knee(t, eps_fit, eps_fit[-1])
from kneed import KneeLocator
while not np.array_equal(t, np.sort(t)):
idx_del = np.where(np.diff(t) < 0)[0] + 1
t = np.delete(t, idx_del)
eps_fit = np.delete(eps_fit, idx_del)
print(idx_del)
knee = KneeLocator(t, eps_fit)
knee.plot_knee()
t[knee.knee_x], knee.direction
%matplotlib
np.where(np.gradient(t) <= 0)[0]
# plt.plot(t, np.gradient(t))
idx_neq = np.where(t != np.sort(t))[0]
print(idx_neq)
print(t[idx_neq])
idx = _index_where(eps_fit, np.median(eps)); t[idx]
idx = _index_where(abs(curv), 1e-5); t[idx]
idx = np.argmin(abs(curv)); t[idx]
```
### Histogram of curvatures
```
curv.std()*0.01
%matplotlib ipympl
n, bins, patches = plt.hist(curv, 10, normed=1, facecolor='green', alpha=0.75)
idx = _index_flat(eps_fit, t, 1e-4); t[idx]
popt
```
# Cumulative average
```
from fluidsim.base.output.spect_energy_budget import cumsum_inv
import numpy as np
def cummean(x):
    """Forward running average: element i is the mean of ``x[:i+1]``.

    Doc fix: the previous docstring was copy-pasted from ``cummean_inv``
    and wrongly claimed the average runs from the reversed array.
    """
    counts = np.arange(1, x.shape[0] + 1)
    return x.cumsum() / counts
def cummean_inv(x):
    """Cumulative average from the reversed array: element i averages the
    tail ``x[i:]`` (assuming fluidsim's cumsum_inv is the reversed
    cumulative sum — TODO confirm)."""
    sum_inv = cumsum_inv(x)
    # Number of samples from position i to the end.
    idx_inv = np.arange(x.shape[0], 0, -1)
    return sum_inv / idx_inv
eps_mean = cummean(eps)
eps_mean_inv = cummean_inv(eps)
plt.figure()
plt.plot(t, eps)
plt.plot(t, eps_mean)
plt.plot(t, eps_mean_inv)
```
# Moving average (from SciPy cookbook)
```
import numpy
def smooth(x, window_len=11, window='hanning'):
    """Smooth 1-D data by convolving it with a normalised window.

    The signal is extended with reflected copies of itself at both ends
    (window_len - 1 samples each side) before convolving, which minimises
    transients at the beginning and end of the output.

    Parameters
    ----------
    x : numpy.ndarray
        1-D input signal.
    window_len : int
        Size of the smoothing window; should be an odd integer.  Values
        below 3 return `x` unchanged.
    window : str
        One of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
        'flat' produces a plain moving average.

    Returns
    -------
    numpy.ndarray
        The smoothed signal.  NOTE: it is longer than the input by
        window_len - 1 samples; slice y[(window_len/2-1):-(window_len/2)]
        to recover the input length.

    Raises
    ------
    ValueError
        If x is not 1-D, shorter than window_len, or window is unknown.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    # Idiom fix (`window not in ...`) and message typo fix ("is on of").
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Mirror-pad both ends so the 'valid' convolution covers the full signal.
    s = numpy.r_[x[window_len-1:0:-1], x, x[-2:-window_len-1:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        # Safer than the original eval('numpy.' + window + '(window_len)').
        w = getattr(numpy, window)(window_len)
    # Normalise the window so the output preserves the signal's scale.
    y = numpy.convolve(w / w.sum(), s, mode='valid')
    return y
from scipy.stats import linregress
linregress(t[100:200], eps[100:200])
def f(x, a):
    """Constant model for curve_fit: the level `a` broadcast to x's shape."""
    return np.zeros_like(x) + a
curve_fit(f, t[300:], eps[300:])
eps[300:].mean()
plt.figure()
plt.plot(eps)
eps_filt = medfilt(eps, 51)
eps_mavg = smooth(eps, 21)[20:]
plt.figure()
plt.plot(t, eps)
plt.plot(t, eps_filt, label="median filtered")
plt.plot(t, eps_mavg, label="averaged")
plt.legend()
```
# Using FFT
```
Ts = t[1] - t[0]; # sampling interval
Fs = 1.0/Ts; # sampling rate
tvec = t # time vector
y = eps
n = len(y) # length of the signal
k = np.arange(n)
T = n/Fs
frq = k/T # two sides frequency range
frq = frq[0:n//2] # one side frequency range
Y = np.fft.fft(y)/n # fft computing and normalization
Y = Y[0:n//2]
plt.clf()
fig, ax = plt.subplots(2, 1)
ax[0].plot(tvec,y)
ax[0].set_xlabel('Time')
ax[0].set_ylabel('Amplitude')
ax[1].loglog(frq[1:],abs(Y[1:]),'r') # plotting the spectrum
ax[1].set_xlabel('Freq (Hz)')
ax[1].set_ylabel('|Y(freq)|')
```
# Peak detection
```
%matplotlib ipympl
from matplotlib.pyplot import *
from scipy.signal import find_peaks_cwt, find_peaks
widths = np.diff(t)
peaks = find_peaks(eps)[0]
plot(t, eps)
scatter(t[peaks], eps[peaks])
eps[-1], eps_filt[-1]
def step_info(t, yout, thresh_percent=20):
    """Characterise a step response: overshoot, rise time, settling time.

    Rise time is measured to 90% of the final value; settling time is the
    last instant the response is still outside a band thresh_percent above
    the final value.  Raises StopIteration if a crossing is never found.
    """
    final = yout[-1]
    band = 1 + thresh_percent / 100

    # First sample that exceeds 90% of the final value.
    rise_idx = next(i for i in range(0, len(yout) - 1) if yout[i] > final * .90)

    # Scanning backwards from the end, the last sample still outside the
    # settling band, converted back to a forward index.
    settle_idx = next(
        len(yout) - i
        for i in range(2, len(yout) - 1)
        if abs(yout[-i] / final) > band
    )

    return {
        "overshoot_percent": (yout.max() / final - 1) * 100,
        "rise_time": t[rise_idx] - t[0],
        "settling_time": t[settle_idx] - t[0],
    }
step_info(t, eps_filt)
yout = eps
thresh_settling = 1.20
idx = np.where(np.abs(yout / yout[-1]) > thresh_settling)[0][-1]
settling_time = t[idx] - t[0]
settling_time == step_info(t, yout)["settling_time"]
```
| github_jupyter |
```
import math
import os
import nemo
from nemo.utils.lr_policies import WarmupAnnealing
import nemo.collections.nlp as nemo_nlp
from nemo.collections.nlp.data import NemoBertTokenizer, SentencePieceTokenizer
from nemo.collections.nlp.callbacks.token_classification_callback import \
eval_iter_callback, eval_epochs_done_callback
from nemo.backends.pytorch.common.losses import CrossEntropyLossNM
from nemo.collections.nlp.nm.trainables import TokenClassifier
```
You can download data from [here](https://github.com/kyzhouhzau/BERT-NER/tree/master/data) and use [this](https://github.com/NVIDIA/NeMo/blob/master/examples/nlp/token_classification/import_from_iob_format.py) script to preprocess it.
```
BATCHES_PER_STEP = 1
BATCH_SIZE = 32
CLASSIFICATION_DROPOUT = 0.1
DATA_DIR = "PATH TO WHERE THE DATA IS"
WORK_DIR = "PATH_TO_WHERE_TO_STORE_CHECKPOINTS_AND_LOGS"
MAX_SEQ_LENGTH = 128
NUM_EPOCHS = 3
LEARNING_RATE = 0.00005
LR_WARMUP_PROPORTION = 0.1
OPTIMIZER = "adam"
# Instantiate neural factory with supported backend
neural_factory = nemo.core.NeuralModuleFactory(
backend=nemo.core.Backend.PyTorch,
# If you're training with multiple GPUs, you should handle this value with
# something like argparse. See examples/nlp/token_classification.py for an example.
local_rank=None,
# If you're training with mixed precision, this should be set to mxprO1 or mxprO2.
# See https://nvidia.github.io/apex/amp.html#opt-levels for more details.
optimization_level="O0",
# Define path to the directory you want to store your results
log_dir=WORK_DIR,
# If you're training with multiple GPUs, this should be set to
# nemo.core.DeviceType.AllGpu
placement=nemo.core.DeviceType.GPU)
# If you're using a standard BERT model, you should do it like this. To see the full
# list of BERT model names, check out nemo_nlp.huggingface.BERT.list_pretrained_models()
tokenizer = NemoBertTokenizer(pretrained_model="bert-base-cased")
bert_model = nemo_nlp.nm.trainables.huggingface.BERT(
pretrained_model_name="bert-base-cased")
# Describe training DAG
train_data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationDataLayer(
tokenizer=tokenizer,
text_file=os.path.join(DATA_DIR, 'text_train.txt'),
label_file=os.path.join(DATA_DIR, 'labels_train.txt'),
max_seq_length=MAX_SEQ_LENGTH,
batch_size=BATCH_SIZE)
label_ids = train_data_layer.dataset.label_ids
num_classes = len(label_ids)
hidden_size = bert_model.hidden_size
ner_classifier = TokenClassifier(hidden_size=hidden_size,
num_classes=num_classes,
dropout=CLASSIFICATION_DROPOUT)
ner_loss = CrossEntropyLossNM(logits_dim=3)
input_ids, input_type_ids, input_mask, loss_mask, _, labels = train_data_layer()
hidden_states = bert_model(input_ids=input_ids,
token_type_ids=input_type_ids,
attention_mask=input_mask)
logits = ner_classifier(hidden_states=hidden_states)
loss = ner_loss(logits=logits, labels=labels, loss_mask=loss_mask)
# Describe evaluation DAG
eval_data_layer = nemo_nlp.nm.data_layers.BertTokenClassificationDataLayer(
tokenizer=tokenizer,
text_file=os.path.join(DATA_DIR, 'text_dev.txt'),
label_file=os.path.join(DATA_DIR, 'labels_dev.txt'),
max_seq_length=MAX_SEQ_LENGTH,
batch_size=BATCH_SIZE,
label_ids=label_ids)
eval_input_ids, eval_input_type_ids, eval_input_mask, _, eval_subtokens_mask, eval_labels \
= eval_data_layer()
hidden_states = bert_model(
input_ids=eval_input_ids,
token_type_ids=eval_input_type_ids,
attention_mask=eval_input_mask)
eval_logits = ner_classifier(hidden_states=hidden_states)
# Log the training loss after each step.
# Bug fix: the original passed `logging.info`, but `logging` is never
# imported in this notebook, so the callback would raise NameError on the
# first logging step.  print() keeps the notebook self-contained.
callback_train = nemo.core.SimpleLossLoggerCallback(
    tensors=[loss],
    print_func=lambda x: print("Loss: {:.3f}".format(x[0].item())))
train_data_size = len(train_data_layer)
# If you're training on multiple GPUs, this should be
# train_data_size / (batch_size * batches_per_step * num_gpus)
steps_per_epoch = int(train_data_size / (BATCHES_PER_STEP * BATCH_SIZE))
# Callback to evaluate the model
callback_eval = nemo.core.EvaluatorCallback(
eval_tensors=[eval_logits, eval_labels, eval_subtokens_mask],
user_iter_callback=lambda x, y: eval_iter_callback(x, y),
user_epochs_done_callback=lambda x: eval_epochs_done_callback(x, label_ids),
eval_step=steps_per_epoch)
# Callback to store checkpoints
# Checkpoints will be stored in checkpoints folder inside WORK_DIR
ckpt_callback = nemo.core.CheckpointCallback(
folder=neural_factory.checkpoint_dir,
epoch_freq=1)
lr_policy = WarmupAnnealing(NUM_EPOCHS * steps_per_epoch,
warmup_ratio=LR_WARMUP_PROPORTION)
neural_factory.train(
tensors_to_optimize=[loss],
callbacks=[callback_train, callback_eval, ckpt_callback],
lr_policy=lr_policy,
batches_per_step=BATCHES_PER_STEP,
optimizer=OPTIMIZER,
optimization_params={
"num_epochs": NUM_EPOCHS,
"lr": LEARNING_RATE
})
```
| github_jupyter |
## Install Java, Spark, and Findspark
```
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
!wget -q http://www-us.apache.org/dist/spark/spark-2.3.2/spark-2.3.2-bin-hadoop2.7.tgz
!tar xf spark-2.3.2-bin-hadoop2.7.tgz
!pip install -q findspark
```
## Set Environmental Variables
```
import os
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = "/content/spark-2.3.2-bin-hadoop2.7"
```
## Find Spark and start session
```
import findspark
findspark.init()
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("evr_nlp").getOrCreate()
# Read in data from S3 Buckets
from pyspark import SparkFiles
url ="https://s3.amazonaws.com/dataviz-curriculum/day_2/yelp_reviews.csv"
spark.sparkContext.addFile(url)
df = spark.read.csv(SparkFiles.get("yelp_reviews.csv"), sep=",", header=True)
df.show()
# Create a length column to be used as a future feature
from pyspark.sql.functions import length
data_df = df.withColumn('length', length(df['text']))
data_df.show()
```
### Feature Transformations
```
from pyspark.ml.feature import Tokenizer, StopWordsRemover, HashingTF, IDF, StringIndexer
# Create all the features to the data set
# Label-encode the class column and build the text feature chain:
# tokenize -> remove stop words -> hashed term frequencies -> TF-IDF.
pos_neg_to_num = StringIndexer(inputCol='class', outputCol='label')
tokenizer = Tokenizer(inputCol="text", outputCol="token_text")
stopremove = StopWordsRemover(inputCol='token_text', outputCol='stop_tokens')
# Bug fix: hash the stop-word-filtered tokens.  The original read
# 'token_text' here, which left the StopWordsRemover stage's output
# ('stop_tokens') completely unused in the pipeline.
hashingTF = HashingTF(inputCol="stop_tokens", outputCol='hash_token')
idf = IDF(inputCol='hash_token', outputCol='idf_token')
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.linalg import Vector
# Create feature vectors
clean_up = VectorAssembler(inputCols=['idf_token', 'length'], outputCol='features')
# Create a and run a data processing Pipeline
from pyspark.ml import Pipeline
data_prep_pipeline = Pipeline(stages=[pos_neg_to_num, tokenizer, stopremove, hashingTF, idf, clean_up])
# Fit and transform the pipeline
cleaner = data_prep_pipeline.fit(data_df)
cleaned = cleaner.transform(data_df)
# Show label and resulting features
cleaned.select(['label', 'features']).show()
from pyspark.ml.classification import NaiveBayes
# Break data down into a training set and a testing set
training, testing = cleaned.randomSplit([0.7, 0.3])
# Create a Naive Bayes model and fit training data
nb = NaiveBayes()
predictor = nb.fit(training)
# Tranform the model with the testing data
test_results = predictor.transform(testing)
test_results.show(5)
# Use the Class Evaluator for a cleaner description
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
acc_eval = MulticlassClassificationEvaluator()
acc = acc_eval.evaluate(test_results)
print("Accuracy of model at predicting reviews was: %f" % acc)
```
| github_jupyter |
# Vectorized Execution in SparkR
This notebook demonstrates Arrow optimization with some small data (~10 MB) so that people can actually try it out and refer to it when they run the benchmark in an actual cluster.
**Note that** the performance improvement is far greater when the size of the data is large. In my benchmark with the [500000 Records](http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/) dataset, I observed roughly **1000% ~ 5000%** improvement.
For more details, see [Databricks' blog](https://databricks.com/blog/2018/08/15/100x-faster-bridge-between-spark-and-r-with-user-defined-functions-on-databricks.html).
## Preparation
First, enable R cell magic to execute R codes in Jupyter.
```
import rpy2.rinterface
%load_ext rpy2.ipython
```
After that, prepare data to use. In this simple benchmark, [10000 Records](http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/) dataset is used.
```
# Download and unpack the sample-records dataset used by the benchmark.
import urllib.request
from zipfile import ZipFile
from io import BytesIO

# `rownum` can be 10000, 50000, 100000, ....
# See http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/
rownum = 10000
url = "http://eforexcel.com/wp/wp-content/uploads/2017/07/%s-Records.zip" % rownum
# Use the response as a context manager so the HTTP connection is closed
# promptly instead of leaking the socket (urlopen(...).read() never closes).
with urllib.request.urlopen(url) as response:
    payload = response.read()
ZipFile(BytesIO(payload)).extractall()
```
Initialize SparkR with enough memory and load libraries used for benchmarking. In this benchmark, it used 1 for shuffle and default parallelism to mimic the case when the large dataset is processed.
```
%%R
library(SparkR, lib.loc = c(file.path(Sys.getenv("SPARK_HOME"), "R", "lib")))
# Let's limit core; otherwise, Jupyter might die on some heavy workload.
sparkR.session(master = "local[1]")
library(microbenchmark)
```
Prepare R DataFrame to test from the data downloaded above.
```
%%R
# `rownum` can be 10000, 50000, 100000, ....
# See http://eforexcel.com/wp/downloads-16-sample-csv-files-data-sets-for-testing/
rownum <- 10000
df <- read.csv(paste0(rownum, " Records.csv"))
print(object.size(df), units = "MB")
# To load `createDataFrame` faster, let's turn on Arrow optimization
sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "true"))
spark_df <- cache(createDataFrame(df))
num <- count(spark_df) # trigger the count to make sure input DataFrame is cached.
```
Prepare the common function to use for the benchmarking.
```
%%R
# Time `func` with SparkR's Arrow optimization disabled vs. enabled.
# Each configuration runs 3 times; returns a `microbenchmark` object
# suitable for passing to boxplot().
benchmark_arrow <- function(func) {
  microbenchmark("No Arrow" = {
    # Toggle the session config off, then run the workload.
    sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "false"))
    func()
  },
  "Arrow" = {
    # Same workload with Arrow-based vectorized exchange enabled.
    sparkR.session(sparkConfig = list(spark.sql.execution.arrow.sparkr.enabled = "true"))
    func()
  }, times = 3L)
}
```
## R DataFrame to Spark DataFrame
```
%%R
func <- function() { createDataFrame(df) }
boxplot(benchmark_arrow(func))
```
## Spark DataFrame to R DataFrame
```
%%R
func <- function() { collect(spark_df) }
boxplot(benchmark_arrow(func))
```
## `dapply`
```
%%R
func <- function() { count(dapply(spark_df, function(rdf) { rdf }, schema(spark_df))) }
boxplot(benchmark_arrow(func))
```
## `gapply`
```
%%R
func <- function() {
count(gapply(spark_df,
"Month_of_Joining",
function(key, group) { group }, schema(spark_df)))
}
boxplot(benchmark_arrow(func))
```
| github_jupyter |
# GSD Examining for ec equivalents of Pop1p contacts to Pop6p and Pop7p
This effort is based on my notebook [Using snakemake with multiple chains or structures to report if residues interacting with a specific chain have equivalent residues in hhsuite-generated alignments](https://nbviewer.jupyter.org/github/fomightez/hhsuite3-binder/blob/main/notebooks/Using%20snakemake%20with%20multiple%20chains%20or%20structures%20to%20report%20if%20residues%20interacting%20with%20a%20specific%20chain%20have%20equivalent%20residues%20in%20hhsuite-generated%20alignments.ipynb) in order to look at ec equivalents of residues in Pop1p that contact Pop6p and Pop7p.
----
**Step #1:** Make a table with columns separated by spaces and each line as a row that specificies structures, chains, and hhr results file(s).
```text
6agb B F results_S288C_POP1.hhr 1 True True
6agb B G results_S288C_POP1.hhr 1 True True
6ah3 B F results_S288C_POP1.hhr 1 True True
6ah3 B G results_S288C_POP1.hhr 1 True True
7c79 B F results_S288C_POP1.hhr 1 True True
7c79 B G results_S288C_POP1.hhr 1 True True
7c7a B F results_S288C_POP1.hhr 1 True True
7c7a B G results_S288C_POP1.hhr 1 True True
6w6v B F results_S288C_POP1.hhr 1 True True
6w6v B G results_S288C_POP1.hhr 1 True True
```
**Step #2:** Save the table with the following name, `equiv_check_matrix.txt`. It has to have that name for the table to be recognized and processed to make the Jupyter notebook files with the reports.
Running following will generate an `equiv_check_matrix.txt` file here with the indicated content; however, you can, and will want to, skip running this if already made your own table. If you run it, it will replace your file though. Alternatively, you can edit the code below to make a table with the contents that interest you.
```
s='''6agb B F results_S288C_POP1.hhr 1 True True
6agb B G results_S288C_POP1.hhr 1 True True
6ah3 B F results_S288C_POP1.hhr 1 True True
6ah3 B G results_S288C_POP1.hhr 1 True True
7c79 B F results_S288C_POP1.hhr 1 True True
7c79 B G results_S288C_POP1.hhr 1 True True
7c7a B F results_S288C_POP1.hhr 1 True True
7c7a B G results_S288C_POP1.hhr 1 True True
6w6v B F results_S288C_POP1.hhr 1 True True
6w6v B G results_S288C_POP1.hhr 1 True True
'''
%store s >equiv_check_matrix.txt
```
**Step #3:** Get the HH-suite3-generated results files (`*.hhr` files).
```
# PUT THE *.hhr FILE, `results_S288C_POP1.hhr`,IN THE DIRECTORY WITH THIS NOTEBOOK
```
**Step #4:** Copy the Snakemake Snakefile to this directory
```
# Fetch the Snakemake workflow file if it is not already present locally.
import os
file_needed = "equiv_snakefile"
# `!curl` is an IPython shell escape (-O keeps the remote filename,
# -L follows redirects), so this cell must run inside Jupyter/IPython.
if not os.path.isfile(file_needed):
    !curl -OL https://raw.githubusercontent.com/fomightez/hhsuite3-binder/main/notebooks/equiv_snakefile
```
**Step #5:** Run snakemake and point it at the corresponding snake file `equiv_snakefile` and it will process the `equiv_check_matrix.txt` file to extract the information and make individual notebooks corresponding to analysis of the interactions for each line. This will be very similar to running the previous notebooks in this series with the items spelled out on each line.
The file snakemake uses in this pipeline, named `equiv_snakefile`, is already here. It is related to Python scripts and you can examine the text if you wis.
It will take about a minute or less to complete if you are running the demonstration.
```
!snakemake -s equiv_snakefile --cores 1
```
(For those knowledgeable about snakemake, I will note that I set the number of cores as one because I was finding with eight that occasionally a race condition would ensue where some of the auxiliary scripts fetched in the course of running the report-generating notebooks would overwrite each other as they were being accessed by another notebook, causing failures. Using one core avoids that hazard. I will add though that in most cases if you use multiple cores, you can easily get the additional files and a new archive made by running snakemake with your chosen number of cores again. I never saw a race hazard with my clean rule, and so if you want to quickly start over you can run `!snakemake -s equiv_snakefile --cores 8 clean`.)
#### Make the reports clearer by substituting in the names of the proteins in place of the Chain designations.
```
# Map chain identifiers in the generated reports to protein names so the
# reports read naturally.
chain2name_pairs = {
    "Chain B":"Pop1p",
    "Chain F":"Pop6p",
    "Chain G":"Pop7p",
    }
# Make a list of the report-containing notebooks.
prefix_for_report_nbs = "equivalents_report_for_"
import sys
import glob
import re
equivalents_report_nb_pattern = f"{prefix_for_report_nbs}*.ipynb"
equivalents_report_nbs = glob.glob(equivalents_report_nb_pattern)
def make_swaps(file_name,key_value_pairs):
    '''
    Takes a file name and edits every occurrence of each key in that file,
    replacing that text (case-insensitively) with the corresponding value.
    Saves the fixed file in place. Nothing is returned from this function.
    '''
    output_file_name = "temp.txt"
    with open(file_name, 'r') as thefile:
        nb_text=thefile.read()
    for k,v in key_value_pairs.items():
        #nb_text=nb_text.replace(k.lower(),v) # if wasn't case insensitive for key
        # case-insensitive string replacement from https://stackoverflow.com/a/919067/8508004
        insensitive = re.compile(re.escape(k), re.IGNORECASE)
        nb_text = insensitive.sub(v, nb_text)
    with open(output_file_name, 'w') as output_file:
        output_file.write(nb_text)
    # Replace the original file with the edited one. `!mv` is an IPython
    # shell escape, so this function only works inside Jupyter/IPython.
    !mv {output_file_name} {file_name}
    # Feedback
    sys.stderr.write("Chain designations swapped for names in {}.\n".format(file_name))
# Apply the chain-to-name swaps to every report notebook found above.
for nbn in equivalents_report_nbs:
    make_swaps(nbn,chain2name_pairs)
```
#### Make a new archive with the substituted files
```
# Delete the archive WITHOUT the substituted protein names so only the
# updated bundle remains.
!rm equivalents_report_nbs*.tar.gz
import datetime
# Timestamp the archive name (e.g. equivalents_report_nbsJan0120241230.tar.gz)
# so successive runs don't collide.
now = datetime.datetime.now()
results_archive = f"equivalents_report_nbs{now.strftime('%b%d%Y%H%M')}.tar.gz"
# Bundle every report notebook into a single gzipped tarball.
!tar -czf {results_archive} {" ".join(equivalents_report_nbs)}
sys.stderr.write(f"Be sure to download {results_archive}.")
```
**Step #4:** Verify the Jupyter notebooks with the reports were generated.
You can go to the dashboard and see the output of running snakemake. To do that click on the Jupyter logo in the upper left top of this notebook and on that page you'll look in the notebooks directory and you should see files that begin with `equivalents_report_` and end with `.ipynb`. You can examine some of them to ensure all is as expected.
If things seem to be working and you haven't run your data yet, run `!snakemake -s equiv_snakefile --cores 8 clean` in a cell to reset things, and then edit & save `equiv_check_matrix.txt` to have your information, and then run the `!snakemake -s equiv_snakefile --cores 1` step above, again.
**Step #5:** If this was anything other than the demonstration run, download the archive containing all the Jupyter notebooks bundled together.
For ease in downloading, all the created notebooks have been saved as a compressed archive so that you only need to retieve and keep track of one file. The file you are looking for begins with `equivalents_report_nbs` in front of a date/time stamp and ends with `.tar.gz`. The snakemake run will actually highlight this archive towards the very bottom of the run, following the words 'Be sure to download'.
**Download that file from this remote, temporary session to your local computer.** You should see this archive file ending in `.tar.gz` on the dashboard. Toggle next to it to select it and then select `Download` to bring it from the remote Jupyterhub session to your computer. If you don't retrieve that file and the session ends, you'll need to re-run to get the results again.
You should be able to unpack that archive using your favorite software to extract compressed files. If that is proving difficult, you can always reopen a session like you did to run this series of notebooks and upload the archive and then run the following command in a Jupyter notebook cell to unpack it:
```bash
!tar xzf equivalents_report_nbs*
```
(If you are running that command on the command line, leave off the exclamation book.)
You can then examine the files in the session or download the individual Jupyter notebooks similar to the advice on how to download the archive given above.
-----
Enjoy.
-----
| github_jupyter |
## Normalized Differential Cross Section


In 1928 Klein and Nishina applied Dirac’s relativistic theory of
the electron to the Compton effect to obtain improved cross sections. The differential
cross section for photon scattering at angle $\phi$ per unit solid angle and per electron,
corresponding to Equation:
\begin{equation}
\frac{d{_e\sigma}}{d\Omega_\phi}=\frac{r_0^2}{2} (1+ \text{cos}^2\phi)
\label{TH}
\end{equation}
from Thomson’s theory, may be written in the form
\begin{equation}
\frac{d{_e\sigma}}{d\Omega_\phi}=\frac{r_0^2}{2}\bigg (\frac{h\nu'}{h\nu}\bigg)^2\bigg (\frac{h\nu}{h\nu'}+\frac{h\nu'}{h\nu}-\text{sin}^2\phi\bigg)
\label{TH1}
\end{equation}
where, $ h\nu'=h\nu\frac{1}{1+\varepsilon (1+\text{cos}\phi)}$, $\varepsilon = \frac{h\nu}{m_ec^2}$ and $m_ec^2$=the energy equivalence of the electron rest mass(511 keV);
r$_0$ = classical electron radius(= 2.8 $\times$ 10$^{-15}$m)
h$\nu$ = incident photon energy
h$\nu'$ = Compton scattered photon energy
and
$\phi$ = polar scattering angle in the photon coordinate
system
The differential solid angle is represented
by $d\Omega = 2\pi \text{sin}\phi d\phi$ assuming all azimuthal
angles are equally probable. This crosssection
can be expressed as cross-section
based on polar angle ($\phi$) given by the relation:
$\frac{d{_e\sigma}}{d\phi}=\pi r_0^2\bigg(\frac{h\nu'}{h\nu}\bigg)^2\bigg(\frac{h\nu}{h\nu'}+\frac{h\nu'}{h\nu}-\text{sin}^2\phi\bigg)\text{sin}\phi$
If h$\nu$ $\approx$ h$\nu'$
$\frac{d{_e\sigma}}{d\Omega_\phi}=\frac{r_0^2}{2}(2-\text{sin}^2\phi)=\frac{r_0^2}{2}(1+\text{cos}^2\phi)$
The total K-N cross section per electron ($_e\sigma$):
$$
_e\sigma=2\pi \int_{\phi=0}^\pi \frac{d{_e\sigma}}{d\Omega_\phi} \text{sin}\phi \text{d}\phi
\label{KN}
$$
$$
=\pi r_0^2 \int_{0}^\pi \bigg(\frac{h\nu'}{h\nu}\bigg)^2\bigg(\frac{h\nu}{h\nu'}+\frac{h\nu'}{h\nu}-\text{sin}^2\phi\bigg)\text{sin}\phi \text{d}\phi$$
\begin{equation}
_e\sigma =2\pi r_0^2 \bigg[\frac{1+\alpha}{\alpha^2} \bigg (\frac{2 (1+\alpha)}{1+2\alpha}-\frac{ln (1+2\alpha)}{\alpha}\bigg)+\frac{ln (1+2\alpha)}{2\alpha}-\frac{1+3\alpha}{ (1+2\alpha)^2}\bigg]
\label{KN1}
\end{equation}
Where $\alpha=\varepsilon=\frac{h\nu}{m_ec^2}$, in which h$\nu$ is to be expressed in MeV and m$_e$c$^2$ = 0.511 MeV.
The relation between the electron scattering crosssection and the attenuation coefficient ($\mu$) is $\mu$=n$_{v}$$\times$$\sigma_e$, where n$_{v}$ is number of electron per unit volume.
## References:
### Attix, F. H. (2008). Introduction to radiological physics and radiation dosimetry. John Wiley & Sons.
### https://chem.libretexts.org/Courses/Sacramento_City_College/SCC%3A_CHEM_330_-_Adventures_in_Chemistry_(Alviar-Agnew)/11%3A_Nuclear_Chemistry/11.02%3A_Nuclear_Equations
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import math
from statistics import stdev

# Polar scattering angles on a one-degree grid, 0-179 degrees.
phi = np.arange(180)
Einit = 0.8  # incident photon energy h*nu in MeV
# Compton-scattered photon energy h*nu'; 0.511 MeV is the electron rest energy.
Escat = Einit/(1 + Einit/0.511*(1 - np.cos(phi*math.pi/180)))  # Scattering Energy
# Klein-Nishina differential cross-section per polar angle:
# d(sigma)/d(phi) = pi*r0^2*(E'/E)^2*(E/E' + E'/E - sin^2 phi)*sin(phi),
# with r0 = 2.818e-15 m (classical electron radius).
dsigma_dphi = math.pi*(2.818e-15)**2*(Escat/Einit)**2*(Einit/Escat + Escat/Einit - np.sin(phi*math.pi/180)**2)*np.sin(phi*math.pi/180)
# Plot normalized to the maximum, against phi/180 (normalized angle).
plt.plot(phi/180, dsigma_dphi/max(dsigma_dphi))
plt.fill_between(phi/180, dsigma_dphi/max(dsigma_dphi), color='red', alpha=0.2, hatch='/')
plt.xlabel(r'$\phi$, normalized')
# '\\sigma' fixes the invalid escape sequence '\s' (SyntaxWarning on 3.12+);
# the rendered mathtext label is unchanged.
plt.ylabel('$d\\sigma$/d\u03A6, normalized')
plt.title('Compton Differential Cross-Section, 800KeV, photons')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()
print(max(dsigma_dphi))
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import math
from statistics import stdev

# Polar scattering angles on a one-degree grid, 0-179 degrees.
phi = np.arange(180)
Einit = 8  # Energy in MeV
# Compton-scattered photon energy h*nu'; 0.511 MeV is the electron rest energy.
Escat = Einit/(1 + Einit/0.511*(1 - np.cos(phi*math.pi/180)))  # Scattering Energy
# Klein-Nishina differential cross-section per polar angle. The classical
# electron radius is 2.818e-15 m (the text states r0 = 2.8e-15 m); the
# original 2.818e-13 (cm) was inconsistent with the 800 keV cell above.
dsigma_dphi = math.pi*(2.818e-15)**2*(Escat/Einit)**2*(Einit/Escat + Escat/Einit - np.sin(phi*math.pi/180)**2)*np.sin(phi*math.pi/180)
# Plot normalized to the maximum, against phi/180 (normalized angle).
plt.plot(phi/180, dsigma_dphi/max(dsigma_dphi))
plt.fill_between(phi/180, dsigma_dphi/max(dsigma_dphi), color='yellow', alpha=0.2, hatch='/')
plt.xlabel('\u03A6, normalized')  # Unicode Character of Symbol
# '\\sigma' fixes the invalid escape sequence '\s' (SyntaxWarning on 3.12+).
plt.ylabel('$d\\sigma$/d\u03A6, normalized')
plt.title('Compton Differential Cross-Section, 8MeV, photons')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()
print(max(dsigma_dphi))
```
| github_jupyter |
### Inheriting from type
In the last lectures, we saw how classes can be created by calling the `type` class.
But what if we wanted to use something other than `type` to construct classes?
Since `type` is a class, maybe we could define a class that inherits from `type` (so we can leverage the actual type creation process), and override some things that would enable us to inject something in the class creation process.
Here we want to intercept the creation of the `type` instance before it is created, so we would want to use the `__new__` method.
Remember that the `__new__` method basically needs to build and return the new instance. So we'll do the customizations we want, but ultimately we'll punt (delegate) to the `type` class to do the actual work, just adding the tweaks (before and/or after the class creation) we want.
Just a quick reminder of how the static `__new__` method works in general:
```
class Test:
    # Demonstrates the static __new__ hook: Python passes the class itself
    # plus the constructor arguments. Because nothing is returned here,
    # `Test(...)` below evaluates to None.
    def __new__(cls, *args, **kwargs):
        print(f'New instance of {cls} being created with these values:', args, kwargs)

t = Test(10, 20, kw='a')
```
And it's really the same as doing this:
```
Test.__new__(Test, 10, 20, kw='a')
```
Of course, it's now up to us to return an object from the `__new__` function.
So, instead of calling `type` to create the class (type), let's create a custom type generator by subclassing `type`.
We'll inherit from `type`, and override the `__new__` function to create the instance of the class.
```
import math
class CustomType(type):
    """Type factory that augments every class it constructs.

    Construction is delegated to ``type``; afterwards a ``circ`` method
    (circumference, assuming instances expose an ``r`` attribute) is
    attached to the freshly built class object.
    """

    def __new__(cls, name, bases, class_dict):
        # Same three-argument signature that type.__new__ expects; Python
        # supplies these pieces whenever a class is built through us.
        print('Customized type creation!')
        new_class = type.__new__(cls, name, bases, class_dict)
        # Inject an extra method onto the class after it exists.
        new_class.circ = lambda self: 2 * math.pi * self.r
        return new_class
```
Now let's go through the same process to create our `Circle` class that we used in the last lecture, the manual way, but using `CustomType` instead of `type`.
```
class_body = """
def __init__(self, x, y, r):
self.x = x
self.y = y
self.r = r
def area(self):
return math.pi * self.r ** 2
"""
```
And we create our class dictionary by executing the above code in the context of that dictionary:
```
class_dict = {}
exec(class_body, globals(), class_dict)
```
Then we create the `Circle` class:
```
Circle = CustomType('Circle', (), class_dict)
```
We basically customized the class creation, and `Circle` is just a standard object, but, as you can see below, the type of our class is no longer `type`, but `CustomType`.
```
type(Circle)
```
Of course, `Circle` is still an instance of `type` since `CustomType` is a subclass of `type`:
```
isinstance(Circle, CustomType), issubclass(CustomType, type)
```
And just like before, `Circle` still has the `__init__` and `area` methods:
```
hasattr(Circle, '__init__'), hasattr(Circle, 'area')
```
So we can use `Circle` just as normal:
```
c = Circle(0, 0, 1)
c.area()
```
Additionally, we injected a new function, `circ`, into the class while we were constructing it in the `__new__` method of `CustomType`:
```
hasattr(Circle, 'circ')
c.circ()
```
So, this is another example of metaprogramming!
But yeah, creating classes (types) this way is a bit tedious!!!
This is where the concept of a `metaclass` comes in, which we'll cover in the next set of lectures.
| github_jupyter |
```
#!/usr/bin/env python
# coding: utf-8
%matplotlib inline
%reload_ext autoreload
%autoreload 2
import sys
sys.path.insert(0, '../')
from pyMulticopterSim.simulation.env import *
# execute only if run as a script
env = simulation_env()
env.proceed_motor_speed("uav1", np.array([1100.0,1100.0,1100.0,1100.0]),0.1)
env.plot_state("uav1")
#!/usr/bin/env python
# coding: utf-8
%matplotlib inline
%reload_ext autoreload
%autoreload 2
import os, sys, time, copy, yaml
from scipy.special import factorial, comb, perm
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import h5py
sys.path.insert(0, '../')
from pyTrajectoryUtils.pyTrajectoryUtils.utils import *
from multicopter_dynamics_sim import MulticopterDynamicsSim as uav_dyns
from pyMulticopterSim.simulation.env import *
from pyTrajectoryUtils.pyTrajectoryUtils.PIDcontroller import *
def plot_state(time_array, state, state_ref, label_txt='vel', dim=3, flag_save=False):
    """Plot `dim` components of a simulated state against its reference.

    Args:
        time_array: 1-D array of sample times.
        state: (N, dim) array of simulated samples.
        state_ref: (N, dim) array of reference samples to overlay.
        label_txt: legend / output-file prefix for this quantity.
        dim: number of state dimensions to draw.
        flag_save: when True, also save the figure as a PNG.

    NOTE(review): relies on the module-level globals `failure_idx`,
    `status_ref`, `save_dir` and `save_idx` -- confirm they are defined
    before calling.
    """
    start_idx = 0
    # Truncate at the recorded failure point (if any) so only the valid
    # portion of the run is plotted.
    if failure_idx >= 0:
        end_idx = min(status_ref.shape[0], state.shape[0], time_array.shape[0], failure_idx)
    else:
        end_idx = min(status_ref.shape[0], state.shape[0], time_array.shape[0])
    time_array_t = time_array[start_idx:end_idx]
    plt.ioff()
    fig = plt.figure(figsize=(10,5))
    ax = fig.add_subplot(111)
    for i in range(dim):
        ax.plot(time_array_t, state[start_idx:end_idx,i], '-', label='{} dim {}'.format(label_txt,i))
        ax.plot(time_array_t, state_ref[start_idx:end_idx,i], '-', label='{} ref dim {}'.format(label_txt,i))
    ax.legend()
    ax.grid()
    plt.show()
    plt.pause(0.1)
    if flag_save:
        plt.savefig('{}/{}_{}.png'.format(save_dir,save_idx,label_txt))
    plt.close()
if __name__ == "__main__":
    env = simulation_env()
    controller = UAV_pid_tracking()

    # Reference trajectory CSV. Columns 2:5 appear to be position and 5:8
    # velocity (inferred from the slices below -- confirm against the file).
    traj_ref_path = '../test/sample_trajectory.csv'
    df = pd.read_csv(traj_ref_path, sep=',', header=None)
    status_ref = df.values[1:,:]   # drop the first (header) row
    print(status_ref.shape)

    freq_ctrl = 200   # controller update rate [Hz]
    freq_sim = 2000   # simulator update rate [Hz]
    max_time = 100    # cap on simulated duration [s]
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int() is the correct spelling.
    dt_micro_ctrl = int(1e6/freq_ctrl)          # control period [us] (currently unused)
    freq_sim_update = int(freq_sim/freq_ctrl)   # sim steps per control step (currently unused)
    N = min(status_ref.shape[0], max_time*freq_ctrl)

    traj_ref = status_ref[0,:]
    curr_time = 0
    # Start the vehicle exactly on the first reference sample.
    env.set_state_vehicle("uav1", position=status_ref[0,2:5], velocity=status_ref[0,5:8])
    state_t = env.get_state("uav1")
    pos = state_t["position"]
    vel = state_t["velocity"]
    acc = state_t["acceleration"]
    att_q = state_t["attitude"]
    att = state_t["attitude_euler_angle"]
    angV = state_t["angular_velocity"]
    angA = state_t["angular_acceleration"]
    ms = state_t["motor_speed"]
    ma = state_t["motor_acceleration"]
    raw_acc = state_t["acceleration_raw"]
    raw_gyro = state_t["gyroscope_raw"]
    raw_ms = state_t["motor_speed_raw"]

    # Per-control-step logs (one row per step).
    pos_array = np.zeros((N,3))
    vel_array = np.zeros((N,3))
    acc_array = np.zeros((N,3))
    att_array = np.zeros((N,3))
    att_q_array = np.zeros((N,4))
    raw_acc_array = np.zeros((N,3))
    raw_gyro_array = np.zeros((N,3))
    filtered_acc_array = np.zeros((N,3))
    filtered_gyro_array = np.zeros((N,3))
    ms_array = np.zeros((N,4))
    ms_c_array = np.zeros((N,4))
    time_array = np.zeros(N)
    pos_err_array = np.zeros(N)
    yaw_err_array = np.zeros(N)

    # Failure bookkeeping consumed by plot_state (-1 means "no failure").
    failure_idx = -1
    failure_start_idx = -1
    failure_end_idx = -1

    for it in range(N):
        curr_time = int(1.0*(it+1)/freq_ctrl*1e6)   # elapsed time [us]
        traj_ref = status_ref[it,2:]
        pos_ref = traj_ref[:3]
        vel_ref = traj_ref[3:6]
        # PID controller produces the commanded motor speeds for this step,
        # then the simulator advances one control period.
        ms_c = controller.control_update(traj_ref, pos, vel, acc, att, angV, angA, 1.0/freq_ctrl)
        env.proceed_motor_speed("uav1", ms_c, 1.0/freq_ctrl)
        state_t = env.get_state("uav1")
        pos = state_t["position"]
        vel = state_t["velocity"]
        acc = state_t["acceleration"]
        att_q = state_t["attitude"]
        att = state_t["attitude_euler_angle"]
        angV = state_t["angular_velocity"]
        angA = state_t["angular_acceleration"]
        ms = state_t["motor_speed"]
        ma = state_t["motor_acceleration"]
        raw_acc = state_t["acceleration_raw"]
        raw_gyro = state_t["gyroscope_raw"]
        # Was "motor_speed": read the raw sensor channel here to stay
        # consistent with the pre-loop initialization above.
        raw_ms = state_t["motor_speed_raw"]

        time_array[it] = 1.0*(it+1)/freq_ctrl
        pos_array[it,:] = pos
        vel_array[it,:] = vel
        acc_array[it,:] = acc
        att_array[it,:] = att
        att_q_array[it,:] = att_q
        raw_acc_array[it,:] = raw_acc
        raw_gyro_array[it,:] = raw_gyro
        filtered_acc_array[it,:] = acc
        filtered_gyro_array[it,:] = angV
        ms_array[it,:] = ms
        ms_c_array[it,:] = ms_c

    # Compare the flown position against the reference trajectory.
    plot_state(time_array, pos_array, status_ref[:,2:5], label_txt='pos', dim=3)
#!/usr/bin/env python
# coding: utf-8
%matplotlib inline
%reload_ext autoreload
%autoreload 2
# High-level wrapper: run the trajectory-tracking simulation once and plot it.
import os, sys, time, copy, yaml
sys.path.insert(0, '../')
from pyTrajectoryUtils.pyTrajectoryUtils.trajectorySimulation import *
traj_sim = TrajectorySimulation()
# Single trial against the sample trajectory. The max/min error arguments
# presumably bound the acceptable position and yaw deviation -- confirm
# against TrajectorySimulation.run_simulation's documentation.
res = traj_sim.run_simulation(traj_ref_path='./sample_trajectory.csv', N_trial=1,
    max_pos_err=5.0, min_pos_err=0.5,
    max_yaw_err=30.0, min_yaw_err=5.0,
    freq_ctrl=200)
traj_sim.plot_result(debug_value=res[0], flag_save=False)
```
| github_jupyter |
# Week 5: Root Finding
## Rahman notes:
In the theory lectures we looked at three different root finding techniques, and applied them to a function that looked something like $f(x) = x^3$. Here let's look at a concrete, but nontrivial function. Consider the functions $f(x) = x\cos(x)$ on the interval $[2,5]$. Suppose we are looking for the local extrema of this function; i.e., we look for the roots of $f'(x) = \cos(x) - x\sin(x)$.
Lets first plot the two functions to give us a graphical idea of what is happening.
```
import numpy as np
import matplotlib.pyplot as plt
# Grid on [2, 5] with step 0.001 (the 5.01 end point ensures 5.0 is included).
x = np.arange(2, 5.01, 0.001)
# f(x) = x*cos(x) and its derivative f'(x) = cos(x) - x*sin(x); we look for
# roots of f' (i.e. the local extrema of f).
f = x*np.cos(x)
df = np.cos(x) - x*np.sin(x)
plt.plot(x, f, x, df, linewidth=4)
```
## Section Search: Iterate and Pray
Notice that we have a discretization of x already in the plot above. And from that we have a discretization of $f'(x)$. Now all we have to do is find the entry of x that corresponds to $f'(x)$ that is closest to zero. In order to do that all we have to do is find the entry that gives us the minimum of the absolute value of $f'(x)$.
```
# Grid search ("iterate and pray"): report the grid point where |f'| is
# smallest as the root approximation.
x = np.arange(2, 5.01, 0.001)
# Recompute f on this grid instead of relying on the value left over from a
# previous cell -- keeps the cell self-contained and the array shapes in sync.
f = x*np.cos(x)
df = np.cos(x) - x*np.sin(x)
root_entry = np.argmin(np.abs(df))
root = x[root_entry]
print('root = ', root, '\n derivative = ', df[root_entry])
plt.plot(x, f, x, df, linewidth=4)
plt.plot(root, 0, 'k.', root, f[root_entry], 'g.', markersize = 40)
```
Remember, these are just approximations. That is not far off! But that is because we used quite a fine discretization. Let's try it with a much coarser discretization. This time it's quite far off the root, and since it's quite far off the root, can we even trust the local minima?
```
# Same grid search on a much coarser grid (step 0.5): the reported "root"
# can land far from the true root, illustrating discretization error.
x = np.arange(2, 5.01, 0.5)
f = x*np.cos(x)
df = np.cos(x) - x*np.sin(x)
root_entry = np.argmin(np.abs(df));
root = x[root_entry];
print('root = ', root, '\n derivative = ', df[root_entry])
plt.plot(x, f, x, df, linewidth=4)
plt.plot(root, 0, 'k.', root, f[root_entry], 'g.', markersize = 40)
```
Now what if we weren't restriced to a domain where the function $f'(x)$ only has one root? Say $[-20, 5]$.
```
x = np.arange(-20, 5.01, 0.001)
df = np.cos(x) - x*np.sin(x)
plt.plot(x, df, linewidth = 4)
```
Notice that this has quite a few roots. How do we find them all? This requires us to be a bit clever. Let's do a for loop that goes through the entirety of x, and pick out the points when $|f'(x)|$ is decreasing and then suddenly changes direction to increasing. Right after it changes direction we will pick the entry before it. This is like going over the edge, and then figure out the edge was just one step behind you.
```
# Find every root of f'(x) = cos(x) - x*sin(x) on [-20, 5] by scanning for
# grid-local minima of |f'|.
x = np.arange(-20, 5.01, 0.001)
df = np.cos(x) - x*np.sin(x)
roots = []
for i in range(x.shape[0]-2):
    # |df| decreasing into index i+1 and increasing out of it means the
    # grid-local minimum of |f'| is AT i+1 -- the original reported x[i],
    # one step short of the detected minimum.
    if abs(df[i+2]) > abs(df[i+1]) and abs(df[i]) > abs(df[i+1]):
        roots.append(x[i+1])
        print('\n root = ', x[i+1], '\n derivative = ', df[i+1])
```
## Bisection Method
With bisection we must know that there is a root between two points a and b. If there are multiple roots, it will only find one.
For the interval [-2, 5], we know for sure that there is a root between 2 and 5. So we will bisect our interval each time until we hone in on a root.
```
# Bisection on [2, 5]: f'(2) and f'(5) have opposite signs, so a root of
# f'(x) = cos(x) - x*sin(x) lies between them. Ten halvings narrow the
# bracket to width 3/2**10 ~ 0.003.
a = 2
b = 5
df_a = np.cos(a) - a*np.sin(a)
xmid = (a+b)/2
df_mid = np.cos(xmid) - xmid*np.sin(xmid)
for i in range(10):
    if df_mid == 0:
        # Landed exactly on the root (unlikely with floats, but possible).
        break
    elif np.sign(df_mid) == np.sign(df_a):
        # Root is in the right half: move the left endpoint to the midpoint.
        a = xmid
    else:
        # Root is in the left half: move the right endpoint.
        b = xmid
    xmid = (a+b)/2
    df_mid = np.cos(xmid) - xmid*np.sin(xmid)
    # Refresh f'(a) since `a` may have moved. f'(b) is never used by the
    # sign test, so the original's unused df_b computations were dropped.
    df_a = np.cos(a) - a*np.sin(a)
print('\n root = ', xmid, '\n derivative = ', np.cos(xmid) - xmid*np.sin(xmid))
# Same bisection, but with 100 halvings: the bracket width 3/2**100 is far
# below double precision, so xmid converges to the root at machine accuracy.
a = 2
b = 5
df_a = np.cos(a) - a*np.sin(a)
xmid = (a+b)/2
df_mid = np.cos(xmid) - xmid*np.sin(xmid)
for i in range(100):
    if df_mid == 0:
        # Exact hit -- stop early.
        break
    elif np.sign(df_mid) == np.sign(df_a):
        # Root is in the right half: advance the left endpoint.
        a = xmid
    else:
        # Root is in the left half: pull in the right endpoint.
        b = xmid
    xmid = (a+b)/2
    df_mid = np.cos(xmid) - xmid*np.sin(xmid)
    # Refresh f'(a); f'(b) is never consulted, so the original's unused
    # df_b computations were dropped.
    df_a = np.cos(a) - a*np.sin(a)
print('\n root = ', xmid, '\n derivative = ', np.cos(xmid) - xmid*np.sin(xmid))
```
## Newton's Method
With Newton's method we only need a nearby point to the root, however, we better be sure it's close to that root and not some other root. And even then it may not converge.
```
# Newton's method from x0 = 5: x_{n+1} = x_n - f'(x_n)/f''(x_n).
# NOTE(review): which root (if any) this converges to depends on the first
# step; the surrounding text warns convergence is not guaranteed.
x = 5
for i in range(10):
    x = x - (np.cos(x) - x*np.sin(x))/(-2*np.sin(x) - x*np.cos(x)); #We do have to calculate the derivative here
print('\n root = ', x, '\n derivative = ', np.cos(x) - x*np.sin(x))

# Same iteration started closer to the bracketed root (x0 = 4).
x = 4
for i in range(10):
    x = x - (np.cos(x) - x*np.sin(x))/(-2*np.sin(x) - x*np.cos(x)); #We do have to calculate the derivative here
print('\n root = ', x, '\n derivative = ', np.cos(x) - x*np.sin(x))
```
## Brief word on built in functions
Built-in functions are useful, and they also allow you to not have to think too much about the problem at hand, but we should make sure not to be lulled into a sense of comfort. After all, these are just computer programs, not magic. I personally, for my work, only use built-in functions if I know for sure they are optimized (e.g. on MATLAB most Linear Algebra functions are more optimized than something I could write, especially since I am not an expert in Linear Algebra). I am an expert in Differential Equations, and in my work I rarely ever use a built-in differential equation solver because I often need it to do something specialized that I know for sure the creators of the solver did not optimize. Further, as we have seen with the backslash operator, we need to know how it works. If we don't, we can very easily do irreparable damage to our codes (or worse, someone else's code that we are contributing to).
You will probably not have a need to use the following function, but it is there if on the off chance you need it. The following function looks for the local minima of our original $f(x)$ between two points, similar to bisection. Here we use 2 and 5. It takes in an anonymous function and the two bounds 2 and 5.
```
import scipy.optimize

# Locate the minimizer of f(x) = x*cos(x) on [2, 5] using SciPy's bounded
# scalar minimizer (derivative-free, analogous to the bracketing above).
def f(x):
    return x * np.cos(x)

xmin = scipy.optimize.minimize_scalar(f, bounds=(2, 5), method='Bounded')
print(xmin.x)
```
| github_jupyter |
##### Copyright 2021 The Cirq Developers
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/qcvv/xeb_theory"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/qcvv/xeb_theory.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
```
# Install Cirq on the fly (e.g. on Colab) if it is not already importable.
try:
    import cirq
except ImportError:
    print("installing cirq...")
    # `!pip` is an IPython shell escape; --pre allows pre-release builds.
    !pip install --quiet cirq --pre
    print("installed cirq.")
```
# Cross Entropy Benchmarking Theory
Cross entropy benchmarking uses the properties of random quantum programs to determine the fidelity of a wide variety of circuits. When applied to circuits with many qubits, XEB can characterize the performance of a large device. When applied to deep, two-qubit circuits it can be used to accurately characterize a two-qubit interaction potentially leading to better calibration.
```
# Standard imports
import numpy as np
import cirq
from cirq.contrib.svg import SVGCircuit
```
## The action of random circuits with noise
An XEB experiment collects data from the execution of random circuits
subject to noise. The effect of applying a random circuit with unitary $U$ is
modeled as $U$ followed by a depolarizing channel. The result is that the
initial state $|𝜓⟩$ is mapped to a density matrix $ρ_U$ as follows:
$$
|𝜓⟩ → ρ_U = f |𝜓_U⟩⟨𝜓_U| + (1 - f) I / D
$$
where $|𝜓_U⟩ = U|𝜓⟩$, $D$ is the dimension of the Hilbert space, $I / D$ is the
maximally mixed state, and $f$ is the fidelity with which the circuit is
applied.
For this model to be accurate, we require $U$ to be a random circuit that scrambles errors. In practice, we use a particular circuit ansatz consisting of random single-qubit rotations interleaved with entangling gates.
### Possible single-qubit rotations
These 8×8 = 64 possible rotations are chosen randomly when constructing the circuit.
Geometrically, we choose 8 axes in the XY plane to perform a quarter-turn (pi/2 rotation) around. This is followed by a rotation around the Z axis of 8 different magnitudes.
```
# Eight evenly spaced exponents in [0, 7/4]: used both as the eight axis
# angles in the XY plane and as the eight Z-rotation magnitudes.
exponents = np.linspace(0, 7/4, 8)
exponents

import itertools

# The 8*8 = 64 candidate single-qubit layers: a pi/2 rotation (x_exponent=0.5)
# about an XY-plane axis set by axis_phase_exponent, followed by a Z rotation.
SINGLE_QUBIT_GATES = [
    cirq.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a)
    for a, z in itertools.product(exponents, repeat=2)
]
SINGLE_QUBIT_GATES[:10], '...'
```
### Random circuit
We use `random_rotations_between_two_qubit_circuit` to generate a random two-qubit circuit. Note that we provide the possible single-qubit rotations from above and declare that our two-qubit operation is the $\sqrt{i\mathrm{SWAP}}$ gate.
```
import cirq.google as cg
from cirq.experiments import random_quantum_circuit_generation as rqcg
SQRT_ISWAP = cirq.ISWAP**0.5
q0, q1 = cirq.LineQubit.range(2)
circuit = rqcg.random_rotations_between_two_qubit_circuit(
q0, q1,
depth=4,
two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP(a, b),
single_qubit_gates=SINGLE_QUBIT_GATES
)
SVGCircuit(circuit)
```
## Estimating fidelity
Let $O_U$ be an observable that is diagonal in the computational
basis. Then the expectation value of $O_U$ on $ρ_U$ is given by
$$
Tr(ρ_U O_U) = f ⟨𝜓_U|O_U|𝜓_U⟩ + (1 - f) Tr(O_U / D).
$$
This equation shows how $f$ can be estimated, since $Tr(ρ_U O_U)$ can be
estimated from experimental data, and $⟨𝜓_U|O_U|𝜓_U⟩$ and $Tr(O_U / D)$ can be
computed.
Let $e_U = ⟨𝜓_U|O_U|𝜓_U⟩$, $u_U = Tr(O_U / D)$, and $m_U$ denote the experimental
estimate of $Tr(ρ_U O_U)$. We can write the following linear equation (equivalent to the
expression above):
$$
m_U = f e_U + (1-f) u_U \\
m_U - u_U = f (e_U - u_U)
$$
```
# Make long circuits (which we will truncate)
MAX_DEPTH = 100
circuits = [
rqcg.random_rotations_between_two_qubit_circuit(
q0, q1,
depth=MAX_DEPTH,
two_qubit_op_factory=lambda a, b, _: SQRT_ISWAP(a, b),
single_qubit_gates=SINGLE_QUBIT_GATES)
for _ in range(10)
]
# We will truncate to these lengths
cycle_depths = np.arange(3, MAX_DEPTH, 9)
cycle_depths
```
### Execute circuits
Cross entropy benchmarking requires sampled bitstrings from the device being benchmarked *as well as* the true probabilities from a noiseless simulation. We find these quantities for all `(cycle_depth, circuit)` pairs.
```
# Simulators: a noiseless one (for the ideal probabilities) and one with a
# depolarizing channel of strength P_DEPOL (for the "experimental" samples).
pure_sim = cirq.Simulator()
P_DEPOL = 5e-3
noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(P_DEPOL))

# These two qubit circuits have 2^2 = 4 probabilities
DIM = 4

records = []  # one record per (cycle_depth, circuit) combination
for cycle_depth in cycle_depths:
    for circuit_i, circuit in enumerate(circuits):

        # Truncate the long circuit to the requested cycle_depth.
        # The 2*d + 1 arithmetic implies each cycle spans two moments plus
        # one trailing single-qubit layer — confirm against the generator.
        circuit_depth = cycle_depth * 2 + 1
        assert circuit_depth <= len(circuit)
        trunc_circuit = circuit[:circuit_depth]

        # Pure-state simulation: ideal probabilities p(x) = |<x|psi>|^2.
        psi = pure_sim.simulate(trunc_circuit)
        psi = psi.final_state_vector
        pure_probs = np.abs(psi)**2

        # Noisy execution: sample bitstrings, then estimate p_est(x) by
        # binning the 10,000 measured outcomes over all DIM basis states.
        meas_circuit = trunc_circuit + cirq.measure(q0, q1)
        sampled_inds = noisy_sim.sample(meas_circuit, repetitions=10_000).values[:, 0]
        sampled_probs = np.bincount(sampled_inds, minlength=DIM) / len(sampled_inds)

        # Save the results
        records += [{
            'circuit_i': circuit_i,
            'cycle_depth': cycle_depth,
            'circuit_depth': circuit_depth,
            'pure_probs': pure_probs,
            'sampled_probs': sampled_probs,
        }]
        print('.', end='', flush=True)
```
## What's the observable?
What is $O_U$? Let's define it to be the observable that gives the sum of all probabilities, i.e.
$$
O_U |x \rangle = p(x) |x \rangle
$$
for any bitstring $x$. We can use this to derive expressions for our quantities of interest.
$$
e_U = \langle \psi_U | O_U | \psi_U \rangle \\
= \sum_x a_x^* \langle x | O_U | x \rangle a_x \\
= \sum_x p(x) \langle x | O_U | x \rangle \\
= \sum_x p(x) p(x)
$$
$e_U$ is simply the sum of squared ideal probabilities. $u_U$ is a normalizing factor that only depends on the operator. Since this operator has the true probabilities in the definition, they show up here anyways.
$$
u_U = \mathrm{Tr}[O_U / D] \\
= 1/D \sum_x \langle x | O_U | x \rangle \\
= 1/D \sum_x p(x)
$$
For the measured values, we use the definition of an expectation value
$$
\langle f(x) \rangle_\rho = \sum_x p(x) f(x)
$$
It becomes notationally confusing because remember: our operator on basis states returns the ideal probability of that basis state $p(x)$. The probability of observing a measured basis state is estimated from samples and denoted $p_\mathrm{est}(x)$ here.
$$
m_U = \mathrm{Tr}[\rho_U O_U] \\
= \langle O_U \rangle_{\rho_U} = \sum_{x} p_\mathrm{est}(x) p(x)
$$
```
# Annotate each record with the three XEB quantities derived above.
for rec in records:
    p_ideal = rec['pure_probs']        # noiseless probabilities p(x)
    p_sampled = rec['sampled_probs']   # sampled estimates p_est(x)
    rec.update(
        e_u=np.sum(p_ideal * p_ideal),     # e_U = sum_x p(x)^2
        u_u=np.sum(p_ideal) / DIM,         # u_U = (1/D) sum_x p(x)
        m_u=np.sum(p_ideal * p_sampled),   # m_U = sum_x p_est(x) p(x)
    )
```
Remember:
$$
m_U - u_U = f (e_U - u_U)
$$
We estimate f by performing least squares
minimization of the quantity
$$
f (e_U - u_U) - (m_U - u_U)
$$
over different random circuits. The solution to the
least squares problem is given by
$$
f = (∑_U (m_U - u_U) * (e_U - u_U)) / (∑_U (e_U - u_U)^2)
$$
```
import pandas as pd
df = pd.DataFrame(records)
df['y'] = df['m_u'] - df['u_u']
df['x'] = df['e_u'] - df['u_u']
df['numerator'] = df['x'] * df['y']
df['denominator'] = df['x'] ** 2
df.head()
```
### Fit
We'll plot the linear relationship and least-squares fit while we transform the raw DataFrame into one containing fidelities.
```
%matplotlib inline
from matplotlib import pyplot as plt
# Color by cycle depth
import seaborn as sns
colors = sns.cubehelix_palette(n_colors=len(cycle_depths))
colors = {k: colors[i] for i, k in enumerate(cycle_depths)}
_lines = []
def per_cycle_depth(df):
    """Fit and plot the fidelity for one cycle-depth group.

    Intended for ``df.groupby('cycle_depth').apply(per_cycle_depth)``:
    ``df.name`` is the group key (the cycle depth). Plots the fitted line
    and scatter points into the current matplotlib axes, appends the line
    handle to the module-level ``_lines`` (for the legend), and returns a
    one-row Series holding the least-squares fidelity estimate.
    """
    # Closed-form least squares for y = f*x: f = sum(x*y) / sum(x^2).
    fid_lsq = df['numerator'].sum() / df['denominator'].sum()
    cycle_depth = df.name
    # Fitted line through the origin, colored consistently with the scatter.
    xx = np.linspace(0, df['x'].max())
    l, = plt.plot(xx, fid_lsq*xx, color=colors[cycle_depth])
    plt.scatter(df['x'], df['y'], color=colors[cycle_depth])
    global _lines
    _lines += [l]  # collect line handles for the legend
    return pd.Series({'fid_lsq': fid_lsq})
fids = df.groupby('cycle_depth').apply(per_cycle_depth).reset_index()
plt.xlabel(r'$e_U - u_U$', fontsize=18)
plt.ylabel(r'$m_U - u_U$', fontsize=18)
_lines = np.asarray(_lines)
plt.legend(_lines[[0,-1]], cycle_depths[[0,-1]], loc='best', title='Cycle depth')
plt.tight_layout()
```
### Fidelities
```
plt.plot(fids['cycle_depth'], fids['fid_lsq'], label='LSq')
xx = np.linspace(0, fids['cycle_depth'].max())
plt.plot(xx, (1-P_DEPOL)**(4*xx), label=r'$(1-\mathrm{depol})^{4d}$')
plt.ylabel('Circuit fidelity', fontsize=18)
plt.xlabel('Cycle Depth $d$', fontsize=18)
plt.legend(loc='best')
plt.tight_layout()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ElisaAma93/Master/blob/master/Booking_scraper_Selenium.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Scraping su Booking.com
Per cominciare installo e importo le librerie necessarie per il lavoro.
```
!pip install selenium
!apt-get update
!apt install chromium-chromedriver
!cp /usr/lib/chromium-browser/chromedriver /usr/bin
import sys
sys.path.insert(0,'/usr/lib/chromium-browser/chromedriver')
from selenium import webdriver
from tqdm import tqdm_notebook as tqdm
import pandas
import json
import pprint
```
Definisco un webdriver da Chrome e gli associo il link della pagina di Booking da cui voglio fare scraping.
```
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
wd.get("https://www.booking.com/searchresults.it.html?aid=304142&label=gen173nr-1FCAEoggI46AdIM1gEaHGIAQGYARS4ARfIAQzYAQHoAQH4AQuIAgGoAgO4ApzakPgFwAIB0gIkZGU3ZDYxMDMtZTQ1OC00Y2E2LTkxM2EtZTViNTJlYzk0NWU52AIG4AIB&sid=f7fc04c8a03b4db3566a73d422abcde1&tmpl=searchresults&ac_click_type=b&ac_position=0&checkin_month=8&checkin_monthday=31&checkin_year=2020&checkout_month=9&checkout_monthday=6&checkout_year=2020&class_interval=1&dest_id=910&dest_type=region&from_sf=1&group_adults=2&group_children=0&label_click=undef&nflt=class%3D3%3B&no_rooms=1&raw_dest_type=region&room1=A%2CA&sb_price_type=total&search_selected=1&shw_aparth=1&slp_r_match=0&src=index&srpvid=ff6f391d537100b9&ss=Toscana%2C%20Italia&ss_raw=tos&ssb=empty&top_ufis=1&rows=25&offset=")
```
Definisco la lista degli hotel di una pagina.
```
list_hotel = wd.find_elements_by_css_selector("div.sr_item")
print(len(list_hotel))
print(list_hotel[0].text)
```
Estraggo le informazioni sugli hotel da più pagine.
Identifico la lista dei nomi degli hotel in una pagina.
```
list_title = wd.find_elements_by_css_selector(".sr-hotel__name")
#print(len(list_h3))
#print(list_h3[0].text)
for title in list_title:
print(title.text)
```
Definisco le variabili che voglio scaricare.
```
import pprint
detail_hotel = []
for hotel in list_hotel:
title = hotel.find_elements_by_css_selector(".sr-hotel__name")[0].text
url = hotel.find_elements_by_css_selector(".hotel_name_link")[0].get_attribute("href")
city = hotel.find_elements_by_css_selector("a.bui-link")[0].text
room_description = hotel.find_elements_by_css_selector("div.room_link")[0].text
price = hotel.find_elements_by_css_selector("div.bui-price-display__value")[0].text
evaluation_description = ""
if (len(hotel.find_elements_by_css_selector("div.bui-review-score__title")) > 0):
evaluation_description = hotel.find_elements_by_css_selector("div.bui-review-score__title")[0].text
reviews = ""
if (len(hotel.find_elements_by_css_selector("div.bui-review-score__text")) > 0):
reviews = hotel.find_elements_by_css_selector("div.bui-review-score__text")[0].text
score_position = ""
if (len(hotel.find_elements_by_css_selector("span.review-score-widget")) > 0):
score_position = hotel.find_elements_by_css_selector("span.review-score-widget")[0].text
evaluation = hotel.get_attribute("data-score")
hotel_id = hotel.get_attribute("data-hotelid")
detail_hotel.append({'title': title,
'url' : url,
'price' : price,
'city' : city,
'evaluation_description' : evaluation_description,
'evaluation' : evaluation,
'reviews' : reviews,
'room_description' : room_description,
'score_position' : score_position,
'hotel_id' : hotel_id})
len(detail_hotel)
pprint.pprint(detail_hotel)
def parse_hotel(hotel):
    """Extract one Booking.com search-result card into a plain dict.

    Parameters
    ----------
    hotel : selenium WebElement
        One ``div.sr_item`` result card.

    Returns
    -------
    dict
        Keys: title, url, room_description, city, evaluation,
        evaluation_description, reviews, cancellazione_gratuita,
        score_position, price, hotel_id. Any field that cannot be
        scraped is returned as the empty string (best-effort parsing).
    """
    hotel_id = hotel.get_attribute("data-hotelid")
    # Bug fix: initialize *every* returned field before the try block.
    # Previously evaluation_description, reviews and cancellazione_gratuita
    # were only bound inside the try, so an early selector failure caused a
    # NameError at the return statement instead of the intended defaults.
    title = ""
    url = ""
    city = ""
    room_description = ""
    evaluation = ""
    evaluation_description = ""
    reviews = ""
    score_position = ""
    cancellazione_gratuita = ""
    price = ""
    try:
        title = hotel.find_elements_by_css_selector(".sr-hotel__name")[0].text
        url = hotel.find_elements_by_css_selector(".hotel_name_link")[0].get_attribute("href")
        city = hotel.find_elements_by_css_selector("a.bui-link")[0].text
        room_description = hotel.find_elements_by_css_selector("div.room_link")[0].text
        price = hotel.find_elements_by_css_selector("div.bui-price-display__value")[0].text
        evaluation = hotel.get_attribute("data-score")
        if len(hotel.find_elements_by_css_selector("div.bui-review-score__title")) > 0:
            evaluation_description = hotel.find_elements_by_css_selector("div.bui-review-score__title")[0].text
        if len(hotel.find_elements_by_css_selector("div.bui-review-score__text")) > 0:
            reviews = hotel.find_elements_by_css_selector("div.bui-review-score__text")[0].text
        if len(hotel.find_elements_by_css_selector("span.review-score-widget")) > 0:
            score_position = hotel.find_elements_by_css_selector("span.review-score-widget")[0].text
        if len(hotel.find_elements_by_css_selector("sup.sr_room_reinforcement.sr_room_policy_single_line")) > 0:
            cancellazione_gratuita = hotel.find_elements_by_css_selector("sup.sr_room_reinforcement.sr_room_policy_single_line")[0].text
    except Exception:
        # Best-effort scraping: a missing/changed element leaves defaults.
        # (Narrowed from a bare `except:` so KeyboardInterrupt still works.)
        pass
    return {'title': title,
            'url': url,
            'room_description': room_description,
            'city': city,
            'evaluation': evaluation,
            'evaluation_description': evaluation_description,
            'reviews': reviews,
            'cancellazione_gratuita': cancellazione_gratuita,
            'score_position': score_position,
            'price': price,
            'hotel_id': hotel_id}
```
Scarico le informazioni che mi interessano da più pagine e le salvo in un file csv.
```
detail_hotel = []
for num in tqdm(range(0, 26, 25)):
wd.get(f"https://www.booking.com/searchresults.it.html?aid=304142&label=gen173nr-1FCAEoggI46AdIM1gEaHGIAQGYARS4ARfIAQzYAQHoAQH4AQuIAgGoAgO4ApzakPgFwAIB0gIkZGU3ZDYxMDMtZTQ1OC00Y2E2LTkxM2EtZTViNTJlYzk0NWU52AIG4AIB&sid=f7fc04c8a03b4db3566a73d422abcde1&tmpl=searchresults&ac_click_type=b&ac_position=0&checkin_month=7&checkin_monthday=27&checkin_year=2020&checkout_month=8&checkout_monthday=16&checkout_year=2020&class_interval=1&dest_id=910&dest_type=region&from_sf=1&group_adults=2&group_children=0&label_click=undef&nflt=class%3D3%3B&no_rooms=1&raw_dest_type=region&room1=A%2CA&sb_price_type=total&search_selected=1&shw_aparth=1&slp_r_match=0&src=index&srpvid=ff6f391d537100b9&ss=Toscana%2C%20Italia&ss_raw=tos&ssb=empty&top_ufis=1&rows=25&offset={num}")
wd.save_screenshot(f'screenshot_{num}.png')
list_hotel = wd.find_elements_by_css_selector("div.sr_item")
for hotel in list_hotel:
detail_hotel.append(parse_hotel(hotel))
print(len(detail_hotel))
detail_hotel
import time
detail_hotel = []
for num in tqdm(range(0,251, 25)):
time.sleep(1)
wd.get(f"https://www.booking.com/searchresults.it.html?aid=304142&label=gen173nr-1FCAEoggI46AdIM1gEaHGIAQGYARS4ARfIAQzYAQHoAQH4AQuIAgGoAgO4ApzakPgFwAIB0gIkZGU3ZDYxMDMtZTQ1OC00Y2E2LTkxM2EtZTViNTJlYzk0NWU52AIG4AIB&sid=f7fc04c8a03b4db3566a73d422abcde1&tmpl=searchresults&ac_click_type=b&ac_position=0&checkin_month=8&checkin_monthday=31&checkin_year=2020&checkout_month=9&checkout_monthday=6&checkout_year=2020&class_interval=1&dest_id=910&dest_type=region&from_sf=1&group_adults=2&group_children=0&label_click=undef&nflt=class%3D3%3B&no_rooms=1&raw_dest_type=region&room1=A%2CA&sb_price_type=total&search_selected=1&shw_aparth=1&slp_r_match=0&src=index&srpvid=ff6f391d537100b9&ss=Toscana%2C%20Italia&ss_raw=tos&ssb=empty&top_ufis=1&rows=25&offset={num}")
#wd.save_screenshot(f'screenshot_{num}.png')
list_hotel = wd.find_elements_by_css_selector("div.sr_item")
for hotel in list_hotel:
detail_hotel.append(parse_hotel(hotel))
print(len(detail_hotel))
import pandas as pd
ds_detail_hotel = pd.DataFrame(detail_hotel)
ds_detail_hotel.set_index("hotel_id")
ds_detail_hotel.head()
ds_detail_hotel.info()
ds_detail_hotel.to_csv('ds_hotel_agosto1.csv')
# open csv file
import pandas as pd
ds_detail_hotel = pd.read_csv("ds_hotel_agosto1.csv", index_col="hotel_id")
ds_detail_hotel.head()
ds_detail_hotel.info()
```
Estraggo le pagine di dettaglio.
Per cominciare scarico gli indirizzi degli hotel.
```
# open csv file
import pandas as pd
ds_detail_hotel = pd.read_csv("ds_hotel_agosto1.csv", index_col="hotel_id")
ds_detail_hotel.head()
ds_detail_hotel.info()
ds_detail_hotel["url"]
list_title = wd.find_elements_by_css_selector("p.address")
for hotel in list_title:
if (len(hotel.find_elements_by_css_selector(".hp_address_subtitle")) > 0):
address = wd.find_elements_by_css_selector(".hp_address_subtitle")[0].text
print(address)
else:
print("errore")
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
details = []
for hotel_id, hotel in tqdm(ds_detail_hotel.head().iterrows()):
time.sleep(3)
link = hotel["url"]
print(link)
wd.set_window_size(1920, 1080) #fisso le dimensioni di risoluzione fissa così che il browser nascosto abbia la stessa riduzione del mio locale
wd.get(link)
#try:
# WebDriverWait(wd, 10).until(
# EC.element_to_be_clickable((By.ID, "button.txp-sidebar-cta"))
# )
#except:
# continue
#if (len(wd.find_elements_by_css_selector(".hp_address_subtitle")) > 0):
address = wd.find_elements_by_css_selector(".hp_address_subtitle")[0].text
wd.save_screenshot(f'screenshot_{hotel_id}.png')
details.append({'hotel_id': hotel_id,
'address': address})
len(details)
pprint.pprint(details)
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
details = []
for hotel_id, hotel in tqdm(ds_detail_hotel.iterrows()):
time.sleep(2)
link = hotel["url"]
address = 0
#print(link)
try:
wd.set_window_size(1920, 1080)
wd.get(link)
#try:
# WebDriverWait(wd, 10).until(
#EC.element_to_be_clickable((By.ID, "button.txp-sidebar-cta"))
# )
#except:
# continue
#wd.save_screenshot(f'screenshot_{project_id}.png')
if (len(wd.find_elements_by_css_selector(".hp_address_subtitle")) > 0):
address = wd.find_elements_by_css_selector(".hp_address_subtitle")[0].text
except Exception as e:
#print(e)
pass
details.append({'hotel_id': hotel_id,
'address': address})
print(len(details))
#pprint.pprint(details)
len(details)
import pandas as pd
ds_details = pd.DataFrame(details)
ds_details.set_index("hotel_id")
ds_details.head()
ds_details.info()
ds_details.to_csv("ds_hotel_details_address1.csv")
```
Estraggo informazioni di dettaglio sulla salute e igiene.
```
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
details = []
for hotel_id, hotel in tqdm(ds_detail_hotel.iterrows()):
time.sleep(2)
link = hotel["url"]
salute_igiene = 0
salute = 0
#print(link)
try:
wd.set_window_size(1920, 1080)
wd.get(link)
#try:
# WebDriverWait(wd, 10).until(
#EC.element_to_be_clickable((By.ID, "button.txp-sidebar-cta"))
# )
#except:
# continue
#wd.save_screenshot(f'screenshot_{project_id}.png')
if (len(wd.find_elements_by_css_selector("div.safety-standards--body")) > 0):
salute = wd.find_elements_by_css_selector("div.safety-standards--body")[0].text
salute_igiene = salute.replace('\n', '-')
except Exception as e:
#print(e)
pass
details.append({'hotel_id': hotel_id,
'salute_igiene': salute_igiene})
print(len(details))
pprint.pprint(details)
import pandas as pd
ds_details = pd.DataFrame(details)
ds_details.set_index("hotel_id")
ds_details.head()
ds_details.info()
ds_details.to_csv("ds_hotel_details_salute_igiene.csv")
```
Estraggo le immagini.
```
import time
import requests
detail_hotel = []
list_images = []
for num in tqdm(range(0, 26, 25)):
time.sleep(1)
wd.get(f"https://www.booking.com/searchresults.it.html?aid=304142&label=gen173nr-1FCAEoggI46AdIM1gEaHGIAQGYARS4ARfIAQzYAQHoAQH4AQuIAgGoAgO4ApzakPgFwAIB0gIkZGU3ZDYxMDMtZTQ1OC00Y2E2LTkxM2EtZTViNTJlYzk0NWU52AIG4AIB&sid=f7fc04c8a03b4db3566a73d422abcde1&tmpl=searchresults&ac_click_type=b&ac_position=0&checkin_month=8&checkin_monthday=3&checkin_year=2020&checkout_month=8&checkout_monthday=9&checkout_year=2020&class_interval=1&dest_id=910&dest_type=region&from_sf=1&group_adults=2&group_children=0&label_click=undef&nflt=class%3D3%3B&no_rooms=1&raw_dest_type=region&room1=A%2CA&sb_price_type=total&search_selected=1&shw_aparth=1&slp_r_match=0&src=index&srpvid=ff6f391d537100b9&ss=Toscana%2C%20Italia&ss_raw=tos&ssb=empty&top_ufis=1&rows=25&offset={num}")
list_hotel = wd.find_elements_by_css_selector("div.sr_item")
for hotel in list_hotel:
try:
hotel_id = hotel.get_attribute("data-hotelid")
src = hotel.find_element_by_css_selector("img.hotel_image").get_attribute("src")
list_images.append({"hotel_id": hotel_id,
"img_file": "img_" + str(hotel_id) + ".jpg"})
img_file = requests.get(src, stream=True)
if img_file.status_code == 200:
with open("/content/images/img_" + str(hotel_id) + ".jpg", 'wb') as f: #creo una new folder chiamata images
f.write(img_file.content)
except Exception as e:
print(e)
print(len(list_images))
%pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img=mpimg.imread('/content/images/img_19443.jpg')
imgplot = plt.imshow(img)
plt.show()
import pandas as pd
ds_images = pd.DataFrame(list_images)
ds_images.set_index("hotel_id")
ds_images.head()
ds_images.info()
ds_images.to_csv("ds_images_agosto1.csv")
!zip -r "/content/images.zip" "/content/images/"
```
CLARIFAI
```
!pip install clarifai
from clarifai.rest import ClarifaiApp
# setup your key!!!
clarifai_key = "810cedec3053464a9a7161435f69c318"
app = ClarifaiApp(api_key=clarifai_key)
# and use the general model
model = app.public_models.general_model
response = model.predict_by_filename("/content/images/img_18794.jpg")
pprint.pprint(response)
if(response['status']['description'] == "Ok"):
for concept in response["outputs"][0]["data"]["concepts"]:
name = concept["name"]
value = concept["value"]
print(name + " " + str(value))
ds_images = pd.read_csv("ds_images_luglio.csv", index_col="project_id")
img_details = []
for project_id, image in tqdm(ds_images.iterrows(), total=ds_images.shape[0]):
try:
response = model.predict_by_filename("/content/images/" + image['img_file'])
if(response['status']['description'] == "Ok"):
for concept in response["outputs"][0]["data"]["concepts"]:
name = concept["name"]
value = concept["value"]
img_details.append({
"project_id": project_id,
"image": image['img_file'],
"name": name,
"value": value
})
except Exception as e:
print(e)
print(len(img_details))
import pandas as pd
ds_img_details = pd.DataFrame(img_details)
ds_img_details.set_index("image")
ds_img_details.head()
ds_img_details.info()
```
GEOCODING
```
import requests
import json
address = "Via Marsiglia 26, Pesaro"
key = "QDtJijTy03kIQIGxttVh92fCYZS5A5gH"
geocode_url = f"http://www.mapquestapi.com/geocoding/v1/address?key={key}&location={address}"
response = requests.get(geocode_url)
import json
geo = json.loads(response.text)
pprint.pprint(response.text)
print(geo['results'][0]['locations'][0]['latLng']['lat'])
print(geo['results'][0]['locations'][0]['latLng']['lng'])
ds_hotel = pd.read_csv("ds_hotel_details_address1.csv", index_col="hotel_id")
ds_hotel.info()
ds_hotel = pd.read_csv("ds_hotel_details_address1.csv", index_col="hotel_id")
import time

# Geocode every hotel address via the MapQuest API, collecting lat/lng pairs.
hotel_location = []
for hotel_id, hotel in tqdm(ds_hotel.iterrows(), total=ds_hotel.shape[0]):
    geocod = hotel['address']
    # Bug fix: addresses read back from CSV come in as NaN (not None) when
    # missing, so the old `geocod is None` check never fired and the row was
    # geocoded as the literal string "nan". pd.isna catches both; the old
    # print also crashed by concatenating a non-string address.
    if pd.isna(geocod) or not geocod:
        print(f"{hotel_id} - missing address, skipped")
        continue
    # NOTE(review): API key is hard-coded; move it to an env var / secret store.
    key = "r3xgNwBAviBMdOj4Op90kvyy2iO1CDGz"
    try:
        geocode_url = f"http://www.mapquestapi.com/geocoding/v1/address?key={key}&location={geocod}"
        response = requests.get(geocode_url)
        if response.status_code == 200:
            # First candidate location of the first result.
            latlng = json.loads(response.text)['results'][0]['locations'][0]['latLng']
            hotel_location.append({
                "hotel_id": hotel_id,
                "lat": latlng['lat'],
                "lng": latlng['lng'],
            })
    except Exception as e:
        print(e)
print(len(hotel_location))
import pandas as pd
ds_hotel_location = pd.DataFrame(hotel_location)
ds_hotel_location.set_index("hotel_id")
ds_hotel_location.head(20)
ds_hotel_location.info()
ds_hotel_location.to_csv('ds_hotel_location1.csv')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jahelsantiago/Pytorch-tutorial/blob/main/troch_init.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import torch
from __future__ import print_function
```
Los tensores son matrices que pueden correr en la GPU
# BASICOS
## OPERACIONES DE CREACION
```
#create a tensor of random numbers
x = torch.rand(size = (5,5))
print(x)
#create a tensor of ones
x = torch.ones(size = (3,3))
print(x)
#create a tensor of zeros
x = torch.zeros(size = (3,3))
print(x)
#create a tensor of the values
x = torch.tensor([1,1.2,0])
print(x)
```
```
x = torch.ones((3,4))
print(x)
x = torch.rand_like(x) # con el _like podemos copiar un tensor con el mismo tamaño
print(x)
# obtener el tamaño de un tensor
print(x.size())
```
## OPERACIONES MATEMATICAS
```
a = torch.empty((3,3)) #empty tensor
b = torch.zeros((3,3))
a = b+1 #add 1
a *= 3 # multiply
print(a)
```
##resize
```
x = torch.tensor(list(range(1,10)))
print(x)
print(x.view((3,3)))
print(x.view((-1,3)))
x = torch.rand(1)
print(x)
print(x.item())
```
## Numpy
```
import numpy as np
a = np.ones((3,3))
x = torch.from_numpy(a)
x
b = torch.randint(1,100,(3,3))
b.numpy()
```
# Gradients
```
import torch
x = torch.ones([3,3], requires_grad= True)
print(x)
y = x**2 + x -1
print(y)
z = y.mean()
print(z)
x = torch.rand((3,3))
print(x.requires_grad)
x.requires_grad_(True)
print(x.requires_grad)
```
#Neural networks contruction
```
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):  # all networks must inherit from nn.Module
    """Small LeNet-style CNN mapping (batch, 1, 32, 32) inputs to 10 logits."""

    def __init__(self):
        super(Net, self).__init__()
        # Two 3x3 conv layers: 1 -> 6 -> 16 feature maps.
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(6, 16, 3)  # renamed from `Conv2` for consistent lowercase naming
        # After two conv+pool stages a 32x32 input shrinks to 16 maps of 6x6.
        self.fc1 = nn.Linear(6 * 6 * 16, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass; returns a (batch, 10) tensor of class scores."""
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # torch.tanh replaces the deprecated F.tanh (removed in newer torch).
        x = F.max_pool2d(torch.tanh(self.conv2(x)), 2)
        # Flatten everything except the batch dimension before the FC stack.
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        """Return the number of features per sample when flattening `x`."""
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
net = Net()
print(net)
parameter = list(net.parameters())[0]
print(parameter)
print(parameter.size())
inp = torch.randn(1,1,32,32)
out = net(inp)
print(out)
```
## Loss function
```
inp = torch.rand(1,1,32,32)  # random input: a batch of one 1-channel 32x32 image
out = net(inp)  # process through the forward pass
print("models output")
print(out)
print("---------------------------------------")
y = torch.randn(10)  # random vector standing in for a 10-class target
y = y.view(1,-1)  # reshape to (1, 10) to match the network output
print("labels")
print(y)
print("---------------------------------------")
print("loss")
criteria = nn.MSELoss()  # mean-squared-error criterion
loss = criteria(out, y)
print(loss)
net.zero_grad()  # clear any stale gradients before backprop
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)  # None: no backward pass has run yet
loss.backward()  # populate .grad on every parameter
print("conv1.bias.grad after backward")
print(net.conv1.bias.grad)
```
## Update parametres
Stochastic gradient descent
```
print("conv1.bias before update")
print(net.conv1.bias)
learning_rate = 0.01
# Manual SGD step: w <- w - lr * grad, applied to every parameter tensor
# via .data so autograd does not track the update itself.
for f in net.parameters():
    f.data = f.data - f.grad.data * learning_rate
print("conv1.bias after update")
print(net.conv1.bias)
```
# optimizers
```
import torch.optim as optim
optimizer = optim.Adam(net.parameters(), lr = 0.001)
for i in range(5):
optimizer.zero_grad() #convertimos el gradiente en 0
output = net(inp)
loss = criteria(output, y)
loss.backward()
optimizer.step()
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
measurement = Base.classes.measurement
station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
conn = engine.connect()
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
latest = session.query(measurement.date).order_by(measurement.date.desc()).first()
print(f"Latest: {latest[0]}")
# Perform a query to retrieve the data and precipitation scores
last_twelve = engine.execute('SELECT date, prcp AS precipitation FROM measurement WHERE date >= "2016-08-23"').fetchall()
# Preview the Data
df = pd.DataFrame(last_twelve, columns=['date', 'precipitation'])
# Save the query results as a Pandas DataFrame and set the index to the date column
date_df = df.set_index(['date'])
# Sort the dataframe by date
last_twelve_df = date_df.sort_values(by=['date'], ascending=True)
#drop 'none' values
clean_df = last_twelve_df.dropna(how='any')
clean_df.head()
# Use Pandas Plotting with Matplotlib to plot the data
ax= clean_df.plot.bar(figsize=(16,8), width=20)
ax.set_xlabel("date", fontsize=14)
ax.set_ylabel("inches", fontsize=14)
ax.set_xticklabels([])
plt.legend(['precipitation'], fontsize=20)
ax.get_legend().set_bbox_to_anchor((0.6, 1))
plt.tight_layout()
plt.show()
# Use Pandas to calcualte the summary statistics for the precipitation data
clean_df.describe()
# Design a query to show how many stations are available in this dataset?
station_count = session.query(func.count(station.id)).first()
station_count
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
active_stations = session.query(measurement.station,func.count(measurement.station)).\
group_by(measurement.station).\
order_by(func.count(measurement.station).desc()).all()
active_stations
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
first_station = session.query(measurement.station).group_by(measurement.station).\
order_by(func.count(measurement.station).desc()).first()
first_station
session.query(measurement.station, func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).\
filter(measurement.station=='USC00519281').all()
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
tobs_data = session.query(measurement.date,measurement.tobs).\
filter(measurement.station == 'USC00519281').\
filter(measurement.date > '2016-08-23').all()
tobs_data[:10]
tobs_df = pd.DataFrame(tobs_data, columns=['date', 'tobs'])
plt.figure(figsize=(12,8))
plt.hist(tobs_df["tobs"], bins=12)
plt.ylabel("frequency")
plt.xlabel("temperature")
plt.legend(["tobs"])
plt.tight_layout()
plt.show()
```
| github_jupyter |
## Documentation
### Documentation is hard
* Good documentation is hard, and very expensive.
* Bad documentation is detrimental.
* Good documentation quickly becomes bad if not kept up-to-date with code changes.
* Professional companies pay large teams of documentation writers.
### Prefer readable code with tests and vignettes
If you don't have the capacity to maintain great documentation,
focus on:
* Readable code
* Automated tests
* Small code samples demonstrating how to use the api
### Comment-based Documentation tools
Documentation tools can produce extensive documentation about your code by pulling out comments near the beginning of functions,
together with the signature, into a web page.
The most popular is [Doxygen](http://www.doxygen.nl/).
[Have a look at an example of some Doxygen output](
http://www.bempp.org/cppref/2.0/group__abstract__boundary__operators.html).
[Sphinx](http://sphinx-doc.org/) is nice for Python, and works with C++ as well.
Here's some [Sphinx-generated output](http://www.bempp.org/pythonref/2.0/bempp_visualization.html)
and the [corresponding source code](https://bitbucket.org/bemppsolutions/bempp/src/8f10af0b0b4a94bc36c6236eb9ddb2a34cde1756/python/bempp/visualization.py?at=v2.0.2&fileviewer=file-view-default).
[Breathe](https://breathe.readthedocs.io/en/latest/) can be used to make Sphinx and Doxygen work together.
[Roxygen](https://cran.r-project.org/web/packages/roxygen2/vignettes/roxygen2.html) is good for R.
## Example of using Sphinx
### Write some docstrings
We're going to document our "greeter" example using docstrings with Sphinx.
There are various conventions for how to write docstrings, but the native sphinx one doesn't look nice when used with
the built in `help` system.
In writing Greeter, we used the docstring conventions from NumPy.
So we use the [numpydoc](https://numpydoc.readthedocs.io/en/latest/) sphinx extension to
support these.
```python
"""
Generate a greeting string for a person.
Parameters
----------
personal: str
A given name, such as Will or Jean-Luc
family: str
A family name, such as Riker or Picard
title: str
An optional title, such as Captain or Reverend
polite: bool
True for a formal greeting, False for informal.
Returns
-------
string
An appropriate greeting
"""
```
### Set up sphinx
Invoke the [sphinx-quickstart](https://www.sphinx-doc.org/en/latest/usage/quickstart.html) command to build Sphinx's
configuration file automatically based on questions
at the command line:
``` bash
sphinx-quickstart
```
Which responds:
```
Welcome to the Sphinx 1.8.0 quickstart utility.
Please enter values for the following settings (just press Enter to
accept a default value, if one is given in brackets).
Enter the root path for documentation.
> Root path for the documentation [.]:
```
and then look at and adapt the generated config, a file called
conf.py in the root of the project. This contains the project's Sphinx configuration, as Python variables:
``` python
#Add any Sphinx extension module names here, as strings. They can be
#extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', # Support automatic documentation
'sphinx.ext.coverage', # Automatically check if functions are documented
'sphinx.ext.mathjax', # Allow support for algebra
'sphinx.ext.viewcode', # Include the source code in documentation
'numpydoc' # Support NumPy style docstrings
]
```
To proceed with the example, we'll copy a finished conf.py into our folder, though normally you'll always use `sphinx-quickstart`
```
%%writefile greetings/conf.py
# Sphinx configuration for the Greetings example (normally generated by sphinx-quickstart).
import sys
import os
# Extensions add behaviour to the core Sphinx build.
extensions = [
'sphinx.ext.autodoc', # Support automatic documentation
'sphinx.ext.coverage', # Automatically check if functions are documented
'sphinx.ext.mathjax', # Allow support for algebra
'sphinx.ext.viewcode', # Include the source code in documentation
'numpydoc' # Support NumPy style docstrings
]
# Source-file conventions: templates directory, file suffix, and root document.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# Project metadata shown in the generated pages.
project = u'Greetings'
copyright = u'2014, James Hetherington'
version = '0.1'
release = '0.1'
exclude_patterns = ['_build']
pygments_style = 'sphinx'
htmlhelp_basename = 'Greetingsdoc'
# Output targets: LaTeX/PDF, man pages and Texinfo builds.
latex_elements = {
}
latex_documents = [
('index', 'Greetings.tex', u'Greetings Documentation',
u'James Hetherington', 'manual'),
]
man_pages = [
('index', 'greetings', u'Greetings Documentation',
[u'James Hetherington'], 1)
]
texinfo_documents = [
('index', 'Greetings', u'Greetings Documentation',
u'James Hetherington', 'Greetings', 'One line description of project.',
'Miscellaneous'),
]
```
### Define the root documentation page
Sphinx uses [RestructuredText](http://docutils.sourceforge.net/rst.html) another wiki markup format similar to Markdown.
You define an "index.rst" file to contain any preamble text you want. The rest is autogenerated by `sphinx-quickstart`
```
%%writefile greetings/index.rst
Welcome to Greetings's documentation!
=====================================
Simple "Hello, James" module developed to teach research software engineering.
.. autofunction:: greetings.greeter.greet
```
### Run sphinx
We can run Sphinx using:
```
%%bash
cd greetings/
sphinx-build . doc
```
### Sphinx output
Sphinx's output is [html](http://github-pages.ucl.ac.uk/rsd-engineeringcourse/ch04packaging/greetings/doc/index.html). We just created a simple single function's documentation, but Sphinx will create
multiple nested pages of documentation automatically for many functions.
## Doctest - testing your documentation is up to date
`doctest` is a module included in the standard library. It runs all the code within the docstrings and checks whether the output is what it's claimed on the documentation.
Let's add an example to our greeting function and check it with `doctest`. We are leaving the output with a small typo to see what's the type of output we get from `doctest`.
```
%%writefile greetings/greetings/greeter.py
def greet(personal, family, title="", polite=False):
""" Generate a greeting string for a person.
Parameters
----------
personal: str
A given name, such as Will or Jean-Luc
family: str
A family name, such as Riker or Picard
title: str
An optional title, such as Captain or Reverend
polite: bool
True for a formal greeting, False for informal.
Returns
-------
string
An appropriate greeting
Examples
--------
>>> from greetings.greeter import greet
>>> greet("Terry", "Jones")
'Hey, Terry Jones.
"""
# NOTE: the doctest output above deliberately omits the closing quote — the
# surrounding text runs doctest on this file to demonstrate a failure report.
greeting= "How do you do, " if polite else "Hey, "
if title:
greeting += f"{title} "
greeting += f"{personal} {family}."
return greeting
%%bash --no-raise-error
python -m doctest greetings/greetings/greeter.py
```
which clearly identifies a tiny error in our example.
pytest can run the doctest too if you call it as:
`pytest --doctest-modules`
| github_jupyter |
```
# Start PyRosetta: keep input hydrogens as-is, silence all tracers, ignore
# unrecognised residues, and do not pull residue definitions from the PDB
# chemical-components dictionary.
import pyrosetta
pyrosetta.init(extra_options='-no_optH false -mute all -ignore_unrecognized_res true -load_PDB_components false')
# Empty pose, to be filled from the structure file below.
pose = pyrosetta.rosetta.core.pose.Pose()
# Register the non-standard residue described by E4S.params so the PDB parses.
params_paths = pyrosetta.rosetta.utility.vector1_string()
params_paths.extend(['E4S.params'])
resiset = pyrosetta.generate_nonstandard_residue_set(pose, params_paths)
pyrosetta.rosetta.core.import_pose.pose_from_file(pose, 'pdb6bq1.relax.ent') # pose_from_pdbstring or pose_from_file
data = [{'chains': ('A','E'),
'name': 'Phosphatidylinositol 4-kinase alpha', 'uniprot': 'P42356',
'sequence': '''MAAAPARGGGGGGGGGGGCSGSGSSASRGFYFNTVLSLARSLAVQRPASLEKVQKLLCMC
PVDFHGIFQLDERRRDAVIALGIFLIESDLQHKDCVVPYLLRLLKGLPKVYWVEESTARK
GRGALPVAESFSFCLVTLLSDVAYRDPSLRDEILEVLLQVLHVLLGMCQALEIQDKEYLC
KYAIPCLIGISRAFGRYSNMEESLLSKLFPKIPPHSLRVLEELEGVRRRSFNDFRSILPS
NLLTVCQEGTLKRKTSSVSSISQVSPERGMPPPSSPGGSAFHYFEASCLPDGTALEPEYY
FSTISSSFSVSPLFNGVTYKEFNIPLEMLRELLNLVKKIVEEAVLKSLDAIVASVMEANP
SADLYYTSFSDPLYLTMFKMLRDTLYYMKDLPTSFVKEIHDFVLEQFNTSQGELQKILHD
ADRIHNELSPLKLRCQANAACVDLMVWAVKDEQGAENLCIKLSEKLQSKTSSKVIIAHLP
LLICCLQGLGRLCERFPVVVHSVTPSLRDFLVIPSPVLVKLYKYHSQYHTVAGNDIKISV
TNEHSESTLNVMSGKKSQPSMYEQLRDIAIDNICRCLKAGLTVDPVIVEAFLASLSNRLY
ISQESDKDAHLIPDHTIRALGHIAVALRDTPKVMEPILQILQQKFCQPPSPLDVLIIDQL
GCLVITGNQYIYQEVWNLFQQISVKASSVVYSATKDYKDHGYRHCSLAVINALANIAANI
QDEHLVDELLMNLLELFVQLGLEGKRASERASEKGPALKASSSAGNLGVLIPVIAVLTRR
LPPIKEAKPRLQKLFRDFWLYSVLMGFAVEGSGLWPEEWYEGVCEIATKSPLLTFPSKEP
LRSVLQYNSAMKNDTVTPAELSELRSTIINLLDPPPEVSALINKLDFAMSTYLLSVYRLE
YMRVLRSTDPDRFQVMFCYFEDKAIQKDKSGMMQCVIAVADKVFDAFLNMMADKAKTKEN
EEELERHAQFLLVNFNHIHKRIRRVADKYLSGLVDKFPHLLWSGTVLKTMLDILQTLSLS
LSADIHKDQPYYDIPDAPYRITVPDTYEARESIVKDFAARCGMILQEAMKWAPTVTKSHL
QEYLNKHQNWVSGLSQHTGLAMATESILHFAGYNKQNTTLGATQLSERPACVKKDYSNFM
ASLNLRNRYAGEVYGMIRFSGTTGQMSDLNKMMVQDLHSALDRSHPQHYTQAMFKLTAML
ISSKDCDPQLLHHLCWGPLRMFNEHGMETALACWEWLLAGKDGVEVPFMREMAGAWHMTV
EQKFGLFSAEIKEADPLAASEASQPKPCPPEVTPHYIWIDFLVQRFEIAKYCSSDQVEIF
SSLLQRSMSLNIGGAKGSMNRHVAAIGPRFKLLTLGLSLLHADVVPNATIRNVLREKIYS
TAFDYFSCPPKFPTQGEKRLREDISIMIKFWTAMFSDKKYLTASQLVPPDNQDTRSNLDI
TVGSRQQATQGWINTYPLSSGMSTISKKSGMSKKTNRGSQLHKYYMKRRTLLLSLLATEI
ERLITWYNPLSAPELELDQAGENSVANWRSKYISLSEKQWKDNVNLAWSISPYLAVQLPA
RFKNTEAIGNEVTRLVRLDPGAVSDVPEAIKFLVTWHTIDADAPELSHVLCWAPTDPPTG
LSYFSSMYPPHPLTAQYGVKVLRSFPPDAILFYIPQIVQALRYDKMGYVREYILWAASKS
QLLAHQFIWNMKTNIYLDEEGHQKDPDIGDLLDQLVEEITGSLSGPAKDFYQREFDFFNK
ITNVSAIIKPYPKGDERKKACLSALSEVKVQPGCYLPSNPEAIVLDIDYKSGTPMQSAAK
APYLAKFKVKRCGVSELEKEGLRCRSDSEDECSTQEADGQKISWQAAIFKVGDDCRQDML
ALQIIDLFKNIFQLVGLDLFVFPYRVVATAPGCGVIECIPDCTSRDQLGRQTDFGMYDYF
TRQYGDESTLAFQQARYNFIRSMAAYSLLLFLLQIKDRHNGNIMLDKKGHIIHIDFGFMF
ESSPGGNLGWEPDIKLTDEMVMIMGGKMEATPFKWFMEMCVRGYLAVRPYMDAVVSLVTL
MLDTGLPCFRGQTIKLLKHRFSPNMTEREAANFIMKVIQSCFLSNRSRTYDMIQYYQNDI
PY'''.replace('\n','')},
{'chains': ('B','F'), 'name': 'Tetratricopeptide repeat protein 7B', 'uniprot': 'Q86TV6',
'sequence': '''MATKKAGSRLETEIERCRSECQWERIPELVKQLSAKLIANDDMAELLLGESKLEQYLKEH
PLRQGASPRGPKPQLTEVRKHLTAALDRGNLKSEFLQESNLIMAKLNYVEGDYKEALNIY
ARVGLDDLPLTAVPPYRLRVIAEAYATKGLCLEKLPISSSTSNLHVDREQDVITCYEKAG
DIALLYLQEIERVILSNIQNRSPKPGPAPHDQELGFFLETGLQRAHVLYFKNGNLTRGVG
RFRELLRAVETRTTQNLRMTIARQLAEILLRGMCEQSYWNPLEDPPCQSPLDDPLRKGAN
TKTYTLTRRARVYSGENIFCPQENTEEALLLLLISESMANRDAVLSRIPEHKSDRLISLQ
SASVVYDLLTIALGRRGQYEMLSECLERAMKFAFEEFHLWYQFALSLMAAGKSARAVKVL
KECIRLKPDDATIPLLAAKLCMGSLHWLEEAEKFAKTVVDVGEKTSEFKAKGYLALGLTY
SLQATDASLRGMQEVLQRKALLAFQRAHSLSPTDHQAAFYLALQLAISRQIPEALGYVRQ
ALQLQGDDANSLHLLALLLSAQKHYHDALNIIDMALSEYPENFILLFSKVKLQSLCRGPD
EALLTCKHMLQIWKSCYNLTNPSDSGRGSSLLDRTIADRRQLNTITLPDFSDPETGSVHA
TSVAASRVEQALSEVASSLQSSAPKQGPLHPWMTLAQIWLHAAEVYIGIGKPAEATACTQ
EAANLFPMSHNVLYMRGQIAELRGSMDEARRWYEEALAISPTHVKSMQRLALILHQLGRY
SLAEKILRDAVQVNSTAHEVWNGLGEVLQAQGNDAAATECFLTALELEASSPAVPFTIIP
RVL'''.replace('\n','')},
{'chains': ('C','G'), 'name': 'Hyccin', 'uniprot': 'Q9BYI3',
'sequence': '''MFTSEKGVVEEWLSEFKTLPETSLPNYATNLKDKSSLVSSLYKVIQEPQSELLEPVCHQL
FEFYRSGEEQLLQFTLQFLPELIWCYLAVSASRNVHSSGCIEALLLGVYNLEIVDKQGHT
KVLSFTIPSLSKPSVYHEPSSIGSMALTESALSQHGLSKVVYSGPHPQREMLTAQNRFEV
LTFLLLCYNAALTYMPSVSLQSLCQICSRICVCGYPRQHVRKYKGISSRIPVSSGFMVQM
LTGIYFAFYNGEWDLAQKALDDIIYRAQLELYPEPLLVANAIKASLPHGPMKSNKEGTRC
IQVEITPTSSRISRNAVTSMSIRGHRWKRHGNTELTGQEELMEISEVDEGFYSRAASSTS
QSGLSNSSHNCSNKPSIGKNHRRSGGSKTGGKEKETTGESCKDHFARKQTQRAQSENLEL
LSLKRLTLTTSQSLPKPSSHGLAKTAATVFSKSFEQVSGVTVPHNPSSAVGCGAGTDANR
FSACSLQEEKLIYVSERTELPMKHQSGQQRPPSISITLSTD'''.replace('\n','')}
]
from __future__ import annotations
from dataclasses import dataclass
from typing import List
import pyrosetta
@dataclass
class GapData:
    """One stretch of residues present in the reference sequence but missing
    from the PDB-derived pose (a "gap"), plus the machinery to graft the
    missing residues back in and remodel the junction.
    """
    chain: str          # PDB chain ID the gap belongs to
    start: int          # PDB index of first missing residue
    previous: int       # PDB index of the preceding present residue
    pose_i: int         # pose index of residue after the cutpoint (will become first was-missing)
    end: int            # PDB index of last missing residue
    sequence: str = ''  # one-letter sequence of the missing span; set by fill_sequence

    def fill_sequence(self, data: List[dict]) -> None:
        """Store this gap's missing-span sequence, looked up from *data*.

        *data* is a list of dicts with 'chains' (tuple of chain IDs) and
        'sequence' (full one-letter sequence) keys.

        Raises
        ------
        ValueError
            If no entry in *data* covers this gap's chain.
        """
        for peptide in data:
            if self.chain not in peptide['chains']:
                continue
            msg = (f"Chain {self.chain} peptide ({len(peptide['sequence'])}) "
                   f"is shorter than the span ({self.start}:{self.end})")
            assert len(peptide['sequence']) >= self.start - 1, msg
            assert len(peptide['sequence']) >= self.end - 1, msg
            # PDB numbering is 1-based, hence the -1 shift for slicing.
            self.sequence = peptide['sequence'][self.start - 1:self.end]
            assert self.sequence, 'Empty??!'
            return
        # BUGFIX: the original raised inside the loop on the *first* peptide
        # whose chains did not match, and referenced an undefined name `chain`.
        raise ValueError(f'Unknown chain in pose {self.chain}')

    def get_pose(self) -> pyrosetta.Pose:
        """Build a standalone pose of just the missing span (not used)."""
        pose = pyrosetta.pose_from_sequence(self.sequence)
        for i in range(1, pose.total_residue() + 1):
            pose.pdb_info().chain(i, self.chain)
            pose.pdb_info().number(i, i + self.start - 1)
        return pose

    def add_to_pose(self, pose: pyrosetta.Pose):
        """Graft the missing residues into *pose* in place by polymeric
        extension after ``self.previous``, then remodel the junction with
        LoopModeler. ``fill_sequence`` must have been called first.
        """
        chm = pyrosetta.rosetta.core.chemical.ChemicalManager.get_instance()
        rts = chm.residue_type_set('fa_standard')
        # Recompute the pose index each time: earlier insertions shift numbering
        # (self.pose_i - 1 may have changed!)
        previous = pose.pdb_info().pdb2pose(res=self.previous, chain=self.chain)
        # self.pose_i is the position of the residue after the gap;
        # it will come to sit after the newly added residues.
        rm_upper = pyrosetta.rosetta.core.conformation.remove_upper_terminus_type_from_conformation_residue
        rm_lower = pyrosetta.rosetta.core.conformation.remove_lower_terminus_type_from_conformation_residue
        # Strip terminus variants on both sides of the cut so the chain can be extended.
        # (LOWER_CONNECT N / UPPER_CONNECT C)
        rm_upper(pose.conformation(), previous)
        rm_lower(pose.conformation(), previous)
        rm_lower(pose.conformation(), previous + 1)
        rm_upper(pose.conformation(), previous + 1)
        for i, r in enumerate(self.sequence):
            res_type = rts.get_representative_type_name1(r)
            residue = pyrosetta.rosetta.core.conformation.ResidueFactory.create_residue(res_type)
            pose.append_polymer_residue_after_seqpos(residue, previous + i, True)
            npos = previous + i + 1
            rm_lower(pose.conformation(), npos)
            rm_upper(pose.conformation(), npos)
        # Close the loop around the insertion. `npos` is defined because
        # fill_sequence guarantees a non-empty sequence.
        lm = pyrosetta.rosetta.protocols.loop_modeler.LoopModeler()
        loops = pyrosetta.rosetta.protocols.loops.Loops()
        loop = pyrosetta.rosetta.protocols.loops.Loop(previous - 1,
                                                      npos + 2,
                                                      npos)  # cutpoint
        loops.add_loop(loop)
        lm.set_loops(loops)
        # These stages are enabled by default; listed here for quick toggling.
        lm.enable_centroid_stage()
        lm.enable_fullatom_stage()
        lm.enable_build_stage()
        lm.apply(pose)

    def add_to_poseX(self, pose):
        """Deprecated alternative: append the span by jump. This results in
        two cutpoints — the wrong way; kept for reference only."""
        gap_pose = self.get_pose()
        assert gap_pose.sequence(), 'No sequence in gap pose?'
        ft = pyrosetta.FoldTree()
        ft.simple_tree(pose.total_residue())
        # BUGFIX: the original referenced an undefined name `gap` here.
        ft.new_jump(self.pose_i - 1, self.pose_i, self.pose_i - 1)
        assert ft.check_fold_tree(), f'Error in foldtree {ft}'
        pose.fold_tree(ft)
        n = ft.num_jump()
        pose.append_pose_by_jump(gap_pose, n)

    @classmethod
    def get_gaps(cls, pose: pyrosetta.Pose) -> List[GapData]:
        """Scan *pose* for jumps in PDB numbering; return one GapData per gap."""
        gaps = []
        pose2pdb = pose.pdb_info().pose2pdb
        # The forbidden kanji is definitely not a valid chain name: a safe sentinel.
        previous_resi, previous_chain = (-1, '禁')
        for residue in pose.residues:
            resi, chain = map(lambda x: int(x) if x.isdigit() else x,
                              pose2pdb(residue.seqpos()).split())
            if residue.is_ligand() or residue.is_metal():  # so why are ligands is_protein?
                # Ligands/metals break the polymer; reset the tracker.
                previous_resi, previous_chain = (-1, '禁')
            elif chain != previous_chain:
                pass  # new chain: tracker is reset at the bottom of the loop.
            elif resi <= previous_resi:
                raise ValueError(f'PDB ordering error: {previous_resi, previous_chain, resi, chain}')
            elif resi != previous_resi + 1:
                # Numbering jumped: everything in between is missing.
                gaps.append(cls(chain=chain,
                                start=previous_resi + 1,
                                end=resi - 1,
                                pose_i=residue.seqpos(),
                                previous=previous_resi
                                ))
            else:
                pass  # continuous numbering, no gap.
            previous_resi, previous_chain = resi, chain
        return gaps

    @classmethod
    def fix_pose(cls, pose: pyrosetta.Pose, data: List[dict]) -> None:
        """Find all gaps in *pose* and close them in place, last gap first
        (so earlier insertions do not invalidate later PDB indices)."""
        for gap in reversed(cls.get_gaps(pose)):
            gap.fill_sequence(data)
            print(gap)
            gap.add_to_pose(pose)
# Stream the starting pose to PyMOL, then close gaps one at a time. Residue
# numbering shifts after every insertion, so the gap list is recomputed on
# each pass rather than iterated once.
pymolmover = pyrosetta.PyMOLMover()
pymolmover.apply(pose)
# BUGFIX: `get_gaps` is a classmethod of GapData, not a module-level function.
gaps = GapData.get_gaps(pose)
while gaps:
    gap = gaps[0]
    gap.fill_sequence(data)
    print(gap)
    print(pose.total_residue())
    gap.add_to_pose(pose)  # modifies `pose` in place
    pymolmover.pymol_name('mod')
    pymolmover.apply(pose)
    print(pose.total_residue())
    # BUGFIX: the original never refreshed `gaps`, looping forever on gaps[0].
    gaps = GapData.get_gaps(pose)
```
> FIND BLOCK WHERE THE PDB NUMBERING IS FIXED WITH PYMOL
| github_jupyter |
```
from __future__ import print_function
import numpy as np
# Fix the RNG seed so weight initialisation and the checks below are reproducible.
np.random.seed(42)
class Layer:
    """Base class for network building blocks.

    A layer supports two operations:
      - forward(input): map a [batch, input_units] array to the layer's output
      - backward(input, grad_output): propagate loss gradients through the
        layer, updating any learnable parameters along the way.

    This base implementation is the identity layer.
    """

    def __init__(self):
        """The identity layer has no parameters, so there is nothing to set up."""
        pass

    def forward(self, input):
        """Return the input unchanged: [batch, input_units] -> [batch, input_units]."""
        return input

    def backward(self, input, grad_output):
        """Apply the chain rule through the identity layer.

        d loss / d x = (d loss / d layer) * (d layer / d x), and for the
        identity map the Jacobian is I — so the result is simply grad_output.
        The multiplication is written out explicitly for clarity.
        """
        identity_jacobian = np.eye(input.shape[1])
        return grad_output @ identity_jacobian
class ReLU(Layer):
    """Elementwise rectified linear unit: max(x, 0). Has no parameters."""

    def __init__(self):
        """Nothing to initialise — ReLU is parameter-free."""
        pass

    def forward(self, input):
        """Clamp every entry of the [batch, input_units] matrix at zero from below."""
        return np.clip(input, 0, None)

    def backward(self, input, grad_output):
        """Pass gradients through only where the input was strictly positive."""
        pass_through = input > 0
        return grad_output * pass_through
# Numerical gradient check for ReLU using the course's finite-difference helper.
from util import eval_numerical_gradient
x = np.linspace(-1,1,10*32).reshape([10,32])
l = ReLU()
# Gradient of mean(forward(x)) w.r.t. each entry is 1/(batch*units) where it flows.
grad_output = np.ones([10,32])/(32*10)
grads = l.backward(x,grad_output)
numeric_grads = eval_numerical_gradient(lambda x: l.forward(x).mean(), x=x)
assert np.allclose(grads, numeric_grads, rtol=1e-3, atol=0),\
"gradient returned by your layer does not match the numerically computed gradient"
class Dense(Layer):
    """Fully-connected layer: f(x) = x @ W + b, trained with plain SGD."""

    def __init__(self, input_units, output_units, learning_rate=0.1):
        # Small zero-mean random weights and zero biases.
        self.learning_rate = learning_rate
        self.weights = np.random.randn(input_units, output_units)*0.01
        self.biases = np.zeros(output_units)

    def forward(self, input):
        """Map [batch, input_units] -> [batch, output_units]."""
        output = np.dot(input, self.weights) + self.biases
        return output

    def backward(self, input, grad_output):
        """Backprop through the layer, take one SGD step on W and b, and
        return d loss / d input for the previous layer."""
        # d loss / d input = grad_output @ W^T
        grad_input = np.dot(grad_output, self.weights.T)
        # d loss / d W = input^T @ grad_output
        grad_weights = np.dot(input.T, grad_output)
        # d loss / d b = grad_output summed over the batch dimension.
        # BUGFIX: the original called np.dot with a single argument
        # (np.dot(np.ones_like(grad_output, self.biases))), which raises a
        # TypeError before any update could happen.
        grad_biases = grad_output.sum(axis=0)
        assert grad_weights.shape == self.weights.shape and grad_biases.shape == self.biases.shape
        # Plain stochastic gradient descent step.
        self.weights = self.weights - self.learning_rate * grad_weights
        self.biases = self.biases - self.learning_rate * grad_biases
        return grad_input
# Sanity checks on the initialisation: zero-mean small weights, zero biases.
l = Dense(128, 150)
assert -0.05 < l.weights.mean() < 0.05 and 1e-3 < l.weights.std() < 1e-1,\
"The initial weights must have zero mean and small variance. "\
"If you know what you're doing, remove this assertion."
assert -0.05 < l.biases.mean() < 0.05, "Biases must be zero mean. Ignore if you have a reason to do otherwise."
# To test the outputs, we explicitly set weights with fixed values. DO NOT DO THAT IN ACTUAL NETWORK!
l = Dense(3,4)
x = np.linspace(-1,1,2*3).reshape([2,3])
l.weights = np.linspace(-1,1,3*4).reshape([3,4])
l.biases = np.linspace(-1,1,4)
# Expected values precomputed for the fixed weights/biases above.
assert np.allclose(l.forward(x),np.array([[ 0.07272727, 0.41212121, 0.75151515, 1.09090909],
[-0.90909091, 0.08484848, 1.07878788, 2.07272727]]))
print("Well done!")
# To test the grads, we use gradients obtained via finite differences
from util import eval_numerical_gradient
x = np.linspace(-1,1,10*32).reshape([10,32])
# learning_rate=0 so the weights are not modified by backward during the check.
l = Dense(32,64,learning_rate=0)
numeric_grads = eval_numerical_gradient(lambda x: l.forward(x).sum(),x)
grads = l.backward(x,np.ones([10,64]))
assert np.allclose(grads,numeric_grads,rtol=1e-3,atol=0), "input gradient does not match numeric grad"
print("Well done!")
```
| github_jupyter |
```
# This cell is mandatory in all Dymos documentation notebooks.
missing_packages = []
try:
import openmdao.api as om
except ImportError:
if 'google.colab' in str(get_ipython()):
!python -m pip install openmdao[notebooks]
else:
missing_packages.append('openmdao')
try:
import dymos as dm
except ImportError:
if 'google.colab' in str(get_ipython()):
!python -m pip install dymos
else:
missing_packages.append('dymos')
try:
import pyoptsparse
except ImportError:
if 'google.colab' in str(get_ipython()):
!pip install -q condacolab
import condacolab
condacolab.install_miniconda()
!conda install -c conda-forge pyoptsparse
else:
missing_packages.append('pyoptsparse')
if missing_packages:
raise EnvironmentError('This notebook requires the following packages '
'please install them and restart this notebook\'s runtime: {",".join(missing_packages)}')
```
# Contributing to Dymos
Dymos is open-source software and the developers welcome collaboration with the community on finding and fixing bugs or requesting and implementing new features.
## Found a bug in Dymos?
If you believe you've found a bug in Dymos, [submit a new issue](https://github.com/OpenMDAO/dymos/issues).
If at all possible, please include a functional code example which demonstrates the issue (the expected behavior vs. the actual behavior).
## Fixed a bug in Dymos?
If you believe you have a fix for an existing bug in Dymos, please submit the fix as [pull request](https://github.com/OpenMDAO/dymos/pulls).
Under the "related issues" section of the pull request template, include the issue resolved by the pull request using Github's [referencing syntax](https://help.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue).
When submitting a bug-fix pull request, please include a [unit test](https://docs.python.org/3.8/library/unittest.html) that demonstrates the corrected behavior.
This will prevent regressions in the future.
## Need new functionality in Dymos?
If you would like to have new functionality that currently doesn't exist in Dymos, please submit your request via [the Dymos issues on Github](https://github.com/OpenMDAO/dymos/issues).
The Dymos development team is small and we can't promise that we'll add every requested capability, but we'll happily have a discussion and try to accommodate reasonable requests that fit within the goals of the library.
## Adding new examples
Adding new examples are a great way to contribute to Dymos.
They're a great introduction to the Dymos development process, and examples provide a great way for users to learn to apply Dymos in new applications.
Submit new examples via [the Dymos issues on Github](https://github.com/OpenMDAO/dymos/issues).
New examples should do the following:
- Include a new directory under the `dymos/examples` directory.
- A unittest should be included in a `doc` subfolder within the example directory.
- The unittest method should be self-contained (it should include all imports necessary to run the example).
- If you want to include output and/or plots from the example in the documentation (highly recommended), decorate the test with the `@dymos.utils.doc_utils.save_for_docs` decorator. This will save the text and plot outputs from the test for inclusion in the Dymos documentation.
- A new markdown file should be added under `mkdocs/docs/examples/<example name>` within the Dymos repository.
The Dymos docs are built on [JupyterBook](https://jupyterbook.org/intro.html) which allows users to run any page of the documentation by opening it in colab as a [Jupyter Notebook](https://jupyter.org). For those wanting to contribute, they are able to contribute by writing their own Jupyter Notebooks. Below are some important ways on how to build notebooks for Dymos.
## Notebook Creation
**Header**
At the beginning of every notebook, we require (without exception) the following code cell at the top of the notebook with the three tags: `active-ipynb`, `remove-input`, `remove-output`. Tags can be added at the top of the notebook menu by going to `View` -> `Cell Toolbar` -> `Tags`.
```
# Required header cell: import OpenMDAO and Dymos, installing them on the fly
# (e.g. on Colab) when they are not already available.
try:
import openmdao.api as om
import dymos as dm
except ModuleNotFoundError:
!python -m pip install openmdao[notebooks]
!python -m pip install dymos
import openmdao.api as om
import dymos as dm
```
**Adding Code Examples**
If you want to add a block of code, for example, simply add it to a code block like we have below.
```
# Show the ODE source, then drive it standalone to spot-check its partials.
om.display_source("dymos.examples.brachistochrone.doc.brachistochrone_ode")
import numpy as np
import openmdao.api as om
from dymos.examples.brachistochrone.doc.brachistochrone_ode import BrachistochroneODE
num_nodes = 5
p = om.Problem(model=om.Group())
# Independent variables feeding the ODE: speed and wire angle at each node.
ivc = p.model.add_subsystem('vars', om.IndepVarComp())
ivc.add_output('v', shape=(num_nodes,), units='m/s')
ivc.add_output('theta', shape=(num_nodes,), units='deg')
p.model.add_subsystem('ode', BrachistochroneODE(num_nodes=num_nodes))
p.model.connect('vars.v', 'ode.v')
p.model.connect('vars.theta', 'ode.theta')
# force_alloc_complex enables the complex-step derivative check below.
p.setup(force_alloc_complex=True)
p.set_val('vars.v', 10*np.random.random(num_nodes))
p.set_val('vars.theta', 10*np.random.uniform(1, 179, num_nodes))
p.run_model()
cpd = p.check_partials(method='cs', compact_print=True)
```
There should be a unit test associated with the code and it needs to be below the test. To keep the docs clean for users, we require that all tests be hidden (with few exceptions) using the tags `remove-input` and `remove-output`.
- On the off chance you want to show the assert, use the tag `allow_assert`.
- If your output is unusually long, use the tag `output_scroll` to make the output scrollable.
Below is an assert test of the code above.
```
# Assert that the complex-step partials computed above are consistent.
from dymos.utils.testing_utils import assert_check_partials
assert_check_partials(cpd)
```
**Showing Source Code**
If you want to show the source code of a particular class, there is a utility function from OpenMDAO to help you. Use `om.display_source()` to display your code. Example below:
```{Note}
This should include the tag `remove-input` to keep the docs clean
```
```
om.display_source("dymos.examples.brachistochrone.brachistochrone_ode")
```
**Citing**
If you want to cite a journal, article, book, etc., simply add ```{cite}`yourbibtexname` ``` next to what you want to cite. Add your citation to `reference.bib` so that the keyword will be picked up by JupyterBook. Below is an example of a BibTeX citation, that citation applied, and then a reference section with a filter to compile a list of the references mentioned in this notebook.
```
@inproceedings{gray2010openmdao,
title={OpenMDAO: An open source framework for multidisciplinary analysis and optimization},
author={Gray, Justin and Moore, Kenneth and Naylor, Bret},
booktitle={13th AIAA/ISSMO Multidisciplinary Analysis Optimization Conference},
pages={9101},
year={2010}
}
```
Grey {cite}`gray2010openmdao`
### References
```{bibliography}
:filter: docname in docnames
```
**Building Docs**
When you want to build the docs, run the following line from the top level of the Dymos folder: `jupyter-book build docs/`
## Running Tests
Dymos tests can be run with any test runner such as [nosetests](https://nose.readthedocs.io/en/latest/) or [pytest](https://docs.pytest.org/en/stable/).
However, due to some MPI-specific tests in our examples, we prefer our [testflo](https://github.com/OpenMDAO/testflo) package.
The testflo utility can be installed using
```
python -m pip install testflo
```
Testflo can be invoked from the top-level Dymos directory with:
```
testflo .
```
With pyoptsparse correctly installed and things working correctly, the tests should conclude after several minutes with a message like the following:
The lack of MPI capability or pyoptsparse will cause additional tests to be skipped.
```
The following tests were skipped:
test_command_line.py:TestCommandLine.test_ex_brachistochrone_reset_grid
OK
Passed: 450
Failed: 0
Skipped: 1
Ran 451 tests using 2 processes
```
| github_jupyter |

# Data Wrangling using RDDs
## Starting Spark Session
The programming language Python is used for the implementation in this course — for this we use 'pyspark'. (PySpark documentation https://spark.apache.org/docs/latest/api/python/)
PySpark is an interface for Apache Spark in Python. It not only allows you to write Spark applications using Python APIs, but also provides the PySpark shell for interactively analyzing your data in a distributed environment.
```
# import libraries from pyspark
from pyspark import SparkConf, SparkContext
# set values for the Spark configuration: local master, named application
conf = SparkConf().setMaster("local").setAppName("Data Analysis")
# get (if already running) or create a Spark Context
sc = SparkContext.getOrCreate(conf=conf)
# check (try) if the Spark context variable (sc) exists and print its configuration
try:
    sc
except NameError:
    # BUGFIX: the original message read "Spark context does not context exist."
    print("Spark context does not exist. Please create Spark context first (run cell above).")
else:
    configurations = sc.getConf().getAll()
    for item in configurations:
        print(item)
# print link to Spark UI, Version, Master and AppName
sc
```
> *For the Tutorials I will be using MovieLens 1M Dataset you can get it from the [Grouplens](https://grouplens.org/datasets/movielens/) website.*
```
ls data/ml-1m
!cat data/ml-1m/README
```
*Lets read in the ratings.dat nad create a ratings RDDs*
```
# BUGFIX: the assignment target was dropped from this line; every later cell
# refers to `ratingsRDD`, so restore the name.
ratingsRDD = sc.textFile("data/ml-1m/ratings.dat")
# Peek at the first rows only — never collect() the full RDD here.
ratingsRDD.take(5)
```
*That's it — we have read the text file and we are printing out the first 5 rows using the `take` action; make sure you don't use a `collect` action here because that would print out the whole RDD.*
*Now if you check the readme file provided in the Dataset these are the columns in the Data*
>*UserID::MovieID::Rating::Timestamp*
*Lets check counts on each ratings given, But first we need to split our data and for that we need to make use of a Transformation.*
```
# Keep only the rating field (UserID::MovieID::Rating::Timestamp -> index 2).
ratings = ratingsRDD.map(lambda x: x.split('::')[2])
ratings.take(5)
# countByValue is an action: it returns a local dict of rating -> occurrences.
result = ratings.countByValue()
type(result)
result
```
*So you can see how easy it was to get the ratings counter. As it has returned a dictionary lets sort and print the results.*
```
import collections
sortedResults = collections.OrderedDict(sorted(result.items()))
print(f"{'Ratings':10}{'Count'}\n")
for key, value in sortedResults.items():
print(f"{'★'* int(key):{10}}{value}")
```
*Lets look at another example and check which are the most rated movies.*
```
def loadMovieNames(path="data/ml-1m/movies.dat"):
    """Parse a MovieLens movies.dat file into a {MovieID: title} dict.

    Parameters
    ----------
    path: str
        Location of the '::'-separated movies file. Defaults to the ml-1m
        layout used throughout this notebook (backward-compatible).

    Returns
    -------
    dict
        Maps the integer MovieID to its title string.
    """
    movieNames = {}
    # The ml-1m files ship as Latin-1, not UTF-8.
    with open(path, encoding='ISO-8859-1') as f:
        for line in f:
            fields = line.split('::')
            movieNames[int(fields[0])] = fields[1]
    return movieNames
# Broadcast the id->title lookup so every executor gets one read-only copy.
nameDict = sc.broadcast(loadMovieNames())
# Emit (MovieID, 1) per rating record.
movies = ratingsRDD.map(lambda x: (int(x.split("::")[1]), 1))
movies.take(5)
# Sum the 1s per movie to get a rating count.
movieCounts = movies.reduceByKey(lambda x, y: x + y)
movieCounts.take(5)
# Flip to (count, MovieID) so we can sort by count.
flipped = movieCounts.map( lambda x : (x[1], x[0]))
sortedMovies = flipped.sortByKey(ascending=False)
sortedMovies.take(5)
# Replace the MovieID with its title via the broadcast lookup.
sortedMoviesWithNames = sortedMovies.map(lambda countMovie : (nameDict.value[countMovie[1]], countMovie[0]))
sortedMoviesWithNames.take(10)
```
*Now these are top 10 most rated movies.*
*Now lets look at movies with most 5 star ratings*
```
def filter_five_star(line):
    """Return *line* when its rating field is '5' (truthy for RDD.filter);
    otherwise fall through to None so the record is dropped."""
    rating = line.split("::")[2]
    return line if rating == '5' else None
# Keep only the 5-star rating records, then repeat the count-and-sort pipeline.
five_start_rattingsRDD= ratingsRDD.filter(lambda x: filter_five_star(x))
five_start_rattingsRDD.take(5)
five_start_movies = five_start_rattingsRDD.map(lambda x: (int(x.split("::")[1]), 1))
five_start_movieCounts = five_start_movies.reduceByKey(lambda x, y: x + y)
# Flip to (count, MovieID) to sort by count, then map ids back to titles.
flipped = five_start_movieCounts.map( lambda x : (x[1], x[0]))
five_start_sortedMovies = flipped.sortByKey(ascending=False)
five_start_sortedMoviesWithNames = five_start_sortedMovies.map(lambda countMovie : (nameDict.value[countMovie[1]], countMovie[0]))
five_start_sortedMoviesWithNames.take(10)
```
*Lets look at number of movies produced in each year*
```
moviesRDD =sc.textFile("data/ml-1m/movies.dat")
moviesRDD.take(5)
# Titles end in "(YYYY)", so the last 6 characters hold the year in parens.
'Toy Story (1995)'[-6:]
import re
# Demonstrate the regex: match the trailing "(YYYY)" and strip the parens.
re.search(r'\([0-9]{4}\)$','Grumpier Old Men (1995)').group(0)[1:-1]
def get_year(line):
    """Map a movies.dat record to (release_year, 1) for counting by year.

    The year is the trailing '(YYYY)' of the title field (second '::' field).
    """
    title = line.split('::')[1]
    year_match = re.search(r'\(([0-9]{4})\)$', title)
    return (year_match.group(1), 1)
# Emit (year, 1) per movie and sum per year.
year_RDD= moviesRDD.map(lambda x: get_year(x))
year_RDD.take(5)
yearCounts = year_RDD.reduceByKey(lambda x, y: x + y)
yearCounts.take(5)
# Sort by year (the key), ascending and then descending.
ascending_sorted_yearCounts = yearCounts.sortByKey()
ascending_sorted_yearCounts.take(5)
descending_sorted_yearCounts = yearCounts.sortByKey(ascending= False)
descending_sorted_yearCounts.take(5)
```
*Years with most movies*
```
# Flip to (count, year) so sortByKey orders by movie count, not by year.
flipped = yearCounts.map( lambda x : (x[1], x[0]))
descending_sorted_yearCounts = flipped.sortByKey(ascending= False)
descending_sorted_yearCounts.take(10)
```
*Lets find out the which age group is most active on the platform*
```
ratingsRDD.take(5)
def load_age_group(path="data/ml-1m/users.dat"):
    """Parse a MovieLens users.dat file into {UserID: age-group label}.

    Parameters
    ----------
    path: str
        Location of the '::'-separated users file. Defaults to the ml-1m
        layout used throughout this notebook (backward-compatible).

    Returns
    -------
    dict
        Maps the integer UserID to a human-readable age-bracket string.
    """
    # MovieLens encodes each age bracket by its lowest age.
    age_group= {'1': "Under 18", '18': "18-24", '25': "25-34", '35': "35-44", '45': "45-49", '50': "50-55", '56': "56+"}
    user_ageGroup = {}
    with open(path) as f:
        for line in f:
            # UserID::Gender::Age::Occupation::Zip-code
            fields = line.split('::')
            user_ageGroup[int(fields[0])] = age_group[fields[2]]
    return user_ageGroup
# Broadcast the user -> age-group lookup to the executors.
ageGroupDict = sc.broadcast(load_age_group())
# Count ratings per user: emit (UserID, 1) and sum.
users_ratings = ratingsRDD.map(lambda x: (int(x.split("::")[0]), 1))
count_user_ratings = users_ratings.reduceByKey(lambda x, y: x + y)
count_user_ratings.take(5)
# Flip to (count, UserID), swap the UserID for its age-group label,
# then sum the counts per age group.
flipped = count_user_ratings.map( lambda x : (x[1], x[0]))
age_group_count = flipped.map(lambda countuser : (ageGroupDict.value[countuser[1]], countuser[0]))
age_group_counts= age_group_count.reduceByKey(lambda x , y: x + y)
age_group_counts.collect()
# Sort groups by total rating count, descending.
age_group_counts.map(lambda x: (x[1], x[0])).sortByKey(ascending= False).map(lambda x: (x[1], x[0])).collect()
```
*Lets Load in another fake social network dataset.*
```
# Load the fake social-network CSV (id, name, age, number of friends).
friends = sc.textFile("data/fakefriends.csv")
friends.take(5)
friends.count()
```
*Lets look at the average number of friends broken down by age in this Dataset.*
```
def parseLine(line):
    """Parse one fakefriends.csv row into an (age, number_of_friends) pair."""
    parts = line.split(',')
    age, friend_count = parts[2], parts[3]
    return (int(age), int(friend_count))
friendsRDD = friends.map(parseLine)
friendsRDD.take(5)
# Per age: accumulate (sum of friend counts, number of people).
totalsByAge = friendsRDD.mapValues(lambda x: (x, 1)).reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))
totalsByAge.take(5)
# Average = total friends / number of people, rounded to 2 decimals.
averagesByAge = totalsByAge.mapValues(lambda x: round(x[0] / x[1], 2))
averagesByAge.take(5)
```
*Lets load up another dataset*
```
# Load the 1800 weather observations CSV (station, date, entry type, value, ...).
temp = sc.textFile("data/1800.csv")
temp.take(5)
```
*Lets check the weather stations with minimum temperatures in 1800.*
```
def parseLine(line):
    """Split a weather CSV row into (station_id, entry_type, temperature_F).

    The raw value is in tenths of a degree Celsius; convert it to Fahrenheit.
    """
    station_id, _, entry_type, raw_value = line.split(',')[:4]
    temperature = float(raw_value) * 0.1 * (9.0 / 5.0) + 32.0
    return (station_id, entry_type, temperature)
tempRDD = temp.map(parseLine)
tempRDD.take(5)
# Keep only minimum-temperature entries (entry type TMIN).
minTemps = tempRDD.filter(lambda x: "TMIN" in x[1])
minTemps.take(5)
# Reduce to the lowest temperature seen per station, rounded to 2 decimals.
stationTemps = minTemps.map(lambda x: (x[0], x[2]))
minTemps = stationTemps.reduceByKey(lambda x, y: round(min(x,y), 2))
minTemps.collect()
```
*Lets do another word count on a text file*
```
# Load the book text, one RDD element per line.
book = sc.textFile("data/Book.txt")
book.take(2)
def normalizeWords(text):
    """Lower-case *text* and split it on runs of non-word characters."""
    word_boundary = re.compile(r'\W+', re.UNICODE)
    return word_boundary.split(text.lower())
# Classic word count: one (word, 1) pair per token, summed per word,
# then sorted by count descending via a key/value flip.
words = book.flatMap(normalizeWords)
wordCounts = words.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
wordCounts.take(5)
wordCountsSorted = wordCounts.map(lambda x: (x[1], x[0])).sortByKey(ascending= False)
wordCountsSorted.take(10)
```
----
| github_jupyter |
# Prüfung
Bei Antworten an der Prüfung: nicht nur einfach 'Nullhypothese verworfen' schreiben, sondern auch erklären, was das genau bedeutet.
# Vorhersage von Zeitreihen
Im Kapitel _Einführung in Zeitreihen_ haben wir verschiedene Beispiele von Zeitreihen untersucht, die aus der Oekonomie, Ingenieur- und Umweltwissenschaften stammen. Wir haben dort verschiedene Visualisierungs- und Verdichtungstechniken, sowie die Zerlegung einer Zeitreihe kennengelernt.
Im letzten Kapitel lernten wir das stochastische Grundgerüst von Zeitreihen kennen: (diskrete) stochastische Prozesse. Autokovarianz und -korrelation wurden als informative Quantitäten eines gegebenen Prozesse eingeführt, und wir haben gelernt, wie man diese aus den Daten schätzt, sofern der zugrundeliegende Prozess (schwach) stationär ist.
Nun wollen wir schliesslich untersuchen, wie eine Modellierung einer Zeitreihe mit einem geeigneten stochastischen Prozess verwendet werden kann, um zukünftige und nichtbeobachtete Daten _vorherzusagen_.
Anders ausgedrückt ist es das Ziel dieses Kapitels, zukünftige Werte $ x_{n+k} $ für $ k=1,2,\ldots $ vorherzusagen, falls $\{x_1,\dots, x_n\}$ bis zur jetzigen Zeit bekannt sind. Im Wesentlichen müssen wir drei Schritte nacheinander durchführen, um dieses Ziel zu erreichen:
(i) Wir müssen sicher sein, dass der zugrundeliegende Prozess vorhersagbar ist. Das heisst, in der Zukunft ändert sich der Prozess nicht dramatisch, sondern fährt so weiter wie bisher (im probabilistischen Sinn).
(ii) Wir wählen eine Modellklasse durch explorative Datenanalyse einer gegebenen Zeitreihe. Danach passen wir das Modell an Trainingsdaten an und erhalten die Modellparameter, die das angepasste Modell vollumfänglich beschreiben.
(iii) Mit dem angepassten Modell sagen wir zukünftige Werte des Prozesses voraus.
Wir fokussieren uns auf die einfachsten und doch wichtigsten
parametrischen Modelle für stationäre parametrische Prozesse: _autoregressive Modelle_.
# Autoregressive Modelle AR(p)
Autoregressive Modelle basieren auf der Idee, dass der momentane
Wert einer Zeitreihe durch eine Linearkombination der $ p $
vorhergehenden Werte erklärt werden kann.
Das autoregressive Modell der Ordnung $p$ ist ein diskreter stochastischer Prozess, der folgender Gleichung genügt
$$
X_n
= a_1 X_{n-1} + a_2 X_{n-2} + \dots + a_p X_{n-p} + W_n
$$
wobei $a_1,a_2,\dots, a_p$ die Modellparameter sind und $W_1,W_2,\dots$ ein Prozess des weissen Rauschens mit Varianz $\sigma^2$ ist.
Autoregressive Modelle werden ausschliesslich für die Modellierung von stationären Prozessen verwendet.
### Beispiel : $\text{AR}(1)$ Prozess
$$ X_n = a_1 X_{n-1} + W_n $$
Random Walk ist vorheriger Tag + Rauschen
Wir berechnen zuerst den Erwartungswert des Prozesses, indem wir die Erwartung auf beiden
Seiten der Gleichung nehmen:
$$ \mu = E(X_n) = a_1 E(X_{n-1}) + E(W_n) = a_1 \mu + 0 $$
Daraus folgt, dass
$$ (a_1 - 1)\mu = 0 $$
was $\mu = 0$ impliziert, falls $a_1 \neq 1$. Ist der Prozess insbesondere stationär, dann ist die Mittelwertsfunktion
$$ \mu(i) = 0 $$
$$\sigma_X^2 = Var(X_n) = a_1^2 Var(X_{n-1}) + Var(W_n) = a_1^2 \sigma_X^2 + \sigma^2$$
Somit gilt
$$ \sigma_X^2 = a_1^2 \sigma_X^2 + \sigma^2 $$
oder nach ein paar Umformungen:
$$\sigma_X^2 = \frac{\sigma^2}{1-a_1^2} $$
Um einen vernünftigen Wert für die konstante Varianz $\sigma_X^2$ (eine minimale Eigenschaft für die Stationarität) zu erhalten, muss der absolute Wert $|a_1|$ kleiner als $ 1 $ sein (sonst könnte die Varianz negativ sein).
Dies kann wie folgt interpretiert werden: Damit ein Prozess stationär ist, darf die Abhängigkeit von vergangenen Werten des Prozesses nicht zu stark sein.
## Beispiel : $\text{AR}(3)$ Prozess
Wir untersuchen den $\text{AR}(3)$ Prozess
$$ X_n = 0.5 X_{n-1} - 0.5 X_{n-2} - 0.1 X_{n-3} + W_n $$
Wir wollen nun eine Zeitreihe basierend auf diesem Modell simulieren:
```
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima_process import ArmaProcess
%matplotlib inline
ar1 = np.array([1, -.5, -.1, .1])
simulated_data = ArmaProcess(ar1, ma=[1]).generate_sample(nsample=200)
plt.plot(simulated_data)
plt.xlabel("Zeit")
plt.ylabel("Wert")
plt.title("AR(3) Prozess")
plt.show()
```
Obige Abbildung zeigt eine Realisierung des im obigen Code
implementierten stochastischen Prozesses.
## Autokorrelation eines $\text{AR}(p)$ Prozesses
Falls wir die Anpassung eines autoregressiven Modelles an eine
gegebene Zeitreihe beabsichtigen, so müssen wir zwei Dinge vorher klären:
(i) Ist das autoregressive Modell die richtige Wahl für diese Daten?
(ii) Welches ist die geeignete Modellordnung $ p $ für die gegebenen Daten?
Eine gute Orientierungshilfe, um diese beiden Fragen zu beantworten, besteht darin, die Autokorrelation des Prozesses zu betrachten. In diesem Kapitel werden wir deswegen die theoretische Form der Autokorrelation von $\text{AR}(p)$ Prozessen untersuchen.
In der Praxis wird die empirische Autokorrelation einer gegebenen Zeitreihe berechnet und mit der theoretischen verglichen. Auf diese Weise können wir beurteilen, ob eine Zeitreihe autoregressiv ist und wenn ja, welche Modellordnung sie hat.
## Beispiel : Autokorrelation eines $ \text{AR}(1) $ Prozesses
Wir betrachten den $\text{AR}(1)$ Prozess mit $|a_1|<1$ und berechnen die Autokorrelation mit lag $h=1$
\begin{align*}
\rho(1)
&= \frac{Cov(X_n, X_{n-1})}{\gamma(0)}\\
& = \frac{Cov(a_1 X_{n-1} + W_{n}, X_{n-1})}{\gamma(0)}\\
&= \frac{a_1 Cov(X_{n-1}, X_{n-1}) + Cov(W_{n}, X_{n-1})}{\gamma(0)}\\
& = \frac{a_1 \gamma(0)}{\gamma(0)}\\
& = a_1
\end{align*}
Die Autokorrelation mit lag $h=2$ können wir nun berechnen durch
\begin{align*}
\rho(2)
&= \frac{Cov(X_n, X_{n-2})}{\gamma(0)} \\
&= \frac{a_1 Cov(X_{n-1}, X_{n-2}) + Cov(W_{n}, X_{n-2})}{\gamma(0)}\\
&= a_1 \frac{\gamma(1)}{\gamma(0)}\\
&= a_1 \rho(1)\\
&= a_1^2
\end{align*}
Fahren wir auf diese Weise fort, so erhalten wir
$$
\rho(h)
= a_1^h
$$
Dies läuft darauf hinaus, dass die Autokorrelation des $ \text{AR}(1) $ Prozesses exponentiell schnell zerfällt.
Die Berechnung der Autokorrelation eines allgemeinen $ \text{AR}(p) $ Prozesses ist ziemlich kompliziert für $ p>1 $. Diese Berechnung übernimmt Python mit dem Befehl _ArmaProcess(ar = [...]).acf()_. Man beachte, dass bei _ar = [...]_ die Koeffizienten des charakteristischen Polynoms eingegeben werden müssen.
## Beispiel : $\text{AR}(3)$ Prozess
Wir untersuchen den $\text{AR}(3)$ Prozess:
$$ X_n = 0.5 X_{n-1} - 0.5 X_{n-2} - 0.1 X_{n-3} + W_n $$
Dieser Prozess ist definiert durch die Koeffizienten $a_1 = 0.5$, $a_2= -0.5$ and $a_3 = -0.1$.
```
from statsmodels.tsa.arima_process import ArmaProcess
# Theoretical ACF of the AR(3) process up to 25 lags; ar=[...] holds the
# characteristic-polynomial coefficients [1, -a1, -a2, -a3].
lag = 25
acf_theor =ArmaProcess(ar = [1, -.5, .5, .1], ma=[1]).acf(lag)
x = np.arange(lag)
plt.bar(x, acf_theor , width=.2, color="black")
plt.plot(x, acf_theor, "ro", markerfacecolor="C1")
plt.xlabel("Index")
plt.ylabel("ACF")
plt.title("ACF eines AR(3) Prozesses")
plt.show()
```
Die obere Funktion ist wahrscheinlich ein autoregressiver Prozess
Wie aus obiger Abbildung zu erkennen ist,
oszilliert die Autokorrelation eines gegebenen $ \text{AR}(3) $
und nimmt im Wesentlichen exponentiell ab. Dies ist das typische Muster der Autokorrelationsfunktion im Falle von autoregressiven Prozessen.
Das Beispiel oben deutet an, dass die Autokorrelation eines $ \text{AR}(p) $ Prozesses nicht 0 ist für eine grosse Spannweite von lags. Dies ist wegen der Verbreitung von Korrelation durch das Modell: Falls $ X_{k} $ stark mit $ X_{k+1} $ korreliert und $ X_{k+1} $ stark mit $ X_{k+2} $ korreliert, dann ist es wahrscheinlich, dass $ X_{k} $ auch stark mit $ X_{k+2} $ korreliert.
Falls wir die _direkte_ Korrelation zwischen $ X_{k} $ und $ X_{k+2} $ untersuchen wollen, also insbesondere den Anteil der Korrelation, der _nicht_ aufgrund von $ X_{k+1} $ zu Stande kommt, dann müssen wir die partielle Autokorrelation berechnen. Die exakte mathematische Definition lautet wie folgt:
Für einen schwach stationären stochastischen Prozess $\{X_1,X_2,\dots\}$ ist die partielle Autokorrelation definiert durch
$$ \pi(h) = Cor(X_k, X_{k+h}\mid X_{k+1},\dots, X_{k+h-1}) $$
Die Grösse $Cor(X,Y\mid Z)$ beschreibt die bedingte Korrelation von $X$ und $Y$ unter der Bedingung, dass der Wert von $Z$ gegeben ist.
Wir erwähnen aber, dass für einen autoregressiven Prozess $ \text{AR}(p) $ die partielle Autokorrelation zwei wichtige Eigenschaften hat.
(i) Der $ p $-te Koeffizient $ \alpha_{p} $ eines $ \text{AR}(p) $ Prozesses ist gleich $ \pi(p) $, also insbesondere der Wert der partiellen Autokorrelation bei lag $ p $ des Prozesses.
(ii) Für einen autoregressiven Prozess $ \text{AR}(p) $ ist die partielle Autokorrelation 0 für lags grösser als $ p $. Insbesondere $ \pi(k)=0 $ für $ k>p $.
# partielle Autokorrelation
Mit diesen Eigenschaften haben wir ein Werkzeug zu Verfügung, um gegebene autoregressive Zeitreihen zu untersuchen und die Modellordnung zu bestimmen. Wir berechnen dazu die partielle Autokorrelation und wählen den grössten lag $ k $, für welches der Wert $ \pi(k) $ nicht 0 ist. In Python wird die partielle Autokorrelation einer stationären Zeitreihe geschätzt durch den Befehl _ArmaProcess(...).pacf()_.
## Beispiel : $\text{AR}(3)$ Prozess
```
from statsmodels.tsa.arima_process import ArmaProcess
# Theoretical PACF of the same AR(3) process; it should cut off after lag 3.
lag=15
pacf_theor =ArmaProcess(ar= [1, -.5, .5, .1], ma=[1]).pacf(lag)
x = np.arange(lag)
plt.bar(x, pacf_theor, width=.2, color="black")
plt.plot(x, pacf_theor, "ro", markerfacecolor="C1")
plt.xlabel("Index")
plt.ylabel("Partieller ACF")
plt.title("Partielle Autokorrelation eines AR(3) Prozesses")
plt.show()
```
Ab lag 4 ist es null, also modeliere ich es mit einem mit einem Model dritter ordnung ->
die Paramter kann man aus dem Diagram auslesen
Wie aus obiger Abbildung ersichtlich ist, sind die Koeffizienten der partiellen Autokorrelation grösser als $ 3 $ beinahe $0$. In der Praxis, insbesondere wenn wir nur die beobachtete Zeitreihe haben, wählen wir ein autoregressives Modell der Ordnung $ 3 $ für die Modellierung der gegenwärtigen Reihe.
## Beispiel : Sunspots
Die Vorhersage von Sonnenaktivität ist wichtig für den satellite drag (Reibungskräfte der Restatmosphäre), Ausfall von Telekommunikation und Solarwinde im Zusammenhang mit Blackouts von Kraftwerken. Ein Indikator von Sonnenaktivität sind unter anderem die Zahl der Sonnenflecken
Im Datensatz _sunspot_, der in _statsmodels_ integriert ist, sind die Anzahl Sonnenflecken von 1700 bis 2008 jährlich aufgeführt.
```
import statsmodels.api as sm
import pandas as pd
# Yearly sunspot counts 1700-2008, re-indexed by year; drop the
# redundant YEAR column once it is encoded in the index.
dta = sm.datasets.sunspots.load_pandas().data
dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
del dta["YEAR"]
dta.plot()
plt.show()
```
Es ist wichtig zu bemerken, dass die Sonnenfleckendaten _nicht_ periodisch sind, insbesondere sind die Zyklenlängen nicht konstant. Dieses quasiperiodische Verhalten dürfen wir nicht mit Saisonalität verwechseln. Die Peaks und Minima sind im Vornherein _nicht_ bekannt.
### Box-Cox Transformation
```
# Variance-stabilising transform of the counts.
# NOTE(review): the heading says Box-Cox; for lambda = 0.5 Box-Cox is
# (sqrt(x) - 1) / 0.5 -- this omits the 1/lambda factor, which only
# rescales the series. Confirm this is intended.
dta_sq = (np.sqrt(dta)-1)
dta_sq.plot()
plt.xlabel("Jahre")
plt.ylabel("Wurzel der Sonnenflecken")
plt.show()
```
Als nächstes berechnen wir die Autokorrelations- und partielle Autokorrelationsfunktion, um zu klären, ob das autoregressive Modell die richtige Modellwahl ist und falls ja, um die richtige Ordnung des Modelles zu bestimmen.
```
from statsmodels.graphics.tsaplots import plot_pacf
from statsmodels.graphics.tsaplots import plot_acf
# Empirical ACF (left) and PACF (right) of the transformed series, 50 lags.
fig = plt.figure()
ax1 = fig.add_subplot(121)
plot_acf(dta_sq,lags=50, ax=ax1)
ax2 = fig.add_subplot(122)
plot_pacf(dta_sq,lags=50,ax=ax2)
plt.show()
```
In obiger Abbildung ist links die Autokorrelation und rechts die partielle Autokorrelation aufgeführt. Wie man sieht, zeigt die Autokorrelation typisches Verhalten für einen autoregressiven Prozess: ein oszillierendes Muster mit exponentiellem Verfall.
Die partielle Autokorrelation zeigt, dass die direkte Abhängigkeit einen maximalen lag 9 hat. Dies wählen wir als Modellparameter $ p $ im nächsten Abschnitt.
# Anpassung der Koeffizienten in einem $\text{AR}(p)$ Modell
Wir wenden uns nun der Parameterschätzung zu. Dazu gehen wir nicht
allzu sehr ins Detail, sondern erwähnen bloss den grundlegendsten
Ansatz, um die Koeffizienten zu schätzen, nämlich die Methode der
kleinsten Quadrate. Gegeben sind die Daten $\{x_1,x_2,\dots, x_n\}$
und ein Modell der Ordnung $ p $. Wir passen den $\text{AR}(p)$ Prozess
an die Daten an, indem wir das folgende lineare Gleichungssystem lösen:
\begin{align*}
x_{p+1} & = a_1 x_p + a_2 x_{p-1} + \dots + a_p x_{1} + W_{p+1} \\
x_{p+2} & = a_1 x_{p+1} + a_2 x_{p} + \dots + a_p x_{2} + W_{p+2} \\
\vdots & \\
x_{n} & = a_1 x_{n-1} + a_2 x_{n-2} + \dots + a_p x_{n-p} + W_{n} \\
\end{align*}
Dieses System wird im Sinne der Methode der kleinsten Quadrate gelöst.
Es gibt mindestens drei weitere Methoden, die Standard sind, um die
Koeffizienten von $\text{AR}(p)$ zu schätzen :
(i) Burg's Algorithmus
(ii) Yule-Walker Gleichungen
(iii) Maximum Likelihood Methode
Mehr dazu steht im Vorlesungsskript.
### Beispiel : Sunspots
Wir wollen nun das Modell an die Daten anpassen.
```
from statsmodels.tsa.arima_model import ARMA
# Fit a pure AR(9) model: order=(9, 0) means 9 AR terms, 0 MA terms.
model = ARMA(dta_sq, order=(9,0)).fit()
```
Die Option _order=(9,0)_ erreicht, dass die Ordnung der Anpassung 9. Ordnung ist.
```
# Observed series vs. fitted values (observed minus residuals).
plt.plot(dta_sq)
plt.plot(dta_sq["SUNACTIVITY"] - model.resid)
plt.show()
```
Obige Abbildung zeigt die jährlich gemittelte Zeitreihe in blau und den Output des Modelles in orange. Die Anpassung scheint einigermassen genau zu sein.
Allerdings sollten wir noch den Residuenplot überprüfen. Wir wählen ein Histogramm und ein qq-Plot.
```
from statsmodels.graphics.api import qqplot
# Residual diagnostics: histogram (left) and normal QQ plot (right).
fig = plt.figure()
ax1 = fig.add_subplot(121)
model.resid.hist(edgecolor="black", bins=100, ax=ax1)
ax2 = fig.add_subplot(122)
qqplot(model.resid, line="q", ax=ax2)
plt.show()
```
## Vorhersagen von $\text{AR}(p)$ Prozessen
Am Ende wollen wir natürlich mit unserem Modell
zukünftige (unbeobachtete) Werte vorhersagen.
Die allgemeine Vorgehensweise, um stationäre
Zeitreihen mit Hilfe eines autoregressiven Modells vorherzusagen,
lässt sich wie folgt zusammenfassen:
Unter der Annahme, dass $\{X_1,X_2,\dots,\}$ ein stationärer
Prozess ist und wir die Zeitreihe $\{x_1,x_2,\dots, x_n\}$ beobachtet
haben: Die Vorhersage für den $k$-ten Schritt im voraus
ist eine Schätzung der Zufallsvariable $X_{n+k}$ gegeben durch
$$
\hat{X}_{n+k}
= \text{E}(X_{n+k} \mid X_1 = x_1,\dots, X_n = x_n)
$$
Hier ist $\text{E}(X\mid Y=y)$ der bedingte Erwartungswert
von $X$ unter der Bedingung, dass $Y=y$.
In Python können Vorhersagen einer Zeitreihe mit dem Attribut
_predict_ ausgeführt werden.
### Beispiel $\text{AR}(1)$ Prozess
Im Folgenden simulieren wir Zeitreihen, die auf einem $ \text{AR}(1) $ Prozess
beruhen und wollen einige zukünftige Werte aufgrund dieser Zeitreihendaten
voraussagen. Dazu bilden wir ein Modell aufgrund einer Teilmenge dieser
erzeugten Daten. Das heisst, wir schneiden einige Werte am Ende der Zeitreihe ab,
die wir dann vorhersagen und untersuchen möchten.
```
from statsmodels.tsa.arima_process import ArmaProcess
from statsmodels.tsa.arima_model import ARMA
from pandas import Series
from pandas import DataFrame
# Simulate 150 observations of an AR(1) process with a1 = 0.7
# (characteristic polynomial [1, -0.7]).
ar = np.array([1, -0.7])
ma = np.array([1])
AR_object = ArmaProcess(ar, ma)
simulated_data = DataFrame({'values' : AR_object.generate_sample(nsample=150)})
# Attach a yearly date index so plot_predict can address ranges by year.
simulated_data.index = pd.DatetimeIndex(start='1700', end='1850', freq='A')
mod = ARMA(simulated_data, order=(1,0)).fit()
fig, ax = plt.subplots(figsize=(12, 8))
ax = simulated_data.loc['1700':].plot(ax=ax)
# Dynamic out-of-sample forecast of the final 20 "years".
mod.plot_predict('1830','1850', dynamic=True, ax=ax, plot_insample=True);
plt.show()
```
Obige Abbildung zeigt die vollständigen Daten (blau, grün). Aus diesen Daten wird ein $ \text{AR}(1) $ Modell gebildet und die Werte für die restlichen 20 Werte vorhergesagt (orange). Auch eingezeichnet sind die Grenzen des Vertrauensintervall (grau).
## Vertrauensintervall für Vorhersagen
Der Standardfehler $\sigma_k$, der hier angegeben ist, ist die Quadratwurzel
der bedingten Varianz
$$
\sigma_k^2
= \text{Var}(X_{n+k}\mid X_1=x_1,\dots, X_n = x_n)
$$
Diese Grösse nimmt mit $ k $ zu und konvergiert gegen
die Prozessvarianz $\sigma_X^2$. Mit diesem Standardfehler
können wir ein 95\% Vertrauensintervall für
den bedingten Erwartungswert $\text{E}(X_{n+k}\mid X_1 = x_1, \dots, X_n = x_n)$ berechnen
$$ \hat{X}_{n+k} \pm 1.96 \sigma_k $$
Wir wollen noch bemerken, dass in der Praxis sowohl der Standardfehler,
Das Beispiel oben deutet an, dass die $ \text{AR}(1) $ Vorhersagen sehr grob sind: Sie beginnen bei der letzten Beobachtung und gehen exponentiell gegen 0. Dies überrascht nicht, da alle zukünftigen Vorhersagen nur vom letzten beobachteten Wert abhängen.
### Beispiel : Sonnenflecken
Wir betrachten nochmals das Beispiel der Sonnenflecken. Wir verwenden die jährlichen Daten von 1749 bis 1989 als
Trainingsdaten und schätzen das $ \text{AR}(9) $ Modell aus diesen Daten. Dann sagen wir die Sonnenfleckenzahlen für die nächsten 25 Jahre voraus.
```
# Forecast sunspot numbers 1990-2014 from the AR(9) model fitted above.
fig, ax = plt.subplots()
ax = dta_sq.loc['1950':].plot(ax=ax)
fig = model.plot_predict('1990', '2014', dynamic=True, ax=ax, plot_insample=True)
plt.show()
```
| github_jupyter |
# IMDB
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.text import *
# Pin all fastai work to GPU 2 on this multi-GPU machine.
torch.cuda.set_device(2)
```
## Preparing the data
First let's download the dataset we are going to study. The [dataset](http://ai.stanford.edu/~amaas/data/sentiment/) has been curated by Andrew Maas et al. and contains a total of 100,000 reviews on IMDB. 25,000 of them are labelled as positive and negative for training, another 25,000 are labelled for testing (in both cases they are highly polarized). The remaining 50,000 is additional unlabelled data (but we will find a use for it nonetheless).
We'll begin with a sample we've prepared for you, so that things run quickly before going over the full dataset.
```
# Download and extract the small IMDB sample shipped with fastai.
path = untar_data(URLs.IMDB_SAMPLE)
path.ls()
```
It only contains one csv file, let's have a look at it.
```
# One review per row: label, text, and an is_valid flag.
df = pd.read_csv(path/'texts.csv')
df.head()
df['text'][1]
```
It contains one line per review, with the label ('negative' or 'positive'), the text and a flag to determine if it should be part of the validation set or the training set. If we ignore this flag, we can create a DataBunch containing this data in one line of code:
```
# Tokenize + numericalize the csv into a DataBunch in one call.
data_lm = TextDataBunch.from_csv(path, 'texts.csv')
```
By executing this line a process was launched that took a bit of time. Let's dig a bit into it. Images could be fed (almost) directly into a model because they're just a big array of pixel values that are floats between 0 and 1. A text is composed of words, and we can't apply mathematical functions to them directly. We first have to convert them to numbers. This is done in two differents steps: tokenization and numericalization. A `TextDataBunch` does all of that behind the scenes for you.
Before we delve into the explanations, let's take the time to save the things that were calculated.
```
# Cache the processed (tokenized/numericalized) data to disk.
data_lm.save()
```
Next time we launch this notebook, we can skip the cell above that took a bit of time (and that will take a lot more when you get to the full dataset) and load those results like this:
```
# Reload the cached DataBunch instead of reprocessing the csv.
data = TextDataBunch.load(path)
```
### Tokenization
The first step of processing we make texts go through is to split the raw sentences into words, or more exactly tokens. The easiest way to do this would be to split the string on spaces, but we can be smarter:
- we need to take care of punctuation
- some words are contractions of two different words, like isn't or don't
- we may need to clean some parts of our texts, if there's HTML code for instance
To see what the tokenizer had done behind the scenes, let's have a look at a few texts in a batch.
```
# Inspect a tokenized batch (classification DataBunch variant).
data = TextClasDataBunch.load(path)
data.show_batch()
```
The texts are truncated at 100 tokens for more readability. We can see that it did more than just split on space and punctuation symbols:
- the "'s" are grouped together in one token
- the contractions are separated like his: "did", "n't"
- content has been cleaned for any HTML symbol and lower cased
- there are several special tokens (all those that begin by xx), to replace unkown tokens (see below) or to introduce different text fields (here we only have one).
### Numericalization
Once we have extracted tokens from our texts, we convert to integers by creating a list of all the words used. We only keep the ones that appear at least twice with a maximum vocabulary size of 60,000 (by default) and replace the ones that don't make the cut by the unknown token `UNK`.
The correspondence from ids to tokens is stored in the `vocab` attribute of our datasets, in a dictionary called `itos` (for int to string).
```
# First ten entries of the int-to-string vocabulary mapping.
data.vocab.itos[:10]
```
And if we look at what a what's in our datasets, we'll see the tokenized text as a representation:
```
# A dataset item displays as its tokenized text...
data.train_ds[0][0]
```
But the underlying data is all numbers
```
# ...but is stored as an array of vocabulary ids.
data.train_ds[0][0].data[:10]
```
### With the data block API
We can use the data block API with NLP and have a lot more flexibility than what the default factory methods offer. In the previous example for instance, the data was randomly split between train and validation instead of reading the third column of the csv.
With the data block API though, we have to manually call the tokenize and numericalize steps. This allows more flexibility, and if you're not using the defaults from fastai, the various arguments to pass will appear in the step where they're relevant, so it'll be more readable.
```
# Same DataBunch via the data block API: split on the is_valid column
# (col 2) and take labels from column 0.
data = (TextList.from_csv(path, 'texts.csv', cols='text')
.split_from_df(col=2)
.label_from_df(cols=0)
.databunch())
```
## Language model
Note that language models can use a lot of GPU, so you may need to decrease batchsize here.
```
# Batch size; lower this if the language model runs out of GPU memory.
bs=48
```
Now let's grab the full dataset for what follows.
```
# Download the full 100k-review IMDB dataset (imagenet-style folders).
path = untar_data(URLs.IMDB)
path.ls()
(path/'train').ls()
```
The reviews are in a training and test set following an imagenet structure. The only difference is that there is an `unsup` folder on top of `train` and `test` that contains the unlabelled data.
We're not going to train a model that classifies the reviews from scratch. Like in computer vision, we'll use a model pretrained on a bigger dataset (a cleaned subset of wikipedia called [wikitext-103](https://einstein.ai/research/blog/the-wikitext-long-term-dependency-language-modeling-dataset)). That model has been trained to guess the next word, its input being all the previous words. It has a recurrent structure and a hidden state that is updated each time it sees a new word. This hidden state thus contains information about the sentence up to that point.
We are going to use that 'knowledge' of the English language to build our classifier, but first, like for computer vision, we need to fine-tune the pretrained model to our particular dataset. Because the English of the reviews left by people on IMDB isn't the same as the English of wikipedia, we'll need to adjust a little bit the parameters of our model. Plus there might be some words extremely common in that dataset that were barely present in wikipedia, and therefore might not be part of the vocabulary the model was trained on.
This is where the unlabelled data is going to be useful to us, as we can use it to fine-tune our model. Let's create our data object with the data block API (next line takes a few minutes).
```
# Language-model data: every review (train/test/unsup) with 10% held out.
data_lm = (TextList.from_folder(path)
#Inputs: all the text files in path
.filter_by_folder(include=['train', 'test', 'unsup'])
#We may have other temp folders that contain text files so we only keep what's in train, test and unsup
.random_split_by_pct(0.1)
#We randomly split and keep 10% (10,000 reviews) for validation
.label_for_lm()
#We want to do a language model so we label accordingly
.databunch(bs=bs))
data_lm.save('tmp_lm')
```
We have to use a special kind of `TextDataBunch` for the language model, that ignores the labels (that's why we put 0 everywhere), will shuffle the texts at each epoch before concatenating them all together (only for training, we don't shuffle for the validation set) and will send batches that read that text in order with targets that are the next word in the sentence.
The line before being a bit long, we want to load quickly the final ids by using the following cell.
```
# Reload the cached language-model ids rather than re-tokenizing.
data_lm = TextLMDataBunch.load(path, 'tmp_lm', bs=bs)
data_lm.show_batch()
```
We can then put this in a learner object very easily with a model loaded with the pretrained weights. They'll be downloaded the first time you'll execute the following line and stored in `~/.fastai/models/` (or elsewhere if you specified different paths in your config file).
```
# AWD-LSTM pretrained on wikitext-103; pick a learning rate, train the
# new head for one cycle, and checkpoint it.
learn = language_model_learner(data_lm, pretrained_model=URLs.WT103_1, drop_mult=0.3)
learn.lr_find()
learn.recorder.plot(skip_end=15)
learn.fit_one_cycle(1, 1e-2, moms=(0.8,0.7))
learn.save('fit_head')
learn.load('fit_head');
```
To complete the fine-tuning, we can then unfreeze and launch a new training.
```
# Unfreeze the whole network and fine-tune end to end.
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3, moms=(0.8,0.7))
learn.save('fine_tuned')
```
How good is our model? Well let's try to see what it predicts after a few given words.
```
# Sanity-check the LM by sampling continuations of a prompt.
learn.load('fine_tuned');
TEXT = "i liked this movie because"
N_WORDS = 40
N_SENTENCES = 2
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
```
We have to save the model but also its encoder, the part that's responsible for creating and updating the hidden state. For the next part, we don't care about the part that tries to guess the next word.
```
# Keep only the encoder (hidden-state machinery) for the classifier.
learn.save_encoder('fine_tuned_enc')
```
## Classifier
Now, we'll create a new data object that only grabs the labelled data and keeps those labels. Again, this line takes a bit of time.
```
path = untar_data(URLs.IMDB)
# Classification data: reuse the LM vocab so ids line up with the encoder.
data_clas = (TextList.from_folder(path, vocab=data_lm.vocab)
#grab all the text files in path
.split_by_folder(valid='test')
#split by train and valid folder (that only keeps 'train' and 'test' so no need to filter)
.label_from_folder(classes=['neg', 'pos'])
#label them all with their folders
.databunch(bs=bs))
data_clas.save('tmp_clas')
data_clas = TextClasDataBunch.load(path, 'tmp_clas', bs=bs)
data_clas.show_batch()
```
We can then create a model to classify those reviews and load the encoder we saved before.
```
# Classifier on top of the fine-tuned encoder, trained with gradual
# unfreezing: head only, then last 2 layer groups, then 3, then all.
learn = text_classifier_learner(data_clas, drop_mult=0.5)
learn.load_encoder('fine_tuned_enc')
learn.freeze()
gc.collect();
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
learn.fit_one_cycle(1, 2e-2, moms=(0.8,0.7))
learn.save('first')
learn.load('first');
learn.freeze_to(-2)
# Discriminative learning rates: earlier layers get lr / 2.6**4.
learn.fit_one_cycle(1, slice(1e-2/(2.6**4),1e-2), moms=(0.8,0.7))
learn.save('second')
learn.load('second');
learn.freeze_to(-3)
learn.fit_one_cycle(1, slice(5e-3/(2.6**4),5e-3), moms=(0.8,0.7))
learn.save('third')
learn.load('third');
learn.unfreeze()
learn.fit_one_cycle(2, slice(1e-3/(2.6**4),1e-3), moms=(0.8,0.7))
learn.predict("I really loved that movie, it was awesome!")
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import seaborn as sns
from sqlalchemy import extract
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# Connect to the local SQLite database and list its tables.
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
inspector = inspect(engine)
inspector.get_table_names()
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Mapped classes generated by automap for the two reflected tables.
Measurement = Base.classes.measurement
Station = Base.classes.station
# Creating our session (link) from Python to the DB
session = Session(engine)
```
# EXPLORING THE DATABASE
```
# Peek at one mapped row's attributes.
first_row = session.query(Measurement).first()
first_row.__dict__
# Finding the number of Measurement rows for the first station (Hawaii)
Hawaii_station = session.query(Measurement).filter(Measurement.station == 'USC00519397').count()
print("There are {} total measurements from the Hawaii station(USC00519397)".format(Hawaii_station))
#Checking the Measurements columns
# NOTE(review): the loop variable shadows the `columns` list it iterates.
columns = inspector.get_columns('Measurement')
for columns in columns:
    print(columns['name'], columns["type"])
#Checking the Stations columns
columns = inspector.get_columns('Station')
for columns in columns:
    print(columns['name'], columns["type"])
#checking the data layout for Measurements
engine.execute('SELECT * FROM Measurement LIMIT 10').fetchall()
#checking the data layout for Station
engine.execute('SELECT * FROM Station LIMIT 10').fetchall()
# Checking for the earliest Measurement Date of our data set
session.query(Measurement.date).order_by(Measurement.date).first().date
first_date = session.query(Measurement.date).order_by(Measurement.date).first().date
first_date
# Checking for the last Measurement Date of our dataset
last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first().date
last_date
# Vacation start date: two weeks before the last measurement date.
start_date = dt.date(2017, 8, 23) - dt.timedelta(days=14)
print("My vacation starts on: ", start_date,"and my end date is",last_date)
```
# Exploratory Climate Analysis
```
# Retrieve the last 12 months of precipitation data and plot the results on a bar chart
precipitation_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date.between('2016-08-23', '2017-08-23')).all()
last_12_months_of_precipitation_data=precipitation_data
last_12_months_of_precipitation_data
# Sorting the dataframe by date
# NOTE(review): these two comprehensions repeat precipitation_data[0]
# instead of using the loop variable; both lists are unused below.
date = [precipitation_data[0] for precipitation in precipitation_data[0:]]
Precipitation= [precipitation_data[1] for precipitation in precipitation_data[0:]]
Precipitation_df= pd.DataFrame(precipitation_data[0:], columns=['date', 'Precipitation'] )
Precipitation_df.set_index('date', inplace=True, )
Precipitation_df.head(10)
precipitation_list = Precipitation_df["Precipitation"].tolist()
#Plotting the data using Pandas Plotting with Matplotlib
ax = Precipitation_df.plot(kind='bar', width=3, figsize=(14,8))
plt.locator_params(axis='x', nbins=6)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.tick_params(axis='y', labelsize=16)
ax.grid(True)
plt.legend(bbox_to_anchor=(.3,1), fontsize="16")
plt.title("Precipitation in Hawaii in Last 12 Months", size=22)
plt.ylabel("Precipitation Measured in inches", size=16)
plt.xlabel("Date", size=16)
plt.savefig("./Images/Precipitation.png")
# NOTE(review): missing parentheses -- plt.show is never actually called.
plt.show
# Calculating the summary statistics for the precipitation data
Precipitation_df.describe()
# Counting the total number of stations available in this dataset
session.query(Station.id).count()
# The most active station, i.e. which station has the most rows?
Most_active_stations = session.query(Measurement.station).\
group_by(Measurement.station).order_by(func.count(Measurement.prcp).desc()).limit(1).scalar()
print ( "The most active station with the largest number of rows is " + str(Most_active_stations))
# Listing the stations in descending order of their counts
station_results = session.query(Measurement.station, func.count(Measurement.station)).\
group_by(Measurement.station).\
order_by(func.count(Measurement.station).desc()).all()
station_results
# Calculating the minimum, average and maximum temperature recorded at the most active station
best_station = station_results[0][0]
session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.station == best_station).all()
# Last 12 months of temperature observations for that station.
temp_results = session.query(Measurement.station, Measurement.tobs).\
filter(Measurement.station == best_station).\
filter(Measurement.date.between('2016-08-23', '2017-08-23')).all()
temp_results
tempObs_df = pd.DataFrame(temp_results)
tempObs_df.set_index('station', inplace=True)
tempObs_df.head()
# Plotting the temperature observed as a histogram with bins=12.
tempObs_df.plot.hist(by='station', bins=12, figsize=(12,8))
plt.grid()
plt.title("Temperature Observations for Station " + best_station, fontsize=20)
plt.xlabel("Temperature Reported", fontsize=16)
plt.legend(bbox_to_anchor=(1,1), fontsize=16)
plt.savefig("./Images/StationTemps.png")
# NOTE(review): missing parentheses -- plt.show is never actually called.
plt.show
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """Return [(TMIN, TAVG, TMAX)] for all measurements whose date lies
    between start_date and end_date (inclusive), both '%Y-%m-%d' strings.

    Relies on the module-level `session`, `Measurement`, and `func`.
    """
    c_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
        filter(Measurement.date >= start_date).\
        filter(Measurement.date <= end_date).all()
    return c_results
# Compute the trip-window temperature summary using calc_temps above.
calc_temps('2016-08-23', '2017-08-23')
trip_results=calc_temps('2016-08-23', '2017-08-23')
# One-row frame: [TMIN, TAVG, TMAX] for the trip window.
trip_df = pd.DataFrame(trip_results, columns=['Min Temp', 'Avg Temp', 'Max Temp'])
avg_temp = trip_df['Avg Temp']
# Error bar spans the full observed range (max - min), not a standard deviation.
min_max_temp = trip_df.iloc[0]['Max Temp'] - trip_df.iloc[0]['Min Temp']
avg_temp.plot(kind='bar', yerr=min_max_temp, figsize=(6,8), alpha=0.5, color='coral')
plt.title("Trip Avg Temp", fontsize=20)
plt.ylabel("Temp (F)")
plt.xticks([])  # single bar: x tick labels add nothing
plt.grid()
plt.savefig("./Images/TripTempSummary.png")
plt.show()
```
# Optional Challenge
```
# Creating a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Return the daily normals for one calendar day.

    Args:
        date (str): A date string in the format '%m-%d'
    Returns:
        A list of tuples (tmin, tavg, tmax) aggregated over every year
        of historic data matching that month/day.
    """
    aggregates = (func.min(Measurement.tobs),
                  func.avg(Measurement.tobs),
                  func.max(Measurement.tobs))
    same_day = func.strftime("%m-%d", Measurement.date) == date
    return session.query(*aggregates).filter(same_day).all()
# Sanity-check the helper on New Year's Day.
daily_normals("01-01")
# Accumulators filled in by the daily_normals() loop defined below.
dates=[]
daily_normals_Three_AVG=[]  # unused; kept for the commented-out average computation below
daily_normals_TMAX=[]
daily_normals_TMIN=[]
daily_normals_TAVG=[]
trip_month=8            # August
trip_days=range(1, 16)  # days 1-15 of the trip
def daily_normals():
    """Populate the module-level accumulators (dates, daily_normals_TMAX,
    daily_normals_TMIN, daily_normals_TAVG) with per-date temperature
    aggregates for days 1-15 of ``trip_month``.

    NOTE(review): this redefines the earlier daily_normals(date) helper;
    consider renaming one of them to avoid the shadowing.
    """
    for day in trip_days:  # was a duplicated range(1, 16); trip_days holds the same values
        results = session.query(Measurement.date.label("dates"), func.max(Measurement.tobs).label("max_tobs"),\
        func.min(Measurement.tobs).label("min_tobs"),func.avg(Measurement.tobs).label("avg_tobs")).\
        filter(extract('month', Measurement.date)==trip_month).\
        filter(extract('day', Measurement.date)== day ).group_by(Measurement.date).order_by(Measurement.date)
        # Removed the no-op bare expressions (`results`, `res`) and the unused
        # `data` list from the original; behavior is otherwise unchanged.
        for res in results.all():
            print(res)
            dates.append(res.dates)
            daily_normals_TMAX.append(res.max_tobs)
            daily_normals_TMIN.append(res.min_tobs)
            daily_normals_TAVG.append(res.avg_tobs)
# Run the accumulator loop, then assemble the results into a DataFrame.
daily_normals ()
# Load the previous query results into a Pandas DataFrame indexed by date.
Daily_normals_df= pd.DataFrame({'Date': dates, 'TMIN': daily_normals_TMIN,'TAVG': daily_normals_TAVG,'TMAX': daily_normals_TMAX,})
Daily_normals_df.set_index('Date', inplace=True, )
Daily_normals_df.head()
# Plotting the daily normals as an area plot with `stacked=False`
# BUG FIX: `ax` was never defined before use (NameError); capture the Axes
# object returned by DataFrame.plot.area so set_xlabel has a target.
ax = Daily_normals_df.plot.area(stacked=False, title='Daily normals of Temperature in Hawaii',figsize=(14,6))
ax.set_xlabel('Date')
plt.savefig("./Images/DailyNormalsTemp.png")
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# prefer model_selection and fall back only for very old installs.
try:
    from sklearn.model_selection import train_test_split
except ImportError:
    from sklearn.cross_validation import train_test_split
import seaborn as sns
from itertools import combinations_with_replacement
sns.set()
# Linkoping 2016 temperatures; presumably column 0 is the time feature and
# column 1 the temperature — verify against the CSV.
df = pd.read_csv('TempLinkoping2016.csv')
df.head()
X = df.iloc[:, 0:1].values  # (n_samples, 1) feature matrix
Y = df.iloc[:, 1].values    # target values
n_features = X.shape[1]
degree = 15
# Expand X into polynomial features up to `degree` (degree-0 constant included)
# by taking products over index combinations with replacement.
combs = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)]
flat_combs = [item for sublist in combs for item in sublist]
X_new = np.empty((X.shape[0], len(flat_combs)))
for i, index_combs in enumerate(flat_combs):
    X_new[:, i] = np.prod(X[:, index_combs], axis=1)
train_X, test_X, train_Y, test_Y = train_test_split(X_new, Y, test_size = 0.2)
def divide(X, i, threshold):
    """Split the rows of X on column `i` at `threshold`.

    Returns a pair (rows where X[:, i] >= threshold, remaining rows).
    BUG FIX: the original wrapped the two blocks in np.array([...]), which is
    a ragged-array construction whenever the blocks have different row counts;
    modern NumPy raises ValueError for that. Callers only unpack the result,
    so returning a tuple is backward-compatible.
    """
    more_than = np.where(X[:, i] >= threshold)[0]
    less_than = np.setxor1d(np.arange(X.shape[0]), more_than)
    return X[more_than, :], X[less_than, :]
class Node:
    """One node of the regression tree.

    A leaf stores the prediction in `val`; an internal node stores the split
    (`feature`, `threshold`) and its two subtrees (`true_b` is followed when
    the sample's feature value is >= threshold, `false_b` otherwise).
    """
    def __init__(self, feature=None, threshold=None, val=None, true_b=None, false_b=None):
        self.feature = feature      # column index used by the split
        self.threshold = threshold  # split point on that column
        self.val = val              # leaf prediction (None for internal nodes)
        self.true_b = true_b        # subtree for feature >= threshold
        self.false_b = false_b      # subtree for feature < threshold
class RegressionTree:
    """CART-style regression tree built from scratch.

    Splits are chosen greedily to maximize variance reduction; each leaf
    predicts the mean of the training targets that reach it.
    """
    def __init__(self, min_samples_split=2, min_impurity = 1e-8, max_depth=float('inf')):
        # Root Node of the fitted tree (set by fit()).
        self.root = None
        # Minimum number of samples required to attempt a split.
        self.min_samples_split = min_samples_split
        # Minimum variance reduction required to keep a split.
        self.min_impurity = min_impurity
        # Maximum recursion depth for build_tree.
        self.max_depth = max_depth
        # NOTE(review): the two attributes below are assigned but never used
        # anywhere in this class.
        self.impurity_calculation = None
        self.leaf_value_calculation = None
    def _cal_variance_reduction(self, Y, y_more, y_less):
        """Variance reduction achieved by splitting Y into y_more / y_less."""
        var = np.var(Y)
        var_more = np.var(y_more)
        var_less = np.var(y_less)
        frac_more = len(y_more) / len(Y)
        frac_less = len(y_less) / len(Y)
        # Parent variance minus the size-weighted child variances.
        return np.sum(var - (frac_more * var_more + frac_less * var_less))
    def _mean_y(self, Y):
        # Leaf prediction: mean of the (column-vector) targets.
        return np.mean(Y[:,0])
    def build_tree(self, X, Y, current_depth=0):
        """Recursively grow the tree; returns the root Node of the subtree."""
        largest_impurity = 0
        best_criteria = None
        best_sets = None
        # Ensure Y is a column vector so it can be concatenated with X.
        if len(Y.shape) == 1:
            Y = np.expand_dims(Y, axis=1)
        # Carry features and targets together so one split partitions both.
        XY = np.concatenate((X, Y), axis=1)
        n_samples, n_features = np.shape(X)
        if n_samples >= self.min_samples_split and current_depth <= self.max_depth:
            # Exhaustive search: every feature, every observed value as a threshold.
            for i in range(n_features):
                feature_values = np.expand_dims(X[:, i], axis=1)
                unique_values = np.unique(feature_values)
                for threshold in unique_values:
                    # divide() is the module-level helper defined above.
                    XY_more, XY_less = divide(XY, i, threshold)
                    if XY_more.shape[0] > 0 and XY_less.shape[0]:
                        # Targets are the columns after the first n_features.
                        y_more = XY_more[:, n_features:]
                        y_less = XY_less[:, n_features:]
                        impurity = self._cal_variance_reduction(Y, y_more, y_less)
                        if impurity > largest_impurity:
                            largest_impurity = impurity
                            best_criteria = {"i": i, "threshold": threshold}
                            best_sets = {
                                'left_X': XY_more[:, :n_features],
                                'left_Y': XY_more[:, n_features:],
                                'right_X': XY_less[:, :n_features],
                                'right_Y': XY_less[:, n_features:],
                            }
        if largest_impurity > self.min_impurity:
            # A worthwhile split was found: recurse into both partitions.
            T_branch = self.build_tree(best_sets["left_X"], best_sets["left_Y"], current_depth + 1)
            F_branch = self.build_tree(best_sets["right_X"], best_sets["right_Y"], current_depth + 1)
            return Node(feature=best_criteria['i'],threshold=best_criteria['threshold'],
                        true_b=T_branch, false_b=F_branch)
        # Otherwise emit a leaf predicting the mean target.
        selected_val = self._mean_y(Y)
        return Node(val=selected_val)
    def fit(self, X, Y):
        """Build the tree from features X (n_samples, n_features) and targets Y."""
        self.root = self.build_tree(X, Y)
    def _predict_val(self, X, tree=None):
        """Walk the tree for a single sample X (1-D feature vector)."""
        if tree is None:
            tree = self.root
        if tree.val is not None:
            return tree.val
        feature_val = X[tree.feature]
        # true_b holds the >= threshold branch; false_b the rest.
        branch = tree.false_b
        if feature_val >= tree.threshold:
            branch = tree.true_b
        return self._predict_val(X, branch)
    def predict(self, X):
        """Return a list with one prediction per row of X."""
        results = []
        for i in range(X.shape[0]):
            results.append(self._predict_val(X[i,:]))
        return results
# Fit the tree on the polynomial-feature training split.
regression_tree = RegressionTree()
regression_tree.fit(train_X, train_Y)
# Mean squared error on the held-out 20% (displayed by the notebook).
np.mean(np.square(test_Y - regression_tree.predict(test_X)))
# Scatter the raw data and overlay the tree's piecewise-constant fit.
plt.scatter(X[:,0],Y)
plt.plot(X,regression_tree.predict(X_new), c='red')
plt.show()
```
| github_jupyter |
# Announcements
- __Please familiarize yourself with the term projects, and sign up for your (preliminary) choice__ using [this form](https://forms.gle/ByLLpsthrpjCcxG89). _You may revise your choice, but I'd recommend settling on a choice well before Thanksgiving._
- Recommended reading on ODEs: [Lecture notes by Prof. Hjorth-Jensen (University of Oslo)](https://www.asc.ohio-state.edu/physics/ntg/6810/readings/hjorth-jensen_notes2013_08.pdf)
- Problem Set 5 will be posted on D2L on Oct 12, due Oct 20.
- __Outlook__: algorithms for solving high-dimensional linear and non-linear equations; then Boundary Value Problems and Partial Differential Equations.
- Conference for Undergraduate Women in Physics: online event in 2021, [applications accepted until 10/25](https://www.aps.org/programs/women/cuwip/)
This notebook presents a selection of topics from the book "Numerical Linear Algebra" by Trefethen and Bau (SIAM, 1997), and uses notebooks by Kyle Mandli.
# Numerical Linear Algebra
Numerical methods for linear algebra problems lies at the heart of many numerical approaches and is something we will spend some time on. Roughly we can break down problems that we would like to solve into two general problems, solving a system of equations
$$A \vec{x} = \vec{b}$$
and solving the eigenvalue problem
$$A \vec{v} = \lambda \vec{v}.$$
We examine each of these problems separately and will evaluate some of the fundamental properties and methods for solving these problems. We will be careful in deciding how to evaluate the results of our calculations and try to gain some understanding of when and how they fail.
## General Problem Specification
The number and power of the different tools made available from the study of linear algebra makes it an invaluable field of study. Before we dive in to numerical approximations we first consider some of the pivotal problems that numerical methods for linear algebra are used to address.
For this discussion we will be using the common notation $m \times n$ to denote the dimensions of a matrix $A$. The $m$ refers to the number of rows and $n$ the number of columns. If a matrix is square, i.e. $m = n$, then we will use the notation that $A$ is $m \times m$.
### Systems of Equations
The first type of problem is to find the solution to a linear system of equations. If we have $m$ equations for $m$ unknowns it can be written in matrix/vector form,
$$A \vec{x} = \vec{b}.$$
For this example $A$ is an $m \times m$ matrix, denoted as being in $\mathbb{R}^{m\times m}$, and $\vec{x}$ and $\vec{b}$ are column vectors with $m$ entries, denoted as $\mathbb{R}^m$.
#### Example: Vandermonde Matrix
We have data $(x_i, y_i), ~~ i = 1, 2, \ldots, m$ that we want to fit a polynomial of order $m-1$. Solving the linear system $A p = y$ does this for us where
$$A = \begin{bmatrix}
1 & x_1 & x_1^2 & \cdots & x_1^{m-1} \\
1 & x_2 & x_2^2 & \cdots & x_2^{m-1} \\
\vdots & \vdots & \vdots & & \vdots \\
1 & x_m & x_m^2 & \cdots & x_m^{m-1}
\end{bmatrix} \quad \quad y = \begin{bmatrix}
y_1 \\ y_2 \\ \vdots \\ y_m
\end{bmatrix}$$
and $p$ are the coefficients of the interpolating polynomial $\mathcal{P}_N(x) = p_0 + p_1 x + p_2 x^2 + \cdots + p_m x^{m-1}$. The solution to this system satisfies $\mathcal{P}_N(x_i)=y_i$ for $i=1, 2, \ldots, m$.
#### Example: Linear least squares 1
In a similar case as above, say we want to fit a particular function (could be a polynomial) to a given number of data points except in this case we have more data points than free parameters. In the case of polynomials this could be the same as saying we have $m$ data points but only want to fit a $n - 1$ order polynomial through the data where $n - 1 \leq m$. One of the common approaches to this problem is to minimize the "least-squares" error between the data and the resulting function:
$$
E = \left( \sum^m_{i=1} |y_i - f(x_i)|^2 \right )^{1/2}.
$$
But how do we do this if our matrix $A$ is now $m \times n$ and looks like
$$
A = \begin{bmatrix}
1 & x_1 & x_1^2 & \cdots & x_1^{n-1} \\
1 & x_2 & x_2^2 & \cdots & x_2^{n-1} \\
\vdots & \vdots & \vdots & & \vdots \\
1 & x_m & x_m^2 & \cdots & x_m^{n-1}
\end{bmatrix}?
$$
Turns out if we solve the system
$$A^T A x = A^T b$$
we can guarantee that the error is minimized in the least-squares sense[<sup>1</sup>](#footnoteRegression).
#### Practical Example: Linear least squares implementation
Fitting a line through data that has random noise added to it.
```
%matplotlib inline
%precision 3
import numpy
import matplotlib.pyplot as plt
# Linear Least Squares Problem
# First define the independent and dependent variables.
N = 20
x = numpy.linspace(-1.0, 1.0, N)
# Line y = x plus uniform noise in [0, 1), so the expected intercept is ~0.5.
y = x + numpy.random.random((N))
# Define the Vandermonde matrix based on our x-values
A = numpy.ones((x.shape[0], 2))
A[:, 1] = x
# Determine the coefficients of the polynomial that will
# result in the smallest sum of the squares of the residual.
# This solves the normal equations A^T A p = A^T y.
p = numpy.linalg.solve(numpy.dot(A.transpose(), A), numpy.dot(A.transpose(), y))
print("Error in slope = %s, y-intercept = %s" % (numpy.abs(p[1] - 1.0), numpy.abs(p[0] - 0.5)))
# Plot it out, cuz pictures are fun!
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, y, 'ko')
axes.plot(x, p[0] + p[1] * x, 'r')
axes.set_title("Least Squares Fit to Data")
axes.set_xlabel("$x$")
axes.set_ylabel("$f(x)$ and $y_i$")
plt.show()
```
### Eigenproblems
Eigenproblems come up in a variety of contexts and often are integral to many problems of scientific and engineering interest. It is such a powerful idea that it is not uncommon for us to take a problem and convert it into an eigenproblem. We will cover detailed algorithms for eigenproblems in the next lectures, but for now let's remind ourselves of the problem and analytic solution:
If $A \in \mathbb{C}^{m\times m}$ (a square matrix with complex values), a non-zero vector $\vec{v}\in\mathbb{C}^m$ is an **eigenvector** of $A$ with a corresponding **eigenvalue** $\lambda \in \mathbb{C}$ if
$$A \vec{v} = \lambda \vec{v}.$$
One way to interpret the eigenproblem is that we are attempting to ascertain the "action" of the matrix $A$ on some subspace of $\mathbb{C}^m$ where this action acts like scalar multiplication. This subspace is called an **eigenspace**.
#### Example
Compute the eigenspace of the matrix
$$
A = \begin{bmatrix}
1 & 2 \\
2 & 1
\end{bmatrix}
$$
Recall that we can find the eigenvalues of a matrix by computing $\det(A - \lambda I) = 0$.
In this case we have
$$\begin{aligned}
A - \lambda I &= \begin{bmatrix}
1 & 2 \\
2 & 1
\end{bmatrix} - \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix} \lambda\\
&= \begin{bmatrix}
1 - \lambda & 2 \\
2 & 1 - \lambda
\end{bmatrix}.
\end{aligned}$$
The determinant of the matrix is
$$\begin{aligned}
\begin{vmatrix}
1 - \lambda & 2 \\
2 & 1 - \lambda
\end{vmatrix} &= (1 - \lambda) (1 - \lambda) - 2 \cdot 2 \\
&= 1 - 2 \lambda + \lambda^2 - 4 \\
&= \lambda^2 - 2 \lambda - 3.
\end{aligned}$$
This result is sometimes referred to as the characteristic equation of the matrix, $A$.
Setting the determinant equal to zero we can find the eigenvalues as
$$\begin{aligned}
& \\
\lambda &= \frac{2 \pm \sqrt{4 - 4 \cdot 1 \cdot (-3)}}{2} \\
&= 1 \pm 2 \\
&= -1 \mathrm{~and~} 3
\end{aligned}$$
The eigenvalues are used to determine the eigenvectors. The eigenvectors are found by going back to the equation $(A - \lambda I) \vec{v}_i = 0$ and solving for each vector. A trick that works some of the time is to normalize each vector such that the first entry is 1 ($\vec{v}_1 = 1$):
$$
\begin{bmatrix}
1 - \lambda & 2 \\
2 & 1 - \lambda
\end{bmatrix} \begin{bmatrix} 1 \\ v_2 \end{bmatrix} = 0
$$
$$\begin{aligned}
1 - \lambda + 2 v_2 &= 0 \\
v_2 &= \frac{\lambda - 1}{2}
\end{aligned}$$
We can check this by
$$\begin{aligned}
2 + \left(1- \lambda\right) \frac{\lambda - 1}{2} & = 0\\
(\lambda - 1)^2 - 4 &=0
\end{aligned}$$
which by design is satisfied by our eigenvalues. Another sometimes easier approach is to plug-in the eigenvalues to find each corresponding eigenvector. The eigenvectors are therefore
$$\vec{v} = \begin{bmatrix}1 \\ -1 \end{bmatrix}, \begin{bmatrix}1 \\ 1 \end{bmatrix}.$$
Note that these are linearly independent.
## Fundamentals
### Matrix-Vector Multiplication
One of the most basic operations we can perform with matrices is to multiply them be a vector. This matrix-vector product $A \vec{x} = \vec{b}$ is defined as
$$
b_i = \sum^n_{j=1} a_{ij} x_j \quad \text{where}\quad i = 1, \ldots, m
$$
Writing the matrix-vector product this way we see that one interpretation of this product is that each column of $A$ is weighted by the value $x_j$, or in other words $\vec{b}$ is a linear combination of the columns of $A$ where each column's weighting is $x_j$.
$$
\begin{align}
\vec{b} &= A \vec{x}, \\
\vec{b} &=
\begin{bmatrix} & & & \\ & & & \\ \vec{a}_1 & \vec{a}_2 & \cdots & \vec{a}_n \\ & & & \\ & & & \end{bmatrix}
\begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \end{bmatrix}, \\
\vec{b} &= x_1 \vec{a}_1 + x_2 \vec{a}_2 + \cdots + x_n \vec{a}_n.
\end{align}
$$
This view will be useful later when we are trying to interpret various types of matrices.
One important property of the matrix-vector product is that it is a **linear** operation, also known as a **linear operator**. This means that for any $\vec{x}, \vec{y} \in \mathbb{C}^n$ and any $c \in \mathbb{C}$ we know that
1. $A (\vec{x} + \vec{y}) = A\vec{x} + A\vec{y}$
1. $A\cdot (c\vec{x}) = c A \vec{x}$
#### Example: Vandermonde Matrix
In the case where we have $m$ data points and want $m - 1$ order polynomial interpolant the matrix $A$ is a square, $m \times m$, matrix as before. Using the above interpretation the polynomial coefficients $p$ are the weights for each of the monomials that give exactly the $y$ values of the data.
#### Example: Numerical matrix-vector multiply
Write a matrix-vector multiply function and check it with the appropriate `numpy` routine. Also verify the linearity of the matrix-vector multiply.
```
# A x = b
# (m x n) (n x 1) = (m x 1)
def matrix_vector_product(A, x):
    """Naive O(m*n) matrix-vector product: b[i] = sum_j A[i, j] * x[j]."""
    m, n = A.shape
    b = numpy.zeros(m)
    for row in range(m):
        total = 0.0
        for col in range(n):
            total += A[row, col] * x[col]
        b[row] = total
    return b
m = 4
n = 3
# Random fixtures: matrix A (m x n), vectors x and y, scalar c.
A = numpy.random.uniform(size=(m,n))
x = numpy.random.uniform(size=(n))
y = numpy.random.uniform(size=(n))
c = numpy.random.uniform()
b = matrix_vector_product(A, x)
# Agreement with numpy's optimized product.
print(numpy.allclose(b, numpy.dot(A, x)))
# Linearity checks: A(x + y) = Ax + Ay and A(cx) = c(Ax).
print(numpy.allclose(matrix_vector_product(A, (x + y)), matrix_vector_product(A, x) + matrix_vector_product(A, y)))
print(numpy.allclose(matrix_vector_product(A, c * x), c*matrix_vector_product(A, x)))
```
### Matrix-Matrix Multiplication
The matrix product with another matrix $A C = B$ is defined as
$$
b_{ij} = \sum^m_{k=1} a_{ik} c_{kj}.
$$
Again, a useful interpretation of this operation is that the product result $B$ is a linear combination of the columns of $A$.
_What are the dimensions of $A$ and $C$ so that the multiplication works?_
#### Example: Outer Product
The product of two vectors $\vec{u} \in \mathbb{C}^m$ and $\vec{v} \in \mathbb{C}^n$ is a $m \times n$ matrix where the columns are the vector $u$ multiplied by the corresponding value of $v$:
$$
\begin{align}
\vec{u} \vec{v}^T &=
\begin{bmatrix} u_1 \\ u_2 \\ \vdots \\ u_n \end{bmatrix}
\begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\
& = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}.
\end{align}
$$
It is useful to think of these as operations on the column vectors, and an equivalent way to express this relationship is
$$
\begin{align}
\vec{u} \vec{v}^T &=
\begin{bmatrix} \\ \vec{u} \\ \\ \end{bmatrix}
\begin{bmatrix} v_1 & v_2 & \cdots & v_n \end{bmatrix}, \\
&=
\begin{bmatrix} & & & \\ & & & \\ \vec{u}v_1 & \vec{u} v_2 & \cdots & \vec{u} v_n \\ & & & \\ & & & \end{bmatrix}, \\
& = \begin{bmatrix} v_1u_1 & \cdots & v_n u_1 \\ \vdots & & \vdots \\ v_1 u_m & \cdots & v_n u_m \end{bmatrix}.
\end{align}
$$
#### Example: Upper Triangular Multiplication
Consider the multiplication of a matrix $A \in \mathbb{C}^{m\times n}$ and the **upper-triangular** matrix $R$ defined as the $n \times n$ matrix with entries $r_{ij} = 1$ for $i \leq j$ and $r_{ij} = 0$ for $i > j$. The product can be written as
$$
\begin{bmatrix} \\ \\ \vec{b}_1 & \cdots & \vec{b}_n \\ \\ \\ \end{bmatrix} = \begin{bmatrix} \\ \\ \vec{a}_1 & \cdots & \vec{a}_n \\ \\ \\ \end{bmatrix} \begin{bmatrix} 1 & \cdots & 1 \\ & \ddots & \vdots \\ & & 1 \end{bmatrix}.
$$
The columns of $B$ are then
$$
\vec{b}_j = A \vec{r}_j = \sum^j_{k=1} \vec{a}_k
$$
so that $\vec{b}_j$ is the sum of the first $j$ columns of $A$.
#### Example: Write Matrix-Matrix Multiplication
Write a function that computes matrix-matrix multiplication and demonstrate the following properties:
1. $A (B + C) = AB + AC$ (for square matrices)
1. $A (cB) = c AB$ where $c \in \mathbb{C}$
1. $AB \neq BA$ in general
```
def matrix_matrix_product(A, B):
    """Triple-loop matrix-matrix product: C[i, j] = sum_k A[i, k] * B[k, j]."""
    rows, inner, cols = A.shape[0], A.shape[1], B.shape[1]
    C = numpy.zeros((rows, cols))
    for i in range(rows):
        for j in range(cols):
            acc = 0.0
            for k in range(inner):
                acc += A[i, k] * B[k, j]
            C[i, j] = acc
    return C
m = 4
n = 4
p = 4
# Square random fixtures so both AB and BA are defined.
A = numpy.random.uniform(size=(m, n))
B = numpy.random.uniform(size=(n, p))
C = numpy.random.uniform(size=(m, p))
c = numpy.random.uniform()
# Agreement with numpy, distributivity, and scalar associativity.
print(numpy.allclose(matrix_matrix_product(A, B), numpy.dot(A, B)))
print(numpy.allclose(matrix_matrix_product(A, (B + C)), matrix_matrix_product(A, B) + matrix_matrix_product(A, C)))
print(numpy.allclose(matrix_matrix_product(A, c * B), c*matrix_matrix_product(A, B)))
# Expected False (almost surely for random A, B): multiplication does not commute.
print(numpy.allclose(matrix_matrix_product(A, B), matrix_matrix_product(B, A)))
```
### Matrices in NumPy
NumPy and SciPy contain routines that are optimized to perform matrix-vector and matrix-matrix multiplication. Given two `ndarray`s you can take their product by using the `dot` function.
```
n = 10
m = 5
# Matrix vector with identity
A = numpy.identity(n)
x = numpy.random.random(n)
# The identity leaves x unchanged.
print(numpy.allclose(x, numpy.dot(A, x)))
# Matrix vector product: (m x n) dot (n,) -> (m,)
A = numpy.random.random((m, n))
print(numpy.dot(A, x))
# Matrix matrix product: (m x n) dot (n x m) -> (m x m)
B = numpy.random.random((n, m))
print(numpy.dot(A, B))
```
### Range and Null-Space
#### Range
- The **range** of a matrix $A \in \mathbb R^{m \times n}$ (similar to any function), denoted as $\text{range}(A)$, is the set of vectors that can be expressed as $A x$ for $x \in \mathbb R^n$.
- We can also then say that that $\text{range}(A)$ is the space **spanned** by the columns of $A$. In other words the columns of $A$ provide a basis for $\text{range}(A)$, also called the **column space** of the matrix $A$.
#### Null-Space
- Similarly the **null-space** of a matrix $A$, denoted $\text{null}(A)$ is the set of vectors $x$ that satisfy $A x = 0$.
- A similar concept is the **rank** of the matrix $A$, denoted as $\text{rank}(A)$, is the dimension of the column space. A matrix $A$ is said to have **full-rank** if $\text{rank}(A) = \min(m, n)$. This property also implies that the matrix mapping is **one-to-one**.
### Inverse
A **non-singular** or **invertible** matrix is characterized as a matrix with full-rank. This is related to why we know that the matrix is one-to-one, we can use it to transform a vector $x$ and using the inverse, denoted $A^{-1}$, we can map it back to the original matrix. The familiar definition of this is
\begin{align*}
A \vec{x} &= \vec{b}, \\
A^{-1} A \vec{x} & = A^{-1} \vec{b}, \\
x &=A^{-1} \vec{b}.
\end{align*}
Since $A$ has full rank, its columns form a basis for $\mathbb{R}^m$ and the vector $\vec{b}$ must be in the column space of $A$.
There are a number of important properties of a non-singular matrix A. Here we list them as the following equivalent statements
1. $A$ has an inverse $A^{-1}$
1. $\text{rank}(A) = m$
1. $\text{range}(A) = \mathbb{C}^m$
1. $\text{null}(A) = {0}$
1. 0 is not an eigenvalue of $A$
1. $\text{det}(A) \neq 0$
#### Example: Properties of invertible matrices
Show that given an invertible matrix that the rest of the properties hold. Make sure to search the `numpy` packages for relevant functions.
```
m = 3
# Rejection-sample until a non-singular (det != 0) random matrix is drawn.
for n in range(100):
    A = numpy.random.uniform(size=(m, m))
    if numpy.linalg.det(A) != 0:
        break
# A^{-1} A should be (numerically) the identity.
print(numpy.dot(numpy.linalg.inv(A), A))
# Full rank: the rank should equal m.
print(numpy.linalg.matrix_rank(A))
print("range")
# Null-space of a non-singular matrix is only the zero vector: Ax = 0 -> x = 0.
print(numpy.linalg.solve(A, numpy.zeros(m)))
# None of the eigenvalues should be zero.
print(numpy.linalg.eigvals(A))
```
### Orthogonal Vectors and Matrices
Orthogonality is a very important concept in linear algebra that forms the basis of many of the modern methods used in numerical computations.
Two vectors are said to be orthogonal if their **inner-product** or **dot-product** defined as
$$
< \vec{x}, \vec{y} > \equiv (\vec{x}, \vec{y}) \equiv \vec{x}^T\vec{y} \equiv \vec{x} \cdot \vec{y} = \sum^m_{i=1} x_i y_i
$$
Here we have shown the various notations you may run into (the inner-product is in-fact a general term for a similar operation for mathematical objects such as functions).
If $\langle \vec{x},\vec{y} \rangle = 0$ then we say $\vec{x}$ and $\vec{y}$ are orthogonal. The reason we use this terminology is that the inner-product of two vectors can also be written in terms of the angle between them where
$$
\cos \theta = \frac{\langle \vec{x}, \vec{y} \rangle}{||\vec{x}||_2~||\vec{y}||_2}
$$
and $||\vec{x}||_2$ is the Euclidean ($\ell^2$) norm of the vector $\vec{x}$.
We can write this in terms of the inner-product as well as
$$
||\vec{x}||_2^2 = \langle \vec{x}, \vec{x} \rangle = \vec{x}^T\vec{x} = \sum^m_{i=1} |x_i|^2.
$$
The generalization of the inner-product to complex spaces is defined as
$$
\langle x, y \rangle = \sum^m_{i=1} x_i^* y_i
$$
where $x_i^*$ is the complex-conjugate of the value $x_i$.
#### Orthonormality
Taking this idea one step further we can say a set of vectors $\vec{x} \in X$ are orthogonal to $\vec{y} \in Y$ if $\forall \vec{x},\vec{y}$ $< \vec{x}, \vec{y} > = 0$. If $\forall \vec{x},\vec{y}$ $||\vec{x}|| = 1$ and $||\vec{y}|| = 1$ then they are also called orthonormal. Note that we dropped the 2 as a subscript to the notation for the norm of a vector. Later we will explore other ways to define a norm of a vector other than the Euclidean norm defined above.
Another concept that is related to orthogonality is linear-independence. A set of vectors $\vec{x} \in X$ are **linearly independent** if $\forall \vec{x} \in X$ that each $\vec{x}$ cannot be written as a linear combination of the other vectors in the set $X$.
An equivalent statement is that there does not exist a set of scalars $c_i$ such that
$$
\vec{x}_k = \sum^n_{i=1, i \neq k} c_i \vec{x}_i.
$$
Another way to write this is that $\vec{x}_k \in X$ is orthogonal to all the rest of the vectors in the set $X$.
This can be related directly through the idea of projection. If we have a set of vectors $\vec{x} \in X$ we can project another vector $\vec{v}$ onto the vectors in $X$ by using the inner-product. This is especially powerful if we have a set of linearly-independent vectors $X$, which are said to **span** a space (or provide a **basis** for a space), s.t. any vector in the space spanned by $X$ can be expressed as a linear combination of the basis vectors $X$
$$
\vec{v} = \sum^n_{i=1} \, \langle \vec{v}, \vec{x}_i \rangle \, \vec{x}_i.
$$
Note if $\vec{v} \in X$ that
$$
\langle \vec{v}, \vec{x}_i \rangle = 0 \quad \forall \vec{x}_i \in X \setminus \vec{v}.
$$
Looping back to matrices, the column space of a matrix is spanned by its linearly independent columns. Any vector $v$ in the column space can therefore be expressed via the equation above. A special class of matrices are called **unitary** matrices when complex-valued and **orthogonal** when purely real-valued if the columns of the matrix are orthonormal to each other. Importantly this implies that for a unitary matrix $Q$ we know the following
1. $Q^* = Q^{-1}$
1. $Q^*Q = I$
where $Q^*$ is called the **adjoint** of $Q$. The adjoint is defined as the transpose of the original matrix with the entries being the complex conjugate of each entry as the notation implies.
### Vector Norms
Norms (and also measures) provide a means for measuring the "size" of, or distance in, a space. In general a norm is a function, denoted by $||\cdot||$, that maps $\mathbb{C}^m \rightarrow \mathbb{R}$. In other words we stick in a multi-valued object and get a single, real-valued number out the other end. All norms satisfy the properties:
1. $||\vec{x}|| \geq 0$, and $||\vec{x}|| = 0$ only if $\vec{x} = \vec{0}$
1. $||\vec{x} + \vec{y}|| \leq ||\vec{x}|| + ||\vec{y}||$ (triangle inequality)
1. $||c \vec{x}|| = |c| ~ ||\vec{x}||$ where $c \in \mathbb{C}$
There are a number of relevant norms that we can define beyond the Euclidean norm, also known as the 2-norm or $\ell_2$ norm:
1. $\ell_1$ norm:
$$
||\vec{x}||_1 = \sum^m_{i=1} |x_i|,
$$
1. $\ell_2$ norm:
$$
||\vec{x}||_2 = \left( \sum^m_{i=1} |x_i|^2 \right)^{1/2},
$$
1. $\ell_p$ norm:
$$
||\vec{x}||_p = \left( \sum^m_{i=1} |x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty,
$$
1. $\ell_\infty$ norm:
$$
||\vec{x}||_\infty = \max_{1\leq i \leq m} |x_i|,
$$
1. weighted $\ell_p$ norm:
$$
||\vec{x}||_{W_p} = \left( \sum^m_{i=1} |w_i x_i|^p \right)^{1/p}, \quad \quad 1 \leq p < \infty,
$$
These are also related to other norms denoted by capital letters ($L_2$ for instance). In this case we use the lower-case notation to denote finite or discrete versions of the infinite dimensional counterparts.
#### Example: Comparisons Between Norms
Compute the norms given some vector $\vec{x}$ and compare their values. Verify the properties of the norm for one of the norms.
```
m = 10
p = 4
x = numpy.random.uniform(size=m)
# l_1 norm: sum of absolute values.
ell_1 = 0.0
for i in range(m):
    ell_1 += numpy.abs(x[i])
# l_2 (Euclidean) norm.
ell_2 = 0.0
for i in range(m):
    ell_2 += numpy.abs(x[i])**2
ell_2 = numpy.sqrt(ell_2)
# l_p norm for p = 4.
ell_p = 0.0
for i in range(m):
    ell_p += numpy.abs(x[i])**p
ell_p = ell_p**(1.0 / p)  # BUG FIX: original took (ell_2)**(1/p) instead of the p-power sum
# l_infinity norm: largest absolute entry.
ell_infty = numpy.max(numpy.abs(x))
print("L_1 = %s, L_2 = %s, L_%s = %s, L_infty = %s" % (ell_1, ell_2, p, ell_p, ell_infty))
y = numpy.random.uniform(size=m)
print()
print("Properties of norms:")
# Triangle inequality: ||x + y||_inf <= ||x||_inf + ||y||_inf.
print(numpy.max(numpy.abs(x + y)), numpy.max(numpy.abs(x)) + numpy.max(numpy.abs(y)))
# Absolute homogeneity: ||c x||_inf = |c| ||x||_inf.
print(numpy.max(numpy.abs(0.1 * x)), 0.1 * numpy.max(numpy.abs(x)))
```
### Matrix Norms
The most direct way to consider a matrix norm is those induced by a vector-norm. Given a vector norm, we can define a matrix norm as the smallest number $C$ that satisfies the inequality
$$
||A \vec{x}||_{m} \leq C ||\vec{x}||_{n}.
$$
or as the supremum of the ratios so that
$$
C = \sup_{\vec{x}\in\mathbb{C}^n ~ \vec{x}\neq\vec{0}} \frac{||A \vec{x}||_{m}}{||\vec{x}||_n}.
$$
Noting that $||A \vec{x}||$ lives in the column space and $||\vec{x}||$ on the domain we can think of the matrix norm as the "size" of the matrix that maps the domain to the range. Also noting that if $||\vec{x}||_n = 1$ we also satisfy the condition we can write the induced matrix norm as
$$
||A||_{(m,n)} = \sup_{\vec{x} \in \mathbb{C}^n ~ ||\vec{x}||_{n} = 1} ||A \vec{x}||_{m}.
$$
#### Example: Induced Matrix Norms
Consider the matrix
$$
A = \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix}.
$$
Compute the induced-matrix norm of $A$ for the vector norms $\ell_2$ and $\ell_\infty$.
$\ell^2$: For both of the requested norms the unit-length vectors $[1, 0]$ and $[0, 1]$ can be used to give an idea of what the norm might be and provide a lower bound.
$$
||A||_2 = \sup_{x \in \mathbb{R}^n} \left( ||A \cdot [1, 0]^T||_2, ||A \cdot [0, 1]^T||_2 \right )
$$
computing each of the norms we have
$$\begin{aligned}
\begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 0 \end{bmatrix} &= \begin{bmatrix} 1 \\ 0 \end{bmatrix} \\
\begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} \cdot \begin{bmatrix} 0 \\ 1 \end{bmatrix} &= \begin{bmatrix} 2 \\ 2 \end{bmatrix}
\end{aligned}$$
which translates into the norms $||A \cdot [1, 0]^T||_2 = 1$ and $||A \cdot [0, 1]^T||_2 = 2 \sqrt{2}$. This implies that the $\ell_2$ induced matrix norm of $A$ is at least $||A||_{2} = 2 \sqrt{2} \approx 2.828427125$.
The exact value of $||A||_2$ can be computed using the spectral radius defined as
$$
\rho(A) = \max_{i} |\lambda_i|,
$$
where $\lambda_i$ are the eigenvalues of $A$. With this we can compute the $\ell_2$ norm of $A$ as
$$
||A||_2 = \sqrt{\rho(A^\ast A)}
$$
Computing the norm again here we find
$$
A^\ast A = \begin{bmatrix} 1 & 0 \\ 2 & 2 \end{bmatrix} \begin{bmatrix} 1 & 2 \\ 0 & 2 \end{bmatrix} = \begin{bmatrix} 1 & 2 \\ 2 & 8 \end{bmatrix}
$$
which has eigenvalues
$$
\lambda = \frac{1}{2}\left(9 \pm \sqrt{65}\right )
$$
so $||A||_2 \approx 2.9208096$.
$\ell^\infty$: We can again bound $||A||_\infty$ by looking at the unit vectors, which give us the lower bound of 2. To compute it exactly, it turns out that $||A||_{\infty} = \max_{1 \leq i \leq m} ||a^\ast_i||_1$ where $a^\ast_i$ is the $i$th row of $A$. This represents the maximum of the row sums of $A$. Therefore $||A||_\infty = 3$.
```
A = numpy.array([[1, 2], [0, 2]])
# Induced 2-norm (largest singular value) and infinity-norm (max row sum).
print(numpy.linalg.norm(A, ord=2))
print(numpy.linalg.norm(A, ord=numpy.inf))  # FIX: numpy.infty alias was removed in NumPy 2.0
```
#### Example: General Norms of a Matrix
Compute a bound on the induced norm of the $m \times n$ dimensional matrix $A$ using $\ell_1$ and $\ell_2$
One of the most useful ways to think about matrix norms is as a transformation of a unit-ball to an ellipse. Depending on the norm in question, the norm will be some combination of the resulting ellipse. For the above cases we have some nice relations based on these ideas.
1. $||A \vec{x}||_1 = || \sum^n_{j=1} x_j \vec{a}_j ||_1 \leq \sum^n_{j=1} |x_j| ||\vec{a}_j||_1 \leq \max_{1\leq j\leq n} ||\vec{a}_j||_1$
1. $||A \vec{x}||_\infty = || \sum^n_{j=1} x_j \vec{a_j} ||_\infty \leq \sum^n_{j=1} |x_j| ||\vec{a}_j||_\infty \leq \max_{1 \leq i \leq m} ||a^*_i||_1$
```
# Note: that this code is a bit fragile to angles that go beyond pi
# due to the use of arccos.
import matplotlib.patches as patches
A = numpy.array([[1, 2], [0, 2]])
def draw_unit_vectors(axes, A, head_width=0.1):
    """Draw the images of the canonical unit vectors e_1, e_2 under A.

    Each arrow is shortened by the head length so its tip lands exactly on
    the image point.  Note: recovering the angle via arccos is fragile for
    angles beyond pi (same caveat as noted in the surrounding notebook).
    """
    head_length = 1.5 * head_width
    basis = numpy.eye(2)
    for j in range(A.shape[0]):
        image = numpy.dot(A, basis[:, j])
        phi = numpy.arccos(image[0] / numpy.linalg.norm(image, ord=2))
        axes.arrow(0.0, 0.0,
                   image[0] - head_length * numpy.cos(phi),
                   image[1] - head_length * numpy.sin(phi),
                   head_width=head_width, color='b', alpha=0.5)
# Arrow-head geometry shared by all of the norm plots below
head_width = 0.2
head_length = 1.5 * head_width
# ============
# 1-norm
# Unit-ball: {x : ||x||_1 = 1} is the diamond with vertices on the axes
fig = plt.figure()
fig.set_figwidth(fig.get_figwidth() * 2)
fig.suptitle("1-Norm")
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.plot((1.0, 0.0, -1.0, 0.0, 1.0), (0.0, 1.0, 0.0, -1.0, 0.0), 'r')
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)
# Image of the diamond under A: vertices map to +/- A e_1 = (1, 0) and
# +/- A e_2 = (2, 2)
axes = fig.add_subplot(1, 2, 2, aspect='equal')
axes.plot((1.0, 2.0, -1.0, -2.0, 1.0), (0.0, 2.0, 0.0, -2.0, 0.0), 'r')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.grid(True)
plt.show()
# ============
# 2-norm
# Unit-ball: the unit circle
fig = plt.figure()
fig.suptitle("2-Norm")
fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.add_artist(plt.Circle((0.0, 0.0), 1.0, edgecolor='r', facecolor='none'))
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)
# Image: the unit circle maps to an ellipse whose semi-axes have lengths
# equal to the singular values of A
u, s, v = numpy.linalg.svd(A)
theta = numpy.empty(A.shape[0])
ellipse_axes = numpy.empty(A.shape)
# Fix: the normalization belongs INSIDE arccos (compare the correct form used
# in the infty-norm cell).  The rows of the orthogonal matrix u have unit
# 2-norm, so the old misplaced parenthesis happened to give the same number,
# but the expression was still wrong as written.
theta[0] = numpy.arccos(u[0][0] / numpy.linalg.norm(u[0], ord=2))
theta[1] = theta[0] - numpy.pi / 2.0
for i in range(theta.shape[0]):
    ellipse_axes[0, i] = s[i] * numpy.cos(theta[i])
    ellipse_axes[1, i] = s[i] * numpy.sin(theta[i])
axes = fig.add_subplot(1, 2, 2, aspect='equal')
# angle must be passed by keyword: positional angle for patches.Ellipse is
# deprecated since Matplotlib 3.6
axes.add_artist(patches.Ellipse((0.0, 0.0), 2 * s[0], 2 * s[1],
                                angle=theta[0] * 180.0 / numpy.pi,
                                edgecolor='r', facecolor='none'))
for i in range(A.shape[0]):
    axes.arrow(0.0, 0.0, ellipse_axes[0, i] - head_length * numpy.cos(theta[i]),
               ellipse_axes[1, i] - head_length * numpy.sin(theta[i]),
               head_width=head_width, color='k')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.set_xlim((-s[0] + 0.1, s[0] + 0.1))
axes.set_ylim((-s[0] + 0.1, s[0] + 0.1))
axes.grid(True)
plt.show()
# ============
# infty-norm
# Unit-ball: {x : ||x||_inf = 1} is the square with corners (+-1, +-1)
fig = plt.figure()
# Fix: use a raw string so "\i" is not parsed as an (invalid) string escape,
# which raises a SyntaxWarning on modern Python
fig.suptitle(r"$\infty$-Norm")
fig.set_figwidth(fig.get_figwidth() * 2)
axes = fig.add_subplot(1, 2, 1, aspect='equal')
axes.plot((1.0, -1.0, -1.0, 1.0, 1.0), (1.0, 1.0, -1.0, -1.0, 1.0), 'r')
draw_unit_vectors(axes, numpy.eye(2))
axes.set_title("Unit Ball")
axes.set_xlim((-1.1, 1.1))
axes.set_ylim((-1.1, 1.1))
axes.grid(True)
# Image
# Geometry - Corners are A * ((1, 1), (1, -1), (-1, 1), (-1, -1))
# Symmetry implies we only need two. Here we just plot two
u = numpy.empty(A.shape)
u[:, 0] = numpy.dot(A, numpy.array((1.0, 1.0)))
u[:, 1] = numpy.dot(A, numpy.array((-1.0, 1.0)))
# NOTE(review): theta, head_width and head_length are reused from the 2-norm
# cell above -- this cell must run after that one
theta[0] = numpy.arccos(u[0, 0] / numpy.linalg.norm(u[:, 0], ord=2))
theta[1] = numpy.arccos(u[0, 1] / numpy.linalg.norm(u[:, 1], ord=2))
axes = fig.add_subplot(1, 2, 2, aspect='equal')
axes.plot((3, 1, -3, -1, 3), (2, 2, -2, -2, 2), 'r')
for i in range(A.shape[0]):
    axes.arrow(0.0, 0.0, u[0, i] - head_length * numpy.cos(theta[i]),
               u[1, i] - head_length * numpy.sin(theta[i]),
               head_width=head_width, color='k')
draw_unit_vectors(axes, A, head_width=0.2)
axes.set_title("Images Under A")
axes.set_xlim((-4.1, 4.1))
axes.set_ylim((-3.1, 3.1))
axes.grid(True)
plt.show()
```
#### General Matrix Norms (induced and non-induced)
In general matrix-norms have the following properties whether they are induced from a vector-norm or not:
1. $||A|| \geq 0$ and $||A|| = 0$ only if $A = 0$
1. $||A + B|| \leq ||A|| + ||B||$ (Triangle Inequality)
1. $||c A|| = |c| ||A||$
The most widely used matrix norm not induced by a vector norm is the **Frobenius norm** defined by
$$
||A||_F = \left( \sum^m_{i=1} \sum^n_{j=1} |A_{ij}|^2 \right)^{1/2}.
$$
#### Invariance under unitary multiplication
One important property of the matrix 2-norm (and Frobenius norm) is that multiplication by a unitary matrix does not change the norm (kind of like multiplication by 1). In general for any $A \in \mathbb{C}^{m\times n}$ and unitary matrix $Q \in \mathbb{C}^{m \times m}$ we have
\begin{align*}
||Q A||_2 &= ||A||_2 \\ ||Q A||_F &= ||A||_F.
\end{align*}
## Singular Value Decomposition
Definition: Let $A \in \mathbb R^{m \times n}$, then $A$ can be factored as
$$
A = U\Sigma V^{T}
$$
where,
* $U \in \mathbb R^{m \times m}$ and is the orthogonal matrix whose columns are the eigenvectors of $AA^{T}$
* $V \in \mathbb R^{n \times n}$ and is the orthogonal matrix whose columns are the eigenvectors of $A^{T}A$
* $\Sigma \in \mathbb R^{m \times n}$ and is a diagonal matrix with elements $\sigma_{1}, \sigma_{2}, \sigma_{3}, ... \sigma_{r}$ where $r = rank(A)$ corresponding to the square roots of the eigenvalues of $A^{T}A$. They are called the singular values of $A$ and are non negative arranged in descending order. ($\sigma_{1} \geq \sigma_{2} \geq \sigma_{3} \geq ... \sigma_{r} \geq 0$).
The SVD has a number of applications mostly related to reducing the dimensionality of a matrix.
### Full SVD example
Consider the matrix
$$
A = \begin{bmatrix}
2 & 0 & 3 \\
5 & 7 & 1 \\
0 & 6 & 2
\end{bmatrix}.
$$
The example below demonstrates the use of the `numpy.linalg.svd` function and shows the numerical result.
```
# Verify the factorization A = U diag(sigma) V^T returned by
# numpy.linalg.svd by multiplying the factors back together.
A = numpy.array([[2.0, 0.0, 3.0],
                 [5.0, 7.0, 1.0],
                 [0.0, 6.0, 2.0]])
U, sigma, V_T = numpy.linalg.svd(A, full_matrices=True)
reconstruction = numpy.dot(U, numpy.dot(numpy.diag(sigma), V_T))
print(reconstruction)
```
### Eigenvalue Decomposition vs. SVD Decomposition
Let the matrix $X$ contain the eigenvectors of $A$ which are linearly independent, then we can write a decomposition of the matrix $A$ as
$$
A = X \Lambda X^{-1}.
$$
How does this differ from the SVD?
- The basis of the SVD representation differs from the eigenvalue decomposition
- The basis vectors are not in general orthogonal for the eigenvalue decomposition where it is for the SVD
- The SVD effectively contains two basis sets.
- All matrices have an SVD decomposition whereas not all have eigenvalue decompositions.
### Existence and Uniqueness
Every matrix $A \in \mathbb{C}^{m \times n}$ has a singular value decomposition. Furthermore, the singular values $\{\sigma_{j}\}$ are uniquely determined, and if $A$ is square and the $\sigma_{j}$ are distinct, the left and right singular vectors $\{u_{j}\}$ and $\{v_{j}\}$ are uniquely determined up to complex signs (i.e., complex scalar factors of absolute value 1).
### Matrix Properties via the SVD
- The $\text{rank}(A) = r$ where $r$ is the number of non-zero singular values.
- The $\text{range}(A) = [u_1, ... , u_r]$ and $\text{null}(A) = [v_{r+1}, ... , v_n]$.
- The $|| A ||_2 = \sigma_1$ and $||A||_F = \sqrt{\sigma_{1}^{2}+\sigma_{2}^{2}+...+\sigma_{r}^{2}}$.
- The nonzero singular values of A are the square roots of the nonzero eigenvalues of $A^{T}A$ or $AA^{T}$.
- If $A = A^{T}$, then the singular values of $A$ are the absolute values of the eigenvalues of $A$.
- For a square matrix $A \in \mathbb{C}^{m \times m}$ we have $|\det(A)| = \prod_{i=1}^{m} \sigma_{i}$.
### Low-Rank Approximations
- $A$ is the sum of the $r$ rank-one matrices:
$$
A = U \Sigma V^T = \sum_{j=1}^{r} \sigma_{j}u_{j}v_{j}^{T}
$$
- For any $k$ with $0 \leq k \leq r$, define the rank-$k$ truncation
$$
A_{k} = \sum_{j=1}^{k} \sigma_{j}u_{j}v_{j}^{T}.
$$
If $k = \min(m,n)$, define $\sigma_{k+1} = 0$. Then
$$
||A - A_{k}||_{2} = \inf_{\substack{B \in \mathbb{C}^{m \times n} \\ \text{rank}(B)\leq k}} || A-B||_{2} = \sigma_{k+1}.
$$
- For any $k$ with $0 \leq k \leq r$, the matrix $A_{k}$ also satisfies
$$
||A - A_{k}||_{F} = \inf_{\substack{B \in \mathbb{C}^{m \times n} \\ \text{rank}(B)\leq k}} ||A-B||_{F} = \sqrt{\sigma_{k+1}^{2} + ... + \sigma_{r}^{2}}.
$$
#### Example: Putting the above equations into code
How does this work in practice?
```
# Build a 15x40 binary image spelling "HELLO", letter block by letter block
data = numpy.zeros((15,40))
#H
data[2:10,2:4] = 1
data[5:7,4:6] = 1
data[2:10,6:8] = 1
#E
data[3:11,10:12] = 1
data[3:5,12:16] = 1
data[6:8, 12:16] = 1
data[9:11, 12:16] = 1
#L
data[4:12,18:20] = 1
data[10:12,20:24] = 1
#L
data[5:13,26:28] = 1
data[11:13,28:32] = 1
#0
data[6:14,34:36] = 1
data[6:8, 36:38] = 1
data[12:14, 36:38] = 1
data[6:14,38:40] = 1
plt.imshow(data)
plt.show()
# Full SVD of the image: u is 15x15, diag holds the 15 singular values,
# vt is 40x40
u, diag, vt = numpy.linalg.svd(data, full_matrices=True)
fig = plt.figure()
fig.set_figwidth(fig.get_figwidth() * 3)
fig.set_figheight(fig.get_figheight() * 4)
# First grid: each rank-one component sigma_i u_i v_i^T on its own.  The
# concatenation builds a length-40 vector that is zero everywhere except
# position i-1, which keeps only the i-th singular value; diag()[:15,]
# trims the 40x40 diagonal matrix to the 15x40 shape needed by u @ . @ vt
for i in range(1, 16):
    diag_matrix = numpy.concatenate((numpy.zeros((len(diag[:i]) -1),), diag[i-1: i], numpy.zeros((40-i),)))
    reconstruct = numpy.dot(numpy.dot(u, numpy.diag(diag_matrix)[:15,]), vt)
    axes = fig.add_subplot(5, 3, i)
    mappable = axes.imshow(reconstruct, vmin=0.0, vmax=1.0)
    axes.set_title('Component = %s' % i)
plt.show()
# Second grid: cumulative sums -- keep the first i singular values to get
# the best rank-i approximation of the image (Eckart-Young)
u, diag, vt = numpy.linalg.svd(data, full_matrices=True)
fig = plt.figure()
fig.set_figwidth(fig.get_figwidth() * 3)
fig.set_figheight(fig.get_figheight() * 4)
for i in range(1, 16):
    diag_matrix = numpy.concatenate((diag[:i], numpy.zeros((40-i),)))
    reconstruct = numpy.dot(numpy.dot(u, numpy.diag(diag_matrix)[:15,]), vt)
    axes = fig.add_subplot(5, 3, i)
    mappable = axes.imshow(reconstruct, vmin=0.0, vmax=1.0)
    axes.set_title('Component = %s' % i)
plt.show()
```
<sup>1</sup><span id="footnoteRegression"> http://www.utstat.toronto.edu/~brunner/books/LinearModelsInStatistics.pdf</span>
| github_jupyter |
<a href="https://colab.research.google.com/github/jonathanmendoza-tx/DS-Unit-2-Regression-Classification/blob/master/module2/Jonathan_Mendoza_assignment_regression_classification_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science, Unit 2: Predictive Modeling
# Regression & Classification, Module 2
## Assignment
You'll continue to **predict how much it costs to rent an apartment in NYC,** using the dataset from renthop.com.
- [ ] Do train/test split. Use data from April & May 2016 to train. Use data from June 2016 to test.
- [ ] Engineer at least two new features. (See below for explanation & ideas.)
- [ ] Fit a linear regression model with at least two features.
- [ ] Get the model's coefficients and intercept.
- [ ] Get regression metrics RMSE, MAE, and $R^2$, for both the train and test data.
- [ ] What's the best test MAE you can get? Share your score and features used with your cohort on Slack!
- [ ] As always, commit your notebook to your fork of the GitHub repo.
#### [Feature Engineering](https://en.wikipedia.org/wiki/Feature_engineering)
> "Some machine learning projects succeed and some fail. What makes the difference? Easily the most important factor is the features used." — Pedro Domingos, ["A Few Useful Things to Know about Machine Learning"](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)
> "Coming up with features is difficult, time-consuming, requires expert knowledge. 'Applied machine learning' is basically feature engineering." — Andrew Ng, [Machine Learning and AI via Brain simulations](https://forum.stanford.edu/events/2011/2011slides/plenary/2011plenaryNg.pdf)
> Feature engineering is the process of using domain knowledge of the data to create features that make machine learning algorithms work.
#### Feature Ideas
- Does the apartment have a description?
- How long is the description?
- How many total perks does each apartment have?
- Are cats _or_ dogs allowed?
- Are cats _and_ dogs allowed?
- Total number of rooms (beds + baths)
- Ratio of beds to baths
- What's the neighborhood, based on address or latitude & longitude?
## Stretch Goals
- [ ] If you want more math, skim [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapter 3.1, Simple Linear Regression, & Chapter 3.2, Multiple Linear Regression
- [ ] If you want more introduction, watch [Brandon Foltz, Statistics 101: Simple Linear Regression](https://www.youtube.com/watch?v=ZkjP5RJLQF4)
(20 minutes, over 1 million views)
- [ ] Do the [Plotly Dash](https://dash.plot.ly/) Tutorial, Parts 1 & 2.
- [ ] Add your own stretch goal(s) !
## Load
```
# If you're in Colab, install dependencies and pull the course repo.
# The "!" lines are IPython shell magics: this cell only runs in a notebook.
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
    # Install required python packages:
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    !pip install --upgrade pandas-profiling plotly
    # Pull files from Github repo
    os.chdir('/content')
    !git init .
    !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
    !git pull origin master
    # Change into directory for module
    os.chdir('module1')
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import numpy as np
import pandas as pd
# Read New York City apartment rental listing data
df = pd.read_csv('../data/renthop-nyc.csv')
# Sanity check: the raw file is expected to have 49,352 rows x 34 columns
assert df.shape == (49352, 34)
# Remove the most extreme 1% prices,
# the most extreme .1% latitudes, &
# the most extreme .1% longitudes
df = df[(df['price'] >= np.percentile(df['price'], 0.5)) &
        (df['price'] <= np.percentile(df['price'], 99.5)) &
        (df['latitude'] >= np.percentile(df['latitude'], 0.05)) &
        (df['latitude'] < np.percentile(df['latitude'], 99.95)) &
        (df['longitude'] >= np.percentile(df['longitude'], 0.05)) &
        (df['longitude'] <= np.percentile(df['longitude'], 99.95))]
```
## Explore
```
import matplotlib.pyplot as plt
import datetime
# Parse the listing-creation timestamp and pull out the month for the split
df['created'] = pd.to_datetime(df['created'])
df['month'] = df['created'].map(lambda x : x.month)
df['month'].value_counts()
# Train on April & May (month < 6), test on June (month == 6)
train = df[df['month']<6]
test = df[df['month']==6]
train.shape, test.shape, df.shape
train.corr()
train['price'].describe()
# How many training listings report zero bedrooms / bathrooms?
train.query('bedrooms<1').shape
train.query('bathrooms<1').shape
# Drop zero-room listings and very high prices from the training set only
train = train.query('bedrooms>0')
train = train.query('bathrooms>0')
train = train.query('price<10000')
train.shape
train['price'].describe()
```
| github_jupyter |
# Fractales aleatorios
<img style="float: left; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/f/fa/Fractal_Crown_of_morgoth2_5600x4200.jpg" width="400px" height="125px" />
<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/8/80/Super-volcano_Tierazon_fractal_Tiera4414.jpg" width="400px" height="125px" />
**Referencias:**
- http://fractalfoundation.org/resources/what-are-fractals/
- https://georgemdallas.wordpress.com/2014/05/02/what-are-fractals-and-why-should-i-care/
- https://en.wikipedia.org/wiki/Barnsley_fern
- http://www.home.aone.net.au/~byzantium/ferns/fractal.html
Antes que nada, ¿qué son fractales?
___
## 1. Fractales
<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/2/21/Mandel_zoom_00_mandelbrot_set.jpg" width="400px" height="125px" />
- La geometría fractal es un campo de las matemáticas que tiene lugar a partir de los años setenta, y fue desarrollada principalmente por [Benoit Mandelbrot](https://es.wikipedia.org/wiki/Beno%C3%AEt_Mandelbrot).
- La geometría elemental que aprendimos en la escuela se trataba de estudiar (y hacer) formas o figuras. Pues bien, la geometría fractal no es distinta.
- Mientras que en geometría clásica las formas son suaves (círculos, triángulos, etcétera), las formas que produce la geometría fractal son toscas e infinitamente complejas.
Bueno, ¿y cuál es su importancia?
1. El proceso por el cual se obtienen formas fractales es impresionantemente simple y completamente diferente al seguido en geometría clásica. Mientras la geometría clásica se usan fórmulas para definir una forma, la geometría fractal usa iteración. Básicamente, podríamos decir que los fractales son imágenes de sistemas dinámicos.
2. Las formas fractales se parecen mucho a formas encontradas en la naturaleza. Este impresionante hecho es difícil de ignorar. Como sabemos no existen círculos perfectos en la naturaleza, ni cuadrados perfectos. No es solo eso, el solo mirar los árboles, ríos o montañas, y no se encuentra una forma que sea descrita por una fórmula. Sin embargo, usando fórmulas simples iteradas muchas veces, la geometría fractal puede modelar esos fenómenos con alta precisión. Si puedes usar matemáticas simples para modelar el mundo, vas por buen camino.
<img style="float: center; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Fractal_canopy.svg/1200px-Fractal_canopy.svg.png" width="400px" height="125px" />
___
<img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/e/ee/Sa-fern.jpg" width="400px" height="125px" />
La figura de la derecha (planta verde) corresponde a un helecho. El matemático británico [Michael Barnsley](https://en.wikipedia.org/wiki/Michael_Barnsley) fue quien primero describió un fractal que representa impresionantemente bien estas plantas, en su libro *'Fractals Everywhere'*.
## 2. Fractal helecho de Barnsley
El helecho de Barnsley es un fractal que usa cuatro transformaciones afines para generar los nuevos puntos. En la escogencia de cuál transformación usar para generar el siguiente punto es donde entra el componente probabilístico.
Este fractal se puede describir de la siguiente manera:
$$\left[\begin{array}{c}x_{k+1}\\ y_{k+1}\end{array}\right]=\left[\begin{array}{cc}a_i & b_i \\ c_i & d_i\end{array}\right]\left[\begin{array}{c}x_k\\ y_k\end{array}\right]+\left[\begin{array}{c}e_i\\f_i\end{array}\right],$$
donde $a_i$, $b_i$, $c_i$, $d_i$, $e_i$ y $f_i$ son coeficientes que dependen de la variable aleatoria $i\in\left\lbrace 0,1,2,3\right\rbrace$. Las condiciones iniciales son $x_0=y_0=0$.
Las probabilidades de ocurrencia de cada valor de $i$ son $P\left\lbrace i=0\right\rbrace=p_0=0.01$, $P\left\lbrace i=1\right\rbrace=p_1=0.85$, $P\left\lbrace i=2\right\rbrace=p_2=0.07$ y $P\left\lbrace i=3\right\rbrace=p_3=0.07$ (notar que la suma de las probabilidades es 1).
Todas las anteriores constantes se resume en la siguiente tabla (matriz):
```
# Import pandas and numpy
import pandas as pd
import numpy as np
# Barnsley fern: coefficient table for the four affine maps of the IFS
i = np.arange(4)
coefficients = {
    'ai': [0.0, 0.85, 0.2, -0.15],
    'bi': [0.0, 0.04, -0.26, 0.28],
    'ci': [0.0, -0.04, 0.23, 0.26],
    'di': [0.16, 0.85, 0.22, 0.24],
    'ei': [0.0, 0.0, 0.0, 0.0],
    'fi': [0.0, 1.6, 1.6, 0.44],
    'pi': [0.01, 0.85, 0.07, 0.07],
    'Porción generada': ['Tallo', 'Follaje cada vez más pequeño',
                         'Ramas izquierda', 'Ramas derecha'],
}
df = pd.DataFrame(coefficients, index=i)
df.index.name = "$i$"
df.round(2)
```
Con la anterior descripción, generemos el helecho de Barnsley con un código en python...
```
# Importar librerías para graficar y de números aleatorios
import matplotlib.pyplot as plt
import random
# Matriz de Barnsley
Mat_Barnsley = df.iloc[:,:-1].values
Mat_Barnsley
#help choices
random.choices?
random.choices([0,1,2,3],[0.01,0.85,0.07,0.07])
# Punto inicial
x = [0]
y = [0]
# Valores de i con su probabilidad
seq = [0,1,2,3]
prob = Mat_Barnsley[:,-1]
a = Mat_Barnsley[:,0]
b = Mat_Barnsley[:,1]
c = Mat_Barnsley[:,2]
d = Mat_Barnsley[:,3]
e = Mat_Barnsley[:,4]
f = Mat_Barnsley[:,5]
# Graficamos
for k in range(50000):
i = random.choices(seq,prob)
x.append(a[i]*x[k] + b[i]*y[k] + e[i])
y.append(c[i]*x[k] + d[i]*y[k] + f[i])
plt.figure(figsize=(8,8))
plt.scatter(x,y,c='g',s=0.8)
plt.show()
```
### 2.1 Mutaciones
Jugando con los coeficientes de la transformación, es posible crear mutaciones del helecho.
#### Mutación Cyclosorus
Un experimentador dió con una tabla de coeficientes que produce otro helecho que se ve muy parecido a la naturaleza. La tabla es la siguiente:
```
# Cyclosorus mutation: coefficient table for the four affine maps
i = np.arange(0,4)
coefficients = {
    'ai': [0.0, 0.95, 0.035, -0.04],
    'bi': [0.0, 0.005, -0.2, 0.2],
    'ci': [0.0, -0.005, 0.16, 0.16],
    'di': [0.25, 0.93, 0.04, 0.04],
    'ei': [0.0, -0.002, -0.09, 0.083],
    'fi': [-0.4, 0.5, 0.02, 0.12],
    'pi': [0.02, 0.84, 0.07, 0.07],
    'Porción generada': ['Tallo', 'Follaje cada vez más pequeño',
                         'Ramas izquierda', 'Ramas derecha'],
}
df = pd.DataFrame(coefficients, index=i)
df.index.name = "$i$"
df.round(3)
```
La actividad consiste en generar el helecho mutante con los coeficientes de esta nueva tabla.
```
# Numeric Cyclosorus matrix: drop the last (descriptive text) column
Mat_Cyclosorus = df.iloc[:,:-1].values
Mat_Cyclosorus
# Initial point x_0 = y_0 = 0
x = [0]
y = [0]
# Transformation indices and their selection probabilities
seq = [0,1,2,3]
prob = Mat_Cyclosorus[:,-1]
a = Mat_Cyclosorus[:,0]
b = Mat_Cyclosorus[:,1]
c = Mat_Cyclosorus[:,2]
d = Mat_Cyclosorus[:,3]
e = Mat_Cyclosorus[:,4]
f = Mat_Cyclosorus[:,5]
# Iterate the IFS and scatter-plot the orbit
for k in range(100000):
    # Fix: random.choices returns a one-element list; take the scalar index
    # so the appended coordinates are plain floats, not 1-element arrays.
    i = random.choices(seq, prob)[0]
    x.append(a[i]*x[k] + b[i]*y[k] + e[i])
    y.append(c[i]*x[k] + d[i]*y[k] + f[i])
plt.figure(figsize=(8,8))
plt.scatter(x,y,c='g',s=0.3)
plt.show()
```
#### Mutación Culcita
Un experimentador dió con una tabla de coeficientes que produce otro helecho que se ve muy parecido a la naturaleza. La tabla es la siguiente:
```
# Culcita mutation: coefficient table for the four affine maps
i = np.arange(0,4)
coefficients = {
    'ai': [0.0, 0.85, 0.09, -0.09],
    'bi': [0.0, 0.02, -0.28, 0.28],
    'ci': [0.0, -0.02, 0.3, 0.3],
    'di': [0.25, 0.83, 0.11, 0.09],
    'ei': [0.0, 0.0, 0.0, 0.0],
    'fi': [-0.14, 1.0, 0.6, 0.7],
    'pi': [0.02, 0.84, 0.07, 0.07],
    'Porción generada': ['Tallo', 'Follaje cada vez más pequeño',
                         'Ramas izquierda', 'Ramas derecha'],
}
df = pd.DataFrame(coefficients, index=i)
df.index.name = "$i$"
df.round(3)
```
**Actividad** La actividad consiste en generar el helecho mutante con los coeficientes de esta nueva tabla.
Hacer un nuevo archivo de jupyter (extensión .ipynb) con el nombre *T8_PrimerNombrePrimerApellido* y subirlo en el enlace habilitado.
```
# Numeric Culcita matrix: drop the last (descriptive text) column
Mat_Culcita = df.iloc[:,:-1].values
Mat_Culcita
# Initial point x_0 = y_0 = 0
x = [0]
y = [0]
# Transformation indices and their selection probabilities
seq = [0,1,2,3]
prob = Mat_Culcita[:,-1]
a = Mat_Culcita[:,0]
b = Mat_Culcita[:,1]
c = Mat_Culcita[:,2]
d = Mat_Culcita[:,3]
e = Mat_Culcita[:,4]
f = Mat_Culcita[:,5]
# Iterate the IFS and scatter-plot the orbit
for k in range(100000):
    # Fix: random.choices returns a one-element list; take the scalar index
    # so the appended coordinates are plain floats, not 1-element arrays.
    i = random.choices(seq, prob)[0]
    x.append(a[i]*x[k] + b[i]*y[k] + e[i])
    y.append(c[i]*x[k] + d[i]*y[k] + f[i])
plt.figure(figsize=(8,8))
plt.scatter(x,y,c='g',s=0.3)
plt.show()
```
## 3. Otra aplicación (elegible para proyecto)
Un paisaje fractal es una superficie generada usando un algoritmo estocástico diseñado para producir un comportamiento fractal que mimetiza la apariencia de un terreno natural. En otras palabras, el resultado de este procedimiento no es una superficie fractal determinística, sino una superficie aleatoria que exhibe comportamiento fractal.
<img style="float: left; margin: 0px 0px 0px 0px;" src="https://upload.wikimedia.org/wikipedia/commons/6/6d/Animated_fractal_mountain.gif" width="300px" height="100px" />
<img style="float: center; margin: 0px 0px 0px 0px;" src="https://upload.wikimedia.org/wikipedia/commons/6/6e/FractalLandscape.jpg" width="300px" height="100px" />
<img style="float: right; margin: 0px 0px 0px 0px;" src="https://upload.wikimedia.org/wikipedia/commons/8/8b/Fractal_terrain_texture.jpg" width="300px" height="100px" />
**Referencia:**
- https://en.wikipedia.org/wiki/Fractal_landscape
<script>
$(document).ready(function(){
$('div.prompt').hide();
$('div.back-to-top').hide();
$('nav#menubar').hide();
$('.breadcrumb').hide();
$('.hidden-print').hide();
});
</script>
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Cristian Camilo Zapata Zuluaga.
</footer>
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
# Use the FiveThirtyEight plot style for every figure in this notebook
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Connect to the local SQLite Hawaii weather database
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
# reflect the tables
# We can view all of the classes that automap found
# Save references to each table
# Create our session (link) from Python to the DB
# NOTE(review): the five steps above are unfinished assignment placeholders;
# later cells assume names like `session` and `Measurement` exist -- confirm
# they are defined before running the query cells below.
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
# Calculate the date 1 year ago from the last data point in the database
# Perform a query to retrieve the data and precipitation scores
# Save the query results as a Pandas DataFrame and set the index to the date column
# Sort the dataframe by date
# Use Pandas Plotting with Matplotlib to plot the data
# Use Pandas to calcualte the summary statistics for the precipitation data
# Design a query to show how many stations are available in this dataset?
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
```
## Bonus Challenge Assignment
```
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """Return [(TMIN, TAVG, TMAX)] for the inclusive date range.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        TMIN, TAVG, and TMAX
    """
    aggregates = [func.min(Measurement.tobs),
                  func.avg(Measurement.tobs),
                  func.max(Measurement.tobs)]
    # Passing both conditions to a single filter() ANDs them together,
    # equivalent to chaining two filter() calls.
    query = session.query(*aggregates).filter(Measurement.date >= start_date,
                                              Measurement.date <= end_date)
    return query.all()
# Function usage example: prints a one-element list holding the
# (TMIN, TAVG, TMAX) tuple for the given week
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Compute the daily normals (tmin, tavg, tmax) for one month-day.

    Aggregates over every year in the data set that matches the given
    month and day.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    month_day = func.strftime("%m-%d", Measurement.date)
    return (session
            .query(func.min(Measurement.tobs),
                   func.avg(Measurement.tobs),
                   func.max(Measurement.tobs))
            .filter(month_day == date)
            .all())
# Example: normals for January 1st across all years in the data set
daily_normals("01-01")
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
# Use the start and end date to create a range of dates
# Strip off the year and save a list of %m-%d strings
# Loop through the list of %m-%d strings and calculate the normals for each date
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
# Plot the daily normals as an area plot with `stacked=False`
```
| github_jupyter |
# Simpson paradoxes over time
Copyright 2021 Allen B. Downey
License: [Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0)](https://creativecommons.org/licenses/by-nc-sa/4.0/)
[Click here to run this notebook on Colab](https://colab.research.google.com/github/AllenDowney/ProbablyOverthinkingIt2/blob/master/simpson_wages.ipynb)
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Analysis helpers (run_subgroups, summarize, visualize) -- presumably
# defined in the local simpson.py module; verify against that file
from simpson import *
# Load the preprocessed General Social Survey data from an HDF5 store
gss = pd.read_hdf('gss_simpson', 'gss')
```
Would you say that most of the time people try to be helpful, or that they are mostly just looking out for themselves?
```
# GSS item 'helpful' over survey year, disaggregated by 10-year birth cohort.
xvarname = 'year'
yvarname = 'helpful'
gvarname = 'cohort10'
# Subgroup (Simpson's paradox) analysis; helpers are star-imported from `simpson`.
run_subgroups(gss, xvarname, yvarname, gvarname)
series_all, table = summarize(gss, xvarname, yvarname, gvarname)
# Drop the edge cohorts — presumably too sparsely populated; TODO confirm.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
# Clear the default title, then show the survey question left-aligned.
plt.title('')
title = """Would you say that most of the time people try to be helpful,
or that they are mostly just looking out for themselves?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "helpful"')
plt.xlabel('Year')
# Anchor the legend just outside the upper-right corner of the axes.
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('helpful_vs_year_by_cohort10.jpg')
```
## trust
Generally speaking, would you say that most people can be trusted or that you can't be too careful in dealing with people?
```
# GSS item 'trust' over survey year, by 10-year birth cohort.
xvarname = 'year'
yvarname = 'trust'
gvarname = 'cohort10'
# yvalue = 1 corresponds to "can be trusted" (see ylabel) — TODO confirm coding.
yvalue = 1
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Generally speaking, would you say that most people can be trusted
or that you can't be too careful in dealing with people?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "can be trusted"')
plt.xlabel('Year')
# Legend anchored just outside the axes.
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('trust_vs_year_by_cohort10.jpg')
```
Do you think most people would try to take advantage of you if they got a chance, or would they try to be fair?
```
# GSS item 'fair' over survey year, by 10-year birth cohort.
xvarname = 'year'
yvarname = 'fair'
gvarname = 'cohort10'
# yvalue = 2 corresponds to "would try to be fair" (see ylabel) — TODO confirm coding.
yvalue = 2
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Do you think most people would try to take advantage of you if they got a chance,
or would they try to be fair?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "would try to be fair"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('fair_vs_year_by_cohort10.jpg')
```
Is there any area right around here--that is, within a mile--where you would be afraid to walk alone at night?
```
# GSS item 'fear' over survey year, by 10-year birth cohort.
xvarname = 'year'
yvarname = 'fear'
gvarname = 'cohort10'
# yvalue = 2 corresponds to answering "no" (see ylabel) — TODO confirm coding.
yvalue = 2
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Is there any area right around here--that is, within a mile--
where you would be afraid to walk alone at night?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "no"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('fear_vs_year_by_cohort10.jpg')
```
## happy
```
# GSS item 'happy' over survey year, by 10-year birth cohort.
xvarname = 'year'
yvarname = 'happy'
gvarname = 'cohort10'
# A list of yvalues pools responses 1 and 2 ("very happy" or "pretty happy").
yvalue = [1,2]
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Taken all together, how would you say things are these days--
would you say that you are very happy, pretty happy, or not too happy?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "very happy" or "pretty happy"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('happy_vs_year_by_cohort10.jpg')
```
## pornlaw
https://gss.norc.org/Documents/quex/GSS2018%20Ballot%202%20-%20English.pdf
Which of these statements comes closest to your feelings about pornography laws?
1. There should be laws against the distribution of pornography, whatever the age, or
2. There should be laws against the distribution of pornography to persons under 18, or
3. There should be no laws forbidding the distribution of pornography
```
# GSS item 'pornlaw' over survey year, by 10-year birth cohort.
xvarname = 'year'
yvarname = 'pornlaw'
gvarname = 'cohort10'
# yvalue = 1: "laws against distribution, whatever the age" (per the prose above).
yvalue = 1
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Do you think there should be laws against the distribution of pornography,
whatever the age?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent who agree')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('pornlaw_vs_year_by_cohort10.jpg')
```
Do you think the use of marijuana should be made legal or not?
```
# GSS marijuana-legalization item over survey year, by 10-year birth cohort.
xvarname = 'year'
# FIX: the original set yvarname = 'fair' (copied from an earlier cell), but
# this cell's question text and output filename both refer to the GSS 'grass'
# item (should marijuana be made legal), so analyze that variable.
yvarname = 'grass'
gvarname = 'cohort10'
# yvalue = 1 selects "should be legal" — TODO confirm GSS coding.
yvalue = 1
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Drop the sparsely populated edge cohorts before plotting.
table.drop([1890, 1990], axis=1, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Do you think the use of marijuana should be made legal or not?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
# FIX: original label read 'Percent saying "No legal"' (garbled); yvalue == 1
# counts respondents who say marijuana should be legal.
plt.ylabel('Percent saying "should be legal"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Birth decade', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('grass_vs_year_by_cohort10.jpg')
```
Please tell me whether or not you think it should be possible for a pregnant woman to obtain a legal abortion if she is married and does not want any more children?
```
# GSS item 'abnomore' over survey year, disaggregated by highest degree.
xvarname = 'year'
yvarname = 'abnomore'
gvarname = 'degree5'
yvalue = 1
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
# NOTE: the next four assignments are redundant repeats of the ones above;
# they precede a second run restricted to surveys from 2002 and earlier.
xvarname = 'year'
yvarname = 'abnomore'
gvarname = 'degree5'
yvalue = 1
pre2002 = gss['year'] <= 2002
run_subgroups(gss[pre2002].copy(), xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Order the subgroup columns by their 2017 value, highest first.
table.sort_values(by=2017, axis=1, ascending=False, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Do you think it should be possible for a pregnant woman to obtain
a legal abortion if she is married and does not want any more children?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying yes')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('abnomore_vs_year_by_degree.jpg')
```
Suppose an admitted Communist wanted to make a speech in your community. Should he be allowed to speak, or not?
```
# GSS item 'spkcom' over survey year, by highest degree.
xvarname = 'year'
yvarname = 'spkcom'
gvarname = 'degree5'
# yvalue = 1 corresponds to "allowed" (see ylabel) — TODO confirm coding.
yvalue = 1
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Order the subgroup columns by their 2017 value, highest first.
table.sort_values(by=2017, axis=1, ascending=False, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """Suppose an admitted Communist wanted to make a speech in your community.
Should he be allowed to speak, or not?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "allowed"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('spkcom_vs_year_by_degree.jpg')
```
There are always some people whose ideas are considered bad or dangerous by other people. For instance, somebody who is against all churches and religion . . .
If some people in your community suggested that a book he wrote against churches and religion should be taken out of your public library, would you favor removing this book, or not?
```
# GSS item 'libath' over survey year, by highest degree.
xvarname = 'year'
yvarname = 'libath'
gvarname = 'degree5'
# yvalue = 2 corresponds to "not removed" (see ylabel) — TODO confirm coding.
yvalue = 2
run_subgroups(gss, xvarname, yvarname, gvarname, yvalue)
series_all, table = summarize(gss, xvarname, yvarname, gvarname, yvalue)
# Order the subgroup columns by their 2017 value, highest first.
table.sort_values(by=2017, axis=1, ascending=False, inplace=True)
table
visualize(series_all, table)
plt.title('')
title = """If people object to a book by someone who is opposed to
churches and religion, should it be removed from a public library, or not?
"""
plt.title(title, loc='left', fontdict=dict(fontsize=14))
plt.ylabel('Percent saying "not removed"')
plt.xlabel('Year')
x = y = 1.02
plt.legend(title='Highest degree', bbox_to_anchor=(x, y), loc='upper left', ncol=1)
plt.tight_layout()
plt.savefig('libath_vs_year_by_degree.jpg')
```
| github_jupyter |
# Skills Space
Felix Zaussinger | 21.07.2021
## Core Analysis Goal(s)
1. visualise adjacency matrix of skills space
## Key Insight(s)
1.
```
import os
import sys
import logging
from pathlib import Path
import numpy as np
import scipy as sp
import statsmodels.api as sm
from statsmodels.formula.api import ols
%load_ext autoreload
%autoreload 2
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_context("poster")
sns.set(rc={'figure.figsize': (16, 9.)})
sns.set_style("ticks")
import pandas as pd
pd.set_option("display.max_rows", 120)
pd.set_option("display.max_columns", 120)
import networkx as nx
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
```
Define directory structure
```
# project directory
# abspath is the notebook's working directory; parents[0] is assumed to be
# the project root (i.e. the notebook lives one level below it) — TODO confirm.
abspath = os.path.abspath('')
project_dir = str(Path(abspath).parents[0])
# sub-directories (cookiecutter-style data/reports layout)
data_raw = os.path.join(project_dir, "data", "raw")
data_interim = os.path.join(project_dir, "data", "interim")
data_processed = os.path.join(project_dir, "data", "processed")
data_external = os.path.join(project_dir, "data", "external")
figure_dir = os.path.join(project_dir, "reports", "figures")
```
Read data
```
# Load the ESCO v1.0.3 skills table; `preferredLabel` is used later for node labels.
skills = pd.read_csv(os.path.join(data_raw, "esco", "v1.0.3", "skills_en.csv"))
# Alternative adjacency-matrix source, currently disabled.
#adj_matrix = pd.read_pickle(
#    os.path.join(project_dir, "data", "processed", "adjacency_matrix.pkl")
#)
# A = adj_matrix.values
# At = A.transpose()
# np.matmul(At, A)
```
Read skills adjacency matrix from Skilllab
```
# nesta report
# nesta report
# Skill-to-skill similarity matrix (numpy array) from the Neighborhood Model.
sim_skills = np.load(
os.path.join(data_external, "Neighborhood_Model_skill_to_skill.npy")
)
```
Remove links below a certain threshold (weakly connected skills)
```
#w_thresh = 0.001
#sim_skills[sim_skills < w_thresh] = np.nan
```
Load subset of full graph
```
# subset equals the full matrix size, so the "subset" graph currently covers
# the whole similarity matrix; shrink `subset` to work on a smaller graph.
subset = sim_skills.shape[0]
Gsub = nx.from_numpy_array(sim_skills[:subset, :subset])
```
Apply threshold to remove irrelevant edges
```
#w_thresh = 0.01
#edge_weights = nx.get_edge_attributes(Gsub,'weight')
#Gsub.remove_edges_from((e for e, w in edge_weights.items() if w < w_thresh))
# Per-node centrality measures; nx returns node->value dicts, converted here
# to arrays (ordering follows the graph's node order).
closeness_centrality = np.array(list(nx.algorithms.centrality.closeness_centrality(Gsub).values()))
degree_centrality = np.array(list(nx.algorithms.centrality.degree_centrality(Gsub).values()))
betweenness_centrality = np.array(list(nx.algorithms.centrality.betweenness_centrality(Gsub).values()))
eigenvector_centrality = np.array(list(nx.algorithms.centrality.eigenvector_centrality(Gsub).values()))
clustering_coefficient = np.array(list(nx.algorithms.cluster.clustering(Gsub).values()))
# Attach the ESCO label and all centrality measures as node attributes.
attr_dict = {}
for i in np.arange(subset):
attr_dict[i] = {
"label": skills.preferredLabel.values[i],
"closeness_centrality": closeness_centrality[i],
"degree_centrality": degree_centrality[i],
"betweenness_centrality": betweenness_centrality[i],
"eigenvector_centrality": eigenvector_centrality[i],
"clustering_coefficient": clustering_coefficient[i]
}
# set attributes
nx.set_node_attributes(Gsub, attr_dict)
weights = nx.get_edge_attributes(Gsub,'weight')
weights_array = list(weights.values())
node_labels = dict(zip(np.arange(subset), skills.preferredLabel.values[:subset]))
# remove labels for non-central skills
# Only nodes above this betweenness threshold keep a visible label.
centrality_thresh = 0.01
for i, c in enumerate(betweenness_centrality):
if c <= centrality_thresh:
node_labels[i] = ""
# plot
# Force-directed layout (alternative kamada_kawai layout kept disabled below).
pos = nx.spring_layout(Gsub)
#pos = nx.kamada_kawai_layout(Gsub)
# Node size scales with betweenness; edge color encodes similarity weight.
nx.draw_networkx(
Gsub,
labels=node_labels,
font_size=6,
horizontalalignment="left",
verticalalignment="top",
node_size=betweenness_centrality * 1000,
edge_cmap=plt.cm.Blues,
edge_color=list(weights.values()),
edge_vmin=0,
edge_vmax=max(weights_array),
font_color="lightgrey"
)
plt.box(False)
plt.tight_layout()
plt.savefig(
os.path.join(figure_dir, "skills_centrality_all_edges.png"),
dpi=300,
bbox_inches="tight"
)
# Collect all measures into one DataFrame (one row per skill node).
skills_centrality = {
"label": skills.preferredLabel.values[:subset],
"closeness_centrality": closeness_centrality,
"degree_centrality": degree_centrality,
"betweenness_centrality": betweenness_centrality,
"eigenvector_centrality": eigenvector_centrality,
"clustering_coefficient": clustering_coefficient
}
df_skills_centrality = pd.DataFrame.from_dict(skills_centrality)
# from kanders 2020
def coreness(b, e, c):
    """Measure of node coreness proposed by Kanders et al. 2020"""
    # Mean of max-normalized betweenness (b) and eigenvector (e) centrality,
    # discounted by the local clustering coefficient (c).
    normalized_centrality = b / max(b) + e / max(e)
    return 0.5 * normalized_centrality * (1 - c)
# Compute coreness per skill from the centrality columns and persist results.
df_skills_centrality["coreness"] = coreness(
b=df_skills_centrality["betweenness_centrality"],
e=df_skills_centrality["eigenvector_centrality"],
c=df_skills_centrality["clustering_coefficient"]
)
df_skills_centrality.to_csv(
os.path.join(data_processed, "skills_coreness_all_edges.csv")
)
# export graph files
# Both GEXF and GraphML exports, e.g. for Gephi / other graph tools.
nx.write_gexf(Gsub, os.path.join(data_raw, "networks", "skills_network_all_edges.gexf"))
nx.write_graphml(Gsub, os.path.join(data_raw, "networks", "skills_network_all_edges.graphml"))
# Display skills ranked by coreness (notebook output).
df_skills_centrality.sort_values("coreness", ascending=False)
```
| github_jupyter |
# CI/CD for TFX pipelines
## Learning Objectives
1. Develop a CI/CD workflow with Cloud Build to build and deploy TFX pipeline code.
2. Integrate with Github to automatically trigger pipeline deployment with source code repository changes.
In this lab, you will walk through authoring a Cloud Build CI/CD workflow that automatically builds and deploys the same TFX pipeline from `lab-02.ipynb`. You will also integrate your workflow with GitHub by setting up a trigger that starts the workflow when a new tag is applied to the GitHub repo hosting the pipeline's code.
## Setup
```
import yaml
# Set `PATH` to include the directory containing TFX CLI.
# IPython magics: capture the current PATH, then prepend the user-local bin dir.
PATH=%env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
# Shell escape: report the installed TFX version (lab expects 0.25.0).
!python -c "import tfx; print('TFX version: {}'.format(tfx.__version__))"
```
**Note**: this lab was built and tested with the following package versions:
`TFX version: 0.25.0`
(Optional) If the TFX version above does not match the lab tested defaults, run the command below:
```
%pip install --upgrade --user tfx==0.25.0
```
**Note**: you may need to restart the kernel to pick up the correct package versions.
## Understanding the Cloud Build workflow
Review the `cloudbuild.yaml` file to understand how the CI/CD workflow is implemented and how environment specific settings are abstracted using **Cloud Build** variables.
The **Cloud Build** CI/CD workflow automates the steps you walked through manually during `lab-02`:
1. Builds the custom TFX image to be used as a runtime execution environment for TFX components and as the AI Platform Training training container.
1. Compiles the pipeline and uploads the pipeline to the KFP environment
1. Pushes the custom TFX image to your project's **Container Registry**
The **Cloud Build** workflow configuration uses both standard and custom [Cloud Build builders](https://cloud.google.com/cloud-build/docs/cloud-builders). The custom builder encapsulates **TFX CLI**.
## Configuring environment settings
Navigate to [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console.
### Create or select an existing Kubernetes cluster (GKE) and deploy AI Platform
Make sure to select `"Allow access to the following Cloud APIs https://www.googleapis.com/auth/cloud-platform"` to allow for programmatic access to your pipeline by the Kubeflow SDK for the rest of the lab. Also, provide an `App instance name` such as "tfx" or "mlops". Note you may have already deployed an AI Pipelines instance during the Setup for the lab series. If so, you can proceed using that instance below in the next step.
Validate the deployment of your AI Platform Pipelines instance in the console before proceeding.
### Configure environment settings
Update the below constants with the settings reflecting your lab environment.
- `GCP_REGION` - the compute region for AI Platform Training and Prediction
- `ARTIFACT_STORE` - the GCS bucket created during installation of AI Platform Pipelines. The bucket name starts with the `kubeflowpipelines-` prefix.
```
# Use the following command to identify the GCS bucket for metadata and pipeline storage.
!gsutil ls
```
* `CUSTOM_SERVICE_ACCOUNT` - In the gcp console Click on the Navigation Menu and navigate to `IAM & Admin`, then to `Service Accounts` and use the service account starting with prefix - 'tfx-tuner-caip-service-account'. This enables CloudTuner and the Google Cloud AI Platform extensions Tuner component to work together and allows for distributed and parallel tuning backed by AI Platform Vizier's hyperparameter search algorithm. Please see the lab setup `README` for setup instructions.
- `ENDPOINT` - set the `ENDPOINT` constant to the endpoint to your AI Platform Pipelines instance. The endpoint to the AI Platform Pipelines instance can be found on the [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) page in the Google Cloud Console. Open the *SETTINGS* for your instance and use the value of the `host` variable in the *Connect to this Kubeflow Pipelines instance from a Python client via Kubeflow Pipelines SKD* section of the *SETTINGS* window. The format is `'....[region].pipelines.googleusercontent.com'`.
```
#TODO: Set your environment resource settings here for GCP_REGION, ARTIFACT_STORE_URI, ENDPOINT, and CUSTOM_SERVICE_ACCOUNT.
GCP_REGION = 'us-central1'
ARTIFACT_STORE_URI = 'gs://qwiklabs-gcp-04-248da7eb1719-kubeflowpipelines-default'
ENDPOINT = '34091a4f3634ed17-dot-us-central2.pipelines.googleusercontent.com'
CUSTOM_SERVICE_ACCOUNT = 'tfx-tuner-caip-service-account@qwiklabs-gcp-04-248da7eb1719.iam.gserviceaccount.com'
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
```
## Creating the TFX CLI builder
### Review the Dockerfile for the TFX CLI builder
```
!cat tfx-cli/Dockerfile
!cat tfx-cli/requirements.txt
```
### Build the image and push it to your project's Container Registry
**Hint**: Review the [Cloud Build](https://cloud.google.com/cloud-build/docs/running-builds/start-build-manually#gcloud) gcloud command line reference for builds submit. Your image should follow the format `gcr.io/[PROJECT_ID]/[IMAGE_NAME]:latest`. Note the source code for the tfx-cli is in the directory `./tfx-cli`.
```
# Build the tfx-cli builder image and push it to the project's Container Registry.
IMAGE_NAME='tfx-cli'
TAG='latest'
IMAGE_URI='gcr.io/{}/{}:{}'.format(PROJECT_ID, IMAGE_NAME, TAG)
# TODO: Your gcloud command here to build tfx-cli and submit to Container Registry.
!gcloud builds submit --timeout=15m --tag {IMAGE_URI} tfx-cli
```
## Exercise: manually trigger CI/CD pipeline run with Cloud Build
You can manually trigger **Cloud Build** runs using the `gcloud builds submit` command.
```
# Pipeline settings for the manual Cloud Build run. User-defined Cloud Build
# substitutions must begin with an underscore; TAG_NAME (no underscore) is a
# built-in Cloud Build substitution that is being overridden here.
PIPELINE_NAME='tfx_covertype_continuous_training'
MODEL_NAME='tfx_covertype_classifier'
DATA_ROOT_URI='gs://workshop-datasets/covertype/small'
TAG_NAME='test'
TFX_IMAGE_NAME='lab-03-tfx-image'
PIPELINE_FOLDER='pipeline'
PIPELINE_DSL='runner.py'
RUNTIME_VERSION='2.3'
PYTHON_VERSION='3.7'
USE_KFP_SA='False'
ENABLE_TUNING='True'
# Comma-separated KEY=VALUE list for `gcloud builds submit --substitutions`.
# The backslashes are line-continuation escapes inside the string literal, so
# the formatted result collapses to a single line; .strip() removes the
# surrounding newlines.
# FIX: the original ended the list with `_ENABLE_TUNING={},` — the trailing
# comma survives .strip() and yields an empty substitution entry, which
# gcloud rejects when parsing the --substitutions dict argument.
SUBSTITUTIONS="""
_GCP_REGION={},\
_ARTIFACT_STORE_URI={},\
_CUSTOM_SERVICE_ACCOUNT={},\
_ENDPOINT={},\
_PIPELINE_NAME={},\
_MODEL_NAME={},\
_DATA_ROOT_URI={},\
_TFX_IMAGE_NAME={},\
TAG_NAME={},\
_PIPELINE_FOLDER={},\
_PIPELINE_DSL={},\
_RUNTIME_VERSION={},\
_PYTHON_VERSION={},\
_USE_KFP_SA={},\
_ENABLE_TUNING={}
""".format(GCP_REGION,
           ARTIFACT_STORE_URI,
           CUSTOM_SERVICE_ACCOUNT,
           ENDPOINT,
           PIPELINE_NAME,
           MODEL_NAME,
           DATA_ROOT_URI,
           TFX_IMAGE_NAME,
           TAG_NAME,
           PIPELINE_FOLDER,
           PIPELINE_DSL,
           RUNTIME_VERSION,
           PYTHON_VERSION,
           USE_KFP_SA,
           ENABLE_TUNING
           ).strip()
```
Hint: you can manually trigger **Cloud Build** runs using the `gcloud builds submit` command. See the [documentation](https://cloud.google.com/sdk/gcloud/reference/builds/submit) for how to pass the `cloudbuild.yaml` file and SUBSTITUTIONS as arguments.
```
# TODO: write gcloud builds submit command to trigger manual pipeline run.
# Submits the current directory with cloudbuild.yaml, passing the
# SUBSTITUTIONS string assembled in the previous cell.
!gcloud builds submit . --timeout=15m --config cloudbuild.yaml --substitutions {SUBSTITUTIONS}
```
## Exercise: Setting up GitHub integration
In this exercise you integrate your CI/CD workflow with **GitHub**, using [Cloud Build GitHub App](https://github.com/marketplace/google-cloud-build).
You will set up a trigger that starts the CI/CD workflow when a new tag is applied to the **GitHub** repo managing the pipeline source code. You will use a fork of this repo as your source GitHub repository.
### Create a fork of this repo
#### [Follow the GitHub documentation](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) to fork this repo
#### Create a Cloud Build trigger
Connect the fork you created in the previous step to your Google Cloud project and create a trigger following the steps in the [Creating GitHub app trigger](https://cloud.google.com/cloud-build/docs/create-github-app-triggers) article. Use the following values on the **Edit trigger** form:
|Field|Value|
|-----|-----|
|Name|[YOUR TRIGGER NAME]|
|Description|[YOUR TRIGGER DESCRIPTION]|
|Event| Tag|
|Source| [YOUR FORK]|
|Tag (regex)|.\*|
|Build Configuration|Cloud Build configuration file (yaml or json)|
|Cloud Build configuration file location|/workshops/tfx-caip-tf23/lab-03-tfx-cicd/labs/cloudbuild.yaml|
Use the following values for the substitution variables:
|Variable|Value|
|--------|-----|
|_GCP_REGION|[YOUR GCP_REGION]|
|_CUSTOM_SERVICE_ACCOUNT|[YOUR CUSTOM_SERVICE_ACCOUNT]|
|_ENDPOINT|[Your inverting proxy host pipeline ENDPOINT]|
|_TFX_IMAGE_NAME|lab-03-tfx-image|
|_PIPELINE_NAME|tfx_covertype_continuous_training|
|_MODEL_NAME|tfx_covertype_classifier|
|_DATA_ROOT_URI|gs://workshop-datasets/covertype/small|
|_PIPELINE_FOLDER|workshops/tfx-caip-tf23/lab-03-tfx-cicd/labs/pipeline|
|_PIPELINE_DSL|runner.py|
|_PYTHON_VERSION|3.7|
|_RUNTIME_VERSION|2.3|
|_USE_KFP_SA|False|
|_ENABLE_TUNING|True|
#### Trigger the build
To start an automated build [create a new release of the repo in GitHub](https://help.github.com/en/github/administering-a-repository/creating-releases). Alternatively, you can start the build by applying a tag using `git`.
```
git tag [TAG NAME]
git push origin --tags
```
#### Verify triggered build in Cloud Build dashboard
After you see the pipeline finish building on the Cloud Build dashboard, return to [AI Platform Pipelines](https://console.cloud.google.com/ai-platform/pipelines/clusters) in the console. Click `OPEN PIPELINES DASHBOARD` and view the newly deployed pipeline. Creating a release tag on GitHub will create a pipeline with the name `tfx_covertype_continuous_training-[TAG NAME]` while applying the tag with `git` will create a pipeline with the name `tfx_covertype_continuous_training_github-[TAG NAME]`.
## Next Steps
In this lab, you walked through authoring a Cloud Build CI/CD workflow that automatically builds and deploys a TFX pipeline. You also integrated your TFX workflow with GitHub by setting up a Cloud Build trigger. In the next lab, you will walk through inspection of TFX metadata and pipeline artifacts created during TFX pipeline runs.
# License
<font size=-1>Licensed under the Apache License, Version 2.0 (the \"License\");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.</font>
| github_jupyter |
# scikit-learn - Machine Learning in Python
Scikit-learn is a machine learning library for Python. A key feature is that is has been designed to seamlessly interoperate with the scientific libraries NumPy and SciPy, which we have introduced in the previous notebooks, as well as with the graphical library Matplotlib.
It collects a number of algorithms for supervised and unsupervised learning, including
* Classification
* SVM (`sklearn.svm`)
* Nearest neighbors (`sklearn.neighbors`)
* Random forests (`sklearn.ensemble`)
* Regression
* SVR (`sklearn.svm`)
* Ridge regression (`sklearn.linear_model`)
* Lasso (`sklearn.linear_model`)
* Clustering (`sklearn.cluster`)
* k-Means
* Spectral clustering
* Mean-shift
* Dimensionality reduction
* PCA (`sklearn.decomposition`)
* Feature selection (`sklearn.feature_selection`)
* Non-negative matrix factorization (`sklearn.decomposition`)
Scikit-learn also offers modules for model evaluation and selection, including grid search, cross validation and metrics, and for data preprocessing (feature extraction and normalization).
This tutorial has been adapted from materials at [scikit-learn](https://scikit-learn.org) (BSD License).
To access the scikit-learn module, you can import the whole module:
```
import sklearn
```
or just the required component(s):
```
from sklearn import svm
```
# Basic commands
## Loading a dataset
Scikit-learn ships with some standard datasets, that are ideal for tutorial purposes.
For instance the `digits` dataset is a collection of handwritten digits, suitable for classification tasks:
```
from sklearn import datasets
# Bundled handwritten-digits dataset (Bunch object with data/images/target).
digits = datasets.load_digits()
```
This dataset is a dictionary-like object holding data and metadata. As typical when using scikit-learn, data are stored as a 2D Numpy array, namely the `data` member having shape `n_samples` by `n_features`:
```
# Flattened samples: one row per image, 64 pixel features each.
type(digits.data)
digits.data.shape
print(digits.data)
```
Data need to be formatted as a 2D `(n_samples,n_features)` array to be used with the scikit-learn methods. However, original data can have a different shape; in the case of the `digits` dataset the 64 features correspond to the pixels of a 8x8 image. These original data can be accessed through the `images` member, e.g. to inspect the first image:
```
# Original 8x8 pixel grid of the first sample.
print(digits.images[0])
```
In the case of a supervised problem, response variables are stored in the `target` member, a 1D Numpy array of size `n_samples`:
```
# Per-sample class labels (the digit each image represents).
type(digits.target)
digits.target.shape
print(digits.target)
```
As we are dealing with a classification problem, the set of target classes is available, too, through the `target_names` member:
```
# The distinct classes: digits 0 through 9.
type(digits.target_names)
digits.target_names.shape
print(digits.target_names)
```
Scikit-learn allows to import external datasets in a variety of ways. Typical formats include Numpy arrays, Scipy sparse matrices, Pandas dataframes, and more.
## Learning and predicting
In the case of the `digits` dataset, the task is to predict, given an image, which digit it represents. We are given samples of each of the 10 possible classes (the digits zero through nine) on which we fit a so called **estimator** to be able to predict the classes to which unseen samples belong.
In scikit-learn, an estimator for classification is a Python object that implements the methods `fit(X, y)` and `predict(T)`.
An example of estimator is `sklearn.svm.SVC`, which implements the Support Vector Classification algorithm. Let's set up a SVC model with fixed hyper-parameters `gamma` and `C`:
```
from sklearn import svm
# Support Vector Classifier with fixed (manually chosen) hyper-parameters.
clf = svm.SVC(gamma=0.001, C=100.)
```
The estimator instance `clf` is trained to the model using a training set and the `fit` method. For the purposes of this tutorial let us use as training set all the images from the `digits` dataset but the last one, which we are keeping for the prediction step:
```
# Train on every sample except the last, which is held out for prediction.
clf.fit(digits.data[:-1], digits.target[:-1])
```
After the estimator has been trained, we can use it to predict new values. For instance, let us predict the last image from the dataset. This is the image:
```
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(1, figsize=(3, 3))
# Show the held-out image in inverted grayscale.
plt.imshow(digits.images[-1], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
```
And this is the prediction:
```
# Note the [-1:] slice keeps the input 2D, as predict expects.
clf.predict(digits.data[-1:])
```
Image classification can be a challenging task, especially if the images are low resolution. Do you agree with the classifier?
## Displaying results of an image classification task
Let us re-run the `digits` dataset example using the first half of the images for training and the second half for prediction. We are going to use Matplotlib to plot some of the images and get graphical insights.
First, let us load the dataset and display the first 4 images from the training subset:
```
import matplotlib.pyplot as plt
from sklearn import datasets, svm, metrics
%matplotlib inline
digits = datasets.load_digits()
# Pair each 8x8 image with its label, and preview the first four.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
plt.subplot(2, 4, index + 1)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Training: %i' % label)
```
Then, let us proceed with some data preparation and the actual training:
```
# Flatten each 8x8 image into a 64-feature row, train on the first half.
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))
classifier = svm.SVC(gamma=0.001)
classifier.fit(data[:n_samples // 2], digits.target[:n_samples // 2])
```
Finally, let's make predictions and visualise some of the outcomes:
```
# Predict on the second half and compare against the true labels.
expected = digits.target[n_samples // 2:]
predicted = classifier.predict(data[n_samples // 2:])
print("Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
# Show four of the test images alongside their predicted labels.
images_and_predictions = list(zip(digits.images[n_samples // 2:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
plt.subplot(2, 4, index + 5)
plt.axis('off')
plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
plt.title('Prediction: %i' % prediction)
plt.show()
```
## Model persistence
What if we want to save a trained model for future use? One possibility is to use Python’s built-in persistence model, `pickle`. First let us instantiate and train an estimator:
```
from sklearn import svm
from sklearn import datasets
clf = svm.SVC(gamma='scale')
# Train on the iris dataset so there is a fitted model to serialize.
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf.fit(X, y)
```
Then we can save our trained model to a string:
```
import pickle
# Serialize the fitted estimator to an in-memory bytes object.
s = pickle.dumps(clf)
```
And later on load it back and use it:
```
# Deserialize and predict with the restored model.
clf2 = pickle.loads(s)
clf2.predict(X[0:1])
```
In alternative, the `joblib` module can be used. It is more efficient on big data, but only allows to write to disk:
```
from joblib import dump, load
# joblib persists to a file on disk rather than an in-memory string.
dump(clf, 'filename.joblib')
clf3 = load('filename.joblib')
clf3.predict(X[0:1])
```
## Refitting the hyper-parameters
Suppose we have trained an estimator:
```
import numpy as np
from sklearn.svm import SVC
# Seeded RNG makes the random data reproducible.
rng = np.random.RandomState(0)
X = rng.rand(100, 10)
y = rng.binomial(1, 0.5, 100)
X_test = rng.rand(5, 10)
# Fit with a linear kernel first.
clf = SVC(kernel='linear')
clf.fit(X, y)
clf.predict(X_test)
```
Later on, we can update the hyper-parameters using the `set_params()` method. Re-calling the `fit()` method will then overwrite any previous training:
```
# Switch to an RBF kernel and retrain; the previous fit is discarded.
clf.set_params(kernel='rbf', gamma='scale').fit(X, y)
clf.predict(X_test)
```
## Choosing the hyper-parameters of the model
To tune the model hyper-parameters, we can use tools such as grid search and cross validation.
As an example, let us optimize the classifier estimator for the `digits` dataset using cross-validation and the `sklearn.model_selection.GridSearchCV` object. Again, half of the available data will be used for training, and the other half for evaluation.
```
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply an classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
```
Now we are setting the hyper-parameters to be tuned:
```
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
```
Then tuning for a `precision` score:
```
score = 'precision'
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
```
And finally tuning for a `recall` score:
```
score = 'recall'
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
```
Note how this is just a toy problem: the hyper-parameter plateau is too flat and the output model is the same for precision and recall with ties in quality.
# A clustering example for image segmentation
Let us generate an image with connected circles. We will then apply a Spectral Clustering model to separate them.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
```
Here, the spectral clustering approach solves the problem known as "normalized graph cuts": the image is seen as a graph of connected voxels, and the algorithm amounts to choosing graph cuts defining regions while minimizing the ratio of the gradient along the cut, and the volume of the region.
```
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we take it weakly
# dependent from the gradient the segmentation is close to a voronoi
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = np.full(mask.shape, -1.)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors. [Licensed under the Apache License, Version 2.0](#scrollTo=Afd8bu4xJOgh).
```
// #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/swift/tutorials/custom_differentiation"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/swift/blob/master/docs/site/tutorials/custom_differentiation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/swift/blob/master/docs/site/tutorials/custom_differentiation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
# Custom differentiation
This tutorial will show you how to define your own custom derivatives, perform derivative surgery, and implement your own gradient checkpointing API in just 5 lines of Swift.
## Declaring custom derivatives
You can define custom derivatives for any Swift function that has differentiable parameters and results. By doing that, you can even import a C function and make it differentiable.
```
import Glibc
func sillyExp(_ x: Float) -> Float {
let 𝑒 = Float(M_E)
print("Taking 𝑒(\(𝑒)) to the power of \(x)!")
return pow(𝑒, x)
}
@differentiating(sillyExp)
func sillyDerivative(_ x: Float) -> (value: Float, pullback: (Float) -> Float) {
let y = sillyExp(x)
return (value: y, pullback: { v in v * y })
}
print("exp(3) =", sillyExp(3))
print("𝛁exp(3) =", gradient(of: sillyExp)(3))
```
## Stop derivatives from propagating
Commonly known as "stop gradient" in machine learning use cases, method `withoutDerivative(at:)` stops derivatives from propagating.
Plus, `withoutDerivative(at:)` can sometimes help the Swift compiler with identifying what not to differentiate and producing more efficient derivaitves. When it is detectable that the derivative of a function will always be zero, the Swift compiler will produce a warning. Explicitly using `withoutDerivative(at:)` silences that warning.
```
let x: Float = 2.0
let y: Float = 3.0
gradient(at: x, y) { x, y in
sin(sin(sin(x))) + withoutDerivative(at: cos(cos(cos(y))))
}
```
## Derivative surgery
Method [`withDerivative(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE12withGradientyxy15CotangentVectorQzzcF) makes arbitrary operations (including mutation) run on the gradient at a value during the enclosing function’s backpropagation.
Use this to debug or make experimental tweaks to backpropagation.
### It works anywhere
All differentiation APIs provided by the standard library are defined generically over all types that conform to the `Differentiable` protocol: `Float`, `Double`, `Float80`, SIMD vectors, and even your own types!
Read technical document [Differentiable Types](https://github.com/tensorflow/swift/blob/master/docs/DifferentiableTypes.md) for more insights on the `Differentiable` protocol.
```
var x: Float = 30
x.gradient { x -> Float in
// Print the partial derivative with respect to the result of `sin(x)`.
let a = sin(x).withDerivative { print("∂+/∂sin = \($0)") }
// Force the partial derivative with respect to `x` to be `0.5`.
let b = log(x.withDerivative { (dx: inout Float) in
print("∂log/∂x = \(dx), but rewritten to 0.5");
dx = 0.5
})
return a + b
}
```
### Use it in a neural network module
Just like how we used it in a simple `Float` function, we can use it in any numerical application, like the following neural network built using the [Swift for TensorFlow Deep Learning Library](https://github.com/tensorflow/swift-apis).
```
import TensorFlow
struct MLP: Layer {
var layer1 = Dense<Float>(inputSize: 2, outputSize: 10, activation: relu)
var layer2 = Dense<Float>(inputSize: 10, outputSize: 1, activation: relu)
@differentiable
func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
let h0 = layer1(input).withDerivative { print("∂L/∂layer1 =", $0) }
return layer2(h0)
}
}
var classifier = MLP()
let optimizer = SGD(for: classifier, learningRate: 0.02)
let x: Tensor<Float> = [[0, 0], [0, 1], [1, 0], [1, 1]]
let y: Tensor<Float> = [0, 1, 1, 0]
for _ in 0..<10 {
let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
let ŷ = classifier(x).withDerivative { print("∂L/∂ŷ =", $0) }
let loss = (ŷ - y).squared().mean()
print("Loss: \(loss)")
return loss
}
optimizer.update(&classifier, along: 𝛁model)
}
```
## Recomputing activations during backpropagation to save memory (checkpointing)
Checkpointing is a traditional technique in reverse-mode automatic differentiation for saving memory. Rather than saving large intermediate values in the original computation for computing derivatives, the intermediate values are instead recomputed as needed during backpropagation.
This technique has been realized in modern deep learning libraries as well. In Swift, API [`withRecomputationInPullbacks(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE28withRecomputationInPullbacksyqd__qd__xcAaBRd__lF) enables you to control what to recompute during backpropagation, and it is available on all `Differentiable` types.
But today, let us learn how to define our own gradient checkpointing APIs from scratch, in just a few lines of code.
### Our gradient checkpointing API
We can define our own gradient checkpointing API, `makeRecomputedInGradient(_:)`, in terms of standard library function [`differentiableFunction(from:)`](https://www.tensorflow.org/swift/api_docs/Functions#/s:10TensorFlow22differentiableFunction4fromq0_x_q_tcq0_5value_15CotangentVectorQz_AEQy_tAEQy0_c8pullbacktx_q_tc_tAA14DifferentiableRzAaJR_AaJR0_r1_lF), which is a shorthand for creating a differentiable function directly from a derivative function (also called a "vector-Jacobian products (VJP) function").
As we have seen before, the derivative function returns a tuple of the original function's result and a pullback closure. We return `original(x)` in `value:`, and call `pullback(at:in:)` on `original` to evaluate the original function again and get a pullback.
```
/// Given a differentiable function, returns the same differentiable function except when
/// derivatives of this function are being computed. In that case, values in the original function needed
/// for computing the derivatives will be recomputed, instead of being captured by the differential or pullback.
///
/// - Parameter body: The body of the differentiable function.
/// - Returns: The same differentiable function whose derivatives, when computed, will recompute
/// some values from the original function.
func makeRecomputedInGradient<T: Differentiable, U: Differentiable>(
_ original: @escaping @differentiable (T) -> U
) -> @differentiable (T) -> U {
return differentiableFunction { x in
(value: original(x), pullback: { v in pullback(at: x, in: original)(v) })
}
}
```
### Verify it works
```
let input: Float = 10.0
print("Running original computation...")
// Differentiable multiplication with checkpointing.
let square = makeRecomputedInGradient { (x: Float) -> Float in
print(" Computing square...")
return x * x
}
// Differentiate `f(x) = (cos(x))^2`.
let (output, backprop) = input.valueWithPullback { input -> Float in
return square(cos(input))
}
print("Running backpropagation...")
let grad = backprop(1)
print("Gradient = \(grad)")
```
### Extend it to neural network modules
In this example, we define a simple convolutional neural network.
```swift
struct Model: Layer {
    var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6))
    var maxPool = MaxPool2D<Float>(poolSize: (2, 2), strides: (2, 2))
    var flatten = Flatten<Float>()
    var dense = Dense<Float>(inputSize: 36 * 6, outputSize: 10)
    @differentiable
    // Renamed from `call`: the `Layer` protocol's application requirement is
    // `callAsFunction(_:)`, which is also what the final Model definition later
    // in this tutorial uses.
    func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
        return input.sequenced(through: conv, maxPool, flatten, dense)
    }
}
```
We want to make activations in the convolution layer (`conv`) be recomputed during backpropagation. However, using `makeRecomputedInGradient(_:)` could make the resulting code look cumbersome, especially when we want to apply layers sequentially using [`sequenced(in:through:_:_:_:_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable#/s:10TensorFlow14DifferentiablePAAE9sequenced2in7through____6OutputQyd_3_AA7ContextC_qd__qd_0_qd_1_qd_2_qd_3_t5InputQyd__RszAA5LayerRd__AaMRd_0_AaMRd_1_AaMRd_2_AaMRd_3_AKQyd_0_AGRtd__AKQyd_1_AGRtd_0_AKQyd_2_AGRtd_1_AKQyd_3_AGRtd_2_r3_lF).
```swift
input.sequenced(in: context, through: conv, maxPool, flatten, dense)
```
So, why don't we define a **special layer type** that wraps a layer and makes its activations be recomputed during backpropagation? Let's do it.
First, we define a `makeRecomputedInGradient(_:)` function that takes a binary function.
```
// Same as the previous `makeRecomputedInGradient(_:)`, except it's for binary functions.
func makeRecomputedInGradient<T: Differentiable, U: Differentiable, V: Differentiable>(
_ original: @escaping @differentiable (T, U) -> V
) -> @differentiable (T, U) -> V {
return differentiableFunction { x, y in
(value: original(x, y), pullback: { v in pullback(at: x, y, in: original)(v) })
}
}
```
Then, we define a generic layer `ActivationDiscarding<Wrapped>`.
```
import TensorFlow
/// A layer wrapper that makes the underlying layer's activations be discarded during application
/// and recomputed during backpropagation.
struct ActivationDiscarding<Wrapped: Layer>: Layer {
/// The wrapped layer.
var wrapped: Wrapped
@differentiable
func callAsFunction(_ input: Wrapped.Input) -> Wrapped.Output {
let apply = makeRecomputedInGradient { (layer: Wrapped, input: Input) -> Wrapped.Output in
print(" Applying \(Wrapped.self) layer...")
return layer(input)
}
return apply(wrapped, input)
}
}
```
Finally, we can add a method on all layers that returns the same layer except its activations are discarded during application and recomputed during backpropagation.
```
extension Layer {
func discardingActivations() -> ActivationDiscarding<Self> {
return ActivationDiscarding(wrapped: self)
}
}
```
Back in the model, all we have to change is to wrap the convolution layer into the activation-recomputing layer.
```swift
var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)).discardingActivations()
```
Now, simply use it in the model!
```
struct Model: Layer {
var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)).discardingActivations()
var maxPool = MaxPool2D<Float>(poolSize: (2, 2), strides: (2, 2))
var flatten = Flatten<Float>()
var dense = Dense<Float>(inputSize: 36 * 6, outputSize: 10)
@differentiable
func callAsFunction(_ input: Tensor<Float>) -> Tensor<Float> {
return input.sequenced(through: conv, maxPool, flatten, dense)
}
}
```
When we run a training loop, we can see that the convolution layer's activations are computed twice: once during layer application, and once during backpropagation.
```
// Use random training data.
let x = Tensor<Float>(randomNormal: [10, 16, 16, 3])
let y = Tensor<Int32>(rangeFrom: 0, to: 10, stride: 1)
var model = Model()
let opt = SGD(for: model)
for i in 1...5 {
print("Starting training step \(i)")
print(" Running original computation...")
let (logits, backprop) = model.appliedForBackpropagation(to: x)
let (loss, dL_dŷ) = logits.valueWithGradient { logits in
softmaxCrossEntropy(logits: logits, labels: y)
}
print(" Loss: \(loss)")
print(" Running backpropagation...")
let (dL_dθ, _) = backprop(dL_dŷ)
opt.update(&model, along: dL_dθ)
}
```
Just like that, it is super easy to define generic differentiable programming libraries for different domains.
| github_jupyter |
```
from trackml import dataset, randomize, score, weights
import numpy as np
import pandas as pd
# hits, cells, particles, truth = dataset.load_event_particles('/home/ec2-user/SageMaker/efs/dataset/train/')
import time
from collections import defaultdict
# CONSTANTS
EVENT_ID = 'event000001000'
hits_file_template = '/home/ec2-user/SageMaker/efs/dataset/train/{event_id}-hits.csv'
truth_file_template = '/home/ec2-user/SageMaker/efs/dataset/train/{event_id}-truth.csv'
hit_orders_template = '/home/ec2-user/SageMaker/efs/particles-in-order/{event_id}-hit_orders.csv'
# Renamed from the misspelled `graph_output_forfile_template`:
# create_graph() below reads the module-level name `graph_output_file_template`,
# so the old spelling raised a NameError when the output path was built.
graph_output_file_template = '/home/ec2-user/SageMaker/efs/graph-data/{event_id}_graph_data.csv'
def create_graph(event_id, hits_file_template, truth_file_template, hit_orders_template):
    """Build a weighted detector-module transition graph for one event and write it to CSV.

    Hits are joined with ground truth and with the per-particle hit ordering,
    consecutive hits of the same particle are paired up by a self-join, and each
    (source module -> target module) transition is counted into an edge weight.

    Keyword arguments:
    event_id -- the event_id to be used for making a graph
    hits_file_template -- the file path template in the EFS for the hits file
    truth_file_template -- the file path template in the EFS for the truth file
    hit_orders_template -- the file path template in the EFS for the hit_order file (manually generated)
    """
    print("=========Start=========")
    start_time = time.process_time()
    # Step 0: resolve the concrete file paths for this event.
    hits_file = hits_file_template.format(event_id=event_id)
    truth_file = truth_file_template.format(event_id=event_id)
    hit_orders_file = hit_orders_template.format(event_id=event_id)
    # Step 1.1: hit positions / detector module ids
    hits_df = pd.read_csv(hits_file)
    # Step 1.2: ground truth (particle assignment per hit)
    truth_df = pd.read_csv(truth_file)
    # Step 1.3: per-particle ordering of hits along the trajectory
    hit_orders_df = pd.read_csv(hit_orders_file)
    # Step 2: attach ground truth to every hit.
    hits_truth_df = pd.merge(hits_df, truth_df, on=['hit_id'])
    # Step 3: attach each hit's position within its particle's trajectory.
    hits_truth_orders_df = pd.merge(hits_truth_df, hit_orders_df, on=['particle_id','hit_id'])
    # Step 4: drop all columns except those identifying a module and ordering hits.
    col_list = ["volume_id", "layer_id", "module_id", "particle_id", "hit_order"]
    hits_truth_orders_df_col_filtered = hits_truth_orders_df[col_list]
    # Step 5: copy, so the two sides of the self-join below can be renamed independently.
    hits_truth_orders_df_col_filtered_copy = hits_truth_orders_df_col_filtered.copy()
    # Step 6: rename module columns to distinguish edge source (_1) from target (_2).
    hits_truth_orders_df_col_filtered = hits_truth_orders_df_col_filtered.rename(index=str, columns={"volume_id": "volume_id_1", "layer_id": "layer_id_1", "module_id": "module_id_1"})
    hits_truth_orders_df_col_filtered_copy = hits_truth_orders_df_col_filtered_copy.rename(index=str, columns={"volume_id": "volume_id_2", "layer_id": "layer_id_2", "module_id": "module_id_2"})
    # Step 7: shift hit_order by -1 so that row k on the left joins row k+1 on the
    # right, i.e. each joined row represents two consecutive hits of one particle.
    hits_truth_orders_df_col_filtered_copy['hit_order'] = hits_truth_orders_df_col_filtered_copy['hit_order'].apply(lambda x: x - 1)
    # Step 8: self-join on (particle_id, hit_order) to materialize the edges.
    hits_truth_orders_join_particle_hit_id = pd.merge(hits_truth_orders_df_col_filtered, hits_truth_orders_df_col_filtered_copy, on=['particle_id','hit_order'])
    # Step 9: every consecutive-hit pair contributes weight 1 to its edge.
    hits_truth_orders_join_particle_hit_id['edge_weight'] = pd.Series(1, index=hits_truth_orders_join_particle_hit_id.index)
    # Step 10: group key = (source module, target module).
    cols_to_join = ["volume_id_1", "layer_id_1", "module_id_1", "volume_id_2", "layer_id_2", "module_id_2"]
    # Step 11: sum the weights of identical module-to-module edges.
    output_df = hits_truth_orders_join_particle_hit_id.groupby(cols_to_join)['edge_weight'].sum()
    # NOTE(review): reads the module-level name `graph_output_file_template`;
    # make sure the constant defined at the top of this cell uses this exact
    # spelling (the original constant was spelled `graph_output_forfile_template`).
    graph_output_file_name = graph_output_file_template.format(event_id=event_id)
    # Step 12: write the aggregated edge list to CSV.
    write_df_to_csv_default_location(output_df, graph_output_file_name)
    end_time = time.process_time()
    print("Time taken for event {event_id}: ".format(event_id=event_id) + str(end_time - start_time))
    print(hits_truth_orders_join_particle_hit_id)
def write_df_to_csv_default_location(df, file_name):
    """Persist *df* to *file_name* as tab-separated, UTF-8-encoded text.

    Keyword arguments:
    df -- the dataframe to write
    file_name -- the destination file path
    """
    export_options = {"sep": "\t", "encoding": "utf-8", "header": True}
    df.to_csv(file_name, **export_options)
create_graph(EVENT_ID, hits_file_template, truth_file_template, hit_orders_template)
```
| github_jupyter |
[Reference](https://www.dataquest.io/blog/python-pandas-databases/) <br>
SQLite is a database engine that makes it simple to store and work with relational data. Python has a library to access SQLite databases, called `sqlite3`, which has been included with Python since version `2.5`. <br>
```
import sqlite3
import pandas as pd
from pandas import DataFrame
# flights.db contains three tables: airports, airlines, and routes
conn = sqlite3.connect("flights.db") # create a connection object to a local db
csor = conn.cursor() # a Cursor object allows us to execute SQL queries against a database
# to fetch the first 5 rows from the airlines table
csor.execute("select * from airlines limit 5;") # execute a query using the cursor object's method `execute`.
results = DataFrame(csor.fetchall()) # to assign the result of the query to a variable, use fetchall() to fetch the results.
results # note that the original result is a list of tuples, so we convert the result to DataFrame.
# much better: pd.read_sql_query builds a DataFrame and automatically reads the table headers as column names.
df = pd.read_sql_query("select * from airlines limit 5;", conn)
df
# Good practice to close connection and cursor objects that are open.
csor.close()
conn.close()
```
# Modifying database rows
We can use the `sqlite3` package to modify a SQLite database by inserting, updating, or deleting rows.
## Inserting rows with Python
### [1] Hardcoding value into the database
```
import sqlite3
import pandas as pd
from pandas import DataFrame
# flights.db contains three tables airports, airlines, and routes
conn = sqlite3.connect("flights.db") # create a connection object to a local db
df = pd.read_sql_query("select * from airlines", conn)
conn.close()
df.tail(10)
# The practice below may add a column "airplanes" to the original db.
# If ths is the case, you need to rename the old one and copy the original column to the new one.
```
#### Displaying column datatype
```
import sqlite3
conn = sqlite3.connect('flights.db')
csor = conn.cursor()
msg = csor.execute("""
PRAGMA table_info (airlines)
""").fetchall()
print(msg)
conn.close()
```
#### Keeping part of columns
Run this subsection only when you encounter the problem where the airlines table has 10 rather than the original 9 columns.
```
# In sqlite, you cannot drop columns as you do in other sql languages.
# You need to rename the old table, copy the desired columns to a new one,
# and in the end drop the old table.
import sqlite3
conn = sqlite3.connect('flights.db')
csor = conn.cursor()
csor.execute("ALTER TABLE airlines RENAME TO _airlines_old;")
# "index" is a reserved keyword in SQLite, so it must be double-quoted
# wherever it is used as a column name (the unquoted form is a syntax error).
csor.execute("""
CREATE TABLE airlines
("index" INTEGER,
id INTEGER,
name TEXT,
alias TEXT,
iata TEXT,
icao TEXT,
callsign TEXT,
country TEXT,
active TEXT
);""")
csor.execute("""
INSERT INTO airlines ("index", id, name, alias, iata, icao, callsign, country, active)
SELECT "index", id, name, alias, iata, icao, callsign, country, active FROM _airlines_old;
""")
# Drop the renamed original so the rebuild leaves a single airlines table,
# and commit the whole transaction before re-opening the database.
csor.execute("DROP TABLE _airlines_old;")
conn.commit()
csor.close()
conn.close()
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
csor.execute("""
insert into airlines
values (6048, 19846, 'Test flight', '', '', null, null, null, 'Y')
""")
```
If you try to query the table now, you won't see the new row yet.
SQLite doesn't write to the database until you commit a transaction, which consists of one or more queries. A transaction won't commit
until all the queries succeed to avoid inconsistency among tables.
Sidenote: why do I get a database lock message? [reference](https://www.dataquest.io/blog/python-pandas-databases/#insertingrowswithpython)
```
conn.commit()
df = pd.read_sql_query("select * from airlines", conn)
df.tail(10)
# Re-open the database and query again to confirm the committed row is also
# visible from a fresh connection.
conn = sqlite3.connect('flights.db')
# was `pd.read_sql_query()` with no arguments, which raises TypeError --
# the function requires at least the SQL string and a connection.
df = pd.read_sql_query("select * from airlines", conn)
csor.close()
conn.close()
```
### [2] Insert values with string formatting
```
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
values = ('Test Flight', 'Y')
csor.execute("insert into airlines values (6049, 19847, ?, '', '', null, null, null, ?)", values)
conn.commit()
df = pd.read_sql_query("select * from airlines", conn)
csor.close()
conn.close()
df.tail(10)
```
## Deleting rows
```
import sqlite3
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
values = (19847,) # comma is not optional.
csor.execute("delete from airlines where id=?", values)
df = pd.read_sql_query("select * from airlines", conn)
conn.commit()
csor.close()
conn.close()
df.tail(10)
```
## Creating tables
```
import sqlite3
import pandas as pd
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
csor.execute("""
create table daily_flights
(id integer, departure date, arrival date, number text, route_id integer)
""")
conn.commit()
csor.execute("""
insert into daily_flights
values (1, '2016-09-28 0:00', '2016-09-28 12:00', 'T1', 1)
""")
conn.commit()
df = pd.read_sql_query("select * from daily_flights", conn)
csor.close()
conn.close()
df.tail(10)
```
## Removing tables
```
import sqlite3
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
csor.execute("drop table daily_flights")
conn.commit()
csor.close()
conn.close()
```
## Creating tables with pandas
```
from datetime import datetime
import pandas as pd
import sqlite3
conn = sqlite3.connect("flights.db")
df = pd.DataFrame(
[[1, datetime(2016, 9, 29, 0, 0) , datetime(2016, 9, 29, 12, 0), 'T1', 1]],
columns=["id", "departure", "arrival", "number", "route_id"]
)
# to convert df to a table in a database
df.to_sql("daily_flights", conn, if_exists = "replace")
# to verify that everything worked by querying the database
df = pd.read_sql_query("select * from daily_flights;", conn)
conn.close()
df
```
## Adding a column with pandas
```
import pandas as pd
import sqlite3
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
# ALTER TABLE ... ADD COLUMN is supported by sqlite.
csor.execute("""
alter table airlines
add column airplanes integer
""")
conn.commit()
df = pd.read_sql_query("select * from airlines limit 1", conn)
csor.close()
conn.close()
print(df)
# NOTE(review): the cursor and connection were already closed just above --
# these repeated close() calls are harmless no-ops in sqlite3.
csor.close()
conn.close()
# Note that DROP COLUMN doesn't work in sqlite.
# Thus, the operation of this cell fails.
import pandas as pd
import sqlite3
conn = sqlite3.connect("flights.db")
csor = conn.cursor()
csor.execute("alter table airlines drop column airplanes")
csor.close()
conn.close()
```
# Mapping airports
[Reference](https://www.dataquest.io/blog/python-pandas-databases/#mappingroutes): Dataquest
—
Working with SQLite Databases using Python and Pandas <br>
```
import sqlite3
conn = sqlite3.connect("flights.db") # access to a local db
csor = conn.cursor() # allows us to execute SQL queries against the database
# retrieve the longitude and latitude columns from airports,
# convert them to floats, then call fetchall() to retrieve all rows
coords = csor.execute("""
select cast(longitude as float),
cast(latitude as float)
from airports;
""").fetchall()
# Basemap lives in the mpl_toolkits namespace; a bare `import Basemap`
# raises ModuleNotFoundError.
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
# map setup:
# draw the continents and coastlines
# that will form the background of our map
m = Basemap(
    projection='merc',
    llcrnrlat=-80,
    urcrnrlat=80,
    llcrnrlon=-180,
    urcrnrlon=180,
    lat_ts=20,
    resolution='c'
)
m.drawcoastlines()
m.drawmapboundary()
# I didn't proceed, because Basemap and Tensorflow conflict.
```
# Import db from a local file
## [1] Through sqlite3 package
```
import sqlite3
# data source:
# https://github.com/jpwhite3/northwind-SQLite3
sqlite_file = r'C:\Users\libin\Desktop\Northwind_large.sqlite'
conn = sqlite3.connect(sqlite_file)
conn.close()
```
# Northwind Database
## Show list of tables
```
from sqlalchemy import create_engine
engine = create_engine('sqlite:///Northwind_small.sqlite')
print(engine.table_names())
```
## Import .sqlite files
```
# Be sure you have installed ipython-sql.
# If you do, run the following...
%reload_ext sql
# source 1: https://github.com/jpwhite3/northwind-SQLite3
# source 2: https://northwinddatabase.codeplex.com/downloads/get/269239
# example: %sql sqlite:///flights.db
%sql sqlite:///Northwind_small.sqlite
```
A link for learning sqlite: https://sebastianraschka.com/Articles/2014_sqlite_in_python_tutorial.html <br>
Drop column in sqlite: https://www.techonthenet.com/sqlite/tables/alter_table.php <br>
Northwind question set No.1: https://www.youtube.com/watch?v=3j9PFyvakOA <br>
SQL project: https://www.youtube.com/channel/UCvIHnJ8croj_v2fx_ZXuTlw <br>
Google Cloud python: https://www.youtube.com/watch?v=chk2rRjSn5o <br>
Google compute engine: https://www.youtube.com/watch?annotation_id=annotation_1708545089&feature=iv&src_vid=LrjpcR-IJwY&v=gxZvofAvgHQ <br>
Create table: https://www.youtube.com/watch?v=NCc5r7Wr7gg <br>
KD projects: https://www.kdnuggets.com/2017/05/data-science-tutorial-series-software-engineers.html <br>
install Northwind into SQL servor 2014: https://www.youtube.com/watch?v=iKVbx5IeUvQ <br>
```
import sqlite3
conn = sqlite3.connect('Northwind_small.sqlite')
csor = conn.cursor()
df = csor.execute("SELECT * FROM Employee LIMIT 3").fetchall()
print(df)
conn.close()
```
| github_jupyter |
# Introduction
You've built up your SQL skills enough that the remaining hands-on exercises will use different datasets than you see in the explanations. If you need to get to know a new dataset, you can run a couple of **SELECT** queries to extract and review the data you need.
The next exercises are also more challenging than what you've done so far. Don't worry, you are ready for it!
Run the code in the following cell to get everything set up:
```
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql.ex4 import *
print("Setup Complete")
```
The World Bank has made tons of interesting education data available through BigQuery. Run the following cell to see the first few rows of the `international_education` table from the `world_bank_intl_education` dataset.
```
from google.cloud import bigquery
# Create a "Client" object
# (authenticates via the environment's default credentials; this same
# `client` object is reused by the query cells later in the notebook)
client = bigquery.Client()
# Construct a reference to the "world_bank_intl_education" dataset
dataset_ref = client.dataset("world_bank_intl_education", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
# Construct a reference to the "international_education" table
table_ref = dataset_ref.table("international_education")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "international_education" table
# max_results=5 limits how many rows are pulled; to_dataframe() renders
# them as a pandas DataFrame for display.
client.list_rows(table, max_results=5).to_dataframe()
```
# Exercises
The value in the `indicator_code` column describes what type of data is shown in a given row.
One interesting indicator code is `SE.XPD.TOTL.GD.ZS`, which corresponds to "Government expenditure on education as % of GDP (%)".
### 1) Government expenditure on education
Which countries spend the largest fraction of GDP on education?
To answer this question, consider only the rows in the dataset corresponding to indicator code `SE.XPD.TOTL.GD.ZS`, and write a query that returns the average value in the `value` column for each country in the dataset between the years 2010-2017 (including 2010 and 2017 in the average).
Requirements:
- Your results should have the country name rather than the country code. You will have one row for each country.
- The aggregate function for average is **AVG()**. Use the name `avg_ed_spending_pct` for the column created by this aggregation.
- Order the results so the countries that spend the largest fraction of GDP on education show up first.
In case it's useful to see a sample query, here's a query you saw in the tutorial (using a different dataset):
```
# Query to find out the number of accidents for each day of the week
# COUNT() is computed per GROUP BY bucket; EXTRACT(DAYOFWEEK ...) yields
# an integer day index, and ORDER BY ... DESC puts the day with the most
# accidents first.
query = """
SELECT COUNT(consecutive_number) AS num_accidents,
EXTRACT(DAYOFWEEK FROM timestamp_of_crash) AS day_of_week
FROM `bigquery-public-data.nhtsa_traffic_fatalities.accident_2015`
GROUP BY day_of_week
ORDER BY num_accidents DESC
"""
```
```
# Your code goes here
# Exercise skeleton: replace each ____ with the SELECT list (country name
# plus AVG(value) AS avg_ed_spending_pct), the WHERE filter on
# indicator_code and the 2010-2017 year range, and matching
# GROUP BY / ORDER BY clauses, per the prompt above.
country_spend_pct_query = """
SELECT _____
FROM `bigquery-public-data.world_bank_intl_education.international_education`
WHERE ____
GROUP BY ____
ORDER BY ____
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 1 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=1e9)
country_spend_pct_query_job = client.query(country_spend_pct_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
country_spending_results = country_spend_pct_query_job.to_dataframe()
# View top few rows of results
print(country_spending_results.head())
# Check your answer
q_1.check()
```
For a hint or the solution, uncomment the appropriate line below.
```
#q_1.hint()
#q_1.solution()
```
### 2) Identify interesting codes to explore
The last question started by telling you to focus on rows with the code `SE.XPD.TOTL.GD.ZS`. But how would you find more interesting indicator codes to explore?
There are 1000s of codes in the dataset, so it would be time consuming to review them all. But many codes are available for only a few countries. When browsing the options for different codes, you might restrict yourself to codes that are reported by many countries.
Write a query below that selects the indicator code and indicator name for all codes with at least 175 rows in the year 2016.
Requirements:
- You should have one row for each indicator code.
- The columns in your results should be called `indicator_code`, `indicator_name`, and `num_rows`.
- Only select codes with 175 or more rows in the raw database (exactly 175 rows would be included).
- To get both the `indicator_code` and `indicator_name` in your resulting DataFrame, you need to include both in your **SELECT** statement (in addition to a **COUNT()** aggregation). This requires you to include both in your **GROUP BY** clause.
- Order from results most frequent to least frequent.
```
# Your code goes here
# Exercise skeleton: write a full query returning indicator_code,
# indicator_name and a COUNT(...) AS num_rows for codes with at least
# 175 rows in year 2016, ordered from most to least frequent.
code_count_query = """____"""
# Set up the query
# NOTE: `safe_config` (the 1 GB billing cap) was defined in the
# previous exercise cell and is reused here.
code_count_query_job = client.query(code_count_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
code_count_results = code_count_query_job.to_dataframe()
# View top few rows of results
print(code_count_results.head())
# Check your answer
q_2.check()
```
For a hint or the solution, uncomment the appropriate line below.
```
#q_2.hint()
#q_2.solution()
```
# Keep Going
**[Click here](#$NEXT_NOTEBOOK_URL$)** to learn how to use **AS** and **WITH** to clean up your code and help you construct more complex queries.
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_1_overview.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 1: Python Preliminaries**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 1 Material
* **Part 1.1: Course Overview** [[Video]](https://www.youtube.com/watch?v=v8QsRio8zUM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_1_overview.ipynb)
* Part 1.2: Introduction to Python [[Video]](https://www.youtube.com/watch?v=czq5d53vKvo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_2_intro_python.ipynb)
* Part 1.3: Python Lists, Dictionaries, Sets and JSON [[Video]](https://www.youtube.com/watch?v=kcGx2I5akSs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_3_python_collections.ipynb)
* Part 1.4: File Handling [[Video]](https://www.youtube.com/watch?v=FSuSLCMgCZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_4_python_files.ipynb)
* Part 1.5: Functions, Lambdas, and Map/Reduce [[Video]](https://www.youtube.com/watch?v=jQH1ZCSj6Ng&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_01_5_python_functional.ipynb)
Watch one (or more) of these depending on how you want to setup your Python TensorFlow environment:
* [How to Submit a Module Assignment locally](https://www.youtube.com/watch?v=hmCGjCVhYNc)
* [How to Use Google CoLab and Submit Assignment](https://www.youtube.com/watch?v=Pt-Od-oBgOM)
* [Installing TensorFlow, Keras, and Python in Windows](https://www.youtube.com/watch?v=59duINoc8GM)
* [Installing TensorFlow, Keras, and Python in Mac](https://www.youtube.com/watch?v=mcIKDJYeyFY)
# Part 1.1: Course Overview
Deep learning is a group of exciting new technologies for neural networks. By using a combination of advanced training techniques and neural network architectural components, it is now possible to train neural networks of much greater complexity. This course will introduce the student to deep belief neural networks, regularization units (ReLU), convolution neural networks and recurrent neural networks. High performance computing (HPC) aspects will demonstrate how deep learning can be leveraged both on graphical processing units (GPUs), as well as grids. Deep learning allows a model to learn hierarchies of information in a way that is similar to the function of the human brain. Focus will be primarily upon the application of deep learning, with some introduction to the mathematical foundations of deep learning. Students will use the Python programming language to architect a deep learning model for several real-world data sets and interpret the results of these networks.
# Assignments
Your grade will be calculated according to the following assignments:
Assignment |Weight|Description
--------------------|------|-------
Class Participation | 10%|Class attendance and participation (individual)
Class Assignments | 50%|10 small programming assignments (5% each, individual)
Kaggle Project | 20%|"Kaggle In-Class" project submitted through Kaggle (Kaggle Team, up to 5 people)
Final Project | 20%|Deep Learning Implementation Report (Same Kaggle Team)
The 10 class assignments will be assigned with each of the first 10 modules. Generally, each module assignment is due just before the following module date. Refer to syllabus for exact due dates. The 10 class assignments are submitted using the Python submission script. Refer to assignment 1 for details.
* Module 1 Assignment: [How to Submit an Assignment](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
* Module 2 Assignment: [Creating Columns in Pandas](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class2.ipynb)
* Module 3 Assignment: [Data Preparation in Pandas](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class3.ipynb)
* Module 4 Assignment: [Classification and Regression Neural Networks](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class4.ipynb)
* Module 5 Assignment: [K-Fold Cross-Validation](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class5.ipynb)
* Module 6 Assignment: [Image Processing](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class6.ipynb)
* Module 7 Assignment: [Computer Vision](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class7.ipynb)
* Module 8 Assignment: [Building a Kaggle Submission File](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
* Module 9 Assignment: [Counting Items in a YOLO Image](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class9.ipynb)
* Module 10 Assignment: [Time Series Neural Network](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class10.ipynb)
# Your Instructor: Jeff Heaton

I will be your instructor for this course. A brief summary of my credentials is given here:
* Master of Information Management (MIM), Washington University in St. Louis, MO
* PhD in Computer Science, Nova Southeastern University in Ft. Lauderdale, FL
* [Vice President and Data Scientist](http://www.rgare.com/knowledge-center/media/articles/rga-where-logic-meets-curiosity), Reinsurance Group of America (RGA)
* Senior Member, IEEE
* jtheaton at domain name of this university
* Other industry certifications: FLMI, ARA, ACS
Social media:
* [Homepage](http://www.heatonresearch.com) - My home page. Includes my research interests and publications.
* [YouTube Channel](https://www.youtube.com/user/HeatonResearch) - My YouTube Channel. Subscribe for my videos on AI and updates to this class.
* [GitHub](https://github.com/jeffheaton) - My GitHub repositories.
* [Linked In](https://www.linkedin.com/in/jeffheaton) - My Linked In profile.
* [Twitter](https://twitter.com/jeffheaton) - My Twitter feed.
* [Google Scholar](https://scholar.google.com/citations?user=1jPGeg4AAAAJ&hl=en) - My citations on Google Scholar.
* [Research Gate](https://www.researchgate.net/profile/Jeff_Heaton) - My profile/research at Research Gate.
* [Others](http://www.heatonresearch.com/about/) - About me and other social media sites that I am a member of.
# Course Resources
* [Google CoLab](https://colab.research.google.com/) - Free web-based platform that includes Python, Juypter Notebooks, and TensorFlow [[Cite:GoogleTensorFlow]](http://download.tensorflow.org/paper/whitepaper2015.pdf). No setup needed.
* [Python Anaconda](https://www.continuum.io/downloads) - Python distribution that includes many data science packages, such as Numpy, Scipy, Scikit-Learn, Pandas, and much more.
* [Juypter Notebooks](http://jupyter.org/) - Easy to use environment that combines Python, Graphics and Text.
* [TensorFlow](https://www.tensorflow.org/) - Google's mathematics package for deep learning.
* [Kaggle](https://www.kaggle.com/) - Competitive data science. Good source of sample data.
* [Course GitHub Repository](https://github.com/jeffheaton/t81_558_deep_learning) - All of the course notebooks will be published here.
# What is Deep Learning
The focus of this class is deep learning, which is a very popular type of machine learning that is based upon the original neural networks popularized in the 1980's. There is very little difference between how a deep neural network is calculated compared with the original neural network. We've always been able to create and calculate deep neural networks. A deep neural network is nothing more than a neural network with many layers. While we've always been able to create/calculate deep neural networks, we've lacked an effective means of training them. Deep learning provides an efficient means to train deep neural networks.
## What is Machine Learning
If deep learning is a type of machine learning, this begs the question, "What is machine learning?" The following diagram illustrates how machine learning differs from traditional software development.

* **Traditional Software Development** - Programmers create programs that specify how to transform input into the desired output.
* **Machine Learning** - Programmers create models that can learn to produce the desired output for given input. This learning fills the traditional role of the computer program.
Researchers have applied machine learning to many different areas. This class will explore three specific domains for the application of deep neural networks:

* **Predictive Modeling** - Several named input values are used to predict another named value that becomes the output. For example, using four measurements of iris flowers to predict the species. This type of data is often called tabular data.
* **Computer Vision** - The use of machine learning to detect patterns in visual data. For example, is an image a cat or a dog.
* **Time Series** - The use of machine learning to detect patterns in time. Common applications of time series are: financial applications, speech recognition, and even natural language processing (NLP).
### Regression
Regression is when a model, such as a neural network, accepts input and produces a numeric output. Consider if you were tasked to write a program that predicted how many miles per gallon (MPG) a car could achieve. For the inputs you would probably want such features as the weight of the car, the horsepower, how large the engine is, etc. Your program would be a combination of math and if-statements.
Machine learning lets the computer learn the "formula" for calculating the MPG of a car, using data. Consider [this](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/data/auto-mpg.csv) dataset. We can use regression machine learning models to study this data and learn how to predict the MPG for a car.
### Classification
The output of a classification model is what class the input belongs to. For example, consider using four measurements of an iris flower to determine the species that the flower is in. [This](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/data/iris.csv) dataset could be used to train such a classification model.
### Beyond Classification and Regression
One of the most powerful aspects of neural networks is that they cannot simply be typed as either regression or classification. The output from a neural network could be any number of the following:
* An image
* A series of numbers that could be interpreted as text, audio, or another time series
* A regression number
* A classification class
## What are Neural Networks
Neural networks are one of the earliest types of machine learning model. Neural networks were originally introduced in the 1940's and have risen and fallen [several times from popularity](http://hushmagazine.ca/living-2/business/the-believers-the-hidden-story-behind-the-code-that-runs-our-lives). Four researchers have contributed greatly to the development of neural networks. They have consistently pushed neural network research, both through the ups and downs:

The current luminaries of artificial neural network (ANN) research and ultimately deep learning, in order as appearing in the above picture:
* [Yann LeCun](http://yann.lecun.com/), Facebook and New York University - Optical character recognition and computer vision using convolutional neural networks (CNN). The founding father of convolutional nets.
* [Geoffrey Hinton](http://www.cs.toronto.edu/~hinton/), Google and University of Toronto. Extensive work on neural networks. Creator of deep learning and early adapter/creator of backpropagation for neural networks.
* [Yoshua Bengio](http://www.iro.umontreal.ca/~bengioy/yoshua_en/index.html), University of Montreal. Extensive research into deep learning, neural networks, and machine learning. He has so far remained completely in academia.
* [Andrew Ng](http://www.andrewng.org/), Baidu and Stanford University. Extensive research into deep learning, neural networks, and application to robotics.
Geoffrey Hinton, Yann LeCun, and Yoshua Bengio won the [Turing Award](https://www.acm.org/media-center/2019/march/turing-award-2018) for their contributions to deep learning.
## Why Deep Learning?
For predictive modeling neural networks are not that different than other models, such as:
* Support Vector Machines
* Random Forests
* Gradient Boosted Machines
Like these other models, neural networks can perform both **classification** and **regression**. When applied to relatively low-dimensional predictive modeling tasks, deep neural networks do not necessarily add significant accuracy over other model types. Andrew Ng describes the advantage of deep neural networks over traditional model types as follows:

Neural networks also have two additional significant advantages over other machine learning models:
* **Convolutional Neural Networks** - Can scan an image for patterns within the image.
* **Recurrent Neural Networks** - Can find patterns across several inputs, not just within a single input.
Neural networks are also very flexible on the type of data that can be presented to the input and output layers. A neural network can take tabular data, images, audio sequences, time series tabular data, and text as its input or output.
# Python for Deep Learning
Python 3.x is the programming language that will be used for this class. Python, as a programming language, has the widest support for deep learning. The three most popular frameworks for deep learning in Python are:
* [TensorFlow](https://www.tensorflow.org/) (Google)
* [MXNet](https://github.com/dmlc/mxnet) (Amazon)
* [CNTK](https://cntk.ai/) (Microsoft)
* [Theano](http://deeplearning.net/software/theano/) (University of Montreal) - Popular but discontinued.
Some references on popular programming languages for AI/Data Science:
* [Popular Programming Languages for AI](https://en.wikipedia.org/wiki/List_of_programming_languages_for_artificial_intelligence)
* [Popular Programming Languages for Data Science](http://www.kdnuggets.com/2014/08/four-main-languages-analytics-data-mining-data-science.html)
# Software Installation
This is a technical class. You will need to be able to compile and execute Python code that makes use of TensorFlow for deep learning. There are two options available to you to accomplish this:
* Install Python, TensorFlow and some IDE (Jupyter, TensorFlow, etc.)
* Use Google CoLab in the cloud
## Installing Python and TensorFlow
It is possible to install and run Python/TensorFlow entirely from your own computer. Google provides TensorFlow for Windows, Mac and Linux. Previously, TensorFlow did not support Windows. However, as of December 2016, TensorFlow supports Windows for both CPU and GPU operation.
The first step is to install Python 3.7. As of August 2019, this is the latest version of Python 3. I recommend using the Miniconda (Anaconda) release of Python, as it already includes many of the data science related packages that will be needed by this class. Anaconda directly supports: Windows, Mac and Linux. Miniconda is the minimal set of features from the very large Anaconda Python distribution. Download Miniconda from the following URL:
* [Miniconda](https://docs.conda.io/en/latest/miniconda.html)
# Dealing with TensorFlow incompatibility with Python 3.7
*Note: I will remove this section once all needed libraries add support for Python 3.7.
**VERY IMPORTANT** Once Miniconda has been downloaded you must create a Python 3.6 environment. Not all TensorFlow 2.0 packages currently (as of August 2019) support Python 3.7. This is not unusual, usually you will need to stay one version back from the latest Python to maximize compatibility with common machine learning packages. So you must execute the following commands:
```
conda create -y --name tensorflow python=3.6
```
To enter this environment, you must use the following command (**for Windows**), this command must be done every time you open a new Anaconda/Miniconda terminal window:
```
activate tensorflow
```
For **Mac**, do this:
```
source activate tensorflow
```
# Installing Jupyter
It is easy to install Jupyter notebooks with the following command:
```
conda install -y jupyter
```
Once Jupyter is installed, it is started with the following command:
```
jupyter notebook
```
The following packages are needed for this course:
```
conda install -y scipy
pip install --exists-action i --upgrade sklearn
pip install --exists-action i --upgrade pandas
pip install --exists-action i --upgrade pandas-datareader
pip install --exists-action i --upgrade matplotlib
pip install --exists-action i --upgrade pillow
pip install --exists-action i --upgrade tqdm
pip install --exists-action i --upgrade requests
pip install --exists-action i --upgrade h5py
pip install --exists-action i --upgrade pyyaml
pip install --exists-action i --upgrade tensorflow_hub
pip install --exists-action i --upgrade bayesian-optimization
pip install --exists-action i --upgrade spacy
pip install --exists-action i --upgrade gensim
pip install --exists-action i --upgrade flask
pip install --exists-action i --upgrade boto3
pip install --exists-action i --upgrade gym
pip install --exists-action i --upgrade tensorflow==2.0.0-beta1
pip install --exists-action i --upgrade keras-rl2 --user
conda update -y --all
```
Notice that I am installing a specific version of TensorFlow. As of the current semester, this is the latest version of TensorFlow. It is very likely that Google will upgrade this during this semester. The newer version may have some incompatibilities, so it is important that we start with this version and end with the same.
You should also link your new **tensorflow** environment to Jupyter so that you can choose it as a kernel. Always make sure to run your Jupyter notebooks from your 3.6 kernel. This is demonstrated in the video.
```
python -m ipykernel install --user --name tensorflow --display-name "Python 3.6 (tensorflow)"
```
# Python Introduction
* [Anaconda v3.6](https://www.continuum.io/downloads) Scientific Python Distribution, including: [Scikit-Learn](http://scikit-learn.org/), [Pandas](http://pandas.pydata.org/), and others: csv, json, numpy, scipy
* [Jupyter Notebooks](http://jupyter.readthedocs.io/en/latest/install.html)
* [PyCharm IDE](https://www.jetbrains.com/pycharm/)
* [Cx_Oracle](http://cx-oracle.sourceforge.net/)
* [MatPlotLib](http://matplotlib.org/)
## Jupyter Notebooks
Space matters in Python, indent code to define blocks
Jupyter Notebooks Allow Python and Markdown to coexist.
Even LaTeX math:
$ f'(x) = \lim_{h\to0} \frac{f(x+h) - f(x)}{h}. $
## Python Versions
* If you see `xrange` instead of `range`, you are dealing with Python 2
* If you see `print x` instead of `print(x)`, you are dealing with Python 2
* This class uses Python 3.6!
```
# What version of Python do you have?
import sys
# NOTE: `import tensorflow.keras` also binds the top-level name
# `tensorflow`, which is what makes `tensorflow.keras.__version__`
# resolvable in the print below.
import tensorflow.keras
import pandas as pd
import sklearn as sk
import tensorflow as tf
# Report exact library versions so environment problems are easy to spot.
print(f"Tensor Flow Version: {tf.__version__}")
print(f"Keras Version: {tensorflow.keras.__version__}")
print()
print(f"Python {sys.version}")
print(f"Pandas {pd.__version__}")
print(f"Scikit-Learn {sk.__version__}")
# NOTE(review): tf.test.is_gpu_available() is deprecated in later TF
# releases in favor of tf.config.list_physical_devices('GPU'); it is
# appropriate for the TF 2.0 beta pinned by this course.
print("GPU is", "available" if tf.test.is_gpu_available() else "NOT AVAILABLE")
```
Software used in this class:
* **Python** - The programming language.
* **TensorFlow** - Googles deep learning framework, must have the version specified above.
* **Keras** - [Keras](https://github.com/fchollet/keras) is a high-level neural networks API, written in Python and capable of running on top of TensorFlow, CNTK, or Theano.
* **Pandas** - Allows for data preprocessing. Tutorial [here](http://pandas.pydata.org/pandas-docs/version/0.18.1/tutorials.html)
* **Scikit-Learn** - Machine learning framework for Python. Tutorial [here](http://scikit-learn.org/stable/tutorial/basic/tutorial.html).
# Module 1 Assignment
You can find the first assignment here: [assignment 1](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb)
| github_jupyter |
# 4. Categorical Model
Author: _Carlos Sevilla Salcedo (Updated: 18/07/2019)_
This notebook presents the categorical approach of the algorithm. For our model we understand that the view we are analysing is composed of one among several categories (the data given to the model must be an integer). To do so, we have to use the graphic model shown in the next image modifying the relation between the variables $X$ and $t$.
<img src="Images/Graphic_Model_Categorical.png" style="max-width:100%; width: 70%">
where, in this case, variable $t$ is now a vector instead of a matrix.
In order to have this relationship we have established a multinomial probit function as the connection between them, as proposed by _Girolami (2016)_.
## Synthetic data generation
We can now generate data in a similar manner to the regression model to compare the performance of both approaches. In this case we are going to change the regression data to a categorical approach, to work with classes.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import math
# ---- Synthetic two-view data for the categorical model ----
np.random.seed(0)  # fixed seed so the experiment is reproducible

N = 1000   # number of samples
D0 = 55    # input features
D1 = 3     # output features
myKc = 20
K = 2      # common latent variables
K0 = 3     # first view's latent variables
K1 = 3     # second view's latent variables
Kc = K + K0 + K1  # total latent variables

# Generation of matrix W: the A* blocks load the shared factors and the
# B* blocks the view-private ones; zero blocks keep each view blind to
# the other view's private factors.
A0 = np.random.normal(0.0, 1, size=(D0, K))
A1 = np.random.normal(0.0, 1, size=(D1, K))
B0 = np.random.normal(0.0, 1, size=(D0, K0))
B1 = np.random.normal(0.0, 1, size=(D1, K1))
W0 = np.hstack((A0, B0, np.zeros((D0, K1))))
W1 = np.hstack((A1, np.zeros((D1, K0)), B1))
W_tot = np.vstack((W0, W1))

# Generation of matrix Z: the latent representation of every sample.
Z = np.random.normal(0.0, 1, size=(N, Kc))

# Generation of matrix X: linear mixing of Z plus small Gaussian noise.
X0 = Z @ W0.T + np.random.normal(0.0, 0.1, size=(N, D0))
X1 = Z @ W1.T + np.random.normal(0.0, 0.1, size=(N, D1))

# Generation of vector t: the class label is the index of the strongest
# second-view feature for each sample.
t1 = np.argmax(X1, axis=1)
```
Once the data is generated we divide it into train and test in order to be able to test the performance of the model. After that, we can normalize the data.
```
from sklearn.model_selection import train_test_split
# Hold out 30% of the samples for testing; the fixed random_state makes
# the split reproducible across runs.
X_tr, X_tst, Y_tr, Y_tst = train_test_split(X0, t1, test_size=0.3, random_state = 31)
from sklearn.preprocessing import StandardScaler
# Standardize features using statistics of the training split only, then
# apply the same transform to the test split (avoids data leakage).
scaler = StandardScaler()
X_tr = scaler.fit_transform(X_tr)
X_tst = scaler.transform(X_tst)
```
## Training the model
Once the data is prepared we just have to feed it to the model. As the model has so many possibilities we have decided to pass the data to the model following a particular structure so that we can know, for each view, whether the data corresponds to real, multilabel or categorical, as well as knowing if we want to calculate the model with sparsity in the features.
```
import os
os.sys.path.append('lib')   # make the local sshiba implementation importable
import sshiba
myKc = 20 # number of latent features
max_it = int(5*1e4) # maximum number of iterations
tol = 1e-6 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol)
prune = 1 # whether to prune the irrelevant latent features
# Train SSHIBA on the synthetic features (view 0) and labels (view 1).
myModel = sshiba.SSHIBA(myKc, prune)
# struct_data wraps each view with metadata; presumably the second
# argument marks the view type (0 = real, 1 = categorical) and the third
# toggles feature sparsity — confirm against lib/sshiba.py.
X0_tr = myModel.struct_data(X_tr, 0, 0)
X1_tr = myModel.struct_data(Y_tr, 1, 0)
X0_tst = myModel.struct_data(X_tst, 0, 0)
X1_tst = myModel.struct_data(Y_tst, 1, 0)
# Fit until the lower-bound change drops below `tol` (or max_it is hit),
# tracking multiclass AUC on the held-out split.
myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1)
print('Final AUC %.3f' %(myModel.AUC[-1]))
```
## Visualization of the results
### Lower Bound and MSE
Now that the model is trained, we can plot the evolution of the lower bound throughout the iterations. This lower bound is calculated using the values of the variables the model is calculating and is the value we are maximizing. As we want to maximize this value it has to be always increasing with each iteration.
At the same time, we are plotting now the evolution of the Mean Squared Error (MSE) with each update of the model. As we are not minimizing this curve, this doesn't necessarily have to be always decreasing and might need more iterations to reach a minimum.
```
def _plot_metric(values, ylabel, label=None):
    """Draw one per-iteration metric as a line plot on a fresh figure.

    Shared by plot_L / plot_AUC so both curves use identical styling.
    """
    fig, ax = plt.subplots(figsize=(10, 4))
    ax.plot(values, linewidth=2, marker='s', markersize=5,
            markerfacecolor='red', label=label)
    ax.grid()
    ax.set_xlabel('Iteration')
    ax.set_ylabel(ylabel)
    if label is not None:
        plt.legend()

def plot_AUC(AUC):
    """Plot the evolution of the multiclass AUC over iterations."""
    _plot_metric(AUC, 'Multiclass AUC', label='SSHIBA')

def plot_L(L):
    """Plot the evolution of the variational lower bound L(Q)."""
    _plot_metric(L, 'L(Q)')

plot_L(myModel.L)
plt.title('Lower Bound')
plot_AUC(myModel.AUC)
plt.title('AUC test')
plt.show()
```
## LFW Dataset
In order to improve the analysis of the results, we are showing in this section the results obtained using the _LFW_ database. This database is composed of different images of famous people and the goal is to identify which person each of them is. For the purpose of this example we have included the images of the people with the most images, so our data is now composed of 7 people or categories.
First of all, we can prepare the data we want to work with.
```
import pickle
from sklearn.model_selection import train_test_split

# Load the preprocessed LFW faces (the 7 most-photographed identities).
resize = 0.4  # down-scaling factor baked into the pickle's filename
# NOTE(review): unpickling executes arbitrary code if the file is
# untrusted; this assumes the local database file is trusted. Using a
# `with` block (unlike a bare open()) guarantees the file is closed.
with open('Databases/data_lfwa_' + str(resize) + '_7classes.pkl', 'rb') as f:
    my_dict = pickle.load(f, encoding='latin1')

X = my_dict['X'].astype(float)    # flattened grayscale images
Y = my_dict['Y_cat'].astype(int)  # integer class label per image
h = my_dict['h']                  # image height in pixels
w = my_dict['w']                  # image width in pixels
target_names = my_dict['target']  # class index -> person name

# 70/30 train/test split, reproducible via the fixed random_state.
# (Y is already int, so the original's extra .astype(int) was dropped.)
X_tr, X_tst, Y_tr, Y_tst = train_test_split(X, Y, test_size=0.3, random_state=31)

n_samples = X.shape[0]
n_features = X.shape[1]
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
```
Here we can see what the images we have downloaded look like.
```
# Preview a 6x3 grid of raw test images to sanity-check the data.
n_col, n_row = 6, 3
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
total = n_row * n_col
for idx in range(total):
    plt.subplot(n_row, n_col, idx + 1)
    plt.imshow(X_tst[idx].reshape((h, w)), cmap=plt.cm.gray)
    plt.xticks(())
    plt.yticks(())
```
At this point, the model can be trained with the train and test splits.
```
myKc = 50 # number of latent features
max_it = int(5*1e4) # maximum number of iterations
tol = 1e-7 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol)
prune = 1 # whether to prune the irrelevant latent features
# Train SSHIBA on the LFW pixels (view 0) and identity labels (view 1).
myModel = sshiba.SSHIBA(myKc, prune)
# struct_data's second argument presumably marks the view type
# (0 = real-valued, 1 = categorical) — confirm against lib/sshiba.py.
X0_tr = myModel.struct_data(X_tr, 0, 0)
X1_tr = myModel.struct_data(Y_tr, 1, 0)
X0_tst = myModel.struct_data(X_tst, 0, 0)
X1_tst = myModel.struct_data(Y_tst, 1, 0)
# Fit until convergence (tol) or max_it, tracking held-out AUC.
myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1)
print('Final AUC %.3f' %(myModel.AUC[-1]))
```
Now that the model is trained, we can visualize the results, seeing what the images look like as well as both the true and predicted label for each one of them.
```
def plot_gallery(images, titles, h, w, n_row=3, n_col=6):
    """Render an n_row-by-n_col grid of grayscale portraits with per-image titles."""
    n_cells = n_row * n_col
    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for idx in range(n_cells):
        plt.subplot(n_row, n_col, idx + 1)
        # each row of `images` is a flattened h-by-w grayscale picture
        plt.imshow(images[idx].reshape((h, w)), cmap=plt.cm.gray)
        plt.title(titles[idx], size=12)
        plt.xticks(())
        plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
    """Build a two-line caption with the predicted and true last names of sample i."""
    predicted = target_names[y_pred[i]].rsplit(' ', 1)[-1]
    actual = target_names[y_test[i]].rsplit(' ', 1)[-1]
    return 'predicted: {}\ntrue: {}'.format(predicted, actual)
def plot_W(W):
    """Display the magnitude of projection matrix W (features x K) as a heat map."""
    plt.figure()
    aspect_ratio = W.shape[1] / W.shape[0]
    plt.imshow(np.abs(W), aspect=aspect_ratio)
    plt.colorbar()
    plt.title('W')
    plt.ylabel('features')
    plt.xlabel('K')
# Predict view 1 (the labels) from the test images and show the test set in a
# gallery captioned with predicted vs. true names.
y_pred = myModel.predict([0],1,0, X0_tst)
prediction_titles = [title(y_pred, Y_tst, target_names, i)
                     for i in range(y_pred.shape[0])]
plot_gallery(X_tst, prediction_titles, h, w)
```
## LFW Dataset with Sparsity
Finally, we can use the sparse version of the method to make the model learn not only which latent features are relevant but also which features are the more relevant as well in order to learn the labels given.
To do so, we just need to train the model as we did before, specifying which views are to be learned with the before mentioned sparsity.
```
# Same training setup as before, but with feature sparsity enabled on view 0
# (the third struct_data argument is 1 for the image view).
myKc = 50 # number of latent features
max_it = int(5*1e4) # maximum number of iterations
tol = 1e-7 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol)
prune = 1 # whether to prune the irrelevant latent features
myModel = sshiba.SSHIBA(myKc, prune)
X0_tr = myModel.struct_data(X_tr, 0, 1)
X1_tr = myModel.struct_data(Y_tr, 1, 0)
X0_tst = myModel.struct_data(X_tst, 0, 1)
X1_tst = myModel.struct_data(Y_tst, 1, 0)
myModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, AUC = 1)
print('Final AUC %.3f' %(myModel.AUC[-1]))
# Persist the trained model so the analysis can be re-run without refitting.
import pickle
my_dict = {}
my_dict['models'] = myModel
filename = 'Models_categorical_sparse'
with open(filename+'.pkl', 'wb') as output:
    pickle.dump(my_dict, output, pickle.HIGHEST_PROTOCOL)
# Reload the saved model and visualize its test-set predictions.
import pickle
filename = 'Models_categorical_sparse'
my_dict = pickle.load( open( filename+'.pkl', "rb" ))
myModel = my_dict['models']
y_pred = myModel.predict([0],1,0, X0_tst)
prediction_titles = [title(y_pred, Y_tst, target_names, i)
                     for i in range(y_pred.shape[0])]
plot_gallery(X_tst, prediction_titles, h, w)
```
## Visualization of the results
### Vector $\gamma$
Once the model is trained, we can visualize the variable $\gamma$ to see which parts of the image are considered as relevant and which ones irrelevant.
```
# Inspect the learned feature-relevance vector gamma for view 0 (the images):
# a histogram of relevance values plus the per-feature relevance curve.
q = myModel.q_dist
gamma = q.gamma_mean(0)
ax1 = plt.subplot(2, 1, 1)
plt.title('Feature selection analysis')
plt.hist(gamma,100)   # distribution of relevance values across features
ax2 = plt.subplot(2, 1, 2)
plt.plot(gamma,'.')   # relevance of each individual (pixel) feature
plt.ylabel('gamma')
plt.xlabel('feature')
plt.show()
```
### Matrix $W$
Now we can see, as we did in the _sparse notebook_, how the model learns the matrix $W$ to transform $X$ into the latent space $Z$.
```
pos_ord_var=np.argsort(gamma)[::-1]
plot_W(q.W[0]['mean'][pos_ord_var,:])
```
### Vector $\gamma$ mask visualization
Finally, as the data we are working with are images, we could visualize the values the variable $\gamma$ takes as an image to see the relevance each pixel has.
In our case, we can see that the method is capable of finding the most relevant features to describe the different attributes we have as labels.
```
# Reshape gamma back into image space so per-pixel relevance can be viewed
# as a grayscale mask over the face.
q = myModel.q_dist
gamma = q.gamma_mean(0)
plt.figure(figsize=(3, 5))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
plt.imshow(gamma.reshape((h, w)), cmap=plt.cm.gray)
plt.xticks(())
plt.yticks(())
plt.title('Gamma mask')
plt.show()
```
### Matrix $W$ masks visualization
Conversely, we can plot the projection matrix $W$ to see how the latent features capture the different parts of the face.
```
# Order latent features by their alpha relevance and render each latent
# feature's weight vector as a face-shaped image.
alpha = q.alpha_mean(0)
pos_ord_var = np.argsort(alpha)
W_0 = q.W[0]['mean'][:,pos_ord_var]
# Fix: build one title per latent feature (columns of W_0, shape[1]); the
# original iterated shape[0] (input features), creating thousands of unused titles.
Wface_titles = ["Latent feature %d" % i for i in range(W_0.shape[1])]
n_col, n_row = 6,8
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
    plt.subplot(n_row, n_col, i + 1)
    # each column of W_0 is a flattened h-by-w weight image
    plt.imshow(W_0[:,i].reshape((h, w)), cmap=plt.cm.gray)
    plt.title(Wface_titles[i], size=12)
    plt.xticks(())
    plt.yticks(())
plt.show()
```
| github_jupyter |
<img src="https://github.com/pmservice/ai-openscale-tutorials/raw/master/notebooks/images/banner.png" align="left" alt="banner">
# Working with Watson Machine Learning
The notebook will train, create and deploy a Credit Risk model, configure OpenScale to monitor that deployment, and inject seven days' worth of historical records and measurements for viewing in the OpenScale Insights dashboard.
### Contents
- [Setup](#setup)
- [Model building and deployment](#model)
- [OpenScale configuration](#openscale)
- [Quality monitor and feedback logging](#quality)
- [Fairness, drift monitoring and explanations](#fairness)
- [Custom monitors and metrics](#custom)
- [Payload analytics](#analytics)
- [Historical data](#historical)
# 1.0 Setup <a name="setup"></a>
## 1.1 Package installation
> Note: Some packages that are installed are dependencies for other packages. The versions are pinned to prevent warnings or errors.
```
!rm -rf /home/spark/shared/user-libs/python3.6*
!pip install --upgrade ibm-ai-openscale==2.2.1 --no-cache --user | tail -n 1
!pip install --upgrade opt-einsum==2.3.2 --no-cache | tail -n 1
!pip install --upgrade typing-extensions==3.6.2.1 --no-cache | tail -n 1
!pip install --upgrade jupyter==1 --no-cache | tail -n 1
!pip install --upgrade tensorboard==1.15.0 | tail -n 1
!pip install --upgrade JPype1-py3 | tail -n 1
!pip install --upgrade watson-machine-learning-client-V4==1.0.93 | tail -n 1
!pip install --upgrade numpy==1.18.3 --no-cache | tail -n 1
!pip install --upgrade SciPy==1.4.1 --no-cache | tail -n 1
!pip install --upgrade pyspark==2.3 | tail -n 1
!pip install --upgrade scikit-learn==0.20.3 | tail -n 1
!pip install --upgrade pandas==0.24.2 | tail -n 1
```
### Action: restart the kernel!
## 1.2 Configure credentials
- WOS_CREDENTIALS (ICP)
- WML_CREDENTIALS (ICP)
<font color='red'>Replace the `username` and `password` values of `************` with your Cloud Pak for Data `username` and `password`. The value for `url` should match the `url` for your Cloud Pak for Data cluster, which you can get from the browser address bar (be sure to include the 'https://').</font> The credentials should look something like this (these are example values, not the ones you will use):
`
WOS_CREDENTIALS = {
"url": "https://zen.clusterid.us-south.containers.appdomain.cloud",
"username": "cp4duser",
"password" : "cp4dpass"
}
`
#### NOTE: Make sure that there is no trailing forward slash `/` in the `url`
```
# Cloud Pak for Data credentials for Watson OpenScale; WML reuses the same
# cluster credentials with the on-prem instance id and platform version added.
WOS_CREDENTIALS = {
    "url": "******",       # cluster URL (https://..., no trailing slash)
    "username": "******",
    "password": "******"
}
WML_CREDENTIALS = WOS_CREDENTIALS.copy()
WML_CREDENTIALS['instance_id']='openshift'
WML_CREDENTIALS['version']='3.0.0'
```
Provide a custom name to be concatenated to model name, deployment name and open scale monitor. Sample value for CUSTOM_NAME could be ```CUSTOM_NAME = 'SAMAYA_OPENSCALE_3.0'```
```
# A unique prefix keeps this notebook's model, deployment and monitor names
# distinct from artifacts created by other users on the same cluster.
CUSTOM_NAME = '******'
MODEL_NAME = CUSTOM_NAME + "_MODEL"
DEPLOYMENT_NAME = CUSTOM_NAME + "_DEPLOYMENT"
MONITOR_NAME = CUSTOM_NAME + "_MONITOR"
```
# 2.0 Model building and deployment <a name="model"></a>
In this section you will learn how to train Spark MLLib model and next deploy it as web-service using Watson Machine Learning service.
## 2.1 Load the training data
```
!rm german_credit_data_biased_training.csv
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/german_credit_data_biased_training.csv
from pyspark.sql import SparkSession
import pandas as pd
import json
spark = SparkSession.builder.getOrCreate()
pd_data = pd.read_csv("german_credit_data_biased_training.csv", sep=",", header=0)
df_data = spark.read.csv(path="german_credit_data_biased_training.csv", sep=",", header=True, inferSchema=True)
df_data.head()
```
## 2.2 Explore data
```
df_data.printSchema()
print("Number of records: " + str(df_data.count()))
```
## 2.3 Create a model
```
spark_df = df_data
(train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24)
print("Number of records for training: " + str(train_data.count()))
print("Number of records for evaluation: " + str(test_data.count()))
spark_df.printSchema()
```
The code below creates a Random Forest Classifier with Spark, setting up string indexers for the categorical features and the label column. Finally, this notebook creates a pipeline including the indexers and the model, and does an initial Area Under ROC evaluation of the model.
```
from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml import Pipeline, Model

# Map every categorical column to a numeric *_IX column for the classifier.
si_CheckingStatus = StringIndexer(inputCol = 'CheckingStatus', outputCol = 'CheckingStatus_IX')
si_CreditHistory = StringIndexer(inputCol = 'CreditHistory', outputCol = 'CreditHistory_IX')
si_LoanPurpose = StringIndexer(inputCol = 'LoanPurpose', outputCol = 'LoanPurpose_IX')
si_ExistingSavings = StringIndexer(inputCol = 'ExistingSavings', outputCol = 'ExistingSavings_IX')
si_EmploymentDuration = StringIndexer(inputCol = 'EmploymentDuration', outputCol = 'EmploymentDuration_IX')
si_Sex = StringIndexer(inputCol = 'Sex', outputCol = 'Sex_IX')
si_OthersOnLoan = StringIndexer(inputCol = 'OthersOnLoan', outputCol = 'OthersOnLoan_IX')
si_OwnsProperty = StringIndexer(inputCol = 'OwnsProperty', outputCol = 'OwnsProperty_IX')
si_InstallmentPlans = StringIndexer(inputCol = 'InstallmentPlans', outputCol = 'InstallmentPlans_IX')
si_Housing = StringIndexer(inputCol = 'Housing', outputCol = 'Housing_IX')
si_Job = StringIndexer(inputCol = 'Job', outputCol = 'Job_IX')
si_Telephone = StringIndexer(inputCol = 'Telephone', outputCol = 'Telephone_IX')
si_ForeignWorker = StringIndexer(inputCol = 'ForeignWorker', outputCol = 'ForeignWorker_IX')

# Index the label and keep a converter to translate predictions back to strings.
si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df)
label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels)

# Assemble indexed categoricals plus the numeric columns into one feature vector.
# Fix: the original listed "LoanDuration" twice; it is included only once here.
va_features = VectorAssembler(inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX", "EmploymentDuration_IX", "Sex_IX", \
                                         "OthersOnLoan_IX", "OwnsProperty_IX", "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX", \
                                         "LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration", "Age", "ExistingCreditsCount", \
                                         "Dependents"], outputCol="features")

from pyspark.ml.classification import RandomForestClassifier
classifier = RandomForestClassifier(featuresCol="features")

# Full pipeline: indexers -> assembler -> random forest -> label decoding.
pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\
                            si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter])
model = pipeline.fit(train_data)
predictions = model.transform(test_data)

# NOTE(review): these evaluators score the hard 0/1 prediction column rather
# than a raw score/probability column, which understates AUC — confirm intent.
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderROC')
area_under_curve = evaluatorDT.evaluate(predictions)
evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction", metricName='areaUnderPR')
area_under_PR = evaluatorDT.evaluate(predictions)
#default evaluation is areaUnderROC
print("areaUnderROC = %g" % area_under_curve, "areaUnderPR = %g" % area_under_PR)
```
### 2.4 evaluate more metrics by exporting them into pandas and numpy
```
from sklearn.metrics import classification_report

# Convert Spark prediction indices back to the string labels of the dataset.
# NOTE(review): assumes label index 1.0 maps to 'Risk' — confirm against si_Label.labels.
y_pred = predictions.toPandas()['prediction']
y_pred = ['Risk' if pred == 1.0 else 'No Risk' for pred in y_pred]
y_test = test_data.toPandas()['Risk']
# Fix: classification_report orders classes by sorted label value
# ('No Risk' < 'Risk'), so target_names must follow that order; the original
# order ['Risk', 'No Risk'] mislabelled the two rows of the report.
print(classification_report(y_test, y_pred, target_names=['No Risk', 'Risk']))
```
## 2.5 Publish the model
In this section, the notebook uses Watson Machine Learning to save the model (including the pipeline) to the WML instance. Previous versions of the model are removed so that the notebook can be run again, resetting all data for another demo.
```
from watson_machine_learning_client import WatsonMachineLearningAPIClient
import json
wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS)
```
### 2.5.1 Set default space
In order to deploy a model, you would have to create different
deployment spaces and deploy your models there. You can list all the spaces using the .list()
function, or you can create new spaces by going to CP4D menu on top left corner --> analyze -->
analytics deployments --> New Deployment Space. Once you know which space you want to deploy
in, simply use the GUID of the space as argument for .set.default_space() function below
```
wml_client.spaces.list()
```
We'll use the `GUID` for your Deployment space as listed for the `default_space` in the method below:
```
wml_client.set.default_space('******************')
```
Alternately, set `space_name` below and use the following cell to create a space with that name
```
# space_name = "my_space_name"
# spaces = wml_client.spaces.get_details()['resources']
# space_id = None
# for space in spaces:
# if space['entity']['name'] == space_name:
# space_id = space["metadata"]["guid"]
# if space_id is None:
# space_id = wml_client.spaces.store(
# meta_props={wml_client.spaces.ConfigurationMetaNames.NAME: space_name})["metadata"]["guid"]
#wml_client.set.default_space(space_id)
```
### 2.5.2 Remove existing model and deployment
```
# Remove any previous deployment (and its backing model) that uses our
# deployment name, so the notebook can be re-run with a clean slate.
deployment_details = wml_client.deployments.get_details()
for deployment in deployment_details['resources']:
    deployment_id = deployment['metadata']['guid']
    # NOTE(review): assumes the asset href has the model id as its 4th path
    # segment (e.g. /v4/models/<id>?rev=...) — confirm against the WML API.
    model_id = deployment['entity']['asset']['href'].split('/')[3].split('?')[0]
    if deployment['entity']['name'] == DEPLOYMENT_NAME:
        print('Deleting deployment id', deployment_id)
        wml_client.deployments.delete(deployment_id)
        print('Deleting model id', model_id)
        wml_client.repository.delete(model_id)
wml_client.repository.list_models()
```
### 2.5.4 Store the model in Watson Machine Learning on CP4D
```
# Store the Spark pipeline model in the WML repository, unless a model with
# the same name already exists (in which case its uid is reused).
wml_models = wml_client.repository.get_model_details()
model_uid = None
for model_in in wml_models['resources']:
    if MODEL_NAME == model_in['entity']['name']:
        model_uid = model_in['metadata']['guid']
        break
if model_uid is None:
    print("Storing model ...")
    metadata = {
        wml_client.repository.ModelMetaNames.NAME: MODEL_NAME,
        wml_client.repository.ModelMetaNames.TYPE: 'mllib_2.3',
        wml_client.repository.ModelMetaNames.RUNTIME_UID: 'spark-mllib_2.3',
    }
    # Training data and pipeline are stored with the model so the schema can
    # be derived downstream (e.g. by OpenScale).
    published_model_details = wml_client.repository.store_model(model, metadata, training_data=df_data, pipeline=pipeline)
    model_uid = wml_client.repository.get_model_uid(published_model_details)
print("Done")
model_uid
```
## 2.6 Deploy the model
The next section of the notebook deploys the model as a RESTful web service in Watson Machine Learning. The deployed model will have a scoring URL you can use to send data to the model for predictions.
```
# Deploy the stored model as an online (REST) scoring endpoint, reusing an
# existing deployment with the same name if one is found.
wml_deployments = wml_client.deployments.get_details()
deployment_uid = None
for deployment in wml_deployments['resources']:
    if DEPLOYMENT_NAME == deployment['entity']['name']:
        deployment_uid = deployment['metadata']['guid']
        break
if deployment_uid is None:
    print("Deploying model...")
    meta_props = {
        wml_client.deployments.ConfigurationMetaNames.NAME: DEPLOYMENT_NAME,
        wml_client.deployments.ConfigurationMetaNames.ONLINE: {}
    }
    deployment = wml_client.deployments.create(artifact_uid=model_uid, meta_props=meta_props)
    deployment_uid = wml_client.deployments.get_uid(deployment)
print("Model id: {}".format(model_uid))
print("Deployment id: {}".format(deployment_uid))
```
# 3.0 Configure OpenScale <a name="openscale"></a>
The notebook will now import the necessary libraries and set up a Python OpenScale client.
```
from ibm_ai_openscale import APIClient4ICP
from ibm_ai_openscale.engines import *
from ibm_ai_openscale.utils import *
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
from ibm_ai_openscale.supporting_classes.enums import *
ai_client = APIClient4ICP(WOS_CREDENTIALS)
ai_client.version
```
## 3.1 Create datamart
### 3.1.1 Set up datamart
Watson OpenScale uses a database to store payload logs and calculated metrics. If an OpenScale datamart exists in Db2, the existing datamart will be used and no data will be overwritten.
Prior instances of the Credit model will be removed from OpenScale monitoring.
```
try:
data_mart_details = ai_client.data_mart.get_details()
print('Using existing external datamart')
except:
print('Datamart is not set up. Please have your cluster Admin set up the DB for OpenScale')
# Admin will need to setup the datamart:
#ai_client.data_mart.setup(db_credentials=DATABASE_CREDENTIALS, schema=SCHEMA_NAME)
```
## 3.2 Bind machine learning engines
Watson OpenScale needs to be bound to the Watson Machine Learning instance to capture payload data into and out of the model. If this binding already exists, this code will output a warning message and use the existing binding.
```
binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance4ICP(wml_credentials=WML_CREDENTIALS))
if binding_uid is None:
binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid']
bindings_details = ai_client.data_mart.bindings.get_details()
binding_uid
ai_client.data_mart.bindings.list()
ai_client.data_mart.bindings.list_assets()
```
## 3.3 Subscriptions
### 3.3.1 Remove existing credit risk subscriptions
This code removes previous subscriptions to the Credit model to refresh the monitors with the new model and new data.
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
for subscription in subscriptions_uids:
sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name']
if sub_name == MODEL_NAME:
ai_client.data_mart.subscriptions.delete(subscription)
print('Deleted existing subscription for', MODEL_NAME)
```
This code creates the model subscription in OpenScale using the Python client API. Note that we need to provide the model unique identifier, and some information about the model itself.
```
# Subscribe OpenScale to the deployed model, declaring the problem type and
# which columns are features / categorical so the monitors can be configured.
subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset(
    model_uid,
    problem_type=ProblemType.BINARY_CLASSIFICATION,
    input_data_type=InputDataType.STRUCTURED,
    label_column='Risk',
    prediction_column='predictedLabel',
    probability_column='probability',
    feature_columns = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"],
    categorical_columns = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"]
))
# add() returns None when a subscription already exists; look it up by name.
if subscription is None:
    print('Subscription already exists; get the existing one')
    subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
    for sub in subscriptions_uids:
        if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == MODEL_NAME:
            subscription = ai_client.data_mart.subscriptions.get(sub)
```
Get subscription list
```
subscriptions_uids = ai_client.data_mart.subscriptions.get_uids()
ai_client.data_mart.subscriptions.list()
subscription_details = subscription.get_details()
```
### 3.3.2 Score the model so we can configure monitors
Now that the WML service has been bound and the subscription has been created, we need to send a request to the model before we configure OpenScale. This allows OpenScale to create a payload log in the datamart with the correct schema, so it can capture data coming into and out of the model. First, the code gets the model deployment's endpoint URL, and then sends a few records for predictions.
```
# Locate the scoring endpoint for the deployment created above.
credit_risk_scoring_endpoint = None
print(deployment_uid)
for deployment in wml_client.deployments.get_details()['resources']:
    # Fix: compare the ids for equality; the original used a substring test
    # (`deployment_uid in guid`) which could match an unrelated deployment.
    if deployment_uid == deployment['metadata']['guid']:
        credit_risk_scoring_endpoint = deployment['entity']['status']['online_url']['url']
print(credit_risk_scoring_endpoint)

# A small batch of sample applications used to seed the OpenScale payload log.
fields = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"]
values = [
    ["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"],
    ["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"],
    ["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"],
    ["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"],
    ["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"],
    ["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"],
    ["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"],
    ["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"]
]
payload_scoring = {"fields": fields,"values": values}
payload = {
    wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
print('Single record scoring result:', '\n fields:', scoring_response['predictions'][0]['fields'], '\n values: ', scoring_response['predictions'][0]['values'][0])
```
# 4.0 Quality monitoring and feedback logging <a name="quality"></a>
## 4.1 Enable quality monitoring
The code below waits ten seconds to allow the payload logging table to be set up before it begins enabling monitors. First, it turns on the quality (accuracy) monitor and sets an alert threshold of 70%. OpenScale will show an alert on the dashboard if the model accuracy measurement (area under the curve, in the case of a binary classifier) falls below this threshold.
The second parameter supplied, min_records, specifies the minimum number of feedback records OpenScale needs before it calculates a new measurement. The quality monitor runs hourly, but the accuracy reading in the dashboard will not change until an additional 50 feedback records have been added, via the user interface, the Python client, or the supplied feedback endpoint.
```
time.sleep(10)
subscription.quality_monitoring.enable(threshold=0.7, min_records=50)
```
## 4.2 Feedback logging
The code below downloads and stores enough feedback data to meet the minimum threshold so that OpenScale can calculate a new accuracy measurement. It then kicks off the accuracy monitor. The monitors run hourly, or can be initiated via the Python API, the REST API, or the graphical user interface.
```
!rm additional_feedback_data.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/additional_feedback_data.json
with open('additional_feedback_data.json') as feedback_file:
additional_feedback_data = json.load(feedback_file)
subscription.feedback_logging.store(additional_feedback_data['data'])
subscription.feedback_logging.show_table()
run_details = subscription.quality_monitoring.run(background_mode=False)
subscription.quality_monitoring.show_table()
%matplotlib inline
quality_pd = subscription.quality_monitoring.get_table_content(format='pandas')
quality_pd.plot.barh(x='id', y='value');
ai_client.data_mart.get_deployment_metrics()
```
# 5.0 Fairness, drift monitoring and explanations
<a name="fairness"></a>
The code below configures fairness monitoring for our model. It turns on monitoring for two features, Sex and Age. In each case, we must specify:
* Which model feature to monitor
* One or more **majority** groups, which are values of that feature that we expect to receive a higher percentage of favorable outcomes
* One or more **minority** groups, which are values of that feature that we expect to receive a higher percentage of unfavorable outcomes
* The threshold at which we would like OpenScale to display an alert if the fairness measurement falls below (in this case, 95%)
Additionally, we must specify which outcomes from the model are favourable outcomes, and which are unfavourable. We must also provide the number of records OpenScale will use to calculate the fairness score. In this case, OpenScale's fairness monitor will run hourly, but will not calculate a new fairness rating until at least 200 records have been added. Finally, to calculate fairness, OpenScale must perform some calculations on the training data, so we provide the dataframe containing the data.
## 5.1 Enable fairness monitoring
```
# Enable fairness monitoring on Sex and Age. Majority groups are the values
# expected to receive favourable outcomes more often; an alert fires when the
# fairness measurement drops below the 95% threshold.
subscription.fairness_monitoring.enable(
    features=[
        Feature("Sex", majority=['male'], minority=['female'], threshold=0.95),
        Feature("Age", majority=[[26,75]], minority=[[18,25]], threshold=0.95)
    ],
    favourable_classes=['No Risk'],
    unfavourable_classes=['Risk'],
    min_records=200,        # wait for at least 200 scored records per run
    training_data=pd_data   # training distribution used by the computation
)
```
## 5.2 Score the model again now that monitoring is configured
This next section randomly selects 200 records from the data feed and sends those records to the model for predictions. This is enough to exceed the minimum threshold for records set in the previous section, which allows OpenScale to begin calculating fairness.
```
!rm german_credit_feed.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/german_credit_feed.json
```
Score 200 randomly chosen records
```
import random
with open('german_credit_feed.json', 'r') as scoring_file:
scoring_data = json.load(scoring_file)
fields = scoring_data['fields']
values = []
for _ in range(200):
values.append(random.choice(scoring_data['values']))
payload_scoring = {"fields": fields, "values": values}
payload = {
wml_client.deployments.ScoringMetaNames.INPUT_DATA: [payload_scoring]
}
scoring_response = wml_client.deployments.score(deployment_uid, payload)
```
### 5.3 Run fairness monitor
Kick off a fairness monitor run on current data. The monitor runs hourly, but can be manually initiated using the Python client, the REST API, or the graphical user interface. We have a 30 second sleep so that the scoring of 200 payloads above can complete.
```
time.sleep(30)
run_details = subscription.fairness_monitoring.run(background_mode=False)
time.sleep(5)
subscription.fairness_monitoring.show_table()
```
### 5.4 Configure Explainability
Finally, we provide OpenScale with the training data to enable and configure the explainability features.
```
from ibm_ai_openscale.supporting_classes import *
subscription.explainability.enable(training_data=pd_data)
explainability_details = subscription.explainability.get_details()
```
### 5.7 Run explanation for sample record
```
# Pick the most recent scoring transaction and ask OpenScale to explain it.
transaction_id = subscription.payload_logging.get_table_content(limit=1)['scoring_id'].values[0]
print(transaction_id)
explain_run = subscription.explainability.run(transaction_id=transaction_id, background_mode=False)
# Fix: identity comparison with None (`is None`), not `== None`.
if explain_run is None:
    # explanation didn't finish within 180 seconds; if it is still not
    # finished, give it a minute or so then re-run this cell
    time.sleep(10)
    explain_table = subscription.explainability.get_table_content(format='pandas')
    explain_result = pd.DataFrame.from_dict(explain_table[explain_table['transaction_id']==transaction_id]['explanation'][0]['entity']['predictions'][0]['explanation_features'])
else:
    explain_result = pd.DataFrame.from_dict(explain_run['entity']['predictions'][0]['explanation_features'])
explain_result.plot.barh(x='feature_name', y='weight', color='g', alpha=0.8);
```
# 6.0 Custom monitors and metrics <a name="custom"></a>
## 6.1 Register custom monitor
```
def get_definition(monitor_name):
    """Return the stored monitor definition matching monitor_name, or None."""
    all_defs = ai_client.data_mart.monitors.get_details()['monitor_definitions']
    return next(
        (d for d in all_defs if d['entity']['name'] == monitor_name),
        None,
    )
from ibm_ai_openscale.supporting_classes import Metric, Tag
monitor_name = MONITOR_NAME
metrics = [Metric(name='sensitivity', lower_limit_default=0.8), Metric(name='specificity', lower_limit_default=0.75)]
tags = [Tag(name='region', description='customer geographical region')]
existing_definition = get_definition(monitor_name)
if existing_definition is None:
my_monitor = ai_client.data_mart.monitors.add(name=monitor_name, metrics=metrics, tags=tags)
else:
my_monitor = existing_definition
```
### 6.1.1 Get monitors uids and details
```
monitor_uid = my_monitor['metadata']['guid']
print(monitor_uid)
my_monitor = ai_client.data_mart.monitors.get_details(monitor_uid=monitor_uid)
print('monitor definition details', my_monitor)
```
### 6.2 Enable custom monitor for subscription
```
from ibm_ai_openscale.supporting_classes import Threshold
thresholds = [Threshold(metric_uid='sensitivity', lower_limit=0.9)]
subscription.monitoring.enable(monitor_uid=monitor_uid, thresholds=thresholds)
```
#### 6.2.1 Get monitor configuration details
```
subscription.monitoring.get_details(monitor_uid=monitor_uid)
```
## 6.3 Storing custom metrics
```
metrics = {"specificity": 0.78, "sensitivity": 0.67, "region": "us-south"}
subscription.monitoring.store_metrics(monitor_uid=monitor_uid, metrics=metrics)
```
### 6.3.1 List and get custom metrics
```
subscription.monitoring.show_table(monitor_uid=monitor_uid)
custom_metrics = subscription.monitoring.get_metrics(monitor_uid=monitor_uid, deployment_uid='credit')
custom_metrics
custom_metrics_pandas = subscription.monitoring.get_table_content(monitor_uid=monitor_uid)
%matplotlib inline
custom_metrics_pandas.plot.barh(x='id', y='value');
```
# 7.0 Payload analytics <a name="analytics"></a>
## 7.1 Run data distributions calculation
```
from datetime import datetime
start_date = "2018-01-01T00:00:00.00Z"
end_date = datetime.utcnow().isoformat() + "Z"
sex_distribution = subscription.payload_logging.data_distribution.run(
start_date=start_date,
end_date=end_date,
group=['predictedLabel', 'Sex'],
agg=['count'])
```
## 7.2 Get data distributions as pandas dataframe
```
sex_distribution_run_uid = sex_distribution['id']
distributions_pd = subscription.payload_logging.data_distribution.get_run_result(run_id=sex_distribution_run_uid, format='pandas')
distributions_pd
subscription.payload_logging.data_distribution.show_chart(sex_distribution_run_uid);
credit_history_distribution = subscription.payload_logging.data_distribution.run(
start_date=start_date,
end_date=end_date,
group=['predictedLabel', 'CreditHistory'],
agg=['count'])
credit_history_distribution_run_uid = credit_history_distribution['id']
subscription.payload_logging.data_distribution.show_chart(credit_history_distribution_run_uid);
```
# 8.0 Historical data <a name="historical"></a>
## 8.1 Insert historical payloads
The next section of the notebook downloads and writes historical data to the payload and measurement tables to simulate a production model that has been monitored and receiving regular traffic for the last seven days. This historical data can be viewed in the Watson OpenScale user interface. The code uses the Python and REST APIs to write this data.
```
!rm history_payloads_with_transaction_*.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_0.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_1.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_2.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_3.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_4.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_5.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_payloads_with_transaction_id_6.json
# Number of days of history to back-fill into the dashboard.
historyDays = 7
from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature
import datetime
import time
# Replay the saved payload files, spreading each day's records evenly across
# 24 hours with back-dated timestamps so OpenScale shows a week of traffic.
for day in range(historyDays):
    print('Loading day {}'.format(day + 1))
    history_file = 'history_payloads_with_transaction_id_' + str(day) + '.json'
    with open(history_file) as f:
        payloads = json.load(f)
    hourly_records = int(len(payloads) / 24)
    index = 0
    for hour in range(24):
        recordsList = []
        for i in range(hourly_records):
            # shift each record hour-by-hour into the past
            score_time = str(datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1))))
            recordsList.append(PayloadRecord(request=payloads[index]['request'], response=payloads[index]['response'], scoring_timestamp=score_time))
            index += 1
        subscription.payload_logging.store(records=recordsList)
print('Finished')
# NOTE(review): assumes the subscription metadata url contains 'marts/<id>/service_bindings/...' — confirm.
data_mart_id = subscription.get_details()['metadata']['url'].split('/service_bindings')[0].split('marts/')[1]
print(data_mart_id)
performance_metrics_url = WOS_CREDENTIALS['url'] + subscription.get_details()['metadata']['url'].split('/service_bindings')[0] + '/metrics'
print(performance_metrics_url)
```
## 8.2 Insert historical fairness metrics
```
# Download the canned fairness-metric history and pull in the HTTP client used
# for direct REST calls below.
!rm history_fairness.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_fairness.json
import requests
from requests.auth import HTTPBasicAuth
def create_token():
    """Authenticate against the Cloud Pak for Data preauth endpoint and
    return a bearer access token for subsequent REST calls."""
    basic_auth = HTTPBasicAuth(
        WOS_CREDENTIALS['username'],
        WOS_CREDENTIALS['password'])
    request_headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "application/json"
    }
    # verify=False: clusters commonly run with self-signed certificates.
    raw_response = requests.Session().get(
        WOS_CREDENTIALS['url'] + '/v1/preauth/validateAuth',
        headers=request_headers,
        auth=basic_auth,
        verify=False)
    # handle_response raises/reports unless the call returned HTTP 200.
    parsed = handle_response(200, 'access token', raw_response, True)
    return parsed['accessToken']
# Mint a fresh token and build the auth headers for direct REST calls.
iam_token = create_token()
iam_headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % iam_token
}
with open('history_fairness.json', 'r') as history_file:
    payloads = json.load(history_file)
# Post one back-dated fairness measurement per hour for each history day.
for day in range(historyDays):
    print('Loading day', day + 1)
    for hour in range(24):
        score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
        index = (day * 24 + hour) % len(payloads) # wrap around and reuse values if needed
        qualityMetric = {
            'metric_type': 'fairness',
            'binding_id': binding_uid,
            'timestamp': score_time,
            'subscription_id': model_uid,
            'asset_revision': model_uid,
            'deployment_id': deployment_uid,
            'value': payloads[index]
        }
        # verify=False: self-signed cluster certificate; response is unchecked.
        response = requests.post(performance_metrics_url, json=[qualityMetric], headers=iam_headers, verify=False)
print('Finished')
```
## 8.3 Insert historical debias metrics
```
# Same pattern as the fairness history above, but for debiased-fairness metrics.
!rm history_debias.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_debias.json
iam_token = create_token()
iam_headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % iam_token
}
with open('history_debias.json', 'r') as history_file:
    payloads = json.load(history_file)
for day in range(historyDays):
    print('Loading day', day + 1)
    for hour in range(24):
        score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
        index = (day * 24 + hour) % len(payloads) # wrap around and reuse values if needed
        qualityMetric = {
            'metric_type': 'debiased_fairness',
            'binding_id': binding_uid,
            'timestamp': score_time,
            'subscription_id': model_uid,
            'asset_revision': model_uid,
            'deployment_id': deployment_uid,
            'value': payloads[index]
        }
        response = requests.post(performance_metrics_url, json=[qualityMetric], headers=iam_headers, verify=False)
print('Finished')
```
## 8.4 Insert historical quality metrics
```
iam_token = create_token()
iam_headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % iam_token
}
# One hard-coded area-under-ROC value per simulated day; the same value is
# repeated for every hour of that day.
measurements = [0.76, 0.78, 0.68, 0.72, 0.73, 0.77, 0.80]
for day in range(historyDays):
    print('Day', day + 1)
    for hour in range(24):
        score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
        qualityMetric = {
            'metric_type': 'quality',
            'binding_id': binding_uid,
            'timestamp': score_time,
            'subscription_id': model_uid,
            'asset_revision': model_uid,
            'deployment_id': deployment_uid,
            'value': {
                'quality': measurements[day],
                # 0.7 is the alert threshold; day 3 (0.68) dips below it.
                'threshold': 0.7,
                'metrics': [
                    {
                        'name': 'auroc',
                        'value': measurements[day],
                        'threshold': 0.7
                    }
                ]
            }
        }
        response = requests.post(performance_metrics_url, json=[qualityMetric], headers=iam_headers, verify=False)
print('Finished')
```
## 8.5 Insert historical confusion matrixes
```
!rm history_quality_metrics.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_quality_metrics.json
# The /measurements endpoint accepts full measurement records (metrics + sources,
# e.g. confusion matrices), unlike the /metrics endpoint used above.
measurements_url = WOS_CREDENTIALS['url'] + subscription.get_details()['metadata']['url'].split('/service_bindings')[0] + '/measurements'
print(measurements_url)
with open('history_quality_metrics.json') as json_file:
    records = json.load(json_file)
for day in range(historyDays):
    index = 0
    measurments = []  # NOTE(review): variable name is misspelled in the original
    print('Day', day + 1)
    for hour in range(24):
        score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
        measurement = {
            "monitor_definition_id": 'quality',
            "binding_id": subscription.binding_uid,
            "subscription_id": subscription.uid,
            "asset_id": subscription.source_uid,
            'metrics': [records[index]['metrics']],
            'sources': [records[index]['sources']],
            'timestamp': score_time
        }
        measurments.append(measurement)
        index+=1
    # One batched POST per simulated day (24 measurements).
    response = requests.post(measurements_url, json=measurments, headers=ai_client._get_headers(), verify=False)
print('Finished')
```
## 8.6 Insert historical performance metrics
```
iam_token = create_token()
iam_headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % iam_token
}
# Randomized throughput/latency figures per hour; assumes `random` was imported
# earlier in the notebook.
for day in range(historyDays):
    print('Day', day + 1)
    for hour in range(24):
        score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
        score_count = random.randint(60, 600)
        score_resp = random.uniform(60, 300)
        performanceMetric = {
            'metric_type': 'performance',
            'binding_id': binding_uid,
            'timestamp': score_time,
            'subscription_id': model_uid,
            'asset_revision': model_uid,
            'deployment_id': deployment_uid,
            'value': {
                'response_time': score_resp,
                'records': score_count
            }
        }
        response = requests.post(performance_metrics_url, json=[performanceMetric], headers=iam_headers, verify=False)
print('Finished')
```
## 8.7 Insert historical manual labeling
```
manual_labeling_url = WOS_CREDENTIALS['url'] + subscription.get_details()['metadata']['url'].split('/service_bindings')[0] + '/manual_labelings'
print(manual_labeling_url)
!rm history_manual_labeling.json
!wget https://raw.githubusercontent.com/IBM/credit-risk-workshop-cpd/master/data/openscale/history_manual_labeling.json
iam_token = create_token()
iam_headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % iam_token
}
with open('history_manual_labeling.json', 'r') as history_file:
    records = json.load(history_file)
# Each record carries fastpath_history_day/hour markers; select the records for
# each (day, hour) slot, stamp them with ids and a back-dated timestamp, and
# POST one batch per day. Note: the loop mutates the loaded records in place.
for day in range(historyDays):
    print('Loading day', day + 1)
    record_json = []
    for hour in range(24):
        for record in records:
            if record['fastpath_history_day'] == day and record['fastpath_history_hour'] == hour:
                record['binding_id'] = binding_uid
                record['subscription_id'] = model_uid
                record['asset_revision'] = model_uid
                record['deployment_id'] = deployment_uid
                record['scoring_timestamp'] = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ')
                record_json.append(record)
    response = requests.post(manual_labeling_url, json=record_json, headers=iam_headers, verify=False)
print('Finished')
```
## 8.8 Additional data to help debugging
```
# Print the key identifiers for troubleshooting in the OpenScale UI/REST API.
print('Datamart:', data_mart_id)
print('Model:', model_uid)
print('Deployment:', deployment_uid)
print('Binding:', binding_uid)
# print('Scoring URL:', credit_risk_scoring_endpoint)
```
## 8.9 Identify transactions for Explainability
Transaction IDs identified by the cells below can be copied and pasted into the Explainability tab of the OpenScale dashboard.
```
# Show recent scoring ids; these can be pasted into OpenScale's Explainability tab.
payload_data = subscription.payload_logging.get_table_content(limit=10)
payload_data.filter(items=['scoring_id', 'predictedLabel', 'probability'])
```
## Congratulations!
You have finished this section of the hands-on lab for IBM Watson OpenScale. You can now view the OpenScale dashboard by going to the Cloud Pak for Data `Home` page, and clicking `Services`. Choose the `OpenScale` tile and click the menu to `Open`. Click on the tile for the model you've created to see the monitors.
OpenScale shows model performance over time. You have two options to keep data flowing to your OpenScale graphs:
* Download, configure and schedule the [model feed notebook](https://raw.githubusercontent.com/emartensibm/german-credit/master/german_credit_scoring_feed.ipynb). This notebook can be set up with your WML credentials, and scheduled to provide a consistent flow of scoring requests to your model, which will appear in your OpenScale monitors.
* Re-run this notebook. Running this notebook from the beginning will delete and re-create the model and deployment, and re-create the historical data. Please note that the payload and measurement logs for the previous deployment will continue to be stored in your datamart, and can be deleted if necessary.
This notebook has been adapted from notebooks available at https://github.com/pmservice/ai-openscale-tutorials.
| github_jupyter |
# Single NFW profile
Here we demonstrate most of the NFW functionality using a single NFW profile.
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from profiley.nfw import NFW
# Construct a single NFW halo: mass in Msun, dimensionless concentration,
# and redshift.
mass = 1e14
concentration = 4
redshift = 0.5
nfw = NFW(mass, concentration, redshift)
print(nfw)
```
Note that the profile attributes are always arrays, even if scalars are passed to it.
The first thing to look at is the 3-dimensional density profile. For all profiles we need to pass the distances at which these will be computed, as a 1d array, so let's define that first.
These distances must be in Mpc.
```
# 100 logarithmically spaced radii from 0.01 to 10 Mpc.
R = np.logspace(-2, 1, 100)
```
With that, getting the density profile is as simple as
```
# 3-dimensional density profile rho(r) evaluated at the radii in R.
rho = nfw.profile(R)
```
That's it!
```
# Log-log plot of the 3D density profile.
plt.loglog(R, rho)
plt.xlabel('$r$ (Mpc)', fontsize=16)
plt.ylabel(r'$\rho(r)$ (M$_\odot$/Mpc$^3$)', fontsize=16);
```
Similarly, we can obtain the projected surface density or the excess surface density (the weak lensing observable):
```
# Projected surface density and excess surface density (the weak-lensing
# observable), plotted side by side on log-log axes.
sigma = nfw.surface_density(R)
esd = nfw.excess_surface_density(R)
fig, axes = plt.subplots(figsize=(14,5), ncols=2)
axes[0].plot(R, sigma)
axes[0].set_ylabel(r'$\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16)
axes[1].plot(R, esd)
axes[1].set_ylabel(r'$\Delta\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16)
for ax in axes:
    ax.set_xlabel('$R$ (Mpc)', fontsize=16)
    ax.set_xscale('log')
    ax.set_yscale('log')
```
The ESD can also be calculated "manually":
```
# Cross-check: ESD equals the mean enclosed surface density minus Sigma(R).
barsigma = nfw.enclosed_surface_density(R)
esd_manual = barsigma - sigma
np.allclose(esd, esd_manual)
```
We can also calculate the convergence profile for a given source redshift:
```
# Lensing convergence profile for background sources at z_source.
z_source = 1.0
kappa = nfw.convergence(R, z_source)
plt.loglog(R, kappa)
plt.xlabel('$R$ (Mpc)', fontsize=16)
# Raw f-string: '\k' is an invalid escape sequence in a plain literal
# (DeprecationWarning); rf'...' yields the identical bytes without the warning.
plt.ylabel(rf'$\kappa(R)$ ($z_s={z_source}$)', fontsize=16);
```
Finally, we can also obtain offset profiles like so:
```
# Surface density for haloes mis-centred by Roff (five offsets, 0.2-1 Mpc),
# overlaid on the centred profile (black).
Roff = np.linspace(0.2, 1, 5)
sigma_off = nfw.offset_surface_density(R, Roff)
sigma_off.shape
for Ri, sigma_i in zip(Roff, sigma_off):
    plt.loglog(R, sigma_i[0], label=rf'$R_\mathrm{{off}}={Ri:.1f}$ Mpc')
plt.plot(R, sigma, 'k-')
plt.legend()
plt.xlabel('$R$ (Mpc)', fontsize=16)
plt.ylabel(r'$\Sigma(R)$ (M$_\odot$/Mpc$^2$)', fontsize=16);
```
There is a similar `offset_excess_surface_density` method, as well as `offset_density` and `offset_enclosed_density`, though these would not be used so often. The offset convergence has a different signature:
```
# Offset convergence uses the same convergence() method with an Roff keyword.
kappa_off = nfw.convergence(R, z_source, Roff=Roff)
for Ri, kappa_i in zip(Roff, kappa_off):
    plt.loglog(R, kappa_i[0], label=rf'$R_\mathrm{{off}}={Ri:.1f}$ Mpc')
plt.plot(R, kappa, 'k-')
plt.legend()
plt.xlabel('$R$ (Mpc)', fontsize=16)
plt.ylabel(r'$\kappa(R)$', fontsize=16);
```
| github_jupyter |
## Visual Comparison Between Different Classification Methods in Shogun
Notebook by Youssef Emad El-Din (Github ID: <a href="https://github.com/youssef-emad/">youssef-emad</a>)
This notebook demonstrates different classification methods in Shogun. The point is to compare and visualize the decision boundaries of different classifiers on two different datasets, where one is linearly separable and one is not.
1. <a href ="#section1">Data Generation and Visualization</a>
2. <a href ="#section2">Support Vector Machine</a>
1. <a href ="#section2a">Linear SVM</a>
2. <a href ="#section2b">Gaussian Kernel</a>
3. <a href ="#section2c">Sigmoid Kernel</a>
4. <a href ="#section2d">Polynomial Kernel</a>
3. <a href ="#section3">Naive Bayes</a>
4. <a href ="#section4">Nearest Neighbors</a>
5. <a href ="#section5">Linear Discriminant Analysis</a>
6. <a href ="#section6">Quadratic Discriminant Analysis</a>
7. <a href ="#section7">Gaussian Process</a>
1. <a href ="#section7a">Logit Likelihood model</a>
2. <a href ="#section7b">Probit Likelihood model</a>
8. <a href ="#section8">Putting It All Together</a>
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
from shogun import *
import shogun as sg
# Accumulators for the final comparison grid (section 8): one entry appended
# per classifier. Note: the original used `[] * 10`, which is just an empty
# list — multiplying an empty list has no effect, so plain literals are used.
classifiers_linear = []
classifiers_non_linear = []
classifiers_names = []
fadings = []
```
## <a id = "section1">Data Generation and Visualization</a>
Transformation of features to Shogun format using <a href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CDenseFeatures.html">RealFeatures</a> and <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CBinaryLabels.html">BinaryLables</a> classes.
```
# Load the two toy datasets (linearly separable and not) into Shogun feature
# and binary-label containers.
shogun_feats_linear = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_linear_features_train.dat')))
shogun_labels_linear = BinaryLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_linear_labels_train.dat')))
shogun_feats_non_linear = features(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_nonlinear_features_train.dat')))
shogun_labels_non_linear = BinaryLabels(CSVFile(os.path.join(SHOGUN_DATA_DIR, 'toy/classifier_binary_2d_nonlinear_labels_train.dat')))
# Extract plain numpy copies for plotting. NOTE(review): the trailing `()` on
# get_real_vector(...) appears throughout this notebook — presumably valid for
# this Shogun version; verify against the installed API.
feats_linear = shogun_feats_linear.get_real_matrix('feature_matrix')
labels_linear = shogun_labels_linear.get_real_vector('labels')()
feats_non_linear = shogun_feats_non_linear.get_real_matrix('feature_matrix')
labels_non_linear = shogun_labels_non_linear.get_real_vector('labels')()
```
Data visualization methods.
```
def plot_binary_data(plot, X_train, y_train):
    """Scatter-plot 2D binary-labelled data on `plot`: label +1 in red,
    label -1 in blue. X_train is 2 x N; y_train holds +/-1 labels."""
    plot.xlabel(r"$x$")
    plot.ylabel(r"$y$")
    positive = np.argwhere(y_train == 1)
    negative = np.argwhere(y_train == -1)
    plot.plot(X_train[0, positive], X_train[1, positive], 'ro')
    plot.plot(X_train[0, negative], X_train[1, negative], 'bo')
def compute_plot_isolines(classifier,feats,size=200,fading=True):
    """
    This function computes the classification of points on the grid
    to get the decision boundaries used in plotting.

    Returns (x, y, z): the meshgrid coordinates and a (size, size) array of
    classifier outputs. With fading=True the raw decision values are used
    (smooth shading); otherwise the hard +/-1 labels are used.
    """
    # Grid spans the data range stretched by 20% in each dimension.
    x1 = np.linspace(1.2*min(feats[0]), 1.2*max(feats[0]), size)
    x2 = np.linspace(1.2*min(feats[1]), 1.2*max(feats[1]), size)
    x, y = np.meshgrid(x1, x2)
    # Flatten the grid into a 2 x size^2 Shogun feature matrix.
    plot_features=features(np.array((np.ravel(x), np.ravel(y))))
    if fading == True:
        plot_labels = classifier.apply(plot_features).get_real_vector('current_values')
    else:
        plot_labels = classifier.apply(plot_features).get_real_vector('labels')
    z = plot_labels.reshape((size, size))
    return x,y,z
def plot_model(plot, classifier, features, labels, fading=True):
    """Draw a trained classifier's decision regions (shaded, with black
    contour lines) and overlay the training points."""
    grid_x, grid_y, grid_z = compute_plot_isolines(classifier, features, fading=fading)
    plot.pcolor(grid_x, grid_y, grid_z, cmap='RdBu_r')
    plot.contour(grid_x, grid_y, grid_z, linewidths=1, colors='black')
    plot_binary_data(plot, features, labels)
# Side-by-side view of the two raw datasets before any training.
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Linear Features")
plot_binary_data(plt,feats_linear, labels_linear)
plt.subplot(122)
plt.title("Non Linear Features")
plot_binary_data(plt,feats_non_linear, labels_non_linear)
```
## <a id="section2" href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSVM.html">Support Vector Machine</a>
<a id="section2a" href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLibLinear.html">Linear SVM</a>
Shogun provides <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CLibLinear.html">Liblinear</a>, a library for large-scale linear learning focused on SVMs for classification.
```
plt.figure(figsize=(15,5))
# Regularization constant and stopping tolerance shared by both models.
c = 0.5
epsilon =1e-3
svm_linear = LibLinear(c,shogun_feats_linear,shogun_labels_linear)
svm_linear.put('liblinear_solver_type', L2R_L2LOSS_SVC)
svm_linear.put('epsilon', epsilon)
svm_linear.train()
# Record the model for the section-8 summary grid.
classifiers_linear.append(svm_linear)
classifiers_names.append("SVM Linear")
fadings.append(True)
plt.subplot(121)
plt.title("Linear SVM - Linear Features")
plot_model(plt,svm_linear,feats_linear,labels_linear)
svm_non_linear = LibLinear(c,shogun_feats_non_linear,shogun_labels_non_linear)
svm_non_linear.put('liblinear_solver_type', L2R_L2LOSS_SVC)
svm_non_linear.put('epsilon', epsilon)
svm_non_linear.train()
classifiers_non_linear.append(svm_non_linear)
plt.subplot(122)
plt.title("Linear SVM - Non Linear Features")
plot_model(plt,svm_non_linear,feats_non_linear,labels_non_linear)
```
## SVM - Kernels
Shogun provides many options for using kernel functions. Kernels in Shogun are based on two base classes: <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CKernel.html">CKernel</a> and <a href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CKernelMachine.html">CKernelMachine</a>.
<a id ="section2b" href = "http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGaussianKernel.html">Gaussian Kernel</a>
```
# Gaussian (RBF) kernel SVMs; log_width controls the kernel bandwidth.
gaussian_c=0.7
gaussian_kernel_linear=sg.kernel("GaussianKernel", log_width=np.log(100))
gaussian_svm_linear=LibSVM(gaussian_c, gaussian_kernel_linear, shogun_labels_linear)
gaussian_svm_linear.train(shogun_feats_linear)
classifiers_linear.append(gaussian_svm_linear)
fadings.append(True)
gaussian_kernel_non_linear=sg.kernel("GaussianKernel", log_width=np.log(100))
gaussian_svm_non_linear=LibSVM(gaussian_c, gaussian_kernel_non_linear, shogun_labels_non_linear)
gaussian_svm_non_linear.train(shogun_feats_non_linear)
classifiers_non_linear.append(gaussian_svm_non_linear)
classifiers_names.append("SVM Gaussian Kernel")
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Gaussian Kernel - Linear Features")
plot_model(plt,gaussian_svm_linear,feats_linear,labels_linear)
plt.subplot(122)
plt.title("SVM Gaussian Kernel - Non Linear Features")
plot_model(plt,gaussian_svm_non_linear,feats_non_linear,labels_non_linear)
```
<a id ="section2c" href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CSigmoidKernel.html">Sigmoid Kernel</a>
```
# Sigmoid-kernel SVMs; kernel args are (cache size, gamma, coef0) and differ
# between the two datasets.
sigmoid_c = 0.9
sigmoid_kernel_linear = SigmoidKernel(shogun_feats_linear,shogun_feats_linear,200,1,0.5)
sigmoid_svm_linear = LibSVM(sigmoid_c, sigmoid_kernel_linear, shogun_labels_linear)
sigmoid_svm_linear.train()
classifiers_linear.append(sigmoid_svm_linear)
classifiers_names.append("SVM Sigmoid Kernel")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Sigmoid Kernel - Linear Features")
plot_model(plt,sigmoid_svm_linear,feats_linear,labels_linear)
sigmoid_kernel_non_linear = SigmoidKernel(shogun_feats_non_linear,shogun_feats_non_linear,400,2.5,2)
sigmoid_svm_non_linear = LibSVM(sigmoid_c, sigmoid_kernel_non_linear, shogun_labels_non_linear)
sigmoid_svm_non_linear.train()
classifiers_non_linear.append(sigmoid_svm_non_linear)
plt.subplot(122)
plt.title("SVM Sigmoid Kernel - Non Linear Features")
plot_model(plt,sigmoid_svm_non_linear,feats_non_linear,labels_non_linear)
```
<a id ="section2d" href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CPolyKernel.html">Polynomial Kernel</a>
```
# Degree-4 polynomial-kernel SVMs (True enables the inhomogeneous term).
poly_c = 0.5
degree = 4
poly_kernel_linear = PolyKernel(shogun_feats_linear, shogun_feats_linear, degree, True)
poly_svm_linear = LibSVM(poly_c, poly_kernel_linear, shogun_labels_linear)
poly_svm_linear.train()
classifiers_linear.append(poly_svm_linear)
classifiers_names.append("SVM Polynomial kernel")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("SVM Polynomial Kernel - Linear Features")
plot_model(plt,poly_svm_linear,feats_linear,labels_linear)
poly_kernel_non_linear=PolyKernel(shogun_feats_non_linear, shogun_feats_non_linear, degree, True)
poly_svm_non_linear = LibSVM(poly_c, poly_kernel_non_linear, shogun_labels_non_linear)
poly_svm_non_linear.train()
classifiers_non_linear.append(poly_svm_non_linear)
plt.subplot(122)
plt.title("SVM Polynomial Kernel - Non Linear Features")
plot_model(plt,poly_svm_non_linear,feats_non_linear,labels_non_linear)
```
## <a id ="section3" href="http://www.shogun-toolbox.org/doc/en/latest/classshogun_1_1CGaussianNaiveBayes.html">Naive Bayes</a>
```
# GaussianNaiveBayes requires multiclass labels, so remap -1 -> 0 first.
multiclass_labels_linear = shogun_labels_linear.get_real_vector('labels')()
for i in range(0,len(multiclass_labels_linear)):
    if multiclass_labels_linear[i] == -1:
        multiclass_labels_linear[i] = 0
multiclass_labels_non_linear = shogun_labels_non_linear.get_real_vector('labels')()
for i in range(0,len(multiclass_labels_non_linear)):
    if multiclass_labels_non_linear[i] == -1:
        multiclass_labels_non_linear[i] = 0
shogun_multiclass_labels_linear = MulticlassLabels(multiclass_labels_linear)
shogun_multiclass_labels_non_linear = MulticlassLabels(multiclass_labels_non_linear)
naive_bayes_linear = GaussianNaiveBayes()
naive_bayes_linear.put('features', shogun_feats_linear)
naive_bayes_linear.put('labels', shogun_multiclass_labels_linear)
naive_bayes_linear.train()
classifiers_linear.append(naive_bayes_linear)
classifiers_names.append("Naive Bayes")
# fading=False: Naive Bayes yields hard class labels here, not decision values.
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Naive Bayes - Linear Features")
plot_model(plt,naive_bayes_linear,feats_linear,labels_linear,fading=False)
naive_bayes_non_linear = GaussianNaiveBayes()
naive_bayes_non_linear.put('features', shogun_feats_non_linear)
naive_bayes_non_linear.put('labels', shogun_multiclass_labels_non_linear)
naive_bayes_non_linear.train()
classifiers_non_linear.append(naive_bayes_non_linear)
plt.subplot(122)
plt.title("Naive Bayes - Non Linear Features")
plot_model(plt,naive_bayes_non_linear,feats_non_linear,labels_non_linear,fading=False)
```
## <a id ="section4" href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CKNN.html">Nearest Neighbors</a>
```
# k-nearest-neighbours (k=10) with Euclidean distance.
number_of_neighbors = 10
distances_linear = EuclideanDistance(shogun_feats_linear, shogun_feats_linear)
knn_linear = KNN(number_of_neighbors,distances_linear,shogun_labels_linear)
knn_linear.train()
classifiers_linear.append(knn_linear)
classifiers_names.append("Nearest Neighbors")
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Nearest Neighbors - Linear Features")
plot_model(plt,knn_linear,feats_linear,labels_linear,fading=False)
distances_non_linear = EuclideanDistance(shogun_feats_non_linear, shogun_feats_non_linear)
knn_non_linear = KNN(number_of_neighbors,distances_non_linear,shogun_labels_non_linear)
knn_non_linear.train()
classifiers_non_linear.append(knn_non_linear)
plt.subplot(122)
plt.title("Nearest Neighbors - Non Linear Features")
plot_model(plt,knn_non_linear,feats_non_linear,labels_non_linear,fading=False)
```
## <a id ="section5" href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CLDA.html">Linear Discriminant Analysis</a>
```
# Linear Discriminant Analysis; gamma is the regularization parameter.
gamma = 0.1
lda_linear = LDA(gamma, shogun_feats_linear, shogun_labels_linear)
lda_linear.train()
classifiers_linear.append(lda_linear)
classifiers_names.append("LDA")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("LDA - Linear Features")
plot_model(plt,lda_linear,feats_linear,labels_linear)
lda_non_linear = LDA(gamma, shogun_feats_non_linear, shogun_labels_non_linear)
lda_non_linear.train()
classifiers_non_linear.append(lda_non_linear)
plt.subplot(122)
plt.title("LDA - Non Linear Features")
plot_model(plt,lda_non_linear,feats_non_linear,labels_non_linear)
```
## <a id ="section6" href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CQDA.html">Quadratic Discriminant Analysis</a>
```
# Quadratic Discriminant Analysis (uses the multiclass labels built earlier).
qda_linear = QDA(shogun_feats_linear, shogun_multiclass_labels_linear)
qda_linear.train()
classifiers_linear.append(qda_linear)
classifiers_names.append("QDA")
fadings.append(False)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("QDA - Linear Features")
plot_model(plt,qda_linear,feats_linear,labels_linear,fading=False)
qda_non_linear = QDA(shogun_feats_non_linear, shogun_multiclass_labels_non_linear)
qda_non_linear.train()
classifiers_non_linear.append(qda_non_linear)
plt.subplot(122)
plt.title("QDA - Non Linear Features")
plot_model(plt,qda_non_linear,feats_non_linear,labels_non_linear,fading=False)
```
## <a id ="section7" href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CGaussianProcessBinaryClassification.html">Gaussian Process</a>
<a id ="section7a">Logit Likelihood model</a>
Shogun's <a href= "http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CLogitLikelihood.html">CLogitLikelihood</a> and <a href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CEPInferenceMethod.html">CEPInferenceMethod</a> classes are used.
```
# create Gaussian kernel with width = 2.0
kernel = sg.kernel("GaussianKernel", log_width=np.log(2))
# create zero mean function
zero_mean = ZeroMean()
# create logit likelihood model
likelihood = LogitLikelihood()
# specify EP approximation inference method
inference_model_linear = EPInferenceMethod(kernel, shogun_feats_linear, zero_mean, shogun_labels_linear, likelihood)
# create and train GP classifier with the EP inference method above
gaussian_logit_linear = GaussianProcessClassification(inference_model_linear)
gaussian_logit_linear.train()
classifiers_linear.append(gaussian_logit_linear)
classifiers_names.append("Gaussian Process Logit")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Gaussian Process - Logit - Linear Features")
plot_model(plt,gaussian_logit_linear,feats_linear,labels_linear)
# Same model on the non-linear dataset (kernel/mean/likelihood are reused).
inference_model_non_linear = EPInferenceMethod(kernel, shogun_feats_non_linear, zero_mean,
                                               shogun_labels_non_linear, likelihood)
gaussian_logit_non_linear = GaussianProcessClassification(inference_model_non_linear)
gaussian_logit_non_linear.train()
classifiers_non_linear.append(gaussian_logit_non_linear)
plt.subplot(122)
plt.title("Gaussian Process - Logit - Non Linear Features")
plot_model(plt,gaussian_logit_non_linear,feats_non_linear,labels_non_linear)
```
<a id ="section7b">Probit Likelihood model</a>
Shogun's <a href="http://www.shogun-toolbox.org/doc/en/current/classshogun_1_1CProbitLikelihood.html">CProbitLikelihood</a> class is used.
```
# Same GP setup as above, swapping the logit likelihood for a probit one.
likelihood = ProbitLikelihood()
inference_model_linear = EPInferenceMethod(kernel, shogun_feats_linear, zero_mean, shogun_labels_linear, likelihood)
gaussian_probit_linear = GaussianProcessClassification(inference_model_linear)
gaussian_probit_linear.train()
classifiers_linear.append(gaussian_probit_linear)
classifiers_names.append("Gaussian Process Probit")
fadings.append(True)
plt.figure(figsize=(15,5))
plt.subplot(121)
plt.title("Gaussian Process - Probit - Linear Features")
plot_model(plt,gaussian_probit_linear,feats_linear,labels_linear)
inference_model_non_linear = EPInferenceMethod(kernel, shogun_feats_non_linear,
                                               zero_mean, shogun_labels_non_linear, likelihood)
gaussian_probit_non_linear = GaussianProcessClassification(inference_model_non_linear)
gaussian_probit_non_linear.train()
classifiers_non_linear.append(gaussian_probit_non_linear)
plt.subplot(122)
plt.title("Gaussian Process - Probit - Non Linear Features")
plot_model(plt,gaussian_probit_non_linear,feats_non_linear,labels_non_linear)
```
## <a id="section8">Putting It All Together</a>
```
# Final 2x11 grid: raw data in column 1, then all ten classifiers; top row is
# the linear dataset, bottom row the non-linear one.
figure = plt.figure(figsize=(30,9))
plt.subplot(2,11,1)
plot_binary_data(plt,feats_linear, labels_linear)
for i in range(0,10):
    plt.subplot(2,11,i+2)
    plt.title(classifiers_names[i])
    plot_model(plt,classifiers_linear[i],feats_linear,labels_linear,fading=fadings[i])
plt.subplot(2,11,12)
plot_binary_data(plt,feats_non_linear, labels_non_linear)
for i in range(0,10):
    plt.subplot(2,11,13+i)
    plot_model(plt,classifiers_non_linear[i],feats_non_linear,labels_non_linear,fading=fadings[i])
```
| github_jupyter |
```
# Import relevant libraries
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rcParams
# Use a monospace font for all figures.
rcParams['font.family'] = 'monospace'
#rcParams['font.sans-serif'] = ['Tahoma']
import math
import datetime
import numpy as np
import networkx as nx
import os
def todays_date():
    """Return today's date as a 'DDMonYYYY' string (e.g. '05Jan2026').

    Used to build output-file names. The original bound an intermediate
    local that shadowed the function's own name; returning directly avoids
    that and is equivalent.
    """
    return datetime.date.today().strftime('%d%b%Y')
def dna_coding_sequence(dna_sequence, QualitySequence, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    '''Extract and validate the coding sequence between start_sequence and the
    LAST occurrence of stop_sequence in dna_sequence.

    The candidate is accepted only if (1) its length lies within
    [cDNA_MinLength, cDNA_MaxLength] and is a multiple of 3, and (2) every
    base's quality character in QualitySequence meets QualityScore.
    Returns the coding sequence as a str, or None when validation fails.
    '''
    # Sanger/Illumina quality characters in ascending order. Raw string: the
    # original plain literal contained the invalid escape sequence '\]'.
    quality_score_string = r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~'''
    # Characters at or above the requested score are acceptable.
    threshold_quality_string = quality_score_string[QualityScore:]
    start_index = dna_sequence.find(start_sequence) + len(start_sequence)
    stop_index = dna_sequence.rfind(stop_sequence)
    coding_sequence = dna_sequence[start_index:stop_index]
    # Chained comparison replaces the original two-clause length test.
    if cDNA_MinLength <= len(coding_sequence) <= cDNA_MaxLength and len(coding_sequence) % 3 == 0:
        for character in QualitySequence[start_index:stop_index]:
            if character not in threshold_quality_string:
                return None
        return str(coding_sequence)
    # Explicit None (the original fell off the end; callers test `!= None`).
    return None
# Define Translation of RNASequence into peptide (peptide can be encoded as DNA or as RNA)
def Translation(coding_sequence):
# Set RNA TranslationCode
TranslationCode = {
'AAA':'K','AAC':'N','AAG':'K','AAU':'N',
'ACA':'T','ACC':'T','ACG':'T','ACU':'T',
'AGA':'R','AGC':'S','AGG':'R','AGU':'S',
'AUA':'I','AUC':'I','AUG':'M','AUU':'I',
'CAA':'Q','CAC':'H','CAG':'Q','CAU':'H',
'CCA':'P','CCC':'P','CCG':'P','CCU':'P',
'CGA':'R','CGC':'R','CGG':'R','CGU':'R',
'CUA':'L','CUC':'L','CUG':'L','CUU':'L',
'GAA':'E','GAC':'D','GAG':'E','GAU':'D',
'GCA':'A','GCC':'A','GCG':'A','GCU':'A',
'GGA':'G','GGC':'G','GGG':'G','GGU':'G',
'GUA':'V','GUC':'V','GUG':'V','GUU':'V',
'UAA':'#','UAC':'Y','UAG':'*','UAU':'Y',
'UCA':'S','UCC':'S','UCG':'S','UCU':'S',
'UGA':'&','UGC':'C','UGG':'W','UGU':'C',
'UUA':'L','UUC':'F','UUG':'L','UUU':'F'
}
# Stop Codons are as follows:
# UAA (ochre) — #
# UAG (amber) — *
# UGA (opal) — &
TranscriptionCode = {'A':'A','C':'C','G':'G','T':'U','U':'T'}
# Convert DNA to RNA
RNASequence = ''
for Nucleotide in coding_sequence:
RNASequence += TranscriptionCode.get(Nucleotide,'X')
#RNASequence-Test Print
#print (RNASequence)
peptide = ''
while len(RNASequence) != 0:
peptide += TranslationCode.get(RNASequence[0:3],'Do not fuck with me!')
RNASequence = RNASequence[3:]
return peptide
# Define SingleSelectionRoundSummary function, which returns the Occurrence of coding_sequences, grouped by peptide
# SingleSelectionRoundSummary = {peptideY: {coding_sequence_YZ: Occurrence_YZ}}
def SingleSelectionRoundSummary(fastq_file_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    """Parse one FASTQ file from a single round of selection.

    Every sequence line that contains both primer sequences is validated
    and trimmed by dna_coding_sequence (the quality string sits two lines
    below the sequence line in FASTQ format), then translated to a peptide.

    Returns:
        {peptide: {coding_sequence: occurrence}} -- occurrence counts of
        each distinct cDNA, grouped by the peptide it encodes.
    """
    # BUGFIX: the file handle was never closed before ('RawDataFile.close'
    # without parentheses is a no-op attribute access); 'with' guarantees it.
    with open(fastq_file_path, 'r') as RawDataFile:
        lines = RawDataFile.readlines()
    SingleSelectionRoundSummary = {}
    for i, line in enumerate(lines):
        # Only reads framed by both primer sequences are considered valid.
        if start_sequence in line and stop_sequence in line:
            coding_sequence = dna_coding_sequence(line, lines[i + 2], start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
            if coding_sequence is not None:
                peptide = Translation(coding_sequence)
                # Increment the per-peptide, per-cDNA occurrence counter.
                counts = SingleSelectionRoundSummary.setdefault(str(peptide), {})
                key = str(coding_sequence)
                counts[key] = counts.get(key, 0) + 1
    return SingleSelectionRoundSummary
# Define HammingDistance, which returns the number of mismatches between two Sequences
def HammingDistance(Sequence1, Sequence2):
    """Return the number of mismatched positions between two sequences.

    Sequences of unequal length are compared as if the shorter one were
    padded with '%' characters, so each extra position counts as a
    mismatch (unless the longer sequence literally contains '%' there,
    matching the original padding behaviour exactly).
    """
    from itertools import zip_longest
    return sum(a != b for a, b in zip_longest(Sequence1, Sequence2, fillvalue='%'))
# Define HammingDistanceBasedFormating, which returns FormatedSequence, such that mismatches are capitalised, and the rest of the FormatedSequence is lowercase
def HammingDistanceBasedFormating(Sequence1, Sequence2):
    """Format Sequence2 against Sequence1: matches lowercase, mismatches as-is.

    The shorter input is padded with '-' so the result always has
    max(len(Sequence1), len(Sequence2)) characters; padded positions show
    up as mismatches ('-' kept verbatim when Sequence2 is the shorter one).
    The unused mismatch counter from the original was dropped -- only the
    formatted string was ever returned.
    """
    from itertools import zip_longest
    return ''.join(b.lower() if a == b else b
                   for a, b in zip_longest(Sequence1, Sequence2, fillvalue='-'))
# Define Completedisplay_summary, which returns the Occurrence of coding_sequences, grouped by peptide, grouped by SelectionRound
# display_summary = {SelectionRound_X: {peptideXY: {CodingDNA_XYZ: Occurrence_XYZ}}}
def Completedisplay_summary(data_directory_path,
                            start_sequence,
                            stop_sequence,
                            cDNA_MinLength,
                            cDNA_MaxLength,
                            QualityScore):
    """Collect per-round selection summaries for every .fastq file in a directory.

    File names are expected to carry a two-digit round number immediately
    before the first dot (e.g. 'sample07.fastq').

    Returns:
        {cycle_number: {peptide: {coding_sequence: occurrence}}}
    """
    Completedisplay_summary = {}
    for file in os.listdir(data_directory_path):
        # Skip anything that is not a FASTQ file (hidden files, directories,
        # and other stray directory entries).
        if not file.endswith('.fastq'):
            continue
        file_path = os.path.join(data_directory_path, file)
        # int() copes with a leading zero ('07' -> 7), so the original
        # two-branch first-digit handling was redundant.
        dot = file.find('.')
        cycle_number = int(file[dot - 2 : dot])
        Completedisplay_summary[cycle_number] = SingleSelectionRoundSummary(
            file_path, start_sequence, stop_sequence,
            cDNA_MinLength, cDNA_MaxLength, QualityScore)
    return Completedisplay_summary
# Define peptidesOccurrences_BY_Round, which returns the Occurrences of peptides groupped by round
# peptidesOccurrences_BY_Round = {Round_X: {peptideXY: Occurrence_XY}}
def peptidesOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    """Return peptide read counts grouped by selection round.

    Relies on the module-level start_sequence / stop_sequence globals
    (they are not parameters of this function).

    Returns:
        {round: {peptide: occurrence}} -- occurrence is the sum of all
        cDNA counts encoding that peptide in that round.
    """
    summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    return {
        selection_round: {
            peptide: sum(dna_counts.values())
            for peptide, dna_counts in peptide_map.items()
        }
        for selection_round, peptide_map in summary.items()
    }
# Define DNAsOccurrences_BY_Round, which returns the Occurrences of DNAs groupped by round
# DNAsOccurrences_BY_Round = {Round_X: {DNA_XY: Occurrence_XY}}
def DNAsOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    """Return cDNA read counts grouped by selection round.

    Relies on the module-level start_sequence / stop_sequence globals.

    Returns:
        {round: {DNA: occurrence}} -- per-round counts of each distinct
        coding sequence, flattened across all peptides.
    """
    summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    result = {}
    for selection_round, peptide_map in summary.items():
        per_round = {}
        # Merge the per-peptide cDNA counters into one flat mapping.
        for dna_counts in peptide_map.values():
            per_round.update(dna_counts)
        result[selection_round] = per_round
    return result
# Define TotalReads_BY_Round, which returns number of reads by round
# TotalReads_BY_Round = {Round_X: TotalReads_X}
def TotalReads_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    """Return the total number of accepted reads per selection round.

    Returns:
        {round: total_read_count}

    The original also called Completedisplay_summary (a full re-parse of
    every FASTQ file) only to iterate its keys; peptidesOccurrences_BY_Round
    yields exactly the same round keys, so that redundant pass is removed.
    """
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    return {Round: sum(counts.values()) for Round, counts in peptides_BY_Round.items()}
# Define BaseRoundSortedpeptidesList, which returns list of peptides in base round sorted by their occurrence
# BaseRoundSortedpeptidesList = [peptide_1, ..., peptide_N]; Occurrence(peptide_1) > ... > Occurrence(peptide_N)
def BaseRoundSortedpeptidesList(data_directory_path, base_cycle):
    """Return the peptides of the base round, most frequent first.

    NOTE(review): cDNA_MinLength, cDNA_MaxLength and QualityScore are read
    from module-level globals rather than passed in -- confirm intentional.
    """
    occurrences = peptidesOccurrences_BY_Round(
        data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)[base_cycle]
    return sorted(occurrences, key=occurrences.get, reverse=True)
# Define BaseRoundSortedDNAsList, which returns list of DNAs in base round sorted by their occurrence
# BaseRoundSortedDNAsList = [DNA_1, ..., DNA_N]; Occurrence(DNA_1) > ... > Occurrence(DNA_N)
def BaseRoundSortedDNAsList(data_directory_path, base_cycle):
    """Return the cDNA sequences of the base round, most frequent first.

    NOTE(review): cDNA_MinLength, cDNA_MaxLength and QualityScore are read
    from module-level globals rather than passed in -- confirm intentional.
    """
    occurrences = DNAsOccurrences_BY_Round(
        data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)[base_cycle]
    return sorted(occurrences, key=occurrences.get, reverse=True)
def peptidesRank_IN_BaseRound(data_directory_path, base_cycle):
    """Return a dense ranking of base-round peptides by occurrence.

    Rank 1 is the most frequent peptide; peptides with equal counts share
    a rank, and the rank advances by exactly one at each strictly lower
    count (dense ranking).

    Returns:
        {peptide: rank}
    """
    occurrences = peptidesOccurrences_BY_Round(
        data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)[base_cycle]
    ranks = {}
    rank = 1
    previous_count = 0
    for peptide in BaseRoundSortedpeptidesList(data_directory_path, base_cycle):
        count = occurrences[peptide]
        # Advance the rank only when the count strictly drops; ties share.
        if count < previous_count:
            rank += 1
        ranks[peptide] = rank
        previous_count = count
    return ranks
# NOTE: 'Occurrences' is a misnomer in the next function -- the values are counts of distinct cDNA clones per peptide, not read counts.
# Define DNAClonesOccurrences_BY_Round_BY_peptide, which returns number of clones for each peptide groupped by round.
# DNAClonesOccurrences_BY_Round_BY_peptide = {SelectionRound_X: {peptideXY: DNAClonesOccurrences}}
def DNAClonesOccurrences_BY_Round_BY_peptide(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore):
    """Return, per round and peptide, the number of distinct cDNA clones.

    ('Occurrences' in the name is a misnomer -- the values are counts of
    distinct coding sequences, not read counts.)

    Returns:
        {round: {peptide: number_of_distinct_cDNA_clones}}
    """
    summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    return {
        selection_round: {peptide: len(dna_counts)
                          for peptide, dna_counts in peptide_map.items()}
        for selection_round, peptide_map in summary.items()
    }
# Define peptidesAppearances_BY_Round, which returns for each peptide in selection a list of rounds in which this peptide appears.
# peptidesAppearances_BY_Round = {peptide_X: [Round_A, ..., Round_N]}
def peptidesAppearances_BY_Round(BaseRoundSortedpeptidesList, peptidesOccurrences_BY_Round):
    """For each peptide, list the selection rounds it appears in.

    Args:
        BaseRoundSortedpeptidesList: iterable of peptide strings.
        peptidesOccurrences_BY_Round: {round: {peptide: occurrence}}.

    Returns:
        {peptide: [round, ...]} -- rounds in the mapping's iteration order;
        a peptide absent from every round maps to an empty list.
    """
    # Comprehension form of the original append loops; same ordering.
    return {
        peptide: [Round for Round in peptidesOccurrences_BY_Round
                  if peptide in peptidesOccurrences_BY_Round[Round]]
        for peptide in BaseRoundSortedpeptidesList
    }
# Define DNAsAppearances_BY_Round, which returns for each DNA in selection a list of rounds in which this DNA appears.
# DNAsAppearances_BY_Round = {DNA_X: [Round_A, ..., Round_N]}
def DNAsAppearances_BY_Round(BaseRoundSortedDNAsList, DNAsOccurrences_BY_Round):
    """For each DNA, list the selection rounds it appears in.

    Args:
        BaseRoundSortedDNAsList: iterable of DNA strings.
        DNAsOccurrences_BY_Round: {round: {DNA: occurrence}}.

    Returns:
        {DNA: [round, ...]} -- rounds in the mapping's iteration order;
        a DNA absent from every round maps to an empty list.
    """
    # Comprehension form of the original append loops; same ordering.
    return {
        DNA: [Round for Round in DNAsOccurrences_BY_Round
              if DNA in DNAsOccurrences_BY_Round[Round]]
        for DNA in BaseRoundSortedDNAsList
    }
# Define display_summaryReport, which, for the TopNpeptidesNumber, returns a summary table (.txt) and provides a summary grpaph (.png).
def display_summaryReport(data_directory_path,
                          base_cycle,
                          TopNpeptidesNumber,
                          stop_sequence,
                          start_sequence,
                          cDNA_MinLength,
                          cDNA_MaxLength,
                          QualityScore,
                          FileName):
    """Write a CSV table and a PNG plot summarising the top-N peptides.

    For the TopNpeptidesNumber most frequent peptides of base_cycle, the
    CSV lists rank, number of cDNA mutants and per-round count/frequency;
    the PNG plots each peptide's fraction of the round's total reads
    against the selection-cycle number.  Output file names are prefixed
    with today's date (via todays_date()) and suffixed with FileName.
    """
    today = todays_date()
    display_summaryFileNameCSV = str(today) + 'display_summary' + FileName + '.csv'
    display_summaryReportFile = open(display_summaryFileNameCSV, 'w')
    # Gather all per-round statistics up front (each helper re-parses the
    # FASTQ directory, so this section dominates the run time).
    display_summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    SortedRoundsList = sorted(display_summary.keys())
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    Totalpeptides_BY_Round = TotalReads_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    BaseRoundSortedpeptides = BaseRoundSortedpeptidesList(data_directory_path, base_cycle)
    BaseRoundTopSortedpeptides = BaseRoundSortedpeptides[0 : (TopNpeptidesNumber)]
    BaseRoundpeptidesRank = peptidesRank_IN_BaseRound(data_directory_path, base_cycle)
    # --- CSV header row ---
    display_summaryReportFile.write('peptide sequence' + ',' +
                                    'rank (#)' + ',' +
                                    'cDNA mutants' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write('C' +
                                        str(Round) +
                                        ' count (#) [frequency(%)]' + ',')
    display_summaryReportFile.write('\n')
    # --- one CSV row per top peptide ---
    for peptide in BaseRoundTopSortedpeptides:
        # NOTE(review): 'Round' below still holds the last value from the
        # header loop above (not base_cycle), and BaseRoundpeptideFraction
        # is never used afterwards -- looks like dead code; confirm before
        # removing.
        BaseRoundpeptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[base_cycle])
        peptideRank = BaseRoundpeptidesRank[peptide]
        # Mismatches vs the top peptide are capitalised; matches lowercase.
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        peptidecDNAMutants = len(display_summary[base_cycle][peptide])
        display_summaryReportFile.write(Formatedpeptide + ',' +
                                        str(peptideRank) + ',' +
                                        str(peptidecDNAMutants) + ',')
        for Round in SortedRoundsList:
            # Fraction of this round's total reads carried by the peptide.
            peptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])
            # NOTE(review): BaseFraction is assigned twice and never read.
            BaseFraction = peptideFraction
            display_summaryReportFile.write(str(peptides_BY_Round[Round].get(peptide, 0)) +
                                            ' [' + '{:.1%}'.format(peptideFraction) + ']' + ',')
            BaseFraction = peptideFraction
        display_summaryReportFile.write('\n')
    # --- totals row ---
    display_summaryReportFile.write('total count (#)' + ',' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write(str(Totalpeptides_BY_Round[Round]) + ',')
    display_summaryReportFile.write('\n\n\n')
    display_summaryReportFile.close()
    #-------------------------------------------------------------------------------
    # Create a figure of size 8x6 inches, 500 dots per inch
    plt.figure(figsize = (8, 6),
               dpi = 500)
    # Use the 'fivethirtyeight' matplotlib style
    plt.style.use('fivethirtyeight')
    # Create a new subplot from a grid of 1x1
    Graph = plt.subplot(1, 1, 1)
    Xs = []
    Ys = []
    # Map colours onto lines (one rainbow colour per top peptide)
    cNorm = matplotlib.colors.Normalize(vmin = 0,
                                        vmax = TopNpeptidesNumber - 1)
    scalarMap = matplotlib.cm.ScalarMappable(norm = cNorm,
                                             cmap = 'gist_rainbow')
    peptideLabels = []
    for peptide in BaseRoundTopSortedpeptides:
        #for peptide in Top24peptidesKDs:
        # Per-round fraction trace for this peptide.
        peptidesFractions_BY_Round = []
        for Round in SortedRoundsList:
            peptidesFractions_BY_Round += [float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])]
        x = SortedRoundsList
        y = peptidesFractions_BY_Round
        Xs += x
        Ys += y
        peptideRank = BaseRoundpeptidesRank[peptide]
        #peptideColour = scalarMap.to_rgba(peptideRank)
        # Colour by list position rather than rank (ties share a rank, so
        # rank-based colouring would collide).
        peptideColour = scalarMap.to_rgba(BaseRoundTopSortedpeptides.index(peptide))
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        peptideLabel = Formatedpeptide + ' (' + str(peptideRank) +')'
        #Set peptideLabel
        peptideLabels += [peptideLabel]
        plt.plot(x, y,
                 'o-',
                 c = peptideColour,
                 lw = 2.0,
                 ms = 4.0,
                 mew = 0.1,
                 mec = '#191919')
    # Pad the axis limits by 5% of the data span on every side.
    XMin = min(Xs) - 0.05*(max(Xs) - min(Xs))
    XMax = max(Xs) + 0.05*(max(Xs) - min(Xs))
    YMin = min(Ys) - 0.05*(max(Ys) - min(Ys))
    YMax = max(Ys) + 0.05*(max(Ys) - min(Ys))
    plt.axis([XMin, XMax, YMin, YMax])
    plt.xticks(fontsize = 10)
    plt.yticks(fontsize = 10)
    plt.xlabel("Selection Cycle (#)",
               fontsize = 10)
    plt.ylabel("peptide Fraction",
               fontsize = 10)
    # Legend sits below the axes.
    legend = plt.legend(peptideLabels,
                        title = 'cyclic-peptide random region',
                        loc = 'upper center',
                        bbox_to_anchor = (0.5, -0.10),
                        fancybox = True,
                        shadow = False,
                        fontsize = 10,
                        ncol = 3)
    Graph.get_legend().get_title().set_size('small')
    display_summaryFileNamePNG = str(today) + 'display_summary' + FileName + '.png'
    plt.savefig(display_summaryFileNamePNG,
                bbox_extra_artists = [legend],
                bbox_inches = 'tight',
                dpi = 300)
    plt.show()
    plt.close()
def DNAMutantsAnalysis(data_directory_path,
                       base_cycle,
                       TopNpeptidesNumber,
                       start_sequence,
                       stop_sequence,
                       FileName):
    """Write a CSV of per-round cDNA clone counts for the top-N peptides and
    plot log2(clone count) vs log2(peptide occurrence) in the base round.

    NOTE(review): cDNA_MinLength, cDNA_MaxLength and QualityScore are read
    from module-level globals rather than being parameters -- confirm this
    is intentional before reuse.
    """
    today = todays_date()
    DNAMutantsAnalysisFileNameCSV = str(today) + 'DNAsMutantsAnalysis' + FileName + '.csv'
    DNAMutantsAnalysisFile = open(DNAMutantsAnalysisFileNameCSV, 'w')
    # Gather all per-round statistics up front.
    display_summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    SortedRoundsList = sorted(display_summary.keys())
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    Totalpeptides_BY_Round = TotalReads_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    BaseRoundSortedpeptides = BaseRoundSortedpeptidesList(data_directory_path, base_cycle)
    BaseRoundTopSortedpeptides = BaseRoundSortedpeptides[0 : (TopNpeptidesNumber)]
    DNAClones_BY_Round_BY_peptide = DNAClonesOccurrences_BY_Round_BY_peptide(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    # --- CSV: one row per top peptide, one clone-count column per round ---
    DNAMutantsAnalysisFile.write('peptide sequence' + ',')
    for Round in SortedRoundsList:
        DNAMutantsAnalysisFile.write('round # ' + str(Round) + ' DNA clones (#)' + ',')
    DNAMutantsAnalysisFile.write('\n')
    for peptide in BaseRoundTopSortedpeptides:
        DNAMutantsAnalysisFile.write(peptide + ',')
        for Round in SortedRoundsList:
            DNAMutantsAnalysisFile.write(str(DNAClones_BY_Round_BY_peptide[Round].get(peptide, 0)) + ',')
        DNAMutantsAnalysisFile.write('\n')
    DNAMutantsAnalysisFile.close()
    #-------------------------------------------------------------------------------
    # Create a figure of size 8x6 inches, 500 dots per inch
    plt.figure(figsize = (8, 6),
               dpi = 500)
    # Use the 'fivethirtyeight' matplotlib style
    plt.style.use('fivethirtyeight')
    # Create a new subplot from a grid of 1x1
    Graph = plt.subplot(1, 1, 1)
    # peptideDNAClonesNumber_IN_BaseRound = []
    # peptideOccurrence_IN_BaseRound = []
    # Map colours onto points (one rainbow colour per base-round peptide)
    cNorm = matplotlib.colors.Normalize(vmin = 0,
                                        vmax = len(BaseRoundSortedpeptides) - 1)
    scalarMap = matplotlib.cm.ScalarMappable(norm = cNorm,
                                             cmap = 'gist_rainbow')
    RoundIndex = base_cycle
    Xs = []
    Ys = []
    # One scatter point per peptide: log2(#clones) vs log2(occurrence).
    for peptide in DNAClones_BY_Round_BY_peptide[RoundIndex]:
        # NOTE(review): math.log(..., 2) raises ValueError when a count is 0,
        # yet .get(..., 0) implies 0 was expected to be possible -- confirm.
        peptideDNAClonesNumber_IN_BaseRound = math.log(DNAClones_BY_Round_BY_peptide[RoundIndex].get(peptide, 0), 2)
        peptideOccurrence_IN_BaseRound = math.log(peptides_BY_Round[RoundIndex].get(peptide, 0), 2)
        peptideColour = scalarMap.to_rgba(BaseRoundSortedpeptides.index(peptide))
        x = peptideDNAClonesNumber_IN_BaseRound
        y = peptideOccurrence_IN_BaseRound
        Xs += [x]
        Ys += [y]
        plt.plot(x, y,
                 'o',
                 c = peptideColour,
                 ms = 5.0,
                 mew = 0.1,
                 mec = '#191919')
    # Pad the axis limits by 5% of the data span on every side.
    XMin = min(Xs) - 0.05*(max(Xs) - min(Xs))
    XMax = max(Xs) + 0.05*(max(Xs) - min(Xs))
    YMin = min(Ys) - 0.05*(max(Ys) - min(Ys))
    YMax = max(Ys) + 0.05*(max(Ys) - min(Ys))
    plt.axis([XMin, XMax, YMin, YMax])
    XLabel = 'log$2$ (DNA Clones #)' #$_$ makes subscript possible
    plt.xlabel(XLabel, fontsize = 14)
    YLabel = 'log$2$ (peptide Occurrence)'
    plt.ylabel(YLabel, fontsize = 14)
    legend = plt.legend(BaseRoundSortedpeptides,
                        loc = 'upper center',
                        bbox_to_anchor = (0.5, -0.15),
                        fancybox = True,
                        shadow = False,
                        ncol = 4)
    # NOTE(review): 'Round' here is the leftover loop variable from the CSV
    # section (the last round), not base_cycle -- confirm the intended name.
    DNAClonesAnalysisFileNamePNG = str(today) + 'DNAsMutantsAnalysisRegression' + 'R' + str(Round) + FileName + '.png'
    plt.savefig(DNAClonesAnalysisFileNamePNG, bbox_extra_artists=[legend], bbox_inches='tight', dpi = 300)
    plt.show()
    plt.close()
def peptidesRelatednessAnalysis(data_directory_path,
                                base_cycle,
                                TopNpeptidesNumber,
                                start_sequence,
                                stop_sequence,
                                cDNA_MinLength,
                                cDNA_MaxLength,
                                QualityScore,
                                FileName):
    """Group base-round DNAs into single-mutation families, translate them to
    peptide 'trees', draw the family forest with networkx, and write a CSV
    summary of every multi-member peptide family.

    Node colour encodes the first round a peptide appears in; node size
    encodes log2 of its base-round occurrence.  Output files are prefixed
    with today's date and suffixed with FileName.
    """
    # to extract todays_date
    today = todays_date()
    # to collect DNAs-based summary information By_Round
    DNAs_BY_Round = DNAsOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    TotalDNAs_BY_Round = TotalReads_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    BaseRoundSortedDNAs = BaseRoundSortedDNAsList(data_directory_path, base_cycle)
    DNAsAppearances = DNAsAppearances_BY_Round(BaseRoundSortedDNAs, DNAs_BY_Round)
    # to collect peptides-based summary information By_Round
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    Totalpeptides_BY_Round = TotalReads_BY_Round(data_directory_path, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    BaseRoundSortedpeptides = BaseRoundSortedpeptidesList(data_directory_path, base_cycle)
    peptidesAppearances = peptidesAppearances_BY_Round(BaseRoundSortedpeptides, peptides_BY_Round)
    display_summary = Completedisplay_summary(data_directory_path, start_sequence, stop_sequence, cDNA_MinLength, cDNA_MaxLength, QualityScore)
    SortedRoundsList = sorted(display_summary.keys())
    # to create a disjoint graph (Forest), based on DNAs in the BaseRound
    # (joint subgraphs are the Trees and the unique DNA sequences are the Leaves)
    BaseRoundDNAsForest = nx.Graph()
    # to add nodes (Leaves, unique DNA sequences) to the disjoint graph
    BaseRoundDNAsForest.add_nodes_from(BaseRoundSortedDNAs)
    # to add edges between DNAs at Hamming distance exactly 1; UsedNodes
    # prevents visiting each unordered pair twice
    UsedNodes = []
    for DNA1 in BaseRoundSortedDNAs:
        UsedNodes += [DNA1]
        for DNA2 in BaseRoundSortedDNAs:
            if DNA2 not in UsedNodes and HammingDistance(DNA1, DNA2) == 1:
                BaseRoundDNAsForest.add_edge(DNA1,DNA2,
                                             MutationsNumber = 1)
    # to extract individual joint subgraphs (stand alone Trees) from the Forest
    # NOTE(review): nx.connected_component_subgraphs was removed in NetworkX
    # 2.4; newer versions require
    # (G.subgraph(c).copy() for c in nx.connected_components(G)) --
    # confirm the pinned NetworkX version before running.
    BaseRoundDNAsTrees = list(nx.connected_component_subgraphs(BaseRoundDNAsForest, copy = True))
    # to create a peptideFamiliesSummary CSV file
    peptidesSummaryFileNameCSV = str(today) + 'peptideFamiliesSummary' + FileName + '.csv'
    peptidesSummaryFile = open(peptidesSummaryFileNameCSV, 'w')
    # to convert the list of DNA Trees into a list of peptide leaf sets
    # (distinct peptides encoded by each DNA family)
    peptidesTreesLeaves = []
    for DNAsTree in BaseRoundDNAsTrees:
        peptideLeaves = []
        for DNA in DNAsTree:
            peptide = Translation(DNA)
            if peptide not in peptideLeaves:
                peptideLeaves += [peptide]
        peptidesTreesLeaves += [peptideLeaves]
    # to sort the resulting list of lists from the largest to smallest
    peptidesTreesLeaves.sort(key = len, reverse = True)
    # to fix the coordinates of the origin of the graph layout
    Positions = {}
    X_0_Coordinate = 1
    Y_0_Coordinate = 0
    Y_X0_Coordinate = 0
    TreesXCoordinates = []
    peptideGraphFigure = plt.figure()
    peptideGraph = peptideGraphFigure.add_subplot(1, 1, 1)
    # counter of families with more than one peptide member
    MultiplepeptideFamilyCounter = 0
    # counter of single-peptide families
    SinglepeptideFamilyCounter = 0
    # sizes (node counts) of the multi-member families, in drawing order
    peptideTreeSize = []
    # to create a tree for each set of peptide leaves
    for peptideLeaves in peptidesTreesLeaves:
        peptideTree = nx.Graph()
        # each peptide becomes a node carrying its base-round occurrence
        # and the first round it appeared in
        for peptide in peptideLeaves:
            peptideTree.add_node(peptide,
                                 Occurrence = peptides_BY_Round[base_cycle][peptide],
                                 FirstAppearance = min(peptidesAppearances[peptide]))
        # to join peptide nodes that differ by a single mutation
        for peptide1 in peptideLeaves:
            for peptide2 in peptideLeaves:
                if HammingDistance(peptide1, peptide2) == 1:
                    peptideTree.add_edge(peptide1,peptide2)
        # the Rootpeptide is the most frequent peptide of the tree
        TreepeptidesOccurrences = nx.get_node_attributes(peptideTree, 'Occurrence')
        Rootpeptide = max(TreepeptidesOccurrences, key=TreepeptidesOccurrences.get)
        # dictionary of per-peptide properties (predecessor and occurrence)
        # NOTE(review): Graph.node was removed in NetworkX 2.x in favour of
        # Graph.nodes -- same version concern as above.
        Treepeptides = {}
        Treepeptides[Rootpeptide] = [0, '', 0, peptideTree.node[Rootpeptide]['Occurrence'], peptideTree.node[Rootpeptide]['FirstAppearance']]
        TreepeptidesList = list(peptideTree.nodes())
        TreepeptidesList.remove(Rootpeptide)
        for peptide in TreepeptidesList:
            # predecessor = next hop on the shortest path towards the root
            peptidePredecessor = nx.shortest_path(peptideTree, source = peptide, target = Rootpeptide, weight = None)[1]
            # PredecessorOccurrence can be used to sort the peptides, but does not seem to be useful
            PredecessorOccurrence = peptideTree.node[peptidePredecessor]['Occurrence']
            peptideOccurrence = peptideTree.node[peptide]['Occurrence']
            Treepeptides[peptide] = [peptidePredecessor, PredecessorOccurrence, peptideOccurrence]
        # to group peptides by their distance (mutation count) to the root
        peptides_BY_DistanceToTheRoot = {}
        for peptide in peptideTree.nodes():
            DistanceToTheRoot = nx.shortest_path_length(peptideTree, source = peptide, target = Rootpeptide, weight = None)
            if DistanceToTheRoot not in peptides_BY_DistanceToTheRoot:
                peptides_BY_DistanceToTheRoot[DistanceToTheRoot] = [peptide]
            else:
                peptides_BY_DistanceToTheRoot[DistanceToTheRoot] += [peptide]
        # to identify the largest group of equidistant peptides
        MaxpeptidesNumber = max(map(lambda k: len(peptides_BY_DistanceToTheRoot[k]), peptides_BY_DistanceToTheRoot))
        Sortedpeptides_BY_DistanceToTheRoot = {}
        # sort each distance group (by occurrence, then by predecessor) and
        # pad it with '' up to MaxpeptidesNumber for the CSV layout
        for DistanceToTheRoot in peptides_BY_DistanceToTheRoot:
            Equidistantpeptides = peptides_BY_DistanceToTheRoot[DistanceToTheRoot]
            Equidistantpeptides = sorted(Equidistantpeptides, key = lambda peptide: (Treepeptides[peptide][2]), reverse = True)
            # PredecessorOccurrence can be used to sort the peptides, but does not seem to be useful
            # Equidistantpeptides = sorted(Equidistantpeptides, key = lambda peptide: (Treepeptides[peptide][1]), reverse = True)
            Equidistantpeptides = sorted(Equidistantpeptides, key = lambda peptide: (Treepeptides[peptide][0]), reverse = False)
            AdditionalElements = MaxpeptidesNumber - len(Equidistantpeptides)
            Sortedpeptides_BY_DistanceToTheRoot[DistanceToTheRoot] = Equidistantpeptides + AdditionalElements * ['']
            # layout: multi-member families advance along X with mutation
            # distance; singleton families are stacked in a column at X = 0
            if len(peptideTree.nodes()) > 1:
                for peptide in Equidistantpeptides:
                    XCoordinate = X_0_Coordinate + DistanceToTheRoot
                    YCoordinate = Y_0_Coordinate - Equidistantpeptides.index(peptide)
                    Positions[peptide] = (XCoordinate, YCoordinate)
            elif len(peptideTree.nodes()) == 1:
                for peptide in Equidistantpeptides:
                    XCoordinate = 0
                    YCoordinate = Y_X0_Coordinate
                    Positions[peptide] = (XCoordinate, YCoordinate)
        #BaseRoundpeptidesGraph = nx.Graph()
        #BaseRoundpeptidesGraph.add_nodes_from(BaseRoundSortedpeptides)
        # node size encodes log2(base-round occurrence)
        Sizes = []
        for peptide in peptideTree.nodes():
            Sizes.append(math.log(peptides_BY_Round[base_cycle][peptide], 2) + 5)
        # node colour encodes the first round the peptide appeared in
        Colours = []
        for peptide in peptideTree.nodes():
            Colours.append(min(peptidesAppearances[peptide]))
        # axis limits from the accumulated node positions (recomputed each
        # iteration; only the final values are used by plt.axis below)
        XSpan = max(map(lambda peptide: Positions[peptide][0], Positions)) - min(map(lambda peptide: Positions[peptide][0], Positions))
        YSpan = max(map(lambda peptide: Positions[peptide][1], Positions)) - min(map(lambda peptide: Positions[peptide][1], Positions))
        XMin = min(map(lambda peptide: Positions[peptide][0], Positions)) - 0.01 * XSpan
        XMax = max(map(lambda peptide: Positions[peptide][0], Positions)) + 0.01 * XSpan
        YMin = min(map(lambda peptide: Positions[peptide][1], Positions)) - 0.02 * YSpan
        YMax = max(map(lambda peptide: Positions[peptide][1], Positions)) + 0.02 * YSpan
        NumberOfColours = len(SortedRoundsList)
        ColourMap = plt.get_cmap('Paired', NumberOfColours)
        nx.draw_networkx(peptideTree,
                         pos = Positions,
                         node_size = Sizes,
                         node_color = Colours,
                         cmap = ColourMap,
                         linewidths = 0.2,
                         width = 0.2,
                         with_labels = False,
                         #font_size = 6,
                         vmin = min(SortedRoundsList),
                         vmax = max(SortedRoundsList))
        # CSV block per multi-member family: one column triple (sequence,
        # frequency, rank) per mutation distance
        if len(peptideTree.nodes()) > 1:
            for DistanceToTheRoot in Sortedpeptides_BY_DistanceToTheRoot:
                peptidesSummaryFile.write(str(DistanceToTheRoot) + ' mutations' + ',' + 'frequency' + ',' + 'rank' + ',')
            peptidesSummaryFile.write('\n')
            for i in range(MaxpeptidesNumber):
                for MutationsNumber in Sortedpeptides_BY_DistanceToTheRoot:
                    peptide = Sortedpeptides_BY_DistanceToTheRoot[MutationsNumber][i]
                    if peptide != '':
                        Formatedpeptide = HammingDistanceBasedFormating(Rootpeptide, peptide)
                        peptideRank = str(BaseRoundSortedpeptides.index(peptide) + 1)
                        #ClonesNumber = str(len(peptideTree.neighbors(peptide)))
                        peptideFraction = ('{:.2%}'.format(float((peptides_BY_Round[base_cycle].get(peptide, 0)))/float(Totalpeptides_BY_Round[base_cycle])))
                    else:
                        # padding cell: this distance group has fewer members
                        Formatedpeptide = ''
                        #ClonesNumber = ''
                        peptideRank = ''
                        peptideFraction = ''
                    peptidesSummaryFile.write(Formatedpeptide + ',' +
                                              peptideFraction + ',' +
                                              peptideRank + ',')
                    #ClonesNumber + ',')
                peptidesSummaryFile.write('\n')
        # advance the layout cursor and the family counters
        if len(peptideTree.nodes()) > 1:
            TreesXCoordinates += [X_0_Coordinate]
            X_0_Coordinate += max(peptides_BY_DistanceToTheRoot.keys()) + 1
            MultiplepeptideFamilyCounter += 1
            peptideTreeSize += [len(peptideTree.nodes())]
        if len(peptideTree.nodes()) == 1:
            Y_X0_Coordinate -= 1
            SinglepeptideFamilyCounter += 1
        # blank line separating family blocks in the CSV
        peptidesSummaryFile.write('\n')
    peptidesSummaryFile.close()
    #plt.axis('off')
    plt.axis([XMin, XMax, YMin, YMax])
    # --- hand-drawn legends (colour legend = first-appearance round) ---
    peptideLegendColour = peptideGraphFigure.add_subplot(1, 1, 1)
    ColourMap = plt.get_cmap('Paired', NumberOfColours)
    peptideLegendColours = SortedRoundsList
    LegendDotsX = XMax - 0.3 * XMax
    YIncrement = - 0.03 * YMin
    #print (YIncrement)
    LegendColourDotsX = np.array([LegendDotsX] * NumberOfColours)
    #print (LegendColourDotsX)
    FirstYColours = YMin + 12 * YIncrement
    #print (FirstYColours)
    LastYColours = YMin + (12 + NumberOfColours) * YIncrement
    #print (LastYColours)
    LegendColourDotsY = np.linspace(FirstYColours, LastYColours, NumberOfColours, endpoint = False)
    #print (LegendColourDotsY)
    peptideLegendColour.scatter(x = LegendColourDotsX,
                                y = LegendColourDotsY,
                                s = 15,
                                c = peptideLegendColours,
                                cmap = ColourMap,
                                linewidths = 0.2)
    ColourLabels = SortedRoundsList
    # this way of setting the colours seems to be redundant
    # ColourLabels = ['{0}'.format(i) for i in range(NumberOfColours)]
    for label, x, y in zip(ColourLabels, LegendColourDotsX, LegendColourDotsY):
        plt.annotate(label, xy = (x, y), xytext = (5, 0),
                     textcoords = 'offset points',
                     fontsize = 5,
                     ha = 'left', va = 'center')
    plt.text(x = LegendDotsX, y = (max(LegendColourDotsY) + YIncrement),
             s = 'first-appearance round #',
             fontsize = 5)
    #plt.axis('off')
    # --- size legend: reference dots for occurrences 1..10000 ---
    peptideLegendSize = peptideGraphFigure.add_subplot(1, 1, 1)
    Size = []
    for i in [1, 10, 100, 1000, 10000]:
        Size.append(math.log(i, 2) + 5)
    LegendSizeDotsX = np.array([LegendDotsX] * 5)
    FirstYSizes = YMin + 5 * YIncrement
    LastYSizez = YMin + 10 * YIncrement
    LegendSizeDotsY = np.linspace(FirstYSizes, LastYSizez, 5, endpoint = False)
    peptideLegendSize.scatter(x = LegendSizeDotsX,
                              y = LegendSizeDotsY,
                              s = Size,
                              c = 'w',
                              linewidths = 0.2)
    SizeLabels = ['{0}'.format(i) for i in [1, 10, 100, 1000, 10000]]
    for label, x, y in zip(SizeLabels, LegendSizeDotsX, LegendSizeDotsY):
        plt.annotate(label, xy = (x, y), xytext = (5, 0),
                     textcoords = 'offset points',
                     fontsize = 5,
                     ha = 'left', va = 'center')
    plt.text(x = LegendDotsX, y = (max(LegendSizeDotsY) - 0.03 * YMin),
             s = 'frequency in the last round',
             fontsize = 5)
    #FamilySizeLabels = ['{0}'.format(i) for i in peptideTreeSize]
    #for label, x, y in zip(FamilySizeLabels, LegendSizeDotsX, LegendSizeDotsY):
    #    plt.annotate(label, xy = (x, y), xytext = (5, 0),
    #                 textcoords = 'offset points',
    #                 fontsize = 5,
    #                 ha = 'left', va = 'center')
    # annotate each multi-member family with its size above the tree
    for i in range(len(peptideTreeSize)):
        plt.text(x = TreesXCoordinates[i], y = YIncrement,
                 s = peptideTreeSize[i],
                 fontsize = 5)
    plt.text(x = LegendDotsX, y = YMin + 3 * YIncrement,
             s = ('total # unique peptide sequence ' + str(len(BaseRoundSortedpeptides))),
             fontsize = 5)
    plt.text(x = LegendDotsX, y = YMin + 2 * YIncrement,
             s = 'single-member peptide family # ' + str(SinglepeptideFamilyCounter),
             fontsize = 5)
    plt.text(x = LegendDotsX, y = YMin + 1 * YIncrement,
             s = 'multiple-member peptide family # ' + str(MultiplepeptideFamilyCounter),
             fontsize = 5)
    plt.axis('off')
    peptidesSummaryFileNamePNG = str(today) + 'peptideFamiliesSummary' + FileName + '.png'
    plt.savefig(peptidesSummaryFileNamePNG, dpi = 500)
    fig = plt.gcf()
    SizeInches = fig.get_size_inches()*fig.dpi
    SizeDots = fig.get_size_inches()
    #print (SizeInches)
    #print (SizeDots)
    #print (XMin)
    #print (XMax)
    #print (YMin)
    #print (YMax)
    #print (peptideTreeSize)
    #print (len(peptideTreeSize))
    #print (TreesXCoordinates)
    #print (len(TreesXCoordinates))
    plt.show()
    plt.close()
# --- Set the parameters for the analysis ---------------------------------
# Directory containing the raw NGS .fastq files (one per selection round).
data_directory_path = "SampleInput_RawNGSData"
# Round whose peptide ranking anchors all reports.
base_cycle = 6
# Number of top-ranked peptides to include in the reports.
TopNpeptidesNumber = 24
# Suffix appended to every output file name.
FileName = "PHD2_NNK"
# Flanking primer sequences used to locate the coding region in each read.
start_sequence = 'TAATACGACTCACTATAGGGTTAACTTTAAGAAGGAGATATACATATG' # NNK - T7g10M.F48
stop_sequence = 'TGCGGCAGCGGCAGCGGCAGCTAGGACGGGGGGCGGAAA' #NNK - CGS3an13.R39
# Alternative primer pairs for other library formats:
# start_sequence = 'TAATACGACTCACTATAGGGTTGAACTTTAAGTAGGAGATATATCCATG' #NNU - T7-CH-F49
# stop_sequence = 'TGTGGGTCTGGGTCTGGGTCTTAGGACGGGGGGCGGAAA' #NNU - CGS3-CH-R39
# start_sequence = 'ATG' # Met codon
# stop_sequence = 'TGCGGCAGC'# Akane seems to have trimmed the sequences
# start_sequence = 'TAGGGTTAACTTTAAGAAGGAGATATACATATG'# Oxford, Akane and Tom
# stop_sequence = 'TGCGGC'# Oxford, Akane and Tom
# stop_sequence = 'TAG' # amber stop codon
QualityScore = 29 # threshold QualityScore must be between 0 and 93
cDNA_MinLength = 24
cDNA_MaxLength = 240
# Generate the top-peptide CSV + PNG summary report.
display_summaryReport(data_directory_path,
                      base_cycle,
                      TopNpeptidesNumber,
                      start_sequence,
                      stop_sequence,
                      cDNA_MinLength,
                      cDNA_MaxLength,
                      QualityScore,
                      FileName)
# DNAMutantsAnalysis(data_directory_path,
#                    base_cycle,
#                    TopNpeptidesNumber,
#                    start_sequence,
#                    stop_sequence,
#                    FileName)
# Build the peptide-family relatedness graph and its CSV/PNG summary.
peptidesRelatednessAnalysis(data_directory_path,
                            base_cycle,
                            TopNpeptidesNumber,
                            start_sequence,
                            stop_sequence,
                            cDNA_MinLength,
                            cDNA_MaxLength,
                            QualityScore,
                            FileName)
```
| github_jupyter |
# GraviPy - tutorial
## _Coordinates_ and _MetricTensor_
To start working with the GraviPy package, you must load the package and initialize the pretty-printing mode in the Jupyter environment:
```
from gravipy.tensorial import * # import GraviPy package
from sympy import init_printing
import inspect
init_printing()
```
The next step is to choose coordinates and define a metric tensor of a particular space. Let's take, for example, the Schwarzschild metric — a vacuum solution to Einstein's field equations that describes the gravitational field of a spherical mass distribution.
```
# define some symbolic variables
t, r, theta, phi, M = symbols('t, r, \\theta, \phi, M')
# create a coordinate four-vector object instantiating
# the Coordinates class
x = Coordinates('\chi', [t, r, theta, phi])
# define a matrix of a metric tensor components
# (Schwarzschild solution in geometric units where G = c = 1)
Metric = diag(-(1-2*M/r), 1/(1-2*M/r), r**2, r**2*sin(theta)**2)
# create a metric tensor object instantiating the MetricTensor class
g = MetricTensor('g', x, Metric)
```
Each component of any tensor object can be computed by calling the appropriate instance of the _GeneralTensor_ subclass with indices as arguments. The covariant indices take positive integer values (1, 2, ..., dim). The contravariant indices take negative values (-dim, ..., -2, -1).
```
x(-1)
g(1, 1)
x(1)
```
Matrix representation of a tensor can be obtained in the following way
```
x(-All)
g(All, All)
g(All, 4)
```
## Predefined _Tensor_ Classes
The GraviPy package contains a number of _Tensor_ subclasses that can be used to calculate tensor components. The _Tensor_ subclasses available in the current version of the GraviPy package are
```
print([cls.__name__ for cls in vars()['Tensor'].__subclasses__()])
```
### The _Christoffel_ symbols
The first one is the _Christoffel_ class that represents Christoffel symbols of the first and second kind. (Note that the Christoffel symbols are not tensors) Components of the _Christoffel_ objects are computed from the below formula
$$ \Gamma_{\rho \mu \nu} = g_{\rho \sigma}\Gamma^{\sigma}_{\ \mu \nu} = \frac{1}{2}(g_{\rho \mu, \nu} + g_{\rho \nu, \mu} - g_{\mu \nu, \rho})$$
Let's create an instance of the _Christoffel_ class for the Schwarzschild metric g and compute some components of the object
```
Ga = Christoffel('Ga', g)
Ga(1, 2, 1)
```
Each component of a _Tensor_ object is computed only once, due to the memoization procedure implemented in the _Tensor_ class. The computed value of a tensor component is stored in the _components_ dictionary (an attribute of the _Tensor_ instance) and returned by subsequent calls to the instance.
```
Ga.components
```
The above dictionary consists of two elements because the symmetry of the Christoffel symbols is implemented in the _Christoffel_ class.
If necessary, you can clear the _components_ dictionary
```
Ga.components = {}
Ga.components
```
The _Matrix_ representation of the Christoffel symbols is the following
```
Ga(All, All, All)
```
You can get help on any of classes mentioned before by running the command
```
help(Christoffel)
```
Try also "_Christoffel?_" and "_Christoffel??_"
### The _Ricci_ tensor
$$ R_{\mu \nu} = \frac{\partial \Gamma^{\sigma}_{\ \mu \nu}}{\partial x^{\sigma}} - \frac{\partial \Gamma^{\sigma}_{\ \mu \sigma}}{\partial x^{\nu}} + \Gamma^{\sigma}_{\ \mu \nu}\Gamma^{\rho}_{\ \sigma \rho} - \Gamma^{\rho}_{\ \mu \sigma}\Gamma^{\sigma}_{\ \nu \rho} $$
```
Ri = Ricci('Ri', g)
Ri(All, All)
```
Contraction of the _Ricci_ tensor $R = R_{\mu}^{\ \mu} = g^{\mu \nu}R_{\mu \nu}$
```
Ri.scalar()
```
### The _Riemann_ tensor
$$ R_{\mu \nu \rho \sigma} = \frac{\partial \Gamma_{\mu \nu \sigma}}{\partial x^{\rho}} - \frac{\partial \Gamma_{\mu \nu \rho}}{\partial x^{\sigma}} + \Gamma^{\alpha}_{\ \nu \sigma}\Gamma_{\mu \rho \alpha} - \Gamma^{\alpha}_{\ \nu \rho}\Gamma_{\mu \sigma \alpha} - \frac{\partial g_{\mu \alpha}}{\partial x^{\rho}}\Gamma^{\alpha}_{\ \nu \sigma} + \frac{\partial g_{\mu \alpha}}{\partial x^{\sigma}}\Gamma^{\alpha}_{\ \nu \rho} $$
```
Rm = Riemann('Rm', g)
```
Some nonzero components of the _Riemann_ tensor are
```
from IPython.display import display, Math
from sympy import latex
# Scan all 4^4 index combinations; print each independent nonzero component
# once (the k<l and i<j filter skips symmetry-related duplicates).
for i, j, k, l in list(variations(range(1, 5), 4, True)):
    if Rm(i, j, k, l) != 0 and k<l and i<j:
        display(Math('R_{'+str(i)+str(j)+str(k)+str(l)+'} = '+ latex(Rm(i, j, k, l))))
```
You can also display the matrix representation of the tensor
```
# Rm(All, All, All, All)
```
Contraction of the _Riemann_ tensor $R_{\mu \nu} = R^{\rho}_{\ \mu \rho \nu} $
```
# Contract the Riemann tensor with the inverse metric over the first and
# third indices to recover the Ricci tensor R_{mu nu}.
ricci = sum([Rm(i, All, k, All)*g(-i, -k)
             for i, k in list(variations(range(1, 5), 2, True))],
            zeros(4))
ricci.simplify()  # simplifies every matrix entry in place
ricci
```
### The _Einstein_ tensor
$$ G_{\mu \nu} = R_{\mu \nu} - \frac{1}{2}g_{\mu \nu}R $$
```
G = Einstein('G', Ri)
G(All, All)
```
### _Geodesics_
$$ w_{\mu} = \frac{Du_{\mu}}{d\tau} = \frac{d^2x_{\mu}}{d\tau^2} - \frac{1}{2}g_{\rho \sigma, \mu} \frac{dx^{\rho}}{d\tau}\frac{dx^{\sigma}}{d\tau} $$
```
tau = Symbol('\\tau')
w = Geodesic('w', g, tau)
w(All).transpose()
```
Please note that instantiation of a _Geodesic_ class for the metric $g$ automatically turns on a _Parametrization_ mode for the metric $g$. Then all coordinates are functions of a world line parameter $\tau$
```
Parametrization.info()
x(-All)
g(All, All)
```
_Parametrization_ mode can be deactivated by typing
```
Parametrization.deactivate(x)
Parametrization.info()
x(-All)
g(All, All)
```
## Derivatives
### Partial derivative
All instances of a _GeneralTensor_ subclasses inherits _partialD_ method which works exactly the same way as SymPy _diff_ method.
```
T = Tensor('T', 2, g)
T(1, 2)
T.partialD(1, 2, 1, 3) # The first two indices belongs to second rank tensor T
T(1, 2).diff(x(-1), x(-3))
```
The only difference is that the computed value of _partialD_ is saved in the "_partial_derivative_components_" dictionary and then returned by subsequent calls to the _partialD_ method.
```
T.partial_derivative_components
```
### Covariant derivative
Covariant derivative components of the tensor ___T___ can be computed by the covariantD method from the formula
$$ \nabla_{\sigma} T_{\mu}^{\ \nu} = T_{\mu \ ;\sigma}^{\ \nu} = \frac{\partial T_{\mu}^{\ \nu}}{\partial x^{\sigma}} - \Gamma^{\rho}_{\ \mu \sigma}T_{\rho}^{\ \nu} + \Gamma^{\nu}_{\ \rho \sigma}T_{\mu}^{\ \rho}$$
Let's compute some covariant derivatives of a scalar field C
```
C = Tensor('C', 0, g)
C()
C.covariantD(1)
C.covariantD(2, 3)
```
All _covariantD_ components of every _Tensor_ object are also memoized
```
# Dump every memoized covariant-derivative component computed so far.
for k in C.covariant_derivative_components:
    display(Math(str(k) + ': '
                 + latex(C.covariant_derivative_components[k])))
C.covariantD(1, 2, 3)  # third covariant derivative of the scalar field C
```
Proof that the covariant derivative of the metric tensor $g$ is zero
```
# Metric compatibility: nabla_k g_{ij} = 0 for every index combination,
# so the whole list is falsy and the expression evaluates to True.
not any([g.covariantD(i, j, k).simplify()
         for i, j, k in list(variations(range(1, 5), 3, True))])
```
Bianchi identity in the Schwarzschild spacetime
$$ R_{\mu \nu \sigma \rho ;\gamma} + R_{\mu \nu \gamma \sigma ;\rho} + R_{\mu \nu \rho \gamma ;\sigma} = 0$$
```
# Check the second Bianchi identity component-by-component; True means the
# cyclic sum of covariant derivatives vanishes for every index combination.
not any([(Rm.covariantD(i, j, k, l, m) + Rm.covariantD(i, j, m, k, l)
          + Rm.covariantD(i, j, l, m, k)).simplify()
         for i, j, k, l, m in list(variations(range(1, 5), 5, True))])
```
## User-defined tensors
To define a new scalar/vector/tensor field in some space you should __extend__ the _Tensor_ class or __create an instance__ of the _Tensor_ class.
### _Tensor_ class instantiation
Let's create a third-rank tensor field living in the Schwarzschild spacetime as an instance of the _Tensor_ class
```
S = Tensor('S', 3, g)
```
Until you define (override) the _\_compute\_covariant\_component_ method of the __S__ object, all of $4^3$ components are arbitrary functions of coordinates
```
S(1, 2, 3)
inspect.getsourcelines(T._compute_covariant_component)
```
Let's assume that tensor __S__ is the commutator of the covariant derivatives of some arbitrary vector field __V__ and create a new _\_compute\_covariant\_component_ method for the object __S__
```
V = Tensor('V', 1, g)  # arbitrary covariant vector field on the spacetime
V(All)
def S_new_method(idxs): # definition
    # Commutator of second covariant derivatives of V for the index
    # triple idxs = (mu, nu, rho).
    component = (V.covariantD(idxs[0], idxs[1], idxs[2])
                 - V.covariantD(idxs[0], idxs[2], idxs[1])).simplify()
    S.components.update({idxs: component}) # memoization
    return component
S._compute_covariant_component = S_new_method
# _compute_covariant_component method was overridden
S(1, 1, 3)
```
One can check that the well known formula is correct
$$ V_{\mu ;\nu \rho} - V_{\mu ;\rho \nu} = R^{\sigma}_{\ \mu \nu \rho}V_{\sigma} $$
```
# NOTE(review): this rebinds the name `zeros`, shadowing sympy's zeros()
# used earlier (e.g. zeros(4) above) — rerunning prior cells after this one
# would fail. Also assumes `reduce` is in scope via the star import; confirm.
zeros = reduce(Matrix.add, [Rm(-i, All, All, All)*V(i)
                            for i in range(1, 5)]) - S(All, All, All)
zeros.simplify()
zeros  # expected to be the zero matrix, confirming the identity
```
Another way of tensor creation is to make an instance of the _Tensor_ class with the components option. Tensor components stored in a _Matrix_ object are written to the _components_ dictionary of the instance by this method.
```
# Build a tensor directly from precomputed components (contravariant in all
# three indices); then verify that every stored component is zero.
Z = Tensor('Z', 3, g, components=zeros, components_type=(1, 1, 1))
not any(Z.components.values())
```
### _Tensor_ class extension
As an example of the _Tensor_ class extension you can get the source code of any of the predefined _Tensor_ subclasses
```
print([cls.__name__ for cls in vars()['Tensor'].__subclasses__()])
inspect.getsourcelines(Christoffel)
```
| github_jupyter |
```
import re
import numpy as np
import transformers as ppb #!python -m pip install transformers
import torch
import pickle
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils import data
from torchsummary import summary
import warnings
warnings.filterwarnings('ignore')
# Pre-compiled patterns and translation table, built once at import time so
# preprocess_regex makes a single C-level pass over the text instead of one
# full-string .replace() per punctuation character found (O(n*k) before).
_TAG_RE = re.compile(r'<.*?>')
_DIGIT_RE = re.compile(r'\d+')
# The character class below includes a literal backslash (the original
# string contained '\,'), so backslashes and quotes are stripped here too —
# which makes the original's extra re.sub passes for \, ' and " redundant.
_PUNCT_TABLE = str.maketrans('', '', '''!()-[]{};:'"\\,<>./?@#$%^&*_~''')

def preprocess_regex(text):
    """Normalize raw document text for downstream tokenization.

    Steps, in order: trim surrounding whitespace and lowercase; strip HTML
    tags; drop punctuation characters (including quotes and backslashes);
    remove all digit runs.

    Args:
        text: raw input string.

    Returns:
        The cleaned, lowercased string.
    """
    # remove leading & trailing white space and convert text to lowercase
    text = text.strip().lower()
    # remove HTML tags
    text = _TAG_RE.sub('', text)
    # remove punctuation marks, quotes and backslashes in one pass
    text = text.translate(_PUNCT_TABLE)
    # remove numbers
    return _DIGIT_RE.sub('', text)
# Domain-specific + standard English stop words removed before tokenization.
# (Kept as a list for backward compatibility with any external consumers.)
stop_words = ["from", "to", "subject", "title", "request", "looking", "look", "forward", "cheers", "regards", "thank", "thanks", "hi", "all", "since", "mentioned", "free", "ourselves", "hers", "between", "yourself", "but", "again", "there", "about", "once", "during", "out", "very", "having", "with", "they", "own", "an", "be", "some", "for", "do", "its", "yours", "such", "into", "of", "most", "itself", "other", "off", "is", "s", "am", "or", "who", "as", "from", "him", "each", "the", "themselves", "until", "below", "are", "we", "these", "your", "his", "through", "don", "nor", "me", "were", "her", "more", "himself", "this", "down", "should", "our", "their", "while", "above", "both", "up", "to", "ours", "had", "she", "all", "no", "when", "at", "any", "before", "them", "same", "and", "been", "have", "in", "will", "on", "does", "yourselves", "then", "that", "because", "what", "over", "why", "so", "can", "did", "not", "now", "under", "he", "you", "herself", "has", "just", "where", "too", "only", "myself", "which", "those", "i", "after", "few", "whom", "t", "being", "if", "theirs", "my", "against", "a", "by", "doing", "it", "how", "further", "was", "here", "than"]

# Membership is tested once per token, so use a frozenset (O(1) lookup)
# instead of scanning the list (O(n)) for every word.
_STOP_WORD_SET = frozenset(stop_words)

# BERT-family models accept at most 512 input positions; the filtered text
# is capped at this length before tokenization.
MAX_TOKENIZE_LEN = 512

def remove_stop_words(input_str):
    """Drop stop words from *input_str*, capping the result at MAX_TOKENIZE_LEN.

    Args:
        input_str: whitespace-separated, already-lowercased text.

    Returns:
        The filtered string, truncated to at most MAX_TOKENIZE_LEN characters.
    """
    filtered_words = [w for w in input_str.split() if w not in _STOP_WORD_SET]
    output = " ".join(filtered_words)
    # NOTE(review): the cap counts *characters*, not tokens, and can cut a
    # word in half — preserved as-is from the original pipeline behavior.
    if len(output) > MAX_TOKENIZE_LEN:
        return output[0:MAX_TOKENIZE_LEN]
    return output #return as string
# ---------------------------------------------------------------------------
# Load the label index, the DistilBERT feature extractor and the trained MLP
# classifier, then run one end-to-end prediction on a single test document.
# ---------------------------------------------------------------------------
print('Load index label')
label_path = "labelclass.pickle"
# Fix: the original left the pickle file handle open; use a context manager.
with open(label_path, 'rb') as label_file:
    labelhandler = pickle.load(label_file)  # index -> class-name mapping
# For DistilBERT:
model_class, tokenizer_class, pretrained_weights = (ppb.DistilBertModel, ppb.DistilBertTokenizer, 'distilbert-base-uncased')
## uncomment below for BERT instead of distilBERT
#model_class, tokenizer_class, pretrained_weights = (ppb.BertModel, ppb.BertTokenizer, 'bert-base-uncased')
# Load pretrained model/tokenizer
tokenizer = tokenizer_class.from_pretrained(pretrained_weights)
model = model_class.from_pretrained(pretrained_weights)
# Load the trained classification head (Keras MLP over BERT CLS embeddings)
from keras.models import load_model
classifier = load_model('bert-embeddings-keras-mlp.h5')
# Example test files:
# D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\alt.atheism\\53257 alt.atheism
# D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\comp.sys.ibm.pc.hardware\\60817 comp.sys.ibm.pc.hardware
test_file_input = 'D:\\Users\\chiawei\\konduit\\Github\\newsgroup_data\\20news-bydate\\20news-bydate-test\\comp.sys.ibm.pc.hardware\\60817'
with open(test_file_input, "r") as file_iterator:
    raw_input = file_iterator.read()
processed_input = remove_stop_words(preprocess_regex(raw_input))
# Tokenize; pad with zeros to at least 512 ids so the feature matrix has a
# fixed width (longer documents keep their full length).
tokenized_test_data = tokenizer.encode(processed_input, add_special_tokens=True)
max_len = 512
max_len_add = max(max_len, len(tokenized_test_data))
padded_test_data = np.array([tokenized_test_data + [0]*(max_len_add - len(tokenized_test_data))])
attention_test_data = np.where(padded_test_data != 0, 1, 0)  # mask out padding
# Fix: build each tensor once — the original wrapped an existing tensor in
# torch.tensor() again, which copies the data and triggers a torch warning.
input_test_ids = torch.tensor(padded_test_data).to(torch.int64)
attention_test_mask = torch.tensor(attention_test_data)
with torch.no_grad():  # inference only — no gradients needed
    last_hidden_states = model(input_test_ids, attention_mask=attention_test_mask)
# Use the [CLS] position embedding as the document feature vector.
test_feature = last_hidden_states[0][:, 0, :].numpy()
test_output = classifier.predict(test_feature)
local_index = int(np.argmax(test_output, 1)[0])
print("Class: {}".format(labelhandler[local_index]))
print("Probabilities: {}".format(np.max(test_output)))
import io
import logging
import time
from konduit.load import client_from_file
logging.basicConfig(level='DEBUG')
logging.info("Test")
# Build the Konduit serving client from the YAML pipeline configuration.
client = client_from_file("config.yaml")
# Simple throughput check: fire a fixed number of identical requests.
NUM_REQUESTS = 10
responses = []
start = time.time()
for i in range(NUM_REQUESTS):
    response = client.predict({"default": test_feature})
    responses.append(response)
end = time.time()
# Fix: derive RPS from the actual number of responses instead of a
# hard-coded 10.0, so the message stays consistent with the request count.
print("%f seconds elapsed for %d requests (%d RPS)" % (end - start, len(responses), (len(responses) / (end - start))))
# One more request to inspect the predicted class distribution.
response = client.predict({"default": test_feature})
results = response["output"]["probabilities"]
index = int(np.argmax(response['output']['probabilities'], 1)[0])
print("Class: {}".format(labelhandler[index]))
print("Probabilities: {}".format(np.max(response['output']['probabilities'])))
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.