markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
https://xavierbourretsicotte.github.io/LDA_QDA.html
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.colors as colors
from sklearn import linear_model, datasets
import seaborn as sns
import itertools

# %matplotlib inline  # notebook magic; only valid inside IPython/Jupyter
sns.set()


def multivariate_gaussian_pdf(X, MU, SIGMA):
    '''Returns the pdf of a multivariate gaussian distribution
    - X, MU are p x 1 vectors
    - SIGMA is a p x p matrix'''
    # Initialize and reshape to column vectors
    X = X.reshape(-1, 1)
    MU = MU.reshape(-1, 1)
    p, _ = SIGMA.shape

    # Normalization constant and quadratic form in the exponent
    SIGMA_inv = np.linalg.inv(SIGMA)
    denominator = np.sqrt((2 * np.pi)**p * np.linalg.det(SIGMA))
    exponent = -(1/2) * ((X - MU).T @ SIGMA_inv @ (X - MU))

    return float((1. / denominator) * np.exp(exponent))


def calculate_boundary(X, MU_k, MU_l, SIGMA, pi_k, pi_l):
    '''LDA pairwise boundary score between classes k and l (shared SIGMA).
    Zero on the decision boundary; the sign selects the class.'''
    return (np.log(pi_k / pi_l)
            - 1/2 * (MU_k + MU_l).T @ np.linalg.inv(SIGMA) @ (MU_k - MU_l)
            + X.T @ np.linalg.inv(SIGMA) @ (MU_k - MU_l)).flatten()[0]


def QDA_score(X, MU_k, SIGMA, pi_k):
    '''Quadratic discriminant score for class k at point X:
    log(pi_k) - 1/2*log|SIGMA_k| - 1/2*(X-MU_k)^T SIGMA_k^-1 (X-MU_k)'''
    SIGMA_inv = np.linalg.inv(SIGMA)
    # BUG FIX: the determinant term must be -1/2*log|SIGMA|. The original
    # computed -1/2*log|SIGMA^-1| = +1/2*log|SIGMA| (wrong sign), which can
    # flip predictions whenever class covariances differ.
    return (np.log(pi_k)
            - 1/2 * np.log(np.linalg.det(SIGMA))
            - 1/2 * (X - MU_k).T @ SIGMA_inv @ (X - MU_k)).flatten()[0]


def predict_QDA_class(X, MU_list, SIGMA_list, pi_list):
    '''Returns the class index whose quadratic discriminant score is largest.'''
    scores_list = []
    classes = len(MU_list)
    for p in range(classes):
        score = QDA_score(X.reshape(-1, 1), MU_list[p].reshape(-1, 1),
                          SIGMA_list[p], pi_list[p])
        scores_list.append(score)
    return np.argmax(scores_list)


iris = sns.load_dataset("iris")
sns.pairplot(iris, hue="species")

# Prefix column names so they sort in anatomical order.
iris = iris.rename(index=str,
                   columns={'sepal_length': '1_sepal_length',
                            'sepal_width': '2_sepal_width',
                            'petal_length': '3_petal_length',
                            'petal_width': '4_petal_width'})

# `size` was renamed to `height` in seaborn (see the deprecation warning).
sns.FacetGrid(iris, hue="species", height=6) \
    .map(plt.scatter, "1_sepal_length", "2_sepal_width") \
    .add_legend()
plt.title('Scatter plot')

df1 = iris[["1_sepal_length", "2_sepal_width", 'species']]
/usr/local/lib/python3.7/dist-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code. warnings.warn(msg, UserWarning)
MIT
Chapter 4/Python/discriminant analysis/QDA visualization from outside.ipynb
borisgarbuzov/schulich_data_science_1
Visualizing the gaussian estimations and the boundary lines
#Estimating the parameters mu_list = np.split(df1.groupby('species').mean().values,[1,2]) sigma = df1.cov().values pi_list = df1.iloc[:,2].value_counts().values / len(df1) # Our 2-dimensional distribution will be over variables X and Y N = 100 X = np.linspace(3, 8, N) Y = np.linspace(1.5, 5, N) X, Y = np.meshgrid(X, Y) #fig = plt.figure(figsize = (10,10)) #ax = fig.gca() color_list = ['Blues','Greens','Reds'] my_norm = colors.Normalize(vmin=-1.,vmax=1.) g = sns.FacetGrid(iris, hue="species", size=10, palette = 'colorblind') .map(plt.scatter,"1_sepal_length", "2_sepal_width", ) .add_legend() my_ax = g.ax for i,v in enumerate(itertools.combinations([0,1,2],2)): mu = mu_list[i] Sigma = sigma #Computing the cost function for each theta combination zz = np.array( [multivariate_gaussian_pdf( np.array([xx,yy]).reshape(-1,1), mu, Sigma) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) bb = np.array( [ calculate_boundary(np.array([xx,yy]).reshape(-1,1),mu_list[v[0]].reshape(-1,1),mu_list[v[1]].reshape(-1,1), sigma , .33,.33) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) #Reshaping the cost values Z = zz.reshape(X.shape) B = bb.reshape(X.shape) #Plot the result in 3D my_ax.contour( X, Y, Z, 3,cmap = color_list[i] , norm = my_norm, alpha = .3) my_ax.contour( X, Y, B , levels = [0] ,cmap = color_list[i] , norm = my_norm) # Adjust the limits, ticks and view angle my_ax.set_xlabel('X') my_ax.set_ylabel('Y') my_ax.set_title('LDA: gaussians of each class and boundary lines') plt.show()
/usr/local/lib/python3.7/dist-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code. warnings.warn(msg, UserWarning)
MIT
Chapter 4/Python/discriminant analysis/QDA visualization from outside.ipynb
borisgarbuzov/schulich_data_science_1
Visualizing the Gaussian estimations with different covariance matrices
#Estimating the parameters mu_list = np.split(df1.groupby('species').mean().values,[1,2]) sigma_list = np.split(df1.groupby('species').cov().values,[2,4], axis = 0) pi_list = df1.iloc[:,2].value_counts().values / len(df1) # Our 2-dimensional distribution will be over variables X and Y N = 100 X = np.linspace(3, 8, N) Y = np.linspace(1.5, 5, N) X, Y = np.meshgrid(X, Y) #fig = plt.figure(figsize = (10,10)) #ax = fig.gca() color_list = ['Blues','Greens','Reds'] my_norm = colors.Normalize(vmin=-1.,vmax=1.) g = sns.FacetGrid(iris, hue="species", size=10, palette = 'colorblind') .map(plt.scatter, "1_sepal_length", "2_sepal_width",) .add_legend() my_ax = g.ax for i in range(3): mu = mu_list[i] Sigma = sigma_list[i] #Computing the cost function for each theta combination zz = np.array( [multivariate_gaussian_pdf( np.array([xx,yy]).reshape(-1,1), mu, Sigma) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) #Reshaping the cost values Z = zz.reshape(X.shape) Zm = np.ma.masked_array(Z, Z < 0.15) #Plot the result in 3D my_ax.contour( X, Y, Z, 15, alpha = .3 ,cmap = color_list[i], norm = my_norm) my_ax.pcolor(X,Y,Zm, alpha = .1, cmap = color_list[i], norm = my_norm) # Adjust the limits, ticks and view angle my_ax.set_xlabel('X') my_ax.set_ylabel('Y') my_ax.set_title('Multivariate Gaussians with different Sigma ') plt.show()
/usr/local/lib/python3.7/dist-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code. warnings.warn(msg, UserWarning)
MIT
Chapter 4/Python/discriminant analysis/QDA visualization from outside.ipynb
borisgarbuzov/schulich_data_science_1
Visualizing the quadratic boundary curves
#Estimating the parameters mu_list = np.split(df1.groupby('species').mean().values,[1,2]) sigma_list = np.split(df1.groupby('species').cov().values,[2,4], axis = 0) pi_list = df1.iloc[:,2].value_counts().values / len(df1) # Our 2-dimensional distribution will be over variables X and Y N = 200 X = np.linspace(4, 8, N) Y = np.linspace(1.5, 5, N) X, Y = np.meshgrid(X, Y) #fig = plt.figure(figsize = (10,10)) #ax = fig.gca() color_list = ['Blues','Greens','Reds'] my_norm = colors.Normalize(vmin=-1.,vmax=1.) g = sns.FacetGrid(iris, hue="species", size=10, palette = 'colorblind') .map(plt.scatter, "1_sepal_length", "2_sepal_width",) .add_legend() my_ax = g.ax #Computing the predicted class function for each value on the grid zz = np.array( [predict_QDA_class( np.array([xx,yy]).reshape(-1,1), mu_list, sigma_list, pi_list) for xx, yy in zip(np.ravel(X), np.ravel(Y)) ] ) #Reshaping the predicted class into the meshgrid shape Z = zz.reshape(X.shape) #Plot the filled and boundary contours my_ax.contourf( X, Y, Z, 2, alpha = .1, colors = ('blue','green','red')) my_ax.contour( X, Y, Z, 2, alpha = 1, colors = ('blue','green','red')) # Addd axis and title my_ax.set_xlabel('X') my_ax.set_ylabel('Y') my_ax.set_title('QDA and boundaries') plt.show()
/usr/local/lib/python3.7/dist-packages/seaborn/axisgrid.py:316: UserWarning: The `size` parameter has been renamed to `height`; please update your code. warnings.warn(msg, UserWarning)
MIT
Chapter 4/Python/discriminant analysis/QDA visualization from outside.ipynb
borisgarbuzov/schulich_data_science_1
QDA Accuracy
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis X_data = df1.iloc[:,0:2] y_labels = df1.iloc[:,2].replace({'setosa':0,'versicolor':1,'virginica':2}).copy() qda = QuadraticDiscriminantAnalysis(store_covariance=True) qda.fit(X_data,y_labels) #Numpy accuracy y_pred = np.array( [predict_QDA_class( np.array([xx,yy]).reshape(-1,1), mu_list, sigma_list, pi_list) for xx, yy in zip(np.ravel(X_data.values[:,0]), np.ravel(X_data.values[:,1])) ] ) display(np.mean(y_pred == y_labels)) #predict_QDA_class( np.array([xx,yy]).reshape(-1,1), mu_list, sigma_list, pi_list) #Sklearn accuracy display(qda.score(X_data,y_labels))
_____no_output_____
MIT
Chapter 4/Python/discriminant analysis/QDA visualization from outside.ipynb
borisgarbuzov/schulich_data_science_1
03 Geometric Machine Learning for Shape Analysis E) Unsupervised Learning: Dimension Reduction$\color{003660}{\text{Nina Miolane - Assistant Professor}}$ @ BioShape Lab @ UCSB ECE This Unit- **Unit 1 (Geometry - Math!)**: Differential Geometry for Engineers- **Unit 2 (Shapes)**: Computational Representations of Biomedical Shapes- **Unit 3 (Machine Learning)**: **Geometric Machine Learning for Shape Analysis** - A) Mean and Covariance - B) Supervised Learning: Classification - C) Supervised Learning: Regression - D) Unsupervised Learning: Clustering - **E) Unsupervised Learning: Dimension Reduction** - Motivation: Dimension Reduction on Optic Nerve Heads (5 landmarks) - Traditional Principal Component Analysis (PCA) - Dimension Reduction Method 1: Tangent PCA - Dimension Reduction Method 2: Principal Geodesic Analysis- **Unit 4 (Deep Learning)**: Geometric Deep Learning for Shape Analysis$\rightarrow$ We explain the machine learning algorithms and statistics used in these real-world scenarios. Overview of Machine Learning (ML)Machine Learning is divided into two principal categories of algorithms: supervised and unsupervised learning algorithms. Both learn from data.$\color{EF5645}{\text{Definition}}$: **Unsupervised learning** refers to the task of discovering any naturally occurring patterns in a dataset of data points $x$. We say that the model is:- a clustering: if we want to find groups (clusters),- a dimension reduction: if we want to find the main sources of variations. Why Dimension Reduction- Some data are (very) high dimensional- Dimension reduction: Extract a low dimensional structure for: - Visualization - More efficient use of resources (memory) - Downstream tasks: fewer dimensions -> better generalization. Dimension Reduction$\color{EF5645}{\text{Given}}$:- dataset $X_1, . . . 
, X_n$ in a data space of dimension $D$- integer $d < D$,$\color{EF5645}{\text{Goal}}$: - Find representations $z_1, ..., z_n$ of the data points, - that belong to a lower-dimensional space of dimension $d < D$, - that are "representative" of the $X_1, ..., X_n$. Motivation: Dimension Reduction for Optical Nerve Heads $\color{EF5645}{\text{Question}}$: Are the shapes of optic nerve heads split into two clusters: healthy versus glaucoma? --> _Can we visualize the dataset?_Data acquired with a Heidelberg Retina Tomograph - Patrangenaru and Ellingson (2015):- 11 Rhesus monkeys- 22 images of monkeys’ eyes: - an experimental glaucoma was introduced in one eye, - while the second eye was kept as control. $\rightarrow$ On each image, 5 anatomical "landmarks" were recorded.Comparison of optic nerve heads in monkeys with and without glaucoma.
import numpy as np import matplotlib.pyplot as plt import matplotlib.colors as colors from mpl_toolkits.mplot3d import Axes3D from mpl_toolkits.mplot3d.art3d import Poly3DCollection import matplotlib.patches as mpatches import warnings warnings.filterwarnings("ignore") import geomstats.datasets.utils as data_utils nerves, labels, monkeys = data_utils.load_optical_nerves() # Keep the 5 landmarks print(nerves.shape) print(labels) print(monkeys)
(22, 5, 3) [0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1] [ 0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10 10]
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
Plot two optical shapes:
two_nerves = nerves[monkeys == 0] print(two_nerves.shape) two_labels = labels[monkeys == 0] print(two_labels) label_to_str = {0: "Normal nerve", 1: "Glaucoma nerve"} label_to_color = { 0: (102 / 255, 178 / 255, 255 / 255, 1.0), 1: (255 / 255, 178 / 255, 102 / 255, 1.0), } fig = plt.figure(); ax = Axes3D(fig); ax.set_xlim((2000, 4000)); ax.set_ylim((1000, 5000)); ax.set_zlim((-600, 200)) for nerve, label in zip(two_nerves, two_labels): x = nerve[:, 0] y = nerve[:, 1] z = nerve[:, 2] verts = [list(zip(x, y, z))] poly = Poly3DCollection(verts, alpha=0.5) color = label_to_color[int(label)] poly.set_color(colors.rgb2hex(color)) poly.set_edgecolor("k") ax.add_collection3d(poly) patch_0 = mpatches.Patch(color=label_to_color[0], label=label_to_str[0], alpha=0.5) patch_1 = mpatches.Patch(color=label_to_color[1], label=label_to_str[1], alpha=0.5) plt.legend(handles=[patch_0, patch_1], prop={"size": 20}) plt.show()
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
Refresher: Traditional Principal Component Analysis $\color{EF5645}{\text{Principal Component Analysis (PCA)}}$ is an:- orthogonal projection of the data (belonging to a vector space $\mathbb{R}^D$),- into a (lower dimensional) linear subspace $\mathbb{R}^d$, $d < D$, - so that the variance of the projected data is maximized. $\color{EF5645}{\text{Notations}}$: $D$ original dimension, $d$ dimension of lower-dimensional subspace. PCA: Intuition What is the 1-dimensional linear subspace that maximizes the variance of the projected data? PCA: Mathematical NotationsFind an orthonormal basis $\left\{v_{1}, \ldots, v_{D}\right\}$ of $\mathbb{R}^{D}$, which satisfies the recursive relationship$$\begin{gathered}v_{1}=\underset{\|v\|=1}{\arg \max } \sum_{i=1}^{n}\left(v \cdot x_{i}\right)^{2} \\v_{k}=\underset{\|v\|=1}{\arg \max } \sum_{i=1}^{n} \sum_{j=1}^{k-1}\left(v_{j} \cdot x_{i}\right)^{2}+\left(v \cdot x_{i}\right)^{2}\end{gathered}$$The $x_i$ are centered at the mean in the equations above. - The subspace $V_{k}=\operatorname{span}\left(\left\{v_{1}, \ldots, v_{k}\right\}\right)$ is: - the $k$-dimensional subspace - that maximizes the variance - of the data projected to that subspace: $\pi_{V_1}(x_i) = v \cdot x_{i}$ $\color{047C91}{\text{Exercise}}$: Consider the two projections below. Which maximizes the variance? PCA: MethodThe basis $\left\{v_{k}\right\}$ is computed as the set of ordered eigenvectors of the sample covariance matrix of the data. 
$\color{6D7D33}{\text{Method}}$: Given data $\left\{X_{1}, \ldots, X_n\right\}$:- Compute covariance matrix $\Sigma$, where $\quad \overline{\mathbf{x}}=\frac{1}{n} \sum_{i=1}^{n} X_{i}$:$$\Sigma=\frac{1}{n} \sum_{i=1}^{n}\left(\mathbf{x}_{i}-\overline{\mathbf{x}}\right)(\mathbf{x}_i-\overline{\mathbf{x}})^{T} \quad$$ - Compute eigenvectors, eigenvalues of $\Sigma$: - Eigenvectors: principal components (PCs) - Eigenvalues: orders PCs PCA: Explanation$\color{EF5645}{\text{Goal (Rewritten)}}$: Maximize $\quad \mathbf{u}^{\top} \mathbf{X X}^{\top} \mathbf{u}$s.t $\quad \mathbf{u}^{\top} \mathbf{u}=1$, where $\Sigma = \mathbf{X X}^{\top}$$\color{6D7D33}{\text{Method}}$:- Construct Lagrangian $\mathbf{u}^{\top} \mathbf{X X}^{\top} \mathbf{u}-\lambda \mathbf{u}^{\top} \mathbf{u}$- Set partial derivatives to zero$$\mathbf{X X}^{\top} \mathbf{u}-\lambda \mathbf{u}=\left(\mathbf{X X}^{\top}-\lambda \mathrm{I}\right) \mathbf{u}=\mathbf{0}$$As $\mathbf{u} \neq \mathbf{0}$ then $\mathbf{u}$ must be an eigenvector of $XX^{\top}$ with eigenvalue $\lambda$ How Many Principal Components (PCs) ?Maximum number of PCs:- For $D$ original dimensions, sample covariance matrix is $D \times D$, and has up to $D$ eigenvectors.- Maximum number: $D$ PCs.Interesting number of PCs:- Ignore the components of lesser significance, i.e. small eigenvalues.- Interesting number: $d$ PCs. 
PCA: Two Interpretations$\color{EF5645}{\text{Maximum Variance Direction:}}$ projection captures maximum variance in the data$$\frac{1}{n} \sum_{i=1}^{n}\left(\mathbf{v}^{T} \mathbf{x}_{i}\right)^{2}=\mathbf{v}^{T} \mathbf{X X}^{T} \mathbf{v}$$$\color{EF5645}{\text{Minimum Reconstruction Error:}}$ projection yields minimum mean square error$$\frac{1}{n} \sum_{i=1}^{n}\left\|\mathbf{x}_{i}-\left(\mathbf{v}^{T} \mathbf{x}_{i}\right) \mathbf{v}\right\|^{2}$$ Dimension Reduction Method 1: Tangent Principal Component Analysis Recall: "Trick": Tangent Space at the Fréchet MeanThe Fréchet mean gives us a way of transforming our non-linear data into vectors!1. Compute the Fréchet mean $\bar{x}$ of the data points2. Consider the tangent space $T_\bar{x}M$of the manifold $M$ at $\bar{x}$3. Compute the Logarithms of the data points at $\bar{x}$$\rightarrow$ Get a dataset on a vector space, and apply classical machine learning on it. Tangent Principal Component Analysis= Apply PCA on the tangent space at the Fréchet mean.The next slides illustrate the use of tangent PCA on:- the hyperbolic space (synthetic data)- Kendall shape space (optical nerve head data) On the Hyperbolic Space
from geomstats.geometry.hyperboloid import Hyperboloid from geomstats.learning.frechet_mean import FrechetMean from geomstats.learning.pca import TangentPCA import matplotlib.pyplot as plt import numpy as np import geomstats.visualization as viz
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
1. Set-up- $\color{EF5645}{\text{Decide on the model:}}$ We use tangent PCA- $\color{EF5645}{\text{Decide on a loss function:}}$ Minimize -variance
# Synthetic data hyperbolic_plane = Hyperboloid(dim=2) data = hyperbolic_plane.random_point(n_samples=140) # Set-up mean = FrechetMean(metric=hyperbolic_plane.metric) tpca = TangentPCA(metric=hyperbolic_plane.metric, n_components=2)
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
2. $\color{EF5645}{\text{Split dataset into train / test sets:}}$ - Train $X_1, ..., X_{n_\text{train}}$: build the algorithm - Test $X_{n_\text{train}+1}, ..., X_n$: assess its performances.
from sklearn.model_selection import train_test_split train, test = train_test_split(data) print(train.shape) print(test.shape)
(105, 3) (35, 3)
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
3. $\color{EF5645}{\text{Train:}}$ Build the algorithm
mean.fit(train) mean_estimate = mean.estimate_ tpca = tpca.fit(train, base_point=mean_estimate) tangent_projected_data = tpca.transform(train)
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
4. $\color{EF5645}{\text{Test:}}$ Assess its performances
geodesic_0 = hyperbolic_plane.metric.geodesic( initial_point=mean_estimate, initial_tangent_vec=tpca.components_[0] ) geodesic_1 = hyperbolic_plane.metric.geodesic( initial_point=mean_estimate, initial_tangent_vec=tpca.components_[1] ) n_steps = 100 t = np.linspace(-1, 1, n_steps) geodesic_points_0 = geodesic_0(t) geodesic_points_1 = geodesic_1(t) fig = plt.figure(figsize=(15, 5)) ax_var = fig.add_subplot(121) xticks = np.arange(1, 2 + 1, 1); ax_var.xaxis.set_ticks(xticks) ax_var.set_title("Explained variance"); ax_var.set_xlabel("Number of Principal Components") ax_var.set_ylim((0, 1)) ax_var.plot(xticks, tpca.explained_variance_ratio_) ax = fig.add_subplot(122) viz.plot( mean_estimate, ax, space="H2_poincare_disk", color="darkgreen", s=10 ) viz.plot(geodesic_points_0, ax, space="H2_poincare_disk", linewidth=2) viz.plot(geodesic_points_1, ax, space="H2_poincare_disk", linewidth=2) viz.plot(data, ax, space="H2_poincare_disk", color="black", alpha=0.7) ax.set_aspect("equal") plt.show()
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
On Kendall Shape Spaces
from geomstats.geometry.pre_shape import PreShapeSpace, KendallShapeMetric m_ambient = 3 k_landmarks = 5 preshape = PreShapeSpace(m_ambient=m_ambient, k_landmarks=k_landmarks) matrices_metric = preshape.embedding_metric nerves_preshape = preshape.projection(nerves) print(nerves_preshape.shape) print(preshape.belongs(nerves_preshape)) print(np.isclose(matrices_metric.norm(nerves_preshape), 1.0)) base_point = nerves_preshape[0] nerves_shape = preshape.align(point=nerves_preshape, base_point=base_point)
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
1. Set-up- $\color{EF5645}{\text{Decide on the model:}}$ We use tangent PCA- $\color{EF5645}{\text{Decide on a loss function:}}$ Minimize -variance
kendall_metric = KendallShapeMetric(m_ambient=m_ambient, k_landmarks=k_landmarks) tpca = TangentPCA(kendall_metric)
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
2. $\color{EF5645}{\text{Split dataset into train / test sets:}}$ - Train $X_1, ..., X_{n_\text{train}}$: build the algorithm - Test $X_{n_\text{train}+1}, ..., X_n$: assess its performances.
from sklearn.model_selection import train_test_split train_nerves_shape = nerves_shape[:18] test_nerves_shape = nerves_shape[18:] print(train_nerves_shape.shape) print(test_nerves_shape.shape)
(18, 5, 3) (4, 5, 3)
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
3. $\color{EF5645}{\text{Train:}}$ Build the algorithm
tpca.fit(train_nerves_shape) plt.plot(tpca.explained_variance_ratio_) plt.xlabel("Number of principal tangent components", size=14) plt.ylabel("Fraction of explained variance", size=14);
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
Two principal components describe around 60% of the variance. We plot the data projected in the tangent space defined by these two principal components. 4. $\color{EF5645}{\text{Test:}}$ Assess its performances- We project the whole dataset on the principal components.
X = tpca.transform(nerves_shape) plt.figure(figsize=(11, 11)) for label, col in label_to_color.items(): mask = labels == label plt.scatter(X[mask, 0], X[mask, 1], color=col, s=100, label=label_to_str[label]) plt.legend(fontsize=14) for label, x, y in zip(monkeys, X[:, 0], X[:, 1]): plt.annotate(label, xy=(x, y), xytext=(-20, 20), textcoords="offset points", ha="right", va="bottom", bbox=dict(boxstyle="round,pad=0.5", fc="white", alpha=0.5), arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0")) plt.show()
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
Dimension Reduction Method 2: Principal Geodesic Analysis - Variance. Following the work of Fréchet, we define the sample variance of the data as the expected value of the squared Riemannian distance from the mean.- Geodesic subspaces. The lower-dimensional subspaces in PCA are linear subspaces. For general manifolds we extend the concept of a linear subspace to that of a geodesic submanifold.- Projection. In PCA the data is projected onto linear subspaces. We define a projection operator for geodesic submanifolds, and show how it may be efficiently approximated. $\color{EF5645}{\text{Principal Geodesic Analysis (PGA)}}$ is an:- $\color{EF5645}{\text{orthogonal projection}}$ of the data- into a (lower dimensional) $\color{EF5645}{\text{geodesic subspace}}$, - so that the variance of the projected data is maximized. Geodesic Subspace$\color{EF5645}{\text{A submanifold $N$ of a manifold $M$}}$ is a subset of $M$ that is also a manifold.In general, if $N$ is a submanifold of a manifold $M$, geodesics of $N$ are not necessarily geodesics of $M$.- $\color{047C91}{\text{Example}}$: $S^2$ as a submanifold of $\mathbb{R}^3$. $\color{EF5645}{\text{A submanifold $H$ of $M$ is said to be geodesic at $x \in H$}}$ if all geodesics of $H$ _passing through $x$_ are also geodesics of $M$. - $\color{047C91}{\text{Example}}$: A linear subspace of $\mathbb{R}^{D}$ is a submanifold geodesic at 0.$\color{EF5645}{\text{Remark}}$: Submanifolds geodesic at $x$ preserve distances to $x.$ This is an essential property for PGA because variance is defined as the average squared distance to the mean. Thus submanifolds geodesic at the mean will be the generalizations of the linear subspaces of PCA. Projection$\color{EF5645}{\text{The projection of a point $x \in M$}}$ onto a geodesic submanifold $H$ of $M$ is defined as the point on $H$ that is nearest to $x$ in Riemannian distance. 
Thus we define the projection operator $\pi_{H}: M \rightarrow H$ as$$\pi_{H}(x)=\underset{y \in H}{\arg \min } d(x, y)^{2}$$ PGA: Mathematical NotationsThe principal geodesic submanifolds are the images of the $V_{k}$ under the exponential map: $H_{k}=\operatorname{Exp}_{\mu}\left(V_{k}\right)$. The first principal direction is chosen to maximize the projected variance along the corresponding geodesic:$$v_{1}=\underset{\|v\|=1}{\arg \max } \sum_{i=1}^{n}\left\|\log _{\mu}\left(\pi_{H}\left(x_{i}\right)\right)\right\|^{2},$$where $H=\operatorname{Exp}_{\mu}(\operatorname{span}(\{v\}) \cap U)$.The remaining principal directions are then defined recursively as$$\begin{aligned}&v_{k}=\underset{\|v\|=1}{\arg \max } \sum_{i=1}^{n}\left\|\log _{\mu}\left(\pi_{H}\left(x_{i}\right)\right)\right\|^{2} \\&\text { where } H=\operatorname{Exp}_{\mu}\left(\operatorname{span}\left(\left\{v_{1}, \ldots, v_{k-1}, v\right\}\right) \cap U\right) .\end{aligned}$$
- The subspace $V_{k}=\operatorname{span}\left(\left\{v_{1}, \ldots, v_{k}\right\}\right)$ is: - the $k$-dimensional subspace - that maximizes the variance - of the data projected to that subspace: $\pi_{V_1}(x_i) = v \cdot x_{i}$
_____no_output_____
MIT
lectures/03_e_dimension_reduction.ipynb
bioshape-lab/ece594n
Label Detection. Face Detection and Comparison, Celebrity Recognition, Image moderation, Text in image detection
# Amazon Rekognition walkthrough: label detection, text detection, face
# analysis, celebrity recognition and face comparison via boto3, using
# locally stored image files.
import cv2
import boto3
import numpy as np
import os
import matplotlib.pyplot as plt


# --- Display helpers --------------------------------------------------------

def show_image(filename):
    """Display an image exactly as OpenCV loads it (BGR channel order)."""
    img = cv2.imread(filename)
    plt.imshow(img)
    plt.show()


def show_image_rgb(filename):
    """Display an image after converting OpenCV's BGR channels to RGB."""
    img = cv2.imread(filename)
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.show()


def image_encoder(image_array):
    """JPEG-encode a numpy image and wrap the bytes as Rekognition expects."""
    #image_uint8 = image_array.astype(np.uint8)
    ok, buf = cv2.imencode('.jpg', image_array)
    return {'Bytes': buf.tobytes()}


# --- Rekognition calls ------------------------------------------------------

def detect_image_entities(filename):
    """Run Rekognition label detection on a local image file."""
    rekognition = boto3.client('rekognition')
    encoded = image_encoder(cv2.imread(filename))
    resp = rekognition.detect_labels(Image=encoded)
    return resp['Labels']


filename = 'new-york-city.jpg'
show_image(filename)
detect_image_entities(filename)


def detect_image_text(filename):
    """Run Rekognition text-in-image detection on a local image file."""
    rekognition = boto3.client('rekognition')
    encoded = image_encoder(cv2.imread(filename))
    resp = rekognition.detect_text(Image=encoded)
    return resp['TextDetections']


filename = 'innovation.jpg'
show_image(filename)
detect_image_text(filename)


def analyze_face(filename):
    """Run Rekognition face analysis requesting the full attribute set."""
    rekognition = boto3.client('rekognition')
    encoded = image_encoder(cv2.imread(filename))
    resp = rekognition.detect_faces(Image=encoded, Attributes=['ALL'])
    return resp['FaceDetails']


filename = 'harry_megan.JPG'
show_image_rgb(filename)
analyze_face(filename)


def detect_celebrity(filename):
    """Run Rekognition celebrity recognition on a local image file."""
    rekognition = boto3.client('rekognition')
    encoded = image_encoder(cv2.imread(filename))
    resp = rekognition.recognize_celebrities(Image=encoded)
    return resp['CelebrityFaces']


filename = 'elon.jpg'
show_image_rgb(filename)
detect_celebrity(filename)


def compare_faces(filename1, filename2):
    """Compare faces found in two local image files.

    Returns the (source face, matches, unmatched faces) triple from the API.
    """
    rekognition = boto3.client('rekognition')
    encoded1 = image_encoder(cv2.imread(filename1))
    encoded2 = image_encoder(cv2.imread(filename2))
    resp = rekognition.compare_faces(SourceImage=encoded1,
                                     TargetImage=encoded2)
    return resp['SourceImageFace'], resp['FaceMatches'], resp['UnmatchedFaces']


filename1 = 'obama1.jpg'
filename2 = 'obama2.jpg'
show_image_rgb(filename1)
show_image_rgb(filename2)
compare_faces(filename1, filename2)
_____no_output_____
MIT
Rekognition.ipynb
jsalomon-mdsol/medihack-aws-code
In this notebook I'm generating the movements and the state variables (torques and angles) needed to reproduce these movements using the 2-DOF simulator. Some of the algorithms (or inspiration) used to simulate the 2-DOF arm came from: http://www.gribblelab.org/compneuro/ Here starts the 2 joint arm study Main functions to the 2 joint arm simulation
# Makes possible to show the output from matplotlib inline %matplotlib inline import matplotlib.pyplot as plt # Makes the figures in the PNG format: # For more information see %config InlineBackend %config InlineBackend.figure_formats=set([u'png']) plt.rcParams['figure.figsize'] = 20, 10 import numpy import sys import save_load_file as slf # Loads the modules and starts the object to be used with the parallel processing iPython stuff... # Remember to execute at the shell: ipcluster start -n 4 # or from the iPython notebook interface! # from IPython.parallel import Client from ipyparallel import Client # When using the ipython in my desktop, launch the cluster in the right profile :) cli = Client() # lbview = cli.load_balanced_view() dview = cli[:] %%file simulation_2DoF_Arm_physics.py # 2 DOF Simulator module # Strongly based on http://www.gribblelab.org/compneuro/index.html import numpy # forward kinematics def joints_to_hand(A,aparams): """ Given joint angles A=(a1,a2) and anthropometric params aparams, returns hand position H=(hx,hy) and elbow position E=(ex,ey) Note1: A must be type matrix (or array([[a1,a2],...])) Note2: If A has multiple lines, H and E will have the same number of lines. """ l1 = aparams['l1'] l2 = aparams['l2'] n = numpy.shape(A)[0] E = numpy.zeros((n,2)) H = numpy.zeros((n,2)) for i in range(n): E[i,0] = l1 * numpy.cos(A[i,0]) E[i,1] = l1 * numpy.sin(A[i,0]) H[i,0] = E[i,0] + (l2 * numpy.cos(A[i,0]+A[i,1])) H[i,1] = E[i,1] + (l2 * numpy.sin(A[i,0]+A[i,1])) return H,E # I could do the inverse kinematics using all the possible values (workspace) of the arm and creating a numpy array. Then I # could use the argmin trick to find the value. # In order to solve the problem when multiple solutions appear I could use the minimum jerk criterion. The user should enter # the actual position and the next one, then the system solves according to the one that uses mininum energy. 
# One curious thing about inverse kinematics is that as human beings we cannot do a inverse kinematic of our hand position # without taking in account the actual position. The function, below, doesn't care about the actual position, and that is why # more than one solution appears. # So, I don't think the brain solves the problem of multiple solutions. Who solves this problem is the morphology of the limbs. # It is impossible to change trajectories instantaneously, therefore the continuity of the movements is guaranteed. # Summary: there are no positions, but trajectories :) # inverse kinematics def hand_to_joints(H,aparams): """ Given hand position H=(hx,hy) and anthropometric params aparams, returns joint angles A=(a1,a2) Note1: H must be type matrix (or array([[hx,hy],...])) Note2: If H has multiple lines, A will have the same number of lines. """ l1 = aparams['l1'] l2 = aparams['l2'] n = numpy.shape(H)[0] A = numpy.zeros((n,2)) for i in range(n): A[i,1] = numpy.arccos(((H[i,0]*H[i,0])+(H[i,1]*H[i,1])-(l1*l1)-(l2*l2))/(2.0*l1*l2)) A[i,0] = numpy.arctan2(H[i,1],H[i,0]) - numpy.arctan2((l2*numpy.sin(A[i,1])),(l1+(l2*numpy.cos(A[i,1])))) # if A[i,0] < 0: # print "<0:",A[i,0] # A[i,0] = A[i,0] + pi # elif A[i,0] > pi: # print ">0:",A[i,0] # A[i,0] = A[i,0] - pi return A # inverse kinematics def hand_to_joints(H,aparams,ang_error=0.01): """ Given hand position H=(hx,hy) and anthropometric params aparams, returns joint angles A=(a1,a2) Note1: H must be type matrix (or array([[hx,hy],...])) Note2: If H has multiple lines, A will have the same number of lines. 
""" l1 = aparams['l1'] l2 = aparams['l2'] n = numpy.shape(H)[0] A = numpy.zeros((n,2)) t_bias=[0,0] for i in range(n): A[i,1] = numpy.arccos(((H[i,0]*H[i,0])+(H[i,1]*H[i,1])-(l1*l1)-(l2*l2))/(2.0*l1*l2)) + t_bias[1] A[i,0] = numpy.arctan2(H[i,1],H[i,0]) - numpy.arctan2((l2*numpy.sin(A[i,1])),(l1+(l2*numpy.cos(A[i,1])))) + t_bias[0] if i>0: # Here I'm trying to avoid descontinuity problems when there's a 2pi difference between them! if 0<=abs(abs((A[i,1]-A[i-1,1])/numpy.pi)-2)<=ang_error: print "Correction on Joint 2:",(A[i,1],A[i-1,1]) if (A[i,1]-A[i-1,1])>0: A[i,1]-=2*numpy.pi t_bias[1]-=2*numpy.pi else: A[i,1]+=2*numpy.pi t_bias[1]+=2*numpy.pi if 0<=abs(abs((A[i,0]-A[i-1,0])/numpy.pi)-2)<=ang_error: print "Correction on Joint 1:",(A[i,0],A[i-1,0]) if (A[i,0]-A[i-1,0])>0: A[i,0]-=2*numpy.pi t_bias[0]-=2*numpy.pi else: A[i,0]+=2*numpy.pi t_bias[0]+=2*numpy.pi return A # Generates the movements according to: # Flash, Tamar and Neville Hogan. 1985. The Coordination of Arm Movements: An Experimentally Confirmed Mathematical Model. The Journal of Neuroscience 5 (7): 1688-1703 def cartesian_movement_generation_training(xstart,ystart,xdest,ydest,MT,t): ''' xstart,ystart: initial position of the trajectory xdest,ydest: final position of the trajectory MT: total time spent doing the trajectory t: current time returns a matrix: [[x0,y0],[x1,y1],...] 
''' x_t=xstart+(xstart-xdest)*(15*(t/MT)**4-6*(t/MT)**5-10*(t/MT)**3) y_t=ystart+(ystart-ydest)*(15*(t/MT)**4-6*(t/MT)**5-10*(t/MT)**3) return numpy.array([x_t,y_t]).T # Used to generate the velocities and the accelerations using the position and time vectors def derivator(v,t): return numpy.array([(v[i+1]-v[i])/(t[i+1]-t[i]) for i in range(len(t)-1)]) def twojointarm_torques(state, t, aparams): """ Calculates the necessaries torques to generate the accelerations """ import numpy a1,a2,a1d,a2d,a1dd,a2dd = state # joint_angle_a1,joint_angle_a2,joint_vel_a1,joint_vel_a2,joint_acc_a1,joint_acc_a2 l1,l2 = aparams['l1'], aparams['l2'] # lenght link 1 and 2 m1,m2 = aparams['m1'], aparams['m2'] # mass link 1 and 2 i1,i2 = aparams['i1'], aparams['i2'] # moment of inertia link 1 and 2 lc1,lc2 = aparams['lc1'], aparams['lc2'] # distance to the center of mass of link 1 and 2 M11 = i1 + i2 + (m1*lc1*lc1) + (m2*((l1*l1) + (lc2*lc2) + (2*l1*lc2*numpy.cos(a2)))) M12 = i2 + (m2*((lc2*lc2) + (l1*lc2*numpy.cos(a2)))) M21 = M12 M22 = i2 + (m2*lc2*lc2) M = numpy.matrix([[M11,M12],[M21,M22]]) # H matrix C1 = -(m2*l1*a2d*a2d*lc2*numpy.sin(a2)) - (2*m2*l1*a1d*a2d*lc2*numpy.sin(a2)) C2 = m2*l1*a1d*a1d*lc2*numpy.sin(a2) C = numpy.matrix([[C1],[C2]]) ACC = numpy.array([[a1dd],[a2dd]]) T = M*ACC + C return numpy.array([T[0,0],T[1,0]]) # forward dynamics equations of our two-joint arm def twojointarm(state, t, aparams, torque): import numpy """ two-joint arm in plane X is fwd(+) and back(-) Y is up(+) and down(-) shoulder angle a1 relative to Y vert, +ve counter-clockwise elbow angle a2 relative to upper arm, +ve counter-clockwise """ a1,a2,a1d,a2d = state # joint_angle_a1, joint_angle_a2, joint_velocity_a1, joint_velocity_a2 l1,l2 = aparams['l1'], aparams['l2'] # lenght link 1 and 2 m1,m2 = aparams['m1'], aparams['m2'] # mass link 1 and 2 i1,i2 = aparams['i1'], aparams['i2'] # moment of inertia link 1 and 2 lc1,lc2 = aparams['lc1'], aparams['lc2'] # distance to the center of mass of link 1 
and 2 M11 = i1 + i2 + (m1*lc1*lc1) + (m2*((l1*l1) + (lc2*lc2) + (2*l1*lc2*numpy.cos(a2)))) M12 = i2 + (m2*((lc2*lc2) + (l1*lc2*numpy.cos(a2)))) M21 = M12 M22 = i2 + (m2*lc2*lc2) M = numpy.matrix([[M11,M12],[M21,M22]]) # H matrix C1 = -(m2*l1*a2d*a2d*lc2*numpy.sin(a2)) - (2*m2*l1*a1d*a2d*lc2*numpy.sin(a2)) C2 = m2*l1*a1d*a1d*lc2*numpy.sin(a2) C = numpy.matrix([[C1],[C2]]) T = numpy.matrix([[torque[0]],[torque[1]]]) ACC = numpy.linalg.inv(M) * (T-C) # calculates the accelerations of joints 1 and 2 a1dd,a2dd = ACC[0,0], ACC[1,0] return [a1d, a2d, a1dd, a2dd] # It returns the first and second derivatives of the joints def animatearm(state,t,aparams): """ animate the twojointarm """ import matplotlib.pyplot as plt import numpy import time A = state[:,[0,1]] # Gets the angles a1 and a2 from the states matrix A[:,0] = A[:,0] H,E = joints_to_hand(A,aparams) l1,l2 = aparams['l1'], aparams['l2'] plt.figure() plt.plot(0,0,'b.') plt.plot(H[:,0],H[:,1],'g.-'); p1, = plt.plot(E[0,0],E[0,1],'b.') p2, = plt.plot(H[0,0],H[0,1],'b.') p3, = plt.plot((0,E[0,0],H[0,0]),(0,E[0,1],H[0,1]),'b-') plt.xlim([-l1-l2, l1+l2]) plt.ylim([-l1-l2, l1+l2]) dt = t[1]-t[0] tt = plt.title("Click on this plot to continue...") plt.ginput(1) for i in xrange(0,numpy.shape(state)[0]): time.sleep(0.05) p1.set_xdata((E[i,0])) p1.set_ydata((E[i,1])) p2.set_xdata((H[i,0])) p2.set_ydata((H[i,1])) p3.set_xdata((0,E[i,0],H[i,0])) p3.set_ydata((0,E[i,1],H[i,1])) tt.set_text("Current time:%4.2f sec - click to next slide!" % (i*dt)) plt.draw() tt.set_text("Current time:%4.2f sec - finished!" 
% ((numpy.shape(state)[0]-1)*dt)) plt.draw() def animatearm_JS(state,t,aparams): """ animate the twojointarm """ import matplotlib.pyplot as plt import numpy from JSAnimation import IPython_display from matplotlib import animation A = state[:,[0,1]] # Gets the angles a1 and a2 from the states matrix A[:,0] = A[:,0] H,E = joints_to_hand(A,aparams) l1,l2 = aparams['l1'], aparams['l2'] # Set up the axes, making sure the axis ratio is equal # ax = fig.add_axes([0, 0, 1, 1], xlim=(-0.02, 13.02), ylim=(-0.02, 5.02), # xticks=range(14), yticks=range(6), aspect='equal', frameon=False) fig = plt.figure(figsize=(6, 6),dpi=100) ax = plt.axes(xlim=(-1, 1), ylim=(-1, 1), aspect='equal') ax.plot(0,0,'b.') ax.plot(H[:,0],H[:,1],'g.-'); p1, = ax.plot(E[0,0],E[0,1],'b.') p2, = ax.plot(H[0,0],H[0,1],'b.') p3, = ax.plot((0,E[0,0],H[0,0]),(0,E[0,1],H[0,1]),'b-') def init(): p1.set_data([],[]) p2.set_data([],[]) p3.set_data([],[]) return p1,p2,p3 def animate(i): p1.set_data([E[i,0]],[E[i,1]]) p2.set_data(H[i,0],H[i,1]) p3.set_data((0,E[i,0],H[i,0]),(0,E[i,1],H[i,1])) return p1,p2,p3 anim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(E[:,0]), interval=20, blit=True) return anim # In order to make the JSAnimation to work it is necessary that the function returns the animation object! def odeint_arms(twojointarm, state, t, aparams, torque): ''' twojointarm: function object. 
Must receive (state,t,aparams,torque) and return [a1d,a2d,a1dd,a2dd] state: current states => [a1(t),a2(t),a1d(t),a2d(t)] t: array([t,t+1]) => current time step and next (t+1) returns next states [a1(t+1),a2(t+1),a1d(t+1),a2d(t+1)] ''' from scipy.integrate import odeint return odeint(twojointarm, state, t, args=(aparams,torque)) def moving_average (values,window=6): weights = numpy.repeat(1.0, window)/window sma = numpy.convolve(numpy.concatenate((numpy.zeros(int((window-1)/2.0)),values,numpy.zeros((window-1)-int((window-1)/2.0)))), weights, 'valid') # I should try the function numpy.lib.pad instead of concatenating manually return sma def moving_average (values, window=6): weights = numpy.repeat(1.0, window)/window sma = numpy.convolve(values, weights, 'valid') # I should try the function numpy.lib.pad instead of concatenating manually return numpy.lib.pad(sma, (int((window-1)/2.0),(window-1)-int((window-1)/2.0)), 'edge') from simulation_2DoF_Arm_physics import * @dview.parallel(block=True) def generate_trajectories(sim_inputs): import numpy import sys import save_load_file as slf import simulation_2DoF_Arm_physics reload(sys.modules['simulation_2DoF_Arm_physics']) # Makes sure the interpreter is going to reload the module s2ap = simulation_2DoF_Arm_physics tji,positions,sim_params = sim_inputs xstart,ystart = positions[0] xdest,ydest = positions[1] sim_set,base_dir,MT,time_step,Ninput,aparams = sim_params t_mov=numpy.arange(0, MT, time_step) # t starts in 0s and steps time_step(s) until reaches MT(s) # Generates the movements according to: # Flash, Tamar, and Neville Hogan. 1985 H_path=s2ap.cartesian_movement_generation_training(xstart, ystart, xdest, ydest, MT, t_mov) # These are the values teta1 and teta2 can have because the system limits the resolution. # According to Joshi/Maass paper there are 50 neurons to code the positions of each variable. 
# # teta1=numpy.linspace(-numpy.pi/6,numpy.pi,num=Ninput) teta2=numpy.linspace(0,numpy.pi,num=Ninput) teta1_teta2 = numpy.array([teta1,teta2]).T # This is the matrix to use with the function # to generate the x,y values of the workspace # Joint's workspace: all the possible combinations between teta1 and teta2. teta_workspace = numpy.array([[t1,t2] for t1 in teta1 for t2 in teta2]) # Arm's workspace: x,y points that the arm (endpoint) can reach H_workspace = s2ap.joints_to_hand(teta_workspace,aparams)[0] # I'm getting the first because it returns the elbow's positions too. # Generate the joint's positions according to the ORIGINAL (X,Y) values. # I'm using the traditional geometric way to do the inverse kinematics here. I need to # implement the minimum jerk way to generate the joint's positions taking into account the movement's dynamics. Joints=s2ap.hand_to_joints(H_path, aparams,ang_error=0.1) # Here I'm extending the Joints matrix because I need two extra positions to calculate the accelerations. # Consequently, because the trajectory always finish with velocity ZERO, keeping the same position seems a good choice. Joints_extended=numpy.concatenate((Joints,[Joints[-1],Joints[-1]])) # But the time array (t_mov) must be extended too: t_mov_extended=numpy.concatenate((t_mov,[t_mov[-1]+time_step],[t_mov[-1]+2*time_step])) # Joint's velocities teta1_d=s2ap.derivator(Joints_extended[:,0],t_mov_extended) teta2_d=s2ap.derivator(Joints_extended[:,1],t_mov_extended) # Joint's accelerations teta1_dd=s2ap.derivator(teta1_d,t_mov_extended[:251]) teta2_dd=s2ap.derivator(teta2_d,t_mov_extended[:251]) # # WITH ORIGINAL JOINT'S VELOCITIES # # And generates a matrix with [teta1,teta2,teta1d,teta2d,teta1dd,teta2dd] # Using this matrix I will generate the torques. 
states_mov = numpy.array([[Joints[:,0][i],Joints[:,1][i],teta1_d[i],teta2_d[i],teta1_dd[i],teta2_dd[i]] for i in range(len(Joints[:,0]))]) # Applying the function to all lines of the states_mov matrix, I generate the torques matrix T_mov=numpy.array([s2ap.twojointarm_torques(states_mov[i], t_mov, aparams) for i in range(numpy.shape(states_mov)[0])]) # Here I calculate the states using the calculated torques, just to make sure it is working! state = states_mov[0,[0,1,2,3]] # This is the initial state. The rest of the states will be generated dynamically # according to the input torques. state_v2 = [state] t = t_mov torque = T_mov for i in range(len(t)-1): print "state:",state state = s2ap.odeint_arms(s2ap.twojointarm, state, [t[i], t[i+1]], aparams, torque[i])[1] state_v2.append(state) state_v2=numpy.array(state_v2) # slf.save_to_file_gz([numpy.array(state_v2),numpy.array(torque)],"./"+base_dir+"/"+sim_set+"/States_Torques_movement"+str(tji)+".gzpickle") return tji
_____no_output_____
MIT
2DofArm_simulation_data_generator_and_physics.ipynb
ricardodeazambuja/IJCNN2017
End of the main functions! Adjusting the parameters:
# Experiment identifier sim_sets = ["set_A", "set_B", "set_C", "set_D"] sim_set = sim_sets[0] # Base dir to save / access base_dir = "2DofArm_simulation_data" # List with all trajectories to be generated # [[[start_x,start_y],[final_x,final_y]],...] trajectories = [[[0.75,0.25],[0.0,0.5]], [[0.25,0.60],[-0.25,0.60]], [[-0.10,0.75],[-0.10,0.25]],[[-0.75,0.50],[-0.40,0.00]]] # The values below must match the ones used with the SNN simulation: # Total time spent during the movement (in seconds) MT = 0.5 # Simulation time step (in seconds) time_step = 2/1000.0 # Number of neurons at the input layer (defines the resolution of the system) Ninput = 50 # Arm parametres used with the 2 dof arm simulator # (according to Joshi/Maass 2006 paper) aparams = { 'l1' : 0.5, # metres 'l2' : 0.5, 'lc1' : 0.25, 'lc2' : 0.25, 'm1' : 1.0, # kg 'm2' : 1.0, 'i1' : 0.03, # kg*m*m 'i2' : 0.03 } # Variable only used to pass the parameters sim_params = sim_set,base_dir,MT,time_step,Ninput,aparams %time results = generate_trajectories.map([(tji,positions,sim_params) for tji,positions in zip(range(1,len(trajectories)+1),trajectories)])
CPU times: user 15.6 ms, sys: 6.15 ms, total: 21.7 ms Wall time: 1.69 s
MIT
2DofArm_simulation_data_generator_and_physics.ipynb
ricardodeazambuja/IJCNN2017
Plotly - Create Waterfall chart (Advanced) **Tags:** plotly chart waterfall dataviz Input Install packages
# Install the numerics / plotting packages used by the cells below
!pip install numpy
!pip install matplotlib
_____no_output_____
BSD-3-Clause
Plotly/Create Waterfall chart (Advanced).ipynb
Charles-de-Montigny/awesome-notebooks
Import library
# Numerics and plotting stack for the waterfall chart; FuncFormatter is used
# to render the y-axis ticks as currency.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
_____no_output_____
BSD-3-Clause
Plotly/Create Waterfall chart (Advanced).ipynb
Charles-de-Montigny/awesome-notebooks
Model Create the waterfall chart
#Use python 2.7+ syntax to format currency
def money(x, pos):
    """Format a tick value as whole dollars.

    The two args (value, tick position) are the signature FuncFormatter
    requires; pos is unused.
    """
    return "${:,.0f}".format(x)

formatter = FuncFormatter(money)

#Data to plot. Do not include a total, it will be calculated
index = ['sales','returns','credit fees','rebates','late charges','shipping']
data = {'amount': [350000,-30000,-7500,-25000,95000,-7000]}

#Store data and create a blank series to use for the waterfall
trans = pd.DataFrame(data=data,index=index)
blank = trans.amount.cumsum().shift(1).fillna(0)

#Get the net total number for the final element in the waterfall
total = trans.sum().amount
trans.loc["net"] = total
blank.loc["net"] = total

#The steps graphically show the levels as well as used for label placement
step = blank.reset_index(drop=True).repeat(3).shift(-1)
step[1::3] = np.nan

#When plotting the last element, we want to show the full bar,
#so set the blank to 0
blank.loc["net"] = 0

#Plot and label
my_plot = trans.plot(kind='bar', stacked=True, bottom=blank, legend=None,
                     figsize=(10, 5), title="2014 Sales Waterfall")
my_plot.plot(step.index, step.values, 'k')
my_plot.set_xlabel("Transaction Types")

#Format the axis for dollars
my_plot.yaxis.set_major_formatter(formatter)

#Get the y-axis position for the labels
y_height = trans.amount.cumsum().shift(1).fillna(0)

#Offsets so labels don't sit right on top of the bar.
#FIX: the original assigned `max = trans.max()`, shadowing the builtin `max`
#and making every offset a 1-element pandas Series; use the scalar maximum of
#the amount column instead (same numeric value).
amount_max = trans['amount'].max()
neg_offset = amount_max / 25
pos_offset = amount_max / 50
plot_offset = int(amount_max / 15)

#Label loop. enumerate() replaces the manual `loop` counter, the row label no
#longer shadows the `index` list above, and .iloc makes the positional lookup
#on the string-indexed Series explicit.
for loop, (label, row) in enumerate(trans.iterrows()):
    # For the last item in the list, we don't want to double count
    if row['amount'] == total:
        y = y_height.iloc[loop]
    else:
        y = y_height.iloc[loop] + row['amount']
    # Determine if we want a neg or pos offset
    if row['amount'] > 0:
        y += pos_offset
    else:
        y -= neg_offset
    my_plot.annotate("{:,.0f}".format(row['amount']), (loop, y), ha="center")
_____no_output_____
BSD-3-Clause
Plotly/Create Waterfall chart (Advanced).ipynb
Charles-de-Montigny/awesome-notebooks
Output Display result
#Scale up the y axis so there is room for the labels my_plot.set_ylim(0,blank.max()+int(plot_offset)) #Rotate the labels my_plot.set_xticklabels(trans.index,rotation=0) my_plot.get_figure().savefig("waterfall.png",dpi=200,bbox_inches='tight')
_____no_output_____
BSD-3-Clause
Plotly/Create Waterfall chart (Advanced).ipynb
Charles-de-Montigny/awesome-notebooks
State feedback control for the mass-spring-damper systemGiven the mass-spring-damper system, we want to control it in order to have a step response with zero error at steady state and a settling time for the 5% tolerance band of less than 6 s.The system's equations written in state space form are:$$\begin{bmatrix}\dot{x_1} \\\dot{x_2}\end{bmatrix}=\underbrace{\begin{bmatrix}0 && 1 \\-\frac{k}{m} && -\frac{c}{m}\end{bmatrix}}_{A}\begin{bmatrix}x_1 \\x_2\end{bmatrix}+\underbrace{\begin{bmatrix}0 \\\frac{1}{m}\end{bmatrix}}_{B}u,$$with $m=5$ kg, $k=2$ N/m, $c=1$ Ns/m, $x_1$ representing the position and $x_2$ the velocity. By defining the gain matrix of the state feedback as $K=\begin{bmatrix}k_1&k_2\end{bmatrix}$ and substituting it in $A-BK$ we obtain:$$A-BK = \begin{bmatrix}0&1\\-\frac{2}{5}-\frac{k_1}{5}&-\frac{1}{5}-\frac{k_2}{5}\end{bmatrix}\,.$$Note that the system is in canonical controllability form, the characteristic polynomial is$$\lambda^2+(\frac{k_2}{5}+\frac{1}{5})\lambda+(\frac{k_1}{5}+\frac{2}{5})$$and imposing the roots to be equal to $\lambda_{1,2}=-1$ rad/s $\left((\lambda+1)^2=\lambda^2+2\lambda+1\right)$ we find the values $k_1 = 3$ and $k_2=9$.In order to reach zero steady-state error, it is possible to simply adjust the closed-loop gain: we multiply the reference input $u_{ref}$ by the inverse of the closed-loop gain to have the closed-loop transfer function staying at $0$ dB at low frequencies.The static gain is calculated as $G(0)=C(-A+BK)^{-1}B$ with $C=\begin{bmatrix}1&0\end{bmatrix}$.The final controlled system, that is still SISO from the input $u_{ref}$ to the position $x_1$, is:$$\begin{cases}\begin{bmatrix}\dot{x_1} \\\dot{x_2}\end{bmatrix}=\underbrace{\begin{bmatrix}0 && 1 \\-1 && -2\end{bmatrix}}_{A-BK}\begin{bmatrix}x_1 \\x_2\end{bmatrix}+\underbrace{\begin{bmatrix}0 \\\frac{1}{5}\end{bmatrix}}_{B}\frac{1}{0.2}u_{\text{ref}} \\y = \begin{bmatrix}1&0\end{bmatrix}\begin{bmatrix}x_1 \\x_2\end{bmatrix}\end{cases}$$ How to use this 
notebook?Try to change the eigenvalues and adjust the reference signal gain to achieve zero steady-state error.
%matplotlib inline import control as control import numpy import sympy as sym from IPython.display import display, Markdown import ipywidgets as widgets import matplotlib.pyplot as plt #print a matrix latex-like def bmatrix(a): """Returns a LaTeX bmatrix - by Damir Arbula (ICCT project) :a: numpy array :returns: LaTeX bmatrix as a string """ if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{bmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{bmatrix}'] return '\n'.join(rv) # Display formatted matrix: def vmatrix(a): if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{vmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{vmatrix}'] return '\n'.join(rv) #matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value ! 
class matrixWidget(widgets.VBox):
    """A matrix-shaped input widget: a VBox of HBoxes of FloatText cells.

    The n x m cell values are mirrored into the numpy matrix ``self.M_`` and
    published through ``self.value``, so the widget can be used wherever a
    numpy matrix value is expected.
    """

    def updateM(self,change):
        # Callback fired whenever any cell changes: copy every cell back into
        # M_ and republish the matrix as the widget's value.
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.M_[irow,icol] = self.children[irow].children[icol].value
                #print(self.M_[irow,icol])
        self.value = self.M_

    def dummychangecallback(self,change):
        # Intentionally empty placeholder callback.
        pass

    def __init__(self,n,m):
        # Build n rows x m columns of FloatText cells, all initialised to 0.0.
        self.n = n
        self.m = m
        self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
        self.value = self.M_
        widgets.VBox.__init__(self,
                              children = [
                                  widgets.HBox(children =
                                               [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
                                               )
                                  for j in range(n)
                                  ])
        #fill in widgets and tell interact to call updateM each time a children changes value
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
                self.children[irow].children[icol].observe(self.updateM, names='value')
        #value = Unicode('example@example.com', help="The email value.").tag(sync=True)
        self.observe(self.updateM, names='value', type= 'All')

    def setM(self, newM):
        # Replace the whole matrix without firing updateM once per cell:
        # disable callbacks, change values, and reenable
        self.unobserve(self.updateM, names='value', type= 'All')
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].unobserve(self.updateM, names='value')
        self.M_ = newM
        self.value = self.M_
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type= 'All')
        #self.children[irow].children[icol].observe(self.updateM, names='value')


# Overloaded class for state space systems that does NOT remove "useless"
# states (what "professor" of automatic control would do this?)
class sss(control.StateSpace):
    """StateSpace subclass that never drops 'useless' states on construction."""

    def __init__(self, *args):
        # call base class init constructor
        control.StateSpace.__init__(self, *args)

    # disable function below in base class
    def _remove_useless_states(self):
        pass


# Preparatory cell: default mass-spring-damper model (m=5, k=2, c=1)
# in state-space form, plus the editable widget mirrors of each matrix.
A = numpy.matrix([[0, 1], [-2/5, -1/5]])
B = numpy.matrix('0; 1')
C = numpy.matrix('1 0')
X0 = numpy.matrix('0; 0')
K = numpy.matrix([3, 9])

Aw = matrixWidget(2, 2)
Aw.setM(A)
Bw = matrixWidget(2, 1)
Bw.setM(B)
Cw = matrixWidget(1, 2)
Cw.setM(C)
X0w = matrixWidget(2, 1)
X0w.setM(X0)
Kw = matrixWidget(1, 2)
Kw.setM(K)

# Desired closed-loop eigenvalues: eig1c is the real one, eig2c holds the
# real/imaginary parts of the complex-conjugate pair.
eig1c = matrixWidget(1, 1)
eig2c = matrixWidget(2, 1)
eig1c.setM(numpy.matrix([-1]))
eig2c.setM(numpy.matrix([[-1], [0]]))


# Misc

#create dummy widget
DW = widgets.FloatText(layout=widgets.Layout(width='0px', height='0px'))

#create button widget
START = widgets.Button(
    description='Test',
    disabled=False,
    button_style='',  # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Test',
    icon='check'
)

def on_start_button_clicked(b):
    # This is a workaround to have interactive_output call the callback:
    # force the value of the dummy widget to change
    if DW.value > 0:
        DW.value = -1
    else:
        DW.value = 1
    pass

START.on_click(on_start_button_clicked)

# Define type of method
selm = widgets.Dropdown(
    options=['Set K', 'Set the eigenvalues'],
    value='Set the eigenvalues',
    description='',
    disabled=False
)

# Define the number of complex eigenvalues for the observer
selc = widgets.Dropdown(
    options=['0 complex eigenvalues', '2 complex eigenvalues'],
    value='0 complex eigenvalues',
    description='Eigenvalues:',
    disabled=False
)

#define type of input
selu = widgets.Dropdown(
    options=['impulse', 'step', 'sinusoid', 'square wave'],
    value='step',
    description='Type of reference:',
    style={'description_width': 'initial'},
    disabled=False
)

# Define the values of the input
u = widgets.FloatSlider(
    value=1,
    min=0,
    max=20.0,
    step=0.1,
    description='Input reference:',
    style={'description_width': 'initial'},
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
period = widgets.FloatSlider(
    value=1,
    min=0.01,
    max=4,
    step=0.01,
    description='Period: ',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.2f',
)
gain = widgets.FloatText(
    value=0.2,
    description='Inverse reference gain:',
    style={'description_width': 'initial'},
    disabled=False
)

# Physical parameters of the mass-spring-damper system.
m = widgets.FloatSlider(
    value=5,
    min=0.1,
    max=10.0,
    step=0.1,
    description='m [kg]:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
k = widgets.FloatSlider(
    value=2,
    min=0,
    max=10.0,
    step=0.1,
    description='k [N/m]:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)
c = widgets.FloatSlider(
    value=1,
    min=0,
    max=10.0,
    step=0.1,
    description='c [Ns/m]:',
    disabled=False,
    continuous_update=False,
    orientation='horizontal',
    readout=True,
    readout_format='.1f',
)


# Support functions

def eigen_choice(selc):
    # Enable/disable the eigenvalue entry widgets to match the dropdown,
    # returning the number of complex eigenvalues requested (0 or 2).
    if selc == '0 complex eigenvalues':
        eig1c.children[0].children[0].disabled = False
        eig2c.children[1].children[0].disabled = True
        eigc = 0
    if selc == '2 complex eigenvalues':
        eig1c.children[0].children[0].disabled = True
        eig2c.children[1].children[0].disabled = False
        eigc = 2
    return eigc

def method_choice(selm):
    # Map the dropdown text to method 1 (direct K) or 2 (pole placement).
    if selm == 'Set K':
        method = 1
        selc.disabled = True
    if selm == 'Set the eigenvalues':
        method = 2
        selc.disabled = False
    return method

def main_callback(m, k, c, gain, X0w, K, eig1c, eig2c, u, period, selm, selc, selu, DW):
    """Rebuild the model from the widget values, close the loop and plot."""
    A, B = numpy.matrix([[0, 1], [-k/m, -c/m]]), numpy.matrix([[0], [1/m]])
    sols = numpy.linalg.eig(A)
    eigc = eigen_choice(selc)
    method = method_choice(selm)

    if method == 1:
        sol = numpy.linalg.eig(A - B*K)
    if method == 2:
        if eigc == 0:
            K = control.acker(A, B, [eig1c[0, 0], eig2c[0, 0]])
            Kw.setM(K)
        if eigc == 2:
            # Bug fix: numpy.complex was a deprecated alias removed from
            # NumPy; the builtin complex() is the correct constructor.
            K = control.acker(A, B, [complex(eig2c[0, 0], eig2c[1, 0]),
                                     complex(eig2c[0, 0], -eig2c[1, 0])])
            Kw.setM(K)
        sol = numpy.linalg.eig(A - B*K)

    # NOTE(review): round() fails with TypeError on complex eigenvalues,
    # which the default (underdamped) parameters produce — confirm.
    print('The system\'s eigenvalues are:', round(sols[0][0], 4), 'and', round(sols[0][1], 4))
    print('The controlled system\'s eigenvalues are:', round(sol[0][0], 4), 'and', round(sol[0][1], 4))

    sys1 = sss(A - B*K, B, C, 0)
    sg = control.evalfr(sys1, 0)  # DC (s=0) gain of the closed loop
    print('The static gain of the controlled system is: %f' % sg)
    if gain != 0:
        # Scale the input matrix by 1/gain so the reference is tracked.
        sys = sss(A - B*K, B*1/gain, C, 0)
    else:
        print('The inverse gain setted is 0 and it is changed to 1')
        sys = sss(A - B*K, B, C, 0)

    T = numpy.linspace(0, 10, 1000)

    # Build the reference signal selected in the dropdown.
    if selu == 'impulse':  # selu
        U = [0 for t in range(0, len(T))]
        U[0] = u
        T, yout, xout = control.forced_response(sys, T, U, X0w)
    if selu == 'step':
        U = [u for t in range(0, len(T))]
        T, yout, xout = control.forced_response(sys, T, U, X0w)
    if selu == 'sinusoid':
        U = u*numpy.sin(2*numpy.pi/period*T)
        T, yout, xout = control.forced_response(sys, T, U, X0w)
    if selu == 'square wave':
        U = u*numpy.sign(numpy.sin(2*numpy.pi/period*T))
        T, yout, xout = control.forced_response(sys, T, U, X0w)

    fig = plt.figure(num='Bode plot', figsize=(16, 10))
    control.bode_plot(sys)
    fig.suptitle('Bode plot', fontsize=16)

    plt.figure(num='Simulation', figsize=(16, 4))
    plt.title('Position input response')
    plt.ylabel('position vs ref')
    plt.plot(T, xout[0], T, U, 'r--')
    plt.xlabel('$t$ [s]')
    plt.axvline(x=0, color='black', linewidth=0.8)
    plt.axhline(y=0, color='black', linewidth=0.8)
    plt.legend(['position', 'Reference'])
    plt.grid()


# Lay out the controls and wire them to main_callback.
alltogether = widgets.VBox([widgets.HBox([selm,
                                          selc,
                                          selu]),
                            widgets.Label(' ', border=3),
                            widgets.HBox([widgets.Label('K:', border=3), Kw,
                                          widgets.Label(' ', border=3),
                                          widgets.Label(' ', border=3),
                                          widgets.Label('Eigenvalues:', border=3),
                                          eig1c,
                                          eig2c,
                                          widgets.Label(' ', border=3),
                                          widgets.Label(' ', border=3),
                                          widgets.Label('X0:', border=3), X0w]),
                            widgets.Label(' ', border=3),
                            widgets.HBox([u,
                                          period,
                                          START]),
                            widgets.Label(' ', border=3),
                            widgets.HBox([m, k, c, gain])])
out = widgets.interactive_output(main_callback, {'m': m, 'k': k, 'c': c, 'gain': gain,
                                                 'X0w': X0w, 'K': Kw,
                                                 'eig1c': eig1c, 'eig2c': eig2c,
                                                 'u': u, 'period': period,
                                                 'selm': selm, 'selc': selc,
                                                 'selu': selu, 'DW': DW})
out.layout.height = '1050px'
display(out, alltogether)
_____no_output_____
BSD-3-Clause
ICCT_en/examples/04/SS-33_State_feedback_control_for_the_mass-spring-damper_system.ipynb
ICCTerasmus/ICCT
Operations for indexing, splitting, slicing and iterating over a dataset
import numpy as np
_____no_output_____
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Indexing
# Load the 24x9 normal-distribution sample as a float ndarray.
dataset = np.genfromtxt('../Datasets/normal_distribution_splittable.csv', delimiter=',')

# Mean of the second row
second_row = dataset[1]
np.mean(second_row)

# Mean of the last row
last_row = dataset[-1]
np.mean(last_row)

# Mean of the first value of the first row.
# Consistency fix: use tuple indexing (dataset[0, 0]) like the cell below
# instead of chained indexing dataset[0][0], which first materialises row 0.
first_val_first_row = dataset[0, 0]
print(np.mean(first_val_first_row))  # mean of a scalar is the scalar itself
print(first_val_first_row)

# Index the value of the last element in the second last row
last_val_second_last_row = dataset[-2, -1]
np.mean(last_val_second_last_row)
_____no_output_____
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Slicing
# 2x2 window starting at the second row / second column.
block_2x2 = dataset[1:3, 1:3]
np.mean(block_2x2)

# Every second element of the fifth row.
fifth_row_even_cols = dataset[4, ::2]
print(dataset[4])
print(fifth_row_even_cols)
print(np.mean(fifth_row_even_cols))

# The last row of the dataset, reversed with a negative step slice.
flipped_last_row = dataset[-1, ::-1]
print(dataset[-1])
print(flipped_last_row)
print(np.mean(flipped_last_row))
[ 94.11176915 99.62387832 104.51786419 97.62787811 93.97853495 98.75108352 106.05042487 100.07721494 106.89005002] [106.89005002 100.07721494 106.05042487 98.75108352 93.97853495 97.62787811 104.51786419 99.62387832 94.11176915] 100.18096645222222
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Splitting
# Partition the dataset column-wise into three equal thirds, then cut the
# first third row-wise into two equal halves.
column_thirds = np.hsplit(dataset, 3)
row_halves = np.vsplit(column_thirds[0], 2)

print("Dataset", dataset.shape)
print("Subset", row_halves[0].shape)
Dataset (24, 9) Subset (12, 3)
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Iterating
# Walk the array element by element with nditer, tracking a running index
# via enumerate instead of a manually incremented counter.
for flat_index, element in enumerate(np.nditer(dataset)):
    print(element, flat_index)

# ndenumerate yields the (row, col) coordinates together with each value.
for coords, element in np.ndenumerate(dataset):
    print(coords, element)
(0, 0) 99.14931546 (0, 1) 104.03852715 (0, 2) 107.43534677 (0, 3) 97.85230675 (0, 4) 98.74986914 (0, 5) 98.80833412 (0, 6) 96.81964892 (0, 7) 98.56783189 (0, 8) 101.34745901 (1, 0) 92.02628776 (1, 1) 97.10439252 (1, 2) 99.32066924 (1, 3) 97.24584816 (1, 4) 92.9267508 (1, 5) 92.65657752 (1, 6) 105.7197853 (1, 7) 101.23162942 (1, 8) 93.87155456 (2, 0) 95.66253664 (2, 1) 95.17750125 (2, 2) 90.93318132 (2, 3) 110.18889465 (2, 4) 98.80084371 (2, 5) 105.95297652 (2, 6) 98.37481387 (2, 7) 106.54654286 (2, 8) 107.22482426 (3, 0) 91.37294597 (3, 1) 100.96781394 (3, 2) 100.40118279 (3, 3) 113.42090475 (3, 4) 105.48508838 (3, 5) 91.6604946 (3, 6) 106.1472841 (3, 7) 95.08715803 (3, 8) 103.40412146 (4, 0) 101.20862522 (4, 1) 103.5730309 (4, 2) 100.28690912 (4, 3) 105.85269352 (4, 4) 93.37126331 (4, 5) 108.57980357 (4, 6) 100.79478953 (4, 7) 94.20019732 (4, 8) 96.10020311 (5, 0) 102.80387079 (5, 1) 98.29687616 (5, 2) 93.24376389 (5, 3) 97.24130034 (5, 4) 89.03452725 (5, 5) 96.2832753 (5, 6) 104.60344836 (5, 7) 101.13442416 (5, 8) 97.62787811 (6, 0) 106.71751618 (6, 1) 102.97585605 (6, 2) 98.45723272 (6, 3) 100.72418901 (6, 4) 106.39798503 (6, 5) 95.46493436 (6, 6) 94.35373179 (6, 7) 106.83273763 (6, 8) 100.07721494 (7, 0) 96.02548256 (7, 1) 102.82360856 (7, 2) 106.47551845 (7, 3) 101.34745901 (7, 4) 102.45651798 (7, 5) 98.74767493 (7, 6) 97.57544275 (7, 7) 92.5748759 (7, 8) 91.37294597 (8, 0) 105.30350449 (8, 1) 92.87730812 (8, 2) 103.19258339 (8, 3) 104.40518318 (8, 4) 101.29326772 (8, 5) 100.85447132 (8, 6) 101.2226037 (8, 7) 106.03868807 (8, 8) 97.85230675 (9, 0) 110.44484313 (9, 1) 93.87155456 (9, 2) 101.5363647 (9, 3) 97.65393524 (9, 4) 92.75048583 (9, 5) 101.72074646 (9, 6) 96.96851209 (9, 7) 103.29147111 (9, 8) 99.14931546 (10, 0) 101.3514185 (10, 1) 100.37372248 (10, 2) 106.6471081 (10, 3) 100.61742813 (10, 4) 105.0320535 (10, 5) 99.35999981 (10, 6) 98.87007532 (10, 7) 95.85284217 (10, 8) 93.97853495 (11, 0) 97.21315663 (11, 1) 107.02874163 (11, 2) 102.17642112 (11, 3) 
96.74630281 (11, 4) 95.93799169 (11, 5) 102.62384733 (11, 6) 105.07475277 (11, 7) 97.59572169 (11, 8) 106.57364584 (12, 0) 95.65982034 (12, 1) 107.22482426 (12, 2) 107.19119932 (12, 3) 102.93039474 (12, 4) 85.98839623 (12, 5) 95.19184343 (12, 6) 91.32093303 (12, 7) 102.35313953 (12, 8) 100.39303522 (13, 0) 100.39303522 (13, 1) 92.0108226 (13, 2) 97.75887636 (13, 3) 93.18884302 (13, 4) 100.44940274 (13, 5) 108.09423367 (13, 6) 96.50342927 (13, 7) 99.58664719 (13, 8) 95.19184343 (14, 0) 103.1521596 (14, 1) 109.40523174 (14, 2) 93.83969256 (14, 3) 99.95827854 (14, 4) 101.83462816 (14, 5) 99.69982772 (14, 6) 103.05289628 (14, 7) 103.93383957 (14, 8) 104.15899829 (15, 0) 106.11454989 (15, 1) 88.80221141 (15, 2) 94.5081787 (15, 3) 94.59300658 (15, 4) 101.08830521 (15, 5) 96.34622848 (15, 6) 96.89244283 (15, 7) 98.07122664 (15, 8) 100.28690912 (16, 0) 96.78266211 (16, 1) 99.84251605 (16, 2) 104.03478031 (16, 3) 106.57052697 (16, 4) 105.13668343 (16, 5) 105.37011896 (16, 6) 99.07551254 (16, 7) 104.15899829 (16, 8) 98.75108352 (17, 0) 101.86186193 (17, 1) 103.61720152 (17, 2) 99.57859892 (17, 3) 99.4889538 (17, 4) 103.05541444 (17, 5) 98.65912661 (17, 6) 98.72774132 (17, 7) 104.70526438 (17, 8) 110.44484313 (18, 0) 97.49594839 (18, 1) 96.59385486 (18, 2) 104.63817694 (18, 3) 102.55198606 (18, 4) 105.86078488 (18, 5) 96.5937781 (18, 6) 93.04610867 (18, 7) 99.92159953 (18, 8) 100.96781394 (19, 0) 96.76814836 (19, 1) 91.6779221 (19, 2) 101.79132774 (19, 3) 101.20773355 (19, 4) 98.29243952 (19, 5) 101.83845792 (19, 6) 97.94046856 (19, 7) 102.20618501 (19, 8) 91.37294597 (20, 0) 106.89005002 (20, 1) 106.57364584 (20, 2) 102.26648279 (20, 3) 107.40064604 (20, 4) 99.94318168 (20, 5) 103.40412146 (20, 6) 106.38276709 (20, 7) 98.00253006 (20, 8) 97.10439252 (21, 0) 99.80873105 (21, 1) 101.63973121 (21, 2) 106.46476468 (21, 3) 110.43976681 (21, 4) 100.69156231 (21, 5) 99.99579473 (21, 6) 101.32113654 (21, 7) 94.76253572 (21, 8) 97.24130034 (22, 0) 96.10020311 (22, 1) 94.57421727 (22, 
2) 100.80409326 (22, 3) 105.02389857 (22, 4) 98.61325194 (22, 5) 95.62359311 (22, 6) 97.99762409 (22, 7) 103.83852459 (22, 8) 101.2226037 (23, 0) 94.11176915 (23, 1) 99.62387832 (23, 2) 104.51786419 (23, 3) 97.62787811 (23, 4) 93.97853495 (23, 5) 98.75108352 (23, 6) 106.05042487 (23, 7) 100.07721494 (23, 8) 106.89005002
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Filtering
# Boolean-mask selection of every value above 105.
# Naming fix: the variable was called vals_greater_five although the
# filter is > 105.
vals_greater_105 = dataset[dataset > 105]
vals_greater_105

# np.extract applies a combined condition and returns the matching values.
vals_between_90_95 = np.extract((dataset > 90) & (dataset < 95), dataset)
vals_between_90_95

# Coordinates of every value within 1 of 100.
rows, cols = np.where(abs(dataset - 100) < 1)
# Pair the row/column arrays directly with zip (clearer than indexing
# rows again through np.ndenumerate, which produced the same pairs).
one_away_indices = [[row, col] for row, col in zip(rows, cols)]
one_away_indices
_____no_output_____
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Sorting
# With no axis argument np.sort works along the last axis: each row is sorted.
sorted_rows = np.sort(dataset)
sorted_rows

# axis=0 sorts every column independently instead.
sorted_cols = np.sort(dataset, axis=0)
sorted_cols

# argsort returns the index order that would sort the first row; fancy
# indexing with it keeps the dataset untouched while showing the sorted values.
order = np.argsort(dataset[0])
dataset[0][order]
_____no_output_____
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Combining
# Dividimos horizontalmente en 3 partes nuestro dataset es decir si son 12 columnas serian 3 bloques de 4 columnas thirds = np.hsplit(dataset, (3)) print(dataset.shape) print(thirds[0].shape) #Dividimos verticalmente el primer bloque de los 3, en 2 partes , es decir si son 10 filas serian 2 bloques de 5 filas c/u halfed_first = np.vsplit(thirds[0], (2)) print(halfed_first[0].shape) # Imprimimos el primer bloque de esta mitad halfed_first[0] # Apilamos verticalmente las 2 mitades, esto nos deberia devolver el primer tercio thirds[0] first_col = np.vstack([halfed_first[0], halfed_first[1]]) print(thirds[0] == first_col) # Combinamos los 3 tercios de nuestros datos que serian igual a dataset first_second_col = np.hstack([first_col, thirds[1]]) full_data = np.hstack([first_second_col, thirds[2]])
_____no_output_____
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Reshaping
# Reshape the dataset in to a single list single_list = np.reshape(dataset, (1, -1)) print(dataset.shape) print(single_list.shape) # reshaping to a matrix with two columns # -1 Tells python to figure oyt the dimension out itself two_col_dataset = dataset.reshape(-1, 2) print(two_col_dataset.shape)
(108, 2)
MIT
Chapter_01.Operations_numpy_and_pandas/Numpy_operations.Indexing_slicing_splitting_iterator_sorting_combining_reshaping.ipynb
Eduardo0697/DataVisualizationWorkshop
Colombian Identity Codes Introduction The function `clean_co_nit()` cleans a column containing Colombian identity code (NIT) strings, and standardizes them in a given format. The function `validate_co_nit()` validates either a single NIT string, a column of NIT strings or a DataFrame of NIT strings, returning `True` if the value is valid, and `False` otherwise. NIT strings can be converted to the following formats via the `output_format` parameter:* `compact`: only number strings without any separators or whitespace, like "2131234321"* `standard`: NIT strings with proper separators in the proper places, like "213.123.432-1"Invalid parsing is handled with the `errors` parameter:* `coerce` (default): invalid parsing will be set to NaN* `ignore`: invalid parsing will return the input* `raise`: invalid parsing will raise an exceptionThe following sections demonstrate the functionality of `clean_co_nit()` and `validate_co_nit()`. An example dataset containing NIT strings
import pandas as pd
import numpy as np

# Sample NIT column: two plausible codes, one compact and one spaced code,
# free text, a missing value and a "NULL" placeholder.
nit_values = [
    "2131234321",
    "2131234325",
    "51824753556",
    "51 824 753 556",
    "hello",
    np.nan,
    "NULL",
]
address_values = [
    "123 Pine Ave.",
    "main st",
    "1234 west main heights 57033",
    "apt 1 789 s maple rd manhattan",
    "robie house, 789 north main street",
    "(staples center) 1111 S Figueroa St, Los Angeles",
    "hello",
]
df = pd.DataFrame({"nit": nit_values, "address": address_values})
df
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
1. Default `clean_co_nit`By default, `clean_co_nit` will clean NIT strings and output them in the standard format with proper separators.
from dataprep.clean import clean_co_nit

# Clean the "nit" column with the default settings; results go to a new
# "nit_clean" column in the standard format.
clean_co_nit(df, column = "nit")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
2. Output formats This section demonstrates the output parameter. `standard` (default)
# Explicitly request the default "standard" output format (e.g. "213.123.432-1").
clean_co_nit(df, column = "nit", output_format="standard")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
`compact`
# "compact" strips separators and whitespace, leaving number strings only.
clean_co_nit(df, column = "nit", output_format="compact")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
3. `inplace` parameterThis deletes the given column from the returned DataFrame. A new column containing cleaned NIT strings is added with a title in the format `"{original title}_clean"`.
clean_co_nit(df, column="nit", inplace=True)
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
4. `errors` parameter `coerce` (default)
clean_co_nit(df, "nit", errors="coerce")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
`ignore`
clean_co_nit(df, "nit", errors="ignore")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
5. `validate_co_nit()` `validate_co_nit()` returns `True` when the input is a valid NIT. Otherwise it returns `False`.The input of `validate_co_nit()` can be a string, a Pandas Series, a Dask Series, a Pandas DataFrame or a Dask DataFrame.When the input is a string, a Pandas Series or a Dask Series, the user does not need to specify a column name to be validated. When the input is a Pandas DataFrame or a Dask DataFrame, the user may or may not specify a column name. If the user specifies a column name, `validate_co_nit()` returns the validation result for that column only; otherwise it returns the validation result for the whole DataFrame.
from dataprep.clean import validate_co_nit

# Validate individual values: True is printed for valid NITs, False otherwise.
print(validate_co_nit("2131234321"))
print(validate_co_nit("2131234325"))
print(validate_co_nit("51824753556"))
print(validate_co_nit("51 824 753 556"))
print(validate_co_nit("hello"))
print(validate_co_nit(np.nan))
print(validate_co_nit("NULL"))
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
Series
validate_co_nit(df["nit"])
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
DataFrame + Specify Column
validate_co_nit(df, column="nit")
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
Only DataFrame
# With no column given, every cell of the DataFrame is validated.
validate_co_nit(df)
_____no_output_____
MIT
docs/source/user_guide/clean/clean_co_nit.ipynb
jwa345/dataprep
Setup
import os

# Silence TensorFlow's native logging ("3" keeps errors only); this must be
# set before TensorFlow is imported to take effect.
TF_LOG_VAR = "TF_CPP_MIN_LOG_LEVEL"
os.environ[TF_LOG_VAR] = "3"
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Setting this environment variable before importing TensorFlow suppresses noisy output such as TensorFlow's import-time log messages.
import glob
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

# Use a TPU when one is reachable; otherwise fall back to the default
# (CPU/GPU) distribution strategy.
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print("Device:", tpu.master())
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except Exception:
    # Bug fix: this was a bare `except:`, which would also swallow
    # KeyboardInterrupt/SystemExit. `except Exception` keeps the
    # TPU -> default fallback without masking interpreter-level signals.
    strategy = tf.distribute.get_strategy()

print("Number of replicas:", strategy.num_replicas_in_sync)

AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 16 * strategy.num_replicas_in_sync  # scale batch with replica count
IMAGE_SIZE = [176, 208]
EPOCHS = 100
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Convert the data
def _bytes_feature(value: [str, bytes]) -> tf.train.Feature:
    """Wrap a string / byte value in a BytesList Feature."""
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()  # BytesList will not unpack a string from an EagerTensor.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _float_feature(value: float) -> tf.train.Feature:
    """Wrap a float / double value in a FloatList Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

def _int64_feature(value: [bool, int]) -> tf.train.Feature:
    """Wrap a bool / enum / int / uint value in an Int64List Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def serialize_example(image: bytes, label: int) -> tf.train.Example:
    """Build the tf.train.Example message for one (image, label) pair.

    Fix: the return annotation previously named
    ``tf.train.Example.SerializeToString`` although a ``tf.train.Example``
    is returned — the caller does the serialization.
    """
    feature = {
        "raw_image": _bytes_feature(image),
        "label": _int64_feature(label),
    }
    return tf.train.Example(features=tf.train.Features(feature=feature))

def _write_split(paths, out_file, image_labels) -> None:
    """Serialize every image in *paths* (with its folder-derived label) into *out_file*."""
    with tf.io.TFRecordWriter(out_file) as writer:
        for path in paths:
            # Context manager closes the handle (the original leaked open files).
            with open(path, "rb") as img:
                image_string = img.read()
            # NOTE(review): assumes Windows-style separators in the glob
            # results ("...\\<label>\\<file>.jpg") — confirm before running
            # on a POSIX system.
            label_str = path.split("\\")[1]
            label = image_labels[label_str]
            tf_example = serialize_example(image_string, label)
            writer.write(tf_example.SerializeToString())

def write_tfrecord(main_path: str) -> None:
    """Convert the train/ and test/ image folders under *main_path* into
    ./tfrecord/train.tfrecord and ./tfrecord/test.tfrecord."""
    train_paths = glob.glob(main_path + "/train/*/*.jpg")
    test_paths = glob.glob(main_path + "/test/*/*.jpg")

    # Folder-name -> integer label mapping.
    image_labels = {"NonDemented": 0,
                    "VeryMildDemented": 1,
                    "MildDemented": 2,
                    "ModerateDemented": 3}

    # The original train and test loops were identical except for their
    # inputs; the shared helper removes the duplication.
    _write_split(train_paths, "./tfrecord/train.tfrecord", image_labels)
    print("Train TFRecord Converting Done!")

    _write_split(test_paths, "./tfrecord/test.tfrecord", image_labels)
    print("Test TFRecord Converting Done!")

dataset_path = "./dataset"
# Convert the train/ and test/ image folders into TFRecord files.
write_tfrecord(dataset_path)
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Load the data
train_dataset = tf.data.TFRecordDataset("./tfrecord/train.tfrecord")
test_dataset = tf.data.TFRecordDataset("./tfrecord/test.tfrecord")

# Materialize once to count the records (expensive but simple).
TRAIN_DATA_SIZE = len(list(train_dataset))
train_size = int(0.75 * TRAIN_DATA_SIZE)

train_dataset = train_dataset.shuffle(1000)
test_dataset = test_dataset.shuffle(1000)

# 75/25 train/validation split of the shuffled training records.
validation_dataset = train_dataset.skip(train_size)
train_dataset = train_dataset.take(train_size)

train_len = len(list(train_dataset))
validation_len = len(list(validation_dataset))
test_len = len(list(test_dataset))

print("Train dataset:", train_len)
print("Validation dataset:", validation_len)
print("Test dataset:", test_len)

# Schema used when the records were written (see the conversion cell).
image_feature_description = {
    "raw_image": tf.io.FixedLenFeature([], tf.string),
    "label": tf.io.FixedLenFeature([], tf.int64),
}

@tf.autograph.experimental.do_not_convert
def _parse_image_function(example_proto):
    """Parse one serialized Example into an (image, one-hot label) pair."""
    features = tf.io.parse_single_example(example_proto, image_feature_description)
    # Bug fix: the original looped over the dict's *keys* and indexed
    # feature['image'] — a key that does not exist, applied to a string.
    # Use the parsed tensors directly.
    image = tf.io.decode_raw(features["raw_image"], tf.uint8)
    image.set_shape([3 * 176 * 208])
    image = tf.reshape(image, [176, 208, 3])
    # NOTE(review): "raw_image" holds the original JPEG file bytes (see the
    # conversion cell), so tf.io.decode_jpeg may be what is really needed
    # here instead of decode_raw — confirm against the record contents.

    # Bug fix: .numpy() is unavailable on symbolic tensors inside
    # Dataset.map; cast the tensor directly.
    label = tf.cast(features["label"], tf.int64)
    label = tf.one_hot(label, 4)
    return image, label

def read_dataset(epochs, batch_size, dataset):
    """Parse, prefetch, repeat, shuffle and batch a raw TFRecord dataset."""
    dataset = dataset.map(_parse_image_function)
    dataset = dataset.prefetch(10)
    dataset = dataset.repeat(epochs)
    dataset = dataset.shuffle(buffer_size=10 * batch_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset

train_dataset = read_dataset(EPOCHS, BATCH_SIZE, train_dataset)
validation_dataset = read_dataset(EPOCHS, BATCH_SIZE, validation_dataset)
test_dataset = read_dataset(EPOCHS, BATCH_SIZE, test_dataset)

# Bug fix: removed the stray trailing statement
# `parsed_train_dataset.take(train_len)` — parsed_train_dataset is never
# defined in this notebook and the expression's result was discarded.
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Visualize dataset
# train TFRecord for image_features in parsed_train_dataset.take(1): image_raw = image_features["raw_image"].numpy() image_label = image_features["label"].numpy() display.display(display.Image(data=image_raw)) print("Label:", image_label) # test TFRecord for image_features in parsed_test_dataset.take(1): image_raw = image_features["raw_image"].numpy() image_label = image_features["label"].numpy() display.display(display.Image(data=image_raw)) print("Label:", image_label)
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Build Model
# 경증 치매, 중증도 치매, 비 치매, 매우 경미한 치매 CLASS_NAMES = ['MildDementia', 'ModerateDementia', 'NonDementia', 'VeryMildDementia'] NUM_CLASSES = len(CLASS_NAMES) TRAIN_DATA_SIZE = len(list(parsed_train_dataset)) train_size = int(0.75 * TRAIN_DATA_SIZE) # val_size = int(0.25 * TRAIN_DATA_SIZE) # 테스트용 데이터셋은 따로 존재하기에 분할하지 않는다. # test_size = # train / validation data split train_dataset = parsed_train_dataset.shuffle(100) train_dataset = train_dataset.take(train_size) validation_dataset = train_dataset.skip(train_size) train_dataset = train_dataset.batch(BATCH_SIZE) validation_dataset = validation_dataset.batch(BATCH_SIZE) def conv_block(filters): block = tf.keras.Sequential([ tf.keras.layers.SeparableConv2D(filters, 3, activation='relu', padding='same'), tf.keras.layers.SeparableConv2D(filters, 3, activation='relu', padding='same'), tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPool2D() ]) return block def dense_block(units, dropout_rate): block = tf.keras.Sequential([ tf.keras.layers.Dense(units, activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(dropout_rate) ]) return block def build_model(): model = tf.keras.Sequential([ tf.keras.Input(shape=(*IMAGE_SIZE, 3)), tf.keras.layers.Conv2D(16, 3, activation='relu', padding='same'), tf.keras.layers.Conv2D(16, 3, activation='relu', padding='same'), tf.keras.layers.MaxPool2D(), conv_block(32), conv_block(64), conv_block(128), tf.keras.layers.Dropout(0.2), conv_block(256), tf.keras.layers.Dropout(0.2), tf.keras.layers.Flatten(), dense_block(512, 0.7), dense_block(128, 0.5), dense_block(64, 0.3), tf.keras.layers.Dense(NUM_CLASSES, activation='softmax') ]) return model with strategy.scope(): model = build_model() METRICS = [tf.keras.metrics.AUC(name='auc')] model.compile( optimizer='adam', loss=tf.losses.CategoricalCrossentropy(), metrics=METRICS ) model.summary()
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
Train Model
@tf.autograph.experimental.do_not_convert
def exponential_decay(lr0, s):
    """Return a schedule function: lr(epoch) = lr0 * 0.1 ** (epoch / s)."""
    def exponential_decay_fn(epoch):
        return lr0 * 0.1 **(epoch / s)
    return exponential_decay_fn

exponential_decay_fn = exponential_decay(0.01, 20)

# Callbacks: exponential LR decay, best-model checkpointing, early stopping
# (restoring the best weights after 10 stagnant epochs).
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(exponential_decay_fn)
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint("AICAv2.h5", save_best_only=True)
early_stopping_cb = tf.keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)

history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    callbacks=[checkpoint_cb, early_stopping_cb, lr_scheduler],
    epochs=EPOCHS
)
_____no_output_____
Apache-2.0
AICA_v2.ipynb
Mayner0220/AICA
**SONAR_ISSUES**This notebook covers the selection of the relevant attributes of the table `SONAR_ISSUES`.First, we import the libraries we need and then read the corresponding CSV.
import pandas as pd

# Load the raw SONAR_ISSUES table and inspect its size and column names.
sonarIssues = pd.read_csv("../../../data/raw/SONAR_ISSUES.csv")
print(sonarIssues.shape)
list(sonarIssues)  # listing a DataFrame yields its column labels
(1941508, 18)
MIT
notebooks/2-DataPreparation/1-SelectData/3-DB-SONAR-ISSUES.ipynb
chus-chus/softwareDevTypes
We select the desired attributes of the table.
# Keep only the columns needed for the later preparation steps.
selected_columns = [
    'projectID',
    'creationDate',
    'closeDate',
    'creationCommitHash',
    'closeCommitHash',
    'type',
    'severity',
    'debt',
    'author',
]
sonarIssues = sonarIssues[selected_columns]
print(sonarIssues.shape)
sonarIssues.head()
(1941508, 9)
MIT
notebooks/2-DataPreparation/1-SelectData/3-DB-SONAR-ISSUES.ipynb
chus-chus/softwareDevTypes
We save this new table into a csv.
# Persist the reduced table for the next DataPreparation step.
# NOTE(review): the pandas default index=True also writes the row index as
# an extra unnamed column — confirm whether index=False is wanted here.
sonarIssues.to_csv('../../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv', header=True)
_____no_output_____
MIT
notebooks/2-DataPreparation/1-SelectData/3-DB-SONAR-ISSUES.ipynb
chus-chus/softwareDevTypes
R - Week 2 (exercises) R-code on solving equations with inverse matrix Solve the following system of equations: 1. $2x+y+2z=3$ 2. $x+6y-3z=-5$ 3. $2y+5z=4$ $$ \begin{bmatrix} 2 & 1 & 2 \\ 1 & 6 & -3 \\ 0 & 2 & 5 \end{bmatrix} \cdot \begin{bmatrix} x \\ y \\ z \end{bmatrix} = \begin{bmatrix} 3 \\ -5 \\ 4 \end{bmatrix} $$ $A \vec{x} = \vec{b}$, so $A^{-1} A \vec{x} = A^{-1} \vec{b}$, i.e. $I \vec{x} = A^{-1} \vec{b}$. Define matrix $A$:
# Coefficient matrix of the linear system (matrix() fills column by column).
A = matrix(c(2,1,2,1,6,-3,0,2,5), nrow=3)
A
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
The inverse $A^{-1}$ is:
# solve() with a single argument returns the inverse of A.
solve(A)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Define vector $\vec{b}$:
# Right-hand side vector b of the system A x = b.
b = c(3, -5, 4)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Solve the system with R functions `solve(A,b)`:
# solve(A, b) solves the linear system A x = b directly.
solve(A,b)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Solve the system with $\vec{x}=A^{-1}\vec{b}$:
# Equivalent computation via the explicit inverse: x = A^-1 %*% b.
solve(A) %*% b
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
R-code on least square method $y=ax+b$ $A \cdot \vec{x} = \vec{b}$, multiply both sides by $A^T$: $A^T A \vec{x} = A^T \vec{b}$, then invert the square matrix $A^T A$: $\vec{x} = (A^T A)^{-1} A^T \vec{b}$. Define $\vec{x}=\begin{bmatrix}12 & 2 & 3 & 5 & 10 & 9 & 8 \end{bmatrix}$:
# Predictor values for the least-squares example.
x = c(12, 2, 3, 5, 10, 9, 8)
x
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Define $\vec{y} = \begin{bmatrix}125 & 30 & 43 & 62 & 108 & 102 & 90 \end{bmatrix}$:
# Response values, paired with x above.
y = c(125, 30, 43, 62, 108, 102, 90)
y
length(x)==length(y)
# NOTE(review): union(x, y) concatenates and de-duplicates the two vectors,
# so this is NOT the usual design matrix cbind(x, 1) — verify intent.
A = matrix(union(x,y), nrow=length(x))
A
lm(y~x)
# Hard-coded line; the coefficients presumably come from the lm() fit above — verify.
fit <- function(x) 9.488*x+13.583
fit(5)
plot(x,sapply(x, fit), 'l', col='blue')
points(x,y)
x
y
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
$y=ax+b$
# NOTE(review): the first column here is built from y, not from the
# predictor x — for the fit of y ~ a*x + b one would expect c(x, ...);
# verify against the intended derivation.
X = matrix(c(y, rep(1, length(y))), ncol=2)
X
b = matrix(y)
b
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Dit is de vorm $A\cdot\vec{x} = \vec{b}$. Wat we op willen lossen met $A^{-1}A\vec{x}=A^{-1}\vec{b}$, maar dit werkt niet omdat $A$ geen vierkante matrix is en daardoor geen inverse kan bepalen voor $A$.Door gebruik te maken van de getransponeerde $A^T$ kunnen we een vierkante matrix krijgen. Dus matrix-vermenigvuldigen van $A\vec{x}=\vec{b}$ met $A^T$ geeft $A^T\cdot A\vec{x} = A^T\cdot\vec{b}$.
# Gram matrix A^T A — square, so it can be inverted for the normal equations.
t(A) %*% A
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Wat inderdaad een vierkant matrix geeft. Vervolgens is deze op te lossen door de inverse te bepalen. De hele formule wordt dan:$$ (A^T \cdot A)^{-1}\cdot(A^T \cdot A)\cdot\vec{x} = (A^T \cdot A)^{-1} \cdot A^T \cdot \vec{b} $$Laat $B = (A^T \cdot A)^{-1}$ zijn. Substitueren en vereenvoudigen geeft: $$ I\cdot\vec{x} = B \cdot A^T \cdot \vec{b} $$
# B = (A^T A)^{-1}, the inverse of the normal-equations matrix.
B = solve(t(A) %*% A)
B
# Least-squares solution: x = (A^T A)^{-1} A^T b.
B %*% t(A) %*% b
# Compare with R's built-in least-squares fit.
lm(y~x)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
**Versimpeld voorbeeld lreg**
# Simplified linear-regression example on four points.
A = matrix(c(-1, 0, 2, 3, 1, 1, 1, 1), ncol = 2)
A
b = c(-1, 2, 1, 2)
b
# Predictor = first column of A.
# FIX: the original used A[0:4], which only works because R silently drops
# the 0 index from 0:4; A[, 1] states the intent (first column) directly.
x = A[, 1]
# Normal-equations solution (A^T A)^{-1} A^T b.
solve(t(A) %*% A) %*% t(A) %*% b
# Built-in fit for comparison.
lm(b ~ x)
# Hard-coded coefficients copied from the solution above.
fit <- function(x) 0.5 * x + 0.5
plot(-5:5, sapply(-5:5, fit), 'l')
points(x, b)
# Reusable least-squares helper: returns the fitted line y = ax + b as a
# function of x.
lreg <- function(x, y) {
  A = cbind(x, rep(1, length(x)))
  s = solve(t(A) %*% A) %*% t(A) %*% y
  function(x) s[1] * x + s[2] # Return f(x)=ax+b
}
# Apply the helper to the original seven-point data set.
x = c(12,2,3,5,10,9,8)
y = c(125,30,43,62,108,102,90)
fit <- lreg(x, y)
plot(0:15, sapply(0:15, fit), 'l')
points(x, y)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
With other functionality: Least squares regression model Find the best fitting line $y=ax+b$ for the following data points:
# Data points for the least-squares fit: predictor x, response b.
x <- c(12,2,3,5,10,9,8)
b <- c(125,30,43,62,108,102,90)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
We can do this by solving the equation $A\vec{x}=\vec{b}$. Constructing the equation with the matrices for our data points yields:$$ \begin{bmatrix} 12 & 1 \\ 2 & 1 \\ 3 & 1 \\ 5 & 1 \\ 10 & 1 \\ 9 & 1 \\ 8 & 1 \end{bmatrix} \cdot \begin{bmatrix}a \\ b \end{bmatrix} = \begin{bmatrix} 125 \\ 30 \\ 43 \\ 62 \\ 108 \\ 102 \\ 90 \end{bmatrix} $$ First we will construct our matrix $A$:
# Design matrix A = [x | 1] for the model y = a*x + b; the column of ones
# carries the intercept.
ones <- rep(1, length(x))
A <- cbind(x, ones)
A
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
If we want to solve the equation $A\vec{x}=\vec{b}$ we can multiply both sides $A^{-1}$ to get:$$ \begin{align} A\vec{x}&=\vec{b} \\ (A^{-1}\cdot A)\vec{x}&=A^{-1}\vec{b} \\ I\vec{x}&=A^{-1}\vec{b} \end{align} $$ However, we need to calculate the inverse of $A$, but $A$ is not a square matrix. To solve this problem we multiply $A$ with $A^T$ to get a square matrix. $$ \begin{align} A\vec{x}&=\vec{b} \\ (A^T \cdot A) \cdot \vec{x} &= A^T \cdot \vec{b} \\ (A^T \cdot A)^{-1} \cdot (A^T \cdot A) \cdot \vec{x} &= (A^T \cdot A)^{-1} \cdot A^T \cdot \vec{b} \\ I \cdot \vec{x} &= (A^T \cdot A)^{-1} \cdot A^T \cdot \vec{b} \end{align} $$
# Normal-equations solution S = (A^T A)^{-1} A^T b, giving (slope, intercept).
S = solve(t(A) %*% A) %*% t(A) %*% b
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
The resulting matrix $S$ will have our coefficients $a$ and $b$ to construct the line:
# abline() expects c(intercept, slope); S holds (slope, intercept) because
# the predictor column comes first in A, so swap the two entries.
lsm = c(S[2], S[1])
lsm
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
If we verify the coefficients with built-in R functionality for least-squares regression, we can see that our solution is correct.
# Built-in least-squares fit; its coefficients should match S above.
lm(b~x)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Plotting our values yields:
# Scatter the data and overlay the fitted regression line.
plot(x, b)
abline(lsm)
_____no_output_____
Unlicense
Applied Math/Y1S4/Data Science/.ipynb_checkpoints/R - Week 2 (exercises)-checkpoint.ipynb
darkeclipz/jupyter-notebooks
Copyright 2018 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License.
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Custom differentiationThis tutorial will show you how to define your own custom derivatives, perform derivative surgery, and implement your own gradient checkpointing API in just 5 lines of Swift. Declaring custom derivatives You can define custom derivatives for any Swift function that has differentiable parameters and results. By doing that, you can even import a C function and make it differentiable.
// Glibc provides M_E and pow used by the demo below.
import Glibc

// A "silly" exponential: computes e^x while logging every call, so we can
// observe when the original computation runs versus the derivative.
func sillyExp(_ x: Float) -> Float {
    let 𝑒 = Float(M_E)
    print("Taking 𝑒(\(𝑒)) to the power of \(x)!")
    return pow(𝑒, x)
}

// Custom derivative for sillyExp. Since d/dx e^x = e^x, the pullback
// simply scales the incoming cotangent v by the primal value y.
@differentiating(sillyExp)
func sillyDerivative(_ x: Float) -> (value: Float, pullback: (Float) -> Float) {
    let y = sillyExp(x)
    return (value: y, pullback: { v in v * y })
}

print("exp(3) =", sillyExp(3))
print("𝛁exp(3) =", gradient(of: sillyExp)(3))
Taking 𝑒(2.7182817) to the power of 3.0! exp(3) = 20.085535 Taking 𝑒(2.7182817) to the power of 3.0! 𝛁exp(3) = 20.085535
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Stop derivatives from propagatingCommonly known as "stop gradient" in machine learning use cases, method [`withoutDerivative()`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable/s:10TensorFlow14DifferentiablePAAE17withoutDerivativexyF) stops derivatives from propagating.Plus, `withoutDerivative()` can sometimes help the Swift compiler with identifying what not to differentiate and producing more efficient derivatives. When it is detectable that the derivative of a function will always be zero, the Swift compiler will produce a warning. Explicitly using `.withoutDerivative()` silences that warning.
// `withoutDerivative()` marks the cosine term as constant with respect to
// differentiation, so only the sin-chain contributes to the gradient and
// the partial derivative w.r.t. y is zero.
let x: Float = 2.0
let y: Float = 3.0
gradient(at: x, y) { x, y in
    sin(sin(sin(x))) + cos(cos(cos(y))).withoutDerivative()
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Derivative surgeryMethod [`withGradient(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable/s:10TensorFlow14DifferentiablePAAE12withGradientyxy15CotangentVectorQzzcF) makes arbitrary operations (including mutation) run on the gradient at a value during the enclosing function’s backpropagation. Use this to debug or make experimental tweaks to backpropagation. It works anywhere All differentiation APIs provided by the standard library are defined generically over all types that conform to the `Differentiable` protocol: `Float`, `Double`, `Float80`, SIMD vectors, and even your own types!Read technical document [Differentiable Types](https://github.com/tensorflow/swift/blob/master/docs/DifferentiableTypes.md) for more insights on the `Differentiable` protocol.
// `withGradient` lets us observe -- and even mutate -- the gradient
// flowing through an intermediate value during backpropagation.
var x: Float = 30
x.gradient { x -> Float in
    // Print the partial derivative with respect to the result of `sin(x)`.
    let a = sin(x).withGradient { print("∂+/∂sin = \($0)") }
    // Force the partial derivative with respect to `x` to be `0.5`.
    let b = log(x.withGradient { (dx: inout Float) in
        print("∂log/∂x = \(dx), but rewritten to 0.5"); dx = 0.5
    })
    return a + b
}
∂log/∂x = 0.033333335, but rewritten to 0.5 ∂+/∂sin = 1.0
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Use it in a neural network module Just like how we used it in a simple `Float` function, we can use it in any numerical application, like the following neural network built using the [Swift for TensorFlow Deep Learning Library](https://github.com/tensorflow/swift-apis).
import TensorFlow

// Two-layer perceptron; `withGradient` prints the gradient flowing into
// the first layer's output during backpropagation.
struct MLP: Layer {
    var layer1 = Dense<Float>(inputSize: 2, outputSize: 10, activation: relu)
    var layer2 = Dense<Float>(inputSize: 10, outputSize: 1, activation: relu)

    @differentiable
    func applied(to input: Tensor<Float>, in context: Context) -> Tensor<Float> {
        // Observe the partial derivative w.r.t. layer1's activations.
        let h0 = layer1.applied(to: input, in: context).withGradient { print("∂L/∂layer1 =", $0) }
        return layer2.applied(to: h0, in: context)
    }
}

let optimizer = SGD<MLP, Float>(learningRate: 0.02)
var classifier = MLP()
let context = Context(learningPhase: .training)
// XOR truth table as training data.
let x: Tensor<Float> = [[0, 0], [0, 1], [1, 0], [1, 1]]
let y: Tensor<Float> = [0, 1, 1, 0]

for _ in 0..<10 {
    let 𝛁model = classifier.gradient { classifier -> Tensor<Float> in
        // Observe the partial derivative w.r.t. the predictions.
        let ŷ = classifier.applied(to: x, in: context).withGradient { print("∂L/∂ŷ =", $0) }
        let loss = (ŷ - y).squared().mean()
        print("Loss: \(loss)")
        return loss
    }
    optimizer.update(&classifier.allDifferentiableVariables, along: 𝛁model)
}
Loss: 0.33426732 ∂L/∂ŷ = [[-0.25], [-0.078446716], [-0.12092987], [0.031454742]] ∂L/∂layer1 = [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.03357383, -0.027463656, 0.037523113, -0.002631738, -0.030937709, -0.014981618, -0.02623924, -0.026290288, 0.027446445, 0.01046889], [-0.051755875, -0.042336714, 0.057843916, -0.004056967, -0.047692157, -0.023094976, -0.040449213, -0.040527906, 0.042310182, 0.016138362], [0.013462082, 0.0110120885, -0.015045625, 0.0010552468, 0.012405078, 0.006007172, 0.010521135, 0.010541604, -0.011005187, -0.0041977055]] Loss: 0.33176333 ∂L/∂ŷ = [[-0.24746439], [-0.07523262], [-0.11674469], [0.03514868]] ∂L/∂layer1 = [[-0.10602461, -0.08665162, 0.11829134, -0.008301959, -0.09804342, -0.04726032, -0.08280819, -0.082981184, 0.08658129, 0.032860693], [-0.032232955, -0.0263433, 0.035962217, -0.0025239112, -0.029806564, -0.014367796, -0.025174841, -0.025227433, 0.026321916, 0.009990108], [-0.050018553, -0.040879082, 0.05580555, -0.003916562, -0.046253316, -0.022295699, -0.039065886, -0.0391475, 0.0408459, 0.015502479], [0.015059238, 0.01230759, -0.016801547, 0.0011791714, 0.013925628, 0.006712634, 0.011761686, 0.011786258, -0.0122976, -0.0046673785]] Loss: 0.3263967 ∂L/∂ŷ = [[-0.24090183], [-0.068522125], [-0.10922298], [0.04169026]] ∂L/∂layer1 = [[-0.10332261, -0.08436972, 0.115082964, -0.008081798, -0.09583634, -0.046007015, -0.08064418, -0.08082552, 0.08428522, 0.031835504], [-0.029389087, -0.023998126, 0.032734204, -0.002298787, -0.027259693, -0.013086237, -0.022938434, -0.022990014, 0.02397409, 0.009055292], [-0.046845652, -0.038252562, 0.052177705, -0.0036642232, -0.043451436, -0.020859215, -0.036563434, -0.03664565, 0.03821425, 0.014433966], [0.01788092, 0.01460095, -0.019916158, 0.0013986289, 0.016585354, 0.0079619335, 0.013956212, 0.013987594, -0.014586327, -0.005509425]] Loss: 0.32171223 ∂L/∂ŷ = [[-0.23473385], [-0.062339008], [-0.102276154], [0.047599167]] ∂L/∂layer1 = [[-0.10078285, -0.08222727, 0.11207248, -0.007874874, 
-0.09368697, -0.044829067, -0.078606784, -0.07879931, 0.082127206, 0.030880556], [-0.026765218, -0.021837354, 0.029763442, -0.0020913552, -0.024880743, -0.011905396, -0.02087585, -0.02092698, 0.021810781, 0.008201047], [-0.04391221, -0.035827335, 0.04883123, -0.0034311705, -0.040820457, -0.019532524, -0.03424985, -0.034333736, 0.035783738, 0.013455003], [0.020436676, 0.016673988, -0.02272598, 0.0015968615, 0.018997777, 0.009090407, 0.015939828, 0.015978869, -0.016653698, -0.006261938]] Loss: 0.31760892 ∂L/∂ŷ = [[-0.22893232], [-0.056644887], [-0.0958622], [0.0529218]] ∂L/∂layer1 = [[-0.09839373, -0.080213994, 0.109245166, -0.007680244, -0.0915977, -0.0437211, -0.0766867, -0.076893255, 0.0800974, 0.02998989], [-0.024345629, -0.019847406, 0.02703061, -0.0019003282, -0.022664083, -0.010817943, -0.018974645, -0.019025752, 0.019818557, 0.00742042], [-0.041200995, -0.033588488, 0.045744885, -0.0032159945, -0.038355254, -0.018307598, -0.03211148, -0.03219797, 0.033539664, 0.0125578465], [0.02274547, 0.0185429, -0.025253974, 0.0017754257, 0.021174446, 0.010106915, 0.017727504, 0.01777525, -0.018515948, -0.0069327]] Loss: 0.3140006 ∂L/∂ŷ = [[-0.22347087], [-0.051403634], [-0.08994151], [0.057702184]] ∂L/∂layer1 = [[-0.09614439, -0.07832037, 0.106587306, -0.0074970224, -0.08956989, -0.04267808, -0.07487536, -0.07509866, 0.07818659, 0.029158076], [-0.022115506, -0.018015554, 0.024517624, -0.0017244942, -0.020603213, -0.009816977, -0.017223122, -0.017274486, 0.017984781, 0.0067070536], [-0.038695745, -0.031522017, 0.04289876, -0.0030173666, -0.03604967, -0.017176874, -0.030135486, -0.030225359, 0.03146817, 0.011735407], [0.024825346, 0.020223022, -0.027521798, 0.0019357986, 0.02312775, 0.011019863, 0.019333491, 0.01939115, -0.020188477, -0.0075288774]] Loss: 0.3108136 ∂L/∂ŷ = [[-0.21832475], [-0.046581082], [-0.084476836], [0.061981946]] ∂L/∂layer1 = [[-0.094024695, -0.07653748, 0.10408614, -0.0073243803, -0.08760406, -0.041695286, -0.07316483, -0.07340741, 0.076386094, 
0.02838015], [-0.020060813, -0.016329797, 0.022207491, -0.0015627067, -0.018690927, -0.008895975, -0.015610217, -0.015661974, 0.016297497, 0.0060551], [-0.036381166, -0.029614802, 0.04027426, -0.0028340372, -0.033896815, -0.01613324, -0.028309815, -0.028403677, 0.029556224, 0.010981189], [0.026693417, 0.021728832, -0.02954984, 0.0020793765, 0.024870614, 0.011837205, 0.020771343, 0.020840213, -0.021685854, -0.008057066]] Loss: 0.30798542 ∂L/∂ŷ = [[-0.2134709], [-0.042145163], [-0.07943327], [0.06580055]] ∂L/∂layer1 = [[-0.092025176, -0.07485708, 0.10172984, -0.007161543, -0.08570006, -0.040768307, -0.07154774, -0.07181193, 0.07468786, 0.02765159], [-0.018168358, -0.0147788925, 0.020084333, -0.00141389, -0.016919604, -0.00804881, -0.014125537, -0.014177696, 0.014745485, 0.0054592015], [-0.034242887, -0.027854579, 0.03785403, -0.0026648352, -0.031889293, -0.015170028, -0.026623163, -0.02672147, 0.027791614, 0.010289253], [0.028365958, 0.023074042, -0.031357337, 0.0022074832, 0.026416298, 0.012566475, 0.022053968, 0.022135403, -0.023021882, -0.008523362]] Loss: 0.30546278 ∂L/∂ŷ = [[-0.20888776], [-0.03806588], [-0.07477814], [0.069194704]] ∂L/∂layer1 = [[-0.09013698, -0.07327145, 0.099507414, -0.007007787, -0.08385716, -0.039893024, -0.07001727, -0.070305154, 0.07308434, 0.026968256], [-0.016425777, -0.013352349, 0.018133363, -0.0012770379, -0.015281396, -0.007269756, -0.012759335, -0.012811797, 0.013318251, 0.0049144593], [-0.03226745, -0.02622989, 0.035621904, -0.0025086643, -0.030019386, -0.014281, -0.025064949, -0.025168007, 0.026162906, 0.009654161], [0.02985815, 0.024271391, -0.032962132, 0.0023213506, 0.027777938, 0.013214685, 0.023193432, 0.023288796, -0.024209408, -0.008933316]] Loss: 0.30320063 ∂L/∂ŷ = [[-0.20455518], [-0.0343152], [-0.07048094], [0.07219905]] ∂L/∂layer1 = [[-0.08835188, -0.07177346, 0.09740865, -0.0068624374, -0.082074195, -0.039065596, -0.068567075, -0.06888049, 0.07156848, 0.02632637], [-0.014821488, -0.012040373, 0.016340809, 
-0.0011512097, -0.013768374, -0.0065534576, -0.011502485, -0.011555062, 0.012005987, 0.0044163857], [-0.03044227, -0.024730057, 0.033562843, -0.0023645016, -0.028279249, -0.013460329, -0.023625273, -0.023733262, 0.02465943, 0.0090709375], [0.031184357, 0.025332898, -0.034381, 0.0024221407, 0.028968606, 0.01378845, 0.024201185, 0.024311805, -0.025260549, -0.009292059]]
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Recomputing activations during backpropagation to save memory (checkpointing)Checkpointing is a traditional technique in reverse-mode automatic differentiation to save memory when computing derivatives by making large intermediate values in the original computation not be saved in memory for backpropagation, but instead recomputed as needed during backpropagation. This technique has been realized in modern deep learning libraries as well. In Swift, API [`withComputationInPullbacks(_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable/s:10TensorFlow14DifferentiablePAAE28withRecomputationInPullbacksyqd__qd__xcAaBRd__lF) makes you able to control what to recompute during backpropagation, and it is available on all `Differentiable` types.But today, let us learn how to define our own gradient checkpointing APIs from scratch, in just a few lines of code. My gradient checkpointing API We can define our own gradient checkpointing API, `makeRecomputedInGradient(_:)`, in terms of standard library function [`differentiableFunction(from:)`](https://www.tensorflow.org/swift/api_docs/Functions/s:10TensorFlow22differentiableFunction4fromq0_x_q_tcq0_5value_15CotangentVectorQz_AEQy_tAEQy0_c8pullbacktx_q_tc_tAA14DifferentiableRzAaJR_AaJR0_r1_lF), which is a shorthand for creating a differentiable function directly from a derivative function (also called a "vector-Jacobian products (VJP) function").As we have seen before, the derivative function returns a tuple of the original function's result and a pullback closure. We return `original(x)` in `value:`, and call `pullback(at:in:)` on `original` to evaluate the original function again and get a pullback.
/// Given a differentiable function, returns the same differentiable function except when
/// derivatives of this function is being computed, values in the original function that are needed
/// for computing the derivatives will be recomputed, instead of being captured by the differential
/// or pullback.
///
/// - Parameter body: The body of the differentiable function.
/// - Returns: The same differentiable function whose derivatives, when computed, will recompute
///   some values from the original function.
func makeRecomputedInGradient<T: Differentiable, U: Differentiable>(
    _ original: @escaping @differentiable (T) -> U
) -> @differentiable (T) -> U {
    // Build the function from its VJP: the value is the original result, and
    // the pullback re-runs `original` via `pullback(at:in:)` instead of
    // capturing intermediate values.
    return differentiableFunction { x in
        (value: original(x), pullback: { v in pullback(at: x, in: original)(v) })
    }
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Verify it works
let input: Float = 10.0
print("Running original computation...")

// Differentiable multiplication with checkpointing.
// "Computing square..." should print twice: once in the forward pass and
// once when the pullback recomputes it.
let square = makeRecomputedInGradient { (x: Float) -> Float in
    print(" Computing square...")
    return x * x
}

// Differentiate `f(x) = (cos(x))^2`.
let (output, backprop) = input.valueWithPullback { input -> Float in
    return square(cos(input))
}
print("Running backpropagation...")
let grad = backprop(1)
print("Gradient = \(grad)")
Running original computation... Computing square... Running backpropagation... Computing square... Gradient = -0.9129453
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Extend it to neural network modulesIn this example, we define a simple convolutional neural network.```swiftstruct Model: Layer { var conv = Conv2D(filterShape: (5, 5, 3, 6)) var maxPool = MaxPool2D(poolSize: (2, 2), strides: (2, 2)) var flatten = Flatten() var dense = Dense(inputSize: 36 * 6, outputSize: 10) @differentiable func applied(to input: Tensor, in context: Context) -> Tensor { return input.sequenced(in: context, through: conv, maxPool, flatten, dense) }}```We want to make activations in the convolution layer (`conv`) be recomputed during backpropagation. However, using `makeRecomputedInGradient(_:)` could make the resulting code look cumbersome, especially when we want to apply layers sequentially using [`sequenced(in:through:_:_:_:_:)`](https://www.tensorflow.org/swift/api_docs/Protocols/Differentiable/s:10TensorFlow14DifferentiablePAAE9sequenced2in7through____6OutputQyd_3_AA7ContextC_qd__qd_0_qd_1_qd_2_qd_3_t5InputQyd__RszAA5LayerRd__AaMRd_0_AaMRd_1_AaMRd_2_AaMRd_3_AKQyd_0_AGRtd__AKQyd_1_AGRtd_0_AKQyd_2_AGRtd_1_AKQyd_3_AGRtd_2_r3_lF).```swiftinput.sequenced(in: context, through: conv, maxPool, flatten, dense)```So, why don't we define a **special layer type** that wraps a layer and makes its activations be recomputed during backpropagation? Let's do it. First, we define a `makeRecomputedInGradient(_:)` function that takes a binary function.
// Same as the previous `makeRecomputedInGradient(_:)`, except it's for binary functions.
// The pullback re-evaluates `original` at (x, y) during backpropagation rather
// than capturing intermediate values from the forward pass.
func makeRecomputedInGradient<T: Differentiable, U: Differentiable, V: Differentiable>(
    _ original: @escaping @differentiable (T, U) -> V
) -> @differentiable (T, U) -> V {
    return differentiableFunction { x, y in
        (value: original(x, y), pullback: { v in pullback(at: x, y, in: original)(v) })
    }
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Then, we define a generic layer `ActivationDiscarding`.
/// A layer wrapper that makes the underlying layer's activations be discarded during application
/// and recomputed during backpropagation.
struct ActivationDiscarding<Wrapped: Layer>: Layer
    where Wrapped.AllDifferentiableVariables == Wrapped.CotangentVector {
    /// The wrapped layer.
    var wrapped: Wrapped

    @differentiable
    func applied(to input: Wrapped.Input, in context: Context) -> Wrapped.Output {
        // Wrap the layer application with the checkpointing helper so its
        // intermediate activations are recomputed when the pullback runs.
        // NOTE(review): `Input` in the closure signature presumably resolves
        // to `Wrapped.Input` via the Layer conformance -- confirm.
        let apply = makeRecomputedInGradient { (layer: Wrapped, input: Input) -> Wrapped.Output in
            print(" Applying \(Wrapped.self) layer...")
            return layer.applied(to: input, in: context)
        }
        return apply(wrapped, input)
    }
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Finally, we can add a method on all layers that returns the same layer except its activations are discarded during application and recomputed during backpropagation.
// Convenience: wrap any layer (whose variables and cotangent types match)
// so its activations are discarded on application and recomputed during
// backpropagation.
extension Layer where AllDifferentiableVariables == CotangentVector {
    func discardingActivations() -> ActivationDiscarding<Self> {
        return ActivationDiscarding(wrapped: self)
    }
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
Back in the model, all we have to change is to wrap the convolution layer into the activation-recomputing layer.```swiftvar conv = Conv2D(filterShape: (5, 5, 3, 6)).discardingActivations()``` Now, simply use it in the model!
// Simple convnet; the convolution layer is wrapped so its activations are
// recomputed during backpropagation instead of being stored.
struct Model: Layer {
    var conv = Conv2D<Float>(filterShape: (5, 5, 3, 6)).discardingActivations()
    var maxPool = MaxPool2D<Float>(poolSize: (2, 2), strides: (2, 2))
    var flatten = Flatten<Float>()
    var dense = Dense<Float>(inputSize: 36 * 6, outputSize: 10)

    @differentiable
    func applied(to input: Tensor<Float>, in context: Context) -> Tensor<Float> {
        return input.sequenced(in: context, through: conv, maxPool, flatten, dense)
    }
}
_____no_output_____
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
When we run a training loop, we can see that the convolution layer's activations are computed twice: once during layer application, and once during backpropagation.
// Use random training data.
let x = Tensor<Float>(randomNormal: [10, 16, 16, 3])
let y = Tensor<Int32>(rangeFrom: 0, to: 10, stride: 1)

var model = Model()
let opt = SGD<Model, Float>()
let context = Context(learningPhase: .training)

// Each step should print "Applying Conv2D ..." twice: once in the forward
// pass and once when backprop recomputes the discarded activations.
for i in 1...5 {
    print("Starting training step \(i)")
    print(" Running original computation...")
    let (logits, backprop) = model.appliedForBackpropagation(to: x, in: context)
    let (loss, dL_dŷ) = logits.valueWithGradient { logits in
        softmaxCrossEntropy(logits: logits, labels: y)
    }
    print(" Loss: \(loss)")
    print(" Running backpropagation...")
    let (dL_dθ, _) = backprop(dL_dŷ)
    opt.update(&model.allDifferentiableVariables, along: dL_dθ)
}
Starting training step 1 Running original computation... Applying Conv2D<Float> layer... Loss: 3.6660562 Running backpropagation... Applying Conv2D<Float> layer... Starting training step 2 Running original computation... Applying Conv2D<Float> layer... Loss: 3.1203392 Running backpropagation... Applying Conv2D<Float> layer... Starting training step 3 Running original computation... Applying Conv2D<Float> layer... Loss: 2.7324893 Running backpropagation... Applying Conv2D<Float> layer... Starting training step 4 Running original computation... Applying Conv2D<Float> layer... Loss: 2.4246051 Running backpropagation... Applying Conv2D<Float> layer... Starting training step 5 Running original computation... Applying Conv2D<Float> layer... Loss: 2.1656146 Running backpropagation... Applying Conv2D<Float> layer...
CC-BY-4.0
docs/site/tutorials/custom_differentiation.ipynb
sendilkumarn/swift
케라스로 AlexNet 만들기 이 노트북에서 [AlexNet](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks)과 비슷한 심층 합성곱 신경망으로 [Oxford Flowers](http://www.robots.ox.ac.uk/~vgg/data/flowers/17/) 데이터셋의 꽃을 17개의 카테고리로 분류하겠습니다. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rickiepark/dl-illustrated/blob/master/notebooks/10-2.alexnet_in_keras.ipynb) 라이브러리를 적재합니다.
from tensorflow import keras from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D from tensorflow.keras.layers import BatchNormalization
_____no_output_____
MIT
notebooks/10-2.alexnet_in_keras.ipynb
sunny191019/dl-illustrated
데이터를 적재하고 전처리합니다. 원서 노트북은 tflearn을 사용해 oxflower17 데이터셋을 다운로드합니다. 이 라이브러리는 텐서플로 2와 호환되지 않습니다. 여기에서는 사전에 tflearn으로 다운받은 데이터를 다운로드하여 사용합니다.이 데이터셋에 대한 자세한 내용은 http://www.robots.ox.ac.uk/~vgg/data/flowers/17/ 을 참고하세요.
# Remove any previously downloaded copy, then fetch the pre-packaged
# Oxford Flowers 17 dataset as a single .npz archive.
!rm oxflower17*
!wget https://bit.ly/31IvwtD -O oxflower17.npz

import numpy as np

# Load image array X and label array Y from the archive.
data = np.load('oxflower17.npz')
X = data['X']
Y = data['Y']

# Inspect the array shapes (images, labels).
X.shape, Y.shape
_____no_output_____
MIT
notebooks/10-2.alexnet_in_keras.ipynb
sunny191019/dl-illustrated
신경망 모델을 만듭니다.
# AlexNet-style convolutional network for 17-way flower classification.
# Same architecture as the incremental add() version: three conv/pool/
# batch-norm stages, two dropout-regularised dense layers, and a
# 17-unit softmax output — built here from a single layer list.
model = Sequential([
    Conv2D(96, kernel_size=(11, 11), strides=(4, 4), activation='relu',
           input_shape=(224, 224, 3)),
    MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
    BatchNormalization(),

    Conv2D(256, kernel_size=(5, 5), activation='relu'),
    MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
    BatchNormalization(),

    Conv2D(256, kernel_size=(3, 3), activation='relu'),
    Conv2D(384, kernel_size=(3, 3), activation='relu'),
    Conv2D(384, kernel_size=(3, 3), activation='relu'),
    MaxPooling2D(pool_size=(3, 3), strides=(2, 2)),
    BatchNormalization(),

    Flatten(),
    Dense(4096, activation='tanh'),
    Dropout(0.5),
    Dense(4096, activation='tanh'),
    Dropout(0.5),
    Dense(17, activation='softmax'),
])

model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 54, 54, 96) 34944 _________________________________________________________________ max_pooling2d (MaxPooling2D) (None, 26, 26, 96) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 26, 26, 96) 384 _________________________________________________________________ conv2d_1 (Conv2D) (None, 22, 22, 256) 614656 _________________________________________________________________ max_pooling2d_1 (MaxPooling2 (None, 10, 10, 256) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 10, 10, 256) 1024 _________________________________________________________________ conv2d_2 (Conv2D) (None, 8, 8, 256) 590080 _________________________________________________________________ conv2d_3 (Conv2D) (None, 6, 6, 384) 885120 _________________________________________________________________ conv2d_4 (Conv2D) (None, 4, 4, 384) 1327488 _________________________________________________________________ max_pooling2d_2 (MaxPooling2 (None, 1, 1, 384) 0 _________________________________________________________________ batch_normalization_2 (Batch (None, 1, 1, 384) 1536 _________________________________________________________________ flatten (Flatten) (None, 384) 0 _________________________________________________________________ dense (Dense) (None, 4096) 1576960 _________________________________________________________________ dropout (Dropout) (None, 4096) 0 _________________________________________________________________ dense_1 (Dense) (None, 4096) 16781312 _________________________________________________________________ dropout_1 (Dropout) (None, 4096) 0 _________________________________________________________________ dense_2 (Dense) (None, 17) 69649 
================================================================= Total params: 21,883,153 Trainable params: 21,881,681 Non-trainable params: 1,472 _________________________________________________________________
MIT
notebooks/10-2.alexnet_in_keras.ipynb
sunny191019/dl-illustrated
모델을 설정합니다.
# Configure training: multi-class cross-entropy loss, Adam optimiser,
# and accuracy as the reported metric.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
_____no_output_____
MIT
notebooks/10-2.alexnet_in_keras.ipynb
sunny191019/dl-illustrated
훈련!
# Train for 100 epochs on shuffled mini-batches of 64 images,
# holding out 10% of the data for validation.
model.fit(X, Y, batch_size=64, epochs=100, verbose=1, validation_split=0.1, shuffle=True)
Epoch 1/100 20/20 [==============================] - 9s 78ms/step - loss: 4.6429 - accuracy: 0.1772 - val_loss: 6.9113 - val_accuracy: 0.0662 Epoch 2/100 20/20 [==============================] - 1s 50ms/step - loss: 3.2046 - accuracy: 0.2823 - val_loss: 3.8402 - val_accuracy: 0.1838 Epoch 3/100 20/20 [==============================] - 1s 50ms/step - loss: 2.3982 - accuracy: 0.3301 - val_loss: 5.0392 - val_accuracy: 0.1250 Epoch 4/100 20/20 [==============================] - 1s 50ms/step - loss: 2.4482 - accuracy: 0.3819 - val_loss: 4.1104 - val_accuracy: 0.2279 Epoch 5/100 20/20 [==============================] - 1s 51ms/step - loss: 2.2152 - accuracy: 0.4310 - val_loss: 4.6752 - val_accuracy: 0.2206 Epoch 6/100 20/20 [==============================] - 1s 50ms/step - loss: 2.2077 - accuracy: 0.4286 - val_loss: 4.6224 - val_accuracy: 0.2279 Epoch 7/100 20/20 [==============================] - 1s 51ms/step - loss: 1.9991 - accuracy: 0.4783 - val_loss: 3.2980 - val_accuracy: 0.3015 Epoch 8/100 20/20 [==============================] - 1s 51ms/step - loss: 1.8836 - accuracy: 0.4758 - val_loss: 4.4707 - val_accuracy: 0.2721 Epoch 9/100 20/20 [==============================] - 1s 50ms/step - loss: 2.0022 - accuracy: 0.4745 - val_loss: 6.2244 - val_accuracy: 0.2059 Epoch 10/100 20/20 [==============================] - 1s 50ms/step - loss: 1.7743 - accuracy: 0.5284 - val_loss: 4.5157 - val_accuracy: 0.2574 Epoch 11/100 20/20 [==============================] - 1s 50ms/step - loss: 1.8367 - accuracy: 0.5051 - val_loss: 3.1568 - val_accuracy: 0.3750 Epoch 12/100 20/20 [==============================] - 1s 52ms/step - loss: 1.9101 - accuracy: 0.4916 - val_loss: 2.5058 - val_accuracy: 0.3971 Epoch 13/100 20/20 [==============================] - 1s 52ms/step - loss: 1.7524 - accuracy: 0.5305 - val_loss: 2.6367 - val_accuracy: 0.3824 Epoch 14/100 20/20 [==============================] - 1s 51ms/step - loss: 1.6340 - accuracy: 0.5884 - val_loss: 3.8476 - val_accuracy: 0.3382 Epoch 
15/100 20/20 [==============================] - 1s 51ms/step - loss: 1.7423 - accuracy: 0.5187 - val_loss: 2.8836 - val_accuracy: 0.4412 Epoch 16/100 20/20 [==============================] - 1s 50ms/step - loss: 1.4916 - accuracy: 0.5700 - val_loss: 5.7071 - val_accuracy: 0.3382 Epoch 17/100 20/20 [==============================] - 1s 49ms/step - loss: 1.2675 - accuracy: 0.6554 - val_loss: 3.1568 - val_accuracy: 0.4118 Epoch 18/100 20/20 [==============================] - 1s 51ms/step - loss: 1.1675 - accuracy: 0.6507 - val_loss: 4.5942 - val_accuracy: 0.4191 Epoch 19/100 20/20 [==============================] - 1s 50ms/step - loss: 1.3940 - accuracy: 0.6306 - val_loss: 2.8359 - val_accuracy: 0.4706 Epoch 20/100 20/20 [==============================] - 1s 49ms/step - loss: 1.2050 - accuracy: 0.6855 - val_loss: 2.7855 - val_accuracy: 0.5515 Epoch 21/100 20/20 [==============================] - 1s 51ms/step - loss: 1.1311 - accuracy: 0.6832 - val_loss: 3.0645 - val_accuracy: 0.4853 Epoch 22/100 20/20 [==============================] - 1s 52ms/step - loss: 1.2334 - accuracy: 0.6764 - val_loss: 3.5505 - val_accuracy: 0.4706 Epoch 23/100 20/20 [==============================] - 1s 52ms/step - loss: 1.0275 - accuracy: 0.7142 - val_loss: 3.0626 - val_accuracy: 0.4706 Epoch 24/100 20/20 [==============================] - 1s 50ms/step - loss: 0.9573 - accuracy: 0.7388 - val_loss: 2.9081 - val_accuracy: 0.5221 Epoch 25/100 20/20 [==============================] - 1s 51ms/step - loss: 0.7289 - accuracy: 0.7712 - val_loss: 2.2599 - val_accuracy: 0.5809 Epoch 26/100 20/20 [==============================] - 1s 52ms/step - loss: 0.8660 - accuracy: 0.7556 - val_loss: 2.5860 - val_accuracy: 0.5809 Epoch 27/100 20/20 [==============================] - 1s 52ms/step - loss: 0.7782 - accuracy: 0.7686 - val_loss: 4.9205 - val_accuracy: 0.3676 Epoch 28/100 20/20 [==============================] - 1s 51ms/step - loss: 0.7287 - accuracy: 0.7853 - val_loss: 2.6654 - val_accuracy: 0.5368 
Epoch 29/100 20/20 [==============================] - 1s 52ms/step - loss: 0.6768 - accuracy: 0.7977 - val_loss: 3.3202 - val_accuracy: 0.5294 Epoch 30/100 20/20 [==============================] - 1s 52ms/step - loss: 0.7499 - accuracy: 0.7763 - val_loss: 2.9776 - val_accuracy: 0.5368 Epoch 31/100 20/20 [==============================] - 1s 50ms/step - loss: 1.0794 - accuracy: 0.7134 - val_loss: 4.4612 - val_accuracy: 0.4559 Epoch 32/100 20/20 [==============================] - 1s 51ms/step - loss: 0.7277 - accuracy: 0.7920 - val_loss: 3.6071 - val_accuracy: 0.4632 Epoch 33/100 20/20 [==============================] - 1s 55ms/step - loss: 0.6720 - accuracy: 0.8274 - val_loss: 5.9109 - val_accuracy: 0.3309 Epoch 34/100 20/20 [==============================] - 1s 53ms/step - loss: 0.5504 - accuracy: 0.8461 - val_loss: 4.8567 - val_accuracy: 0.4338 Epoch 35/100 20/20 [==============================] - 1s 51ms/step - loss: 0.6322 - accuracy: 0.8097 - val_loss: 5.7461 - val_accuracy: 0.4485 Epoch 36/100 20/20 [==============================] - 1s 51ms/step - loss: 1.0962 - accuracy: 0.7786 - val_loss: 4.8283 - val_accuracy: 0.4338 Epoch 37/100 20/20 [==============================] - 1s 52ms/step - loss: 0.7069 - accuracy: 0.7959 - val_loss: 3.1211 - val_accuracy: 0.5441 Epoch 38/100 20/20 [==============================] - 1s 53ms/step - loss: 0.5671 - accuracy: 0.8275 - val_loss: 3.0753 - val_accuracy: 0.5809 Epoch 39/100 20/20 [==============================] - 1s 52ms/step - loss: 0.7584 - accuracy: 0.8193 - val_loss: 3.6496 - val_accuracy: 0.4412 Epoch 40/100 20/20 [==============================] - 1s 53ms/step - loss: 0.7445 - accuracy: 0.8087 - val_loss: 4.3113 - val_accuracy: 0.5000 Epoch 41/100 20/20 [==============================] - 1s 52ms/step - loss: 0.6313 - accuracy: 0.8254 - val_loss: 3.3609 - val_accuracy: 0.5515 Epoch 42/100 20/20 [==============================] - 1s 52ms/step - loss: 1.3299 - accuracy: 0.7039 - val_loss: 5.8714 - val_accuracy: 
0.4118 Epoch 43/100 20/20 [==============================] - 1s 52ms/step - loss: 1.2052 - accuracy: 0.7081 - val_loss: 6.4298 - val_accuracy: 0.3382 Epoch 44/100 20/20 [==============================] - 1s 52ms/step - loss: 0.8349 - accuracy: 0.7821 - val_loss: 3.2248 - val_accuracy: 0.5368 Epoch 45/100 20/20 [==============================] - 1s 52ms/step - loss: 0.6766 - accuracy: 0.8154 - val_loss: 2.9413 - val_accuracy: 0.5735 Epoch 46/100 20/20 [==============================] - 1s 52ms/step - loss: 0.8999 - accuracy: 0.7800 - val_loss: 5.3587 - val_accuracy: 0.3529 Epoch 47/100 20/20 [==============================] - 1s 51ms/step - loss: 0.7444 - accuracy: 0.7849 - val_loss: 3.3938 - val_accuracy: 0.5147 Epoch 48/100 20/20 [==============================] - 1s 51ms/step - loss: 0.5099 - accuracy: 0.8597 - val_loss: 3.2823 - val_accuracy: 0.5882 Epoch 49/100 20/20 [==============================] - 1s 52ms/step - loss: 0.6309 - accuracy: 0.8320 - val_loss: 3.0612 - val_accuracy: 0.6471 Epoch 50/100 20/20 [==============================] - 1s 53ms/step - loss: 0.4649 - accuracy: 0.8590 - val_loss: 3.9522 - val_accuracy: 0.5441 Epoch 51/100 20/20 [==============================] - 1s 51ms/step - loss: 0.5488 - accuracy: 0.8589 - val_loss: 3.7538 - val_accuracy: 0.5662 Epoch 52/100 20/20 [==============================] - 1s 52ms/step - loss: 0.4251 - accuracy: 0.8835 - val_loss: 2.6496 - val_accuracy: 0.6544 Epoch 53/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2903 - accuracy: 0.9169 - val_loss: 2.7501 - val_accuracy: 0.6618 Epoch 54/100 20/20 [==============================] - 1s 53ms/step - loss: 0.4575 - accuracy: 0.8965 - val_loss: 3.5671 - val_accuracy: 0.6324 Epoch 55/100 20/20 [==============================] - 1s 51ms/step - loss: 0.2682 - accuracy: 0.9198 - val_loss: 2.7003 - val_accuracy: 0.6765 Epoch 56/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2174 - accuracy: 0.9196 - val_loss: 3.3102 - 
val_accuracy: 0.6029 Epoch 57/100 20/20 [==============================] - 1s 51ms/step - loss: 0.4567 - accuracy: 0.8849 - val_loss: 5.5328 - val_accuracy: 0.4632 Epoch 58/100 20/20 [==============================] - 1s 52ms/step - loss: 1.0789 - accuracy: 0.7738 - val_loss: 3.0945 - val_accuracy: 0.5515 Epoch 59/100 20/20 [==============================] - 1s 52ms/step - loss: 0.5780 - accuracy: 0.8461 - val_loss: 4.2850 - val_accuracy: 0.4926 Epoch 60/100 20/20 [==============================] - 1s 53ms/step - loss: 0.4107 - accuracy: 0.8884 - val_loss: 4.2642 - val_accuracy: 0.4853 Epoch 61/100 20/20 [==============================] - 1s 51ms/step - loss: 0.2438 - accuracy: 0.9170 - val_loss: 2.4405 - val_accuracy: 0.6691 Epoch 62/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1929 - accuracy: 0.9441 - val_loss: 2.9831 - val_accuracy: 0.6912 Epoch 63/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1585 - accuracy: 0.9387 - val_loss: 3.8142 - val_accuracy: 0.5956 Epoch 64/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2168 - accuracy: 0.9435 - val_loss: 3.9020 - val_accuracy: 0.5735 Epoch 65/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1878 - accuracy: 0.9484 - val_loss: 3.6544 - val_accuracy: 0.6029 Epoch 66/100 20/20 [==============================] - 1s 52ms/step - loss: 0.5521 - accuracy: 0.9019 - val_loss: 4.1064 - val_accuracy: 0.5294 Epoch 67/100 20/20 [==============================] - 1s 51ms/step - loss: 0.4251 - accuracy: 0.8947 - val_loss: 3.4000 - val_accuracy: 0.5956 Epoch 68/100 20/20 [==============================] - 1s 52ms/step - loss: 0.3610 - accuracy: 0.8991 - val_loss: 3.0546 - val_accuracy: 0.6324 Epoch 69/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2694 - accuracy: 0.9378 - val_loss: 3.6128 - val_accuracy: 0.6544 Epoch 70/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1124 - accuracy: 0.9633 - val_loss: 2.7719 
- val_accuracy: 0.7132 Epoch 71/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1700 - accuracy: 0.9529 - val_loss: 3.3141 - val_accuracy: 0.6912 Epoch 72/100 20/20 [==============================] - 1s 53ms/step - loss: 0.1228 - accuracy: 0.9611 - val_loss: 3.3079 - val_accuracy: 0.6838 Epoch 73/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1909 - accuracy: 0.9535 - val_loss: 4.6782 - val_accuracy: 0.5662 Epoch 74/100 20/20 [==============================] - 1s 53ms/step - loss: 0.3665 - accuracy: 0.9193 - val_loss: 6.2701 - val_accuracy: 0.4779 Epoch 75/100 20/20 [==============================] - 1s 55ms/step - loss: 0.1703 - accuracy: 0.9522 - val_loss: 4.0180 - val_accuracy: 0.5809 Epoch 76/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1623 - accuracy: 0.9538 - val_loss: 4.4584 - val_accuracy: 0.5956 Epoch 77/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1211 - accuracy: 0.9786 - val_loss: 3.2625 - val_accuracy: 0.6618 Epoch 78/100 20/20 [==============================] - 1s 51ms/step - loss: 0.0457 - accuracy: 0.9852 - val_loss: 2.9800 - val_accuracy: 0.6691 Epoch 79/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2802 - accuracy: 0.9531 - val_loss: 3.1548 - val_accuracy: 0.6838 Epoch 80/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1678 - accuracy: 0.9532 - val_loss: 3.5188 - val_accuracy: 0.6544 Epoch 81/100 20/20 [==============================] - 1s 52ms/step - loss: 0.3620 - accuracy: 0.9322 - val_loss: 6.0728 - val_accuracy: 0.4338 Epoch 82/100 20/20 [==============================] - 1s 52ms/step - loss: 0.6820 - accuracy: 0.8755 - val_loss: 3.5640 - val_accuracy: 0.5809 Epoch 83/100 20/20 [==============================] - 1s 51ms/step - loss: 0.4729 - accuracy: 0.8956 - val_loss: 3.7106 - val_accuracy: 0.5882 Epoch 84/100 20/20 [==============================] - 1s 52ms/step - loss: 0.3984 - accuracy: 0.9084 - val_loss: 
3.6485 - val_accuracy: 0.6324 Epoch 85/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1942 - accuracy: 0.9470 - val_loss: 4.6554 - val_accuracy: 0.5441 Epoch 86/100 20/20 [==============================] - 1s 52ms/step - loss: 0.0973 - accuracy: 0.9677 - val_loss: 3.3203 - val_accuracy: 0.6691 Epoch 87/100 20/20 [==============================] - 1s 52ms/step - loss: 0.0920 - accuracy: 0.9700 - val_loss: 2.9472 - val_accuracy: 0.6765 Epoch 88/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1521 - accuracy: 0.9595 - val_loss: 2.9696 - val_accuracy: 0.6985 Epoch 89/100 20/20 [==============================] - 1s 53ms/step - loss: 0.2279 - accuracy: 0.9486 - val_loss: 3.7965 - val_accuracy: 0.6324 Epoch 90/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2021 - accuracy: 0.9517 - val_loss: 3.2243 - val_accuracy: 0.6691 Epoch 91/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1544 - accuracy: 0.9627 - val_loss: 3.6171 - val_accuracy: 0.6985 Epoch 92/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2457 - accuracy: 0.9491 - val_loss: 4.3328 - val_accuracy: 0.6103 Epoch 93/100 20/20 [==============================] - 1s 51ms/step - loss: 0.1223 - accuracy: 0.9587 - val_loss: 3.0888 - val_accuracy: 0.7132 Epoch 94/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1074 - accuracy: 0.9683 - val_loss: 3.4378 - val_accuracy: 0.6765 Epoch 95/100 20/20 [==============================] - 1s 52ms/step - loss: 0.4015 - accuracy: 0.9229 - val_loss: 4.6498 - val_accuracy: 0.5882 Epoch 96/100 20/20 [==============================] - 1s 53ms/step - loss: 0.5772 - accuracy: 0.8908 - val_loss: 4.6537 - val_accuracy: 0.5735 Epoch 97/100 20/20 [==============================] - 1s 50ms/step - loss: 0.4412 - accuracy: 0.8848 - val_loss: 6.5095 - val_accuracy: 0.4632 Epoch 98/100 20/20 [==============================] - 1s 52ms/step - loss: 0.1980 - accuracy: 0.9405 - 
val_loss: 3.0124 - val_accuracy: 0.6618 Epoch 99/100 20/20 [==============================] - 1s 52ms/step - loss: 0.2180 - accuracy: 0.9522 - val_loss: 3.4620 - val_accuracy: 0.6250 Epoch 100/100 20/20 [==============================] - 1s 51ms/step - loss: 0.2533 - accuracy: 0.9323 - val_loss: 5.3905 - val_accuracy: 0.5294
MIT
notebooks/10-2.alexnet_in_keras.ipynb
sunny191019/dl-illustrated
NOTE: In the cell below you **MUST** use a batch size of 10 (`batch_size=10`) for the `train_generator` and the `validation_generator`. Using a batch size greater than 10 will exceed memory limits on the Coursera platform.
# Build data pipelines for the cats-vs-dogs classifier: plain rescaling
# generators over the training and testing directories, train the
# (previously compiled) model for 2 epochs, then plot learning curves.

TRAINING_DIR = '/tmp/cats-v-dogs/training/'
# Pixel values scaled from [0, 255] down to [0, 1]; no augmentation.
train_datagen = ImageDataGenerator( rescale = 1.0/255. )

# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# TRAIN GENERATOR.
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=10,
                                                    class_mode='binary',
                                                    target_size=(150, 150))

VALIDATION_DIR = '/tmp/cats-v-dogs/testing/'
validation_datagen = ImageDataGenerator( rescale = 1.0/255. )

# NOTE: YOU MUST USE A BATCH SIZE OF 10 (batch_size=10) FOR THE
# VALIDATION GENERATOR.
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=10,
                                                              class_mode='binary',
                                                              target_size=(150, 150))

# Expected Output:
# Found 2700 images belonging to 2 classes.
# Found 300 images belonging to 2 classes.

history = model.fit_generator(train_generator,
                              epochs=2,
                              verbose=1,
                              validation_data=validation_generator)

# PLOT LOSS AND ACCURACY
%matplotlib inline

import matplotlib.image as mpimg
import matplotlib.pyplot as plt

#-----------------------------------------------------------
# Retrieve a list of list results on training and test data
# sets for each training epoch
#-----------------------------------------------------------
# NOTE(review): these history keys are 'acc'/'val_acc' in TF 1.x; newer
# Keras versions use 'accuracy'/'val_accuracy' -- confirm the TF version.
acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']

epochs=range(len(acc)) # Get number of epochs

#------------------------------------------------
# Plot training and validation accuracy per epoch
#------------------------------------------------
plt.plot(epochs, acc, 'r', "Training Accuracy")
plt.plot(epochs, val_acc, 'b', "Validation Accuracy")
plt.title('Training and validation accuracy')
plt.figure()

#------------------------------------------------
# Plot training and validation loss per epoch
#------------------------------------------------
plt.plot(epochs, loss, 'r', "Training Loss")
plt.plot(epochs, val_loss, 'b', "Validation Loss")
plt.title('Training and validation loss')

# Desired output.
Charts with training and validation metrics. No crash :)
_____no_output_____
MIT
Exercise_1_Cats_vs_Dogs_Question-FINAL.ipynb
Mostafa-wael/Convolutional-Neural-Networks-in-TensorFlow
Submission Instructions
# Now click the 'Submit Assignment' button above.
_____no_output_____
MIT
Exercise_1_Cats_vs_Dogs_Question-FINAL.ipynb
Mostafa-wael/Convolutional-Neural-Networks-in-TensorFlow
When you're done or would like to take a break, please run the two cells below to save your work and close the Notebook. This will free up resources for your fellow learners.
%%javascript
<!-- Save the notebook -->
// Persist the current notebook state as a checkpoint before shutdown.
IPython.notebook.save_checkpoint();

%%javascript
// Delete the kernel session to free platform resources, then close
// the browser window after a short delay (onbeforeunload is cleared
// so the close is not blocked by a confirmation prompt).
IPython.notebook.session.delete();
window.onbeforeunload = null
setTimeout(function() {
    window.close();
}, 1000);
_____no_output_____
MIT
Exercise_1_Cats_vs_Dogs_Question-FINAL.ipynb
Mostafa-wael/Convolutional-Neural-Networks-in-TensorFlow
Helper function to plot
def plot_graph(axis_title, x, y_train, y_val, xlabel, ylabel, xtick_range, ytick_range, save_path=None):
    """Plot train vs. validation curves for one metric on a single axes.

    Args:
        axis_title: Title shown above the axes.
        x: Shared x values (typically epoch numbers).
        y_train: Metric values on the training set.
        y_val: Metric values on the validation set.
        xlabel: Label for the x axis.
        ylabel: Label for the y axis.
        xtick_range: (start, stop, step) for the major x ticks.
        ytick_range: (start, stop, step) for the major y ticks.
        save_path: If given, the figure is also saved there at 300 dpi.
    """
    # The figure handle and line artists were previously bound to unused
    # names; only the axes object is needed below.
    _, ax = plt.subplots(nrows=1, ncols=1, figsize=(9, 6))
    ax.plot(x, y_train, color="blue", label="train")
    ax.plot(x, y_val, color="red", label="val")

    # Nicer visuals.
    ax.set_title(axis_title)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    # BUG FIX: Axes.grid's visibility keyword was `b=` in old matplotlib,
    # renamed `visible=` in 3.4 and `b` removed in 3.6. Passing it
    # positionally works on every version.
    ax.grid(True, which="major", axis="both", color="#d3d3d3", linestyle="-")
    ax.grid(True, which="minor", axis="both", color="#e7e7e7", linestyle="dashed")
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.xaxis.set_ticks(np.arange(xtick_range[0], xtick_range[1], xtick_range[2]))
    ax.xaxis.set_minor_locator(MultipleLocator(5))
    ax.set_xlim(left=0)
    ax.yaxis.set_ticks(np.arange(ytick_range[0], ytick_range[1], ytick_range[2]))
    ax.yaxis.set_minor_locator(MultipleLocator(0.01))
    # Transparent axes background so the figure composes over any page.
    ax.patch.set_alpha(0)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles, labels=labels, loc="best")

    # Save graph
    plt.tight_layout()
    if save_path:
        plt.savefig(fname=save_path, dpi=300)
_____no_output_____
MIT
notebooks/plotTrainingGraphs.ipynb
CleonWong/Can-You-Find-The-Tumour
Batch-size = 64
# Training curves for the batch-size-64 run: read the CSVLogger output
# and plot loss and IoU, train vs. validation.
df_64 = pd.read_csv("../results/fit/20201203_040325___INSTANCE/csv_logger/csv_logger.csv")
df_64.head()

# Binary cross-entropy loss per epoch.
plot_graph(axis_title="Batch Size = 64",
           x=df_64["epoch"],
           y_train=df_64["loss"],
           y_val=df_64["val_loss"],
           xlabel="Epoch",
           ylabel="Binary crossentropy loss",
           xtick_range=(0, 50, 10),
           ytick_range=(0, 0.16, 0.02),
           save_path="../results/fit/20201203_040325___INSTANCE/csv_logger/loss.png")

# IoU metric per epoch.
plot_graph(axis_title="Batch Size = 64",
           x=df_64["epoch"],
           y_train=df_64["iouMetric"],
           y_val=df_64["val_iouMetric"],
           xlabel="Epoch",
           ylabel="iouMetric",
           xtick_range=(0, 50, 10),
           ytick_range=(0, 0.26, 0.02),
           save_path="../results/fit/20201203_040325___INSTANCE/csv_logger/iouMetric.png")
_____no_output_____
MIT
notebooks/plotTrainingGraphs.ipynb
CleonWong/Can-You-Find-The-Tumour
Batch-size = 10
# Training curves for the batch-size-10 run: read the CSVLogger output
# and plot loss and IoU, train vs. validation.
df_10 = pd.read_csv("../results/fit/20201203_013807___INSTANCE/csv_logger/csv_logger.csv")
df_10.head()

# Binary cross-entropy loss per epoch.
plot_graph(axis_title="Batch Size = 10",
           x=df_10["epoch"],
           y_train=df_10["loss"],
           y_val=df_10["val_loss"],
           xlabel="Epoch",
           ylabel="Binary crossentropy loss",
           xtick_range=(0, 50, 10),
           ytick_range=(0, 0.16, 0.02),
           save_path="../results/fit/20201203_013807___INSTANCE/csv_logger/loss.png")

# IoU metric per epoch.
plot_graph(axis_title="Batch Size = 10",
           x=df_10["epoch"],
           y_train=df_10["iouMetric"],
           y_val=df_10["val_iouMetric"],
           xlabel="Epoch",
           ylabel="iouMetric",
           xtick_range=(0, 50, 10),
           ytick_range=(0, 0.26, 0.02),
           save_path="../results/fit/20201203_013807___INSTANCE/csv_logger/iouMetric.png")
_____no_output_____
MIT
notebooks/plotTrainingGraphs.ipynb
CleonWong/Can-You-Find-The-Tumour
--- Combine .csv
# Merge the mass train/test description CSVs into one table, derive a
# per-image identifier, and split it back into train/test CSVs according
# to which preprocessed .png images actually exist on disk.

mass_train_df = pd.read_csv("../data/raw_data/csv-description-updated/Mass-Training-Description-UPDATED.csv")
mass_test_df = pd.read_csv("../data/raw_data/csv-description-updated/Mass-Test-Description-UPDATED.csv")

mass_df = pd.concat([mass_train_df, mass_test_df])
mass_df

# Create identifier column from patient id, breast side and image view.
mass_df.insert(loc=0, column="identifier", value=np.nan)
mass_df["identifier"] = mass_df.apply(
    lambda x: "_".join([x["patient_id"], x["left_or_right_breast"], x["image_view"]]),
    axis=1,
)

# Drop filepath columns, they are useless because the filepaths always change.
mass_df.drop(["image_file_path", "cropped_image_file_path", "ROI_mask_file_path",
              "full_path", "mask_path", "crop_path"],
             axis=1, inplace=True)

# Sort by identifier column.
# BUG FIX: sort_values() returns a new frame; the original call discarded
# the result, so the saved CSV was never actually sorted.
mass_df = mass_df.sort_values(by=["identifier"])

mass_df.to_csv("../data/csv/Mass_all.csv", index=False)

# Get list of test and train image identifiers.
train_identifiers = []
test_identifiers = []

train_path = "../data/preprocessed/Mass/Train_FULL"
test_path = "../data/preprocessed/Mass/Test_FULL"

# Train images: strip the preprocessing suffix to recover the identifier.
for curdir, dirs, files in os.walk(train_path):
    files.sort()
    for f in files:
        if f.endswith(".png"):
            f = f.replace("_FULL___PRE.png", "")
            train_identifiers.append(f)

# Test images.
for curdir, dirs, files in os.walk(test_path):
    files.sort()
    for f in files:
        if f.endswith(".png"):
            f = f.replace("_FULL___PRE.png", "")
            test_identifiers.append(f)

print(len(train_identifiers))
print(train_identifiers[:5])
print(len(test_identifiers))
print(test_identifiers[:5])

# Create dataframe for train images.
mass_train_df_new = mass_df[mass_df["identifier"].isin(train_identifiers)]
# Create dataframe for test images.
mass_test_df_new = mass_df[mass_df["identifier"].isin(test_identifiers)]

print(mass_train_df_new.shape)
print(mass_test_df_new.shape)

mass_train_df_new.to_csv("../data/csv/Mass_train.csv", index=False)
mass_test_df_new.to_csv("../data/csv/Mass_test.csv", index=False)
_____no_output_____
MIT
notebooks/plotTrainingGraphs.ipynb
CleonWong/Can-You-Find-The-Tumour
Start creating the dataset
# Build a paired street/pedestrian dataset: sample street frames and
# person crops, generate a segmentation mask per person, and write
# images plus JSON annotations under `save_dir`.

# Image read dir
street_dir = '/root/notebooks/0858611-2/final_project/caltech_pedestrian_extractor/video_extractor/*'
people_dir = '/root/notebooks/0858611-2/final_project/caltech_pedestrian_extractor/js_on_image/people_img/Market-1501-v15.09.15'
# Image save dir
save_dir = '/root/notebooks/0858611-2/final_project/caltech_pedestrian_extractor/0603_result'

num_imgs = 10

# Create the output directory tree if it does not exist yet.
if not os.path.exists(save_dir):
    os.makedirs(save_dir)
for s in ['people', 'mask', 'street', 'street_json', 'json']:
    if not os.path.exists(os.path.join(save_dir, s)):
        os.makedirs(os.path.join(save_dir, s))

# Randomly sample (then shuffle) street frames and person crops.
street_imgs = glob.glob(street_dir + '/**/*.jpg', recursive=True)
#street_imgs = random.shuffle(random.sample(street_imgs, 5000))
street_imgs = random.sample(street_imgs, num_imgs)
random.shuffle(street_imgs)

people_imgs = glob.glob(people_dir + '/bounding_box_train/*.jpg', recursive=True)
people_imgs = random.sample(people_imgs, num_imgs)
random.shuffle(people_imgs)

for i in range(num_imgs):
    # Progress report every 100 items.
    if (i % 100 == 0):
        print("Process (", i, "/", num_imgs, ") ", "{:.2f}".format(100 * i / num_imgs), " %")

    # Create the pedestrian mask and save it; skip the sample if the
    # detector fails on this image.
    try:
        mask_img = detectron_mask_img(people_imgs[i], (64, 128))
        mask_img = Image.fromarray(mask_img)
    except Exception as e:
        print("Skip image :", i)
        continue
    mask_img.save(save_dir + '/mask/' + str('{0:06}'.format(i)) + '.jpg')

    # Save the resized street image.
    street_img = cv2.imread(street_imgs[i])
    street_img = cv2.resize(street_img, (640, 480))
    cv2.imwrite(save_dir + '/street/' + str('{0:06}'.format(i)) + '.jpg', street_img)

    ################################################################
    # Copy the street frame's annotation JSON alongside it.
    # BUG FIX: the original formatted an undefined variable `count`
    # here (NameError at runtime); the loop index `i` is the name used
    # for every other output file.
    img_path = street_imgs[i]
    json_dir = img_path.replace('images', 'annotations')
    json_dir = json_dir.replace('jpg', 'json')
    shutil.copyfile(json_dir, save_dir + '/street_json/' + str('{0:06}'.format(i)) + '.json')
    ################################################################

    # Save the resized person crop.
    people_img = cv2.imread(people_imgs[i])
    people_img = cv2.resize(people_img, (64, 128))
    cv2.imwrite(save_dir + '/people/' + str('{0:06}'.format(i)) + '.jpg', people_img)

    # Create the combined annotation JSON for this street/person pair.
    create_json_file(save_dir + '/street/' + str('{0:06}'.format(i)) + '.jpg',
                     save_dir + '/street_json/' + str('{0:06}'.format(i)) + '.json',
                     save_dir + '/people/' + str('{0:06}'.format(i)) + '.jpg',
                     save_dir + '/json/' + str('{0:06}'.format(i)) + '.json',
                     function="random")

import json

# Quick manual inspection of one raw annotation file.
json_path = '/root/notebooks/0858611-2/final_project/caltech_pedestrian_extractor/video_extractor/set00/V009/annotations/I00018.json'
input_file = open(json_path)
json_array = json.load(input_file)
json_array

print(type(json_array[0]))
_____no_output_____
Apache-2.0
01_gene_train_dataset/discard/gandatamask3_test.ipynb
tony92151/pedestrian_generator
Import Packages
# Import packages import glob import csv import pandas as pd import numpy as np from sqlalchemy import create_engine import psycopg2
_____no_output_____
CC-BY-3.0
__Project Files/.ipynb_checkpoints/Data Cleaning_merge all data together_backup-checkpoint.ipynb
joannasys/Predictions-of-ICU-Mortality
Append each .txt file into a DataFrame. Each .txt file becomes one row.
# Iterate through each file name main = pd.DataFrame() for filename in glob.iglob('./training_set_a/*.txt'): # Open each file as data with open(filename) as inputfile: data = list(csv.reader(inputfile)) # list of list data = pd.DataFrame(data[1:],columns=data[0]) # Convert list of list to DataFrame data.Value = data.Value.astype(float) # Change Value to float # Pivot_table to convert from long to wide dataset # Creation of new features - aggregate across the time series to find mean, min, max values # mean is chosen rather than median because we want to take into the account of 'outlier values' wide_data = pd.pivot_table(data,values=['Value'],columns='Parameter',aggfunc=[np.mean,np.min,np.max]) wide_data.columns = wide_data.columns.droplevel(level=0) # rename new columns & lower capitalise new_columns = [] for ind, col in enumerate(wide_data.columns): if ind < wide_data.columns.shape[0]/3: col = 'mean_'+col new_columns.append(col) elif ind >= wide_data.columns.shape[0]/3 and ind < 2*wide_data.columns.shape[0]/3: col = 'min_'+col new_columns.append(col) else: col = 'max_'+col new_columns.append(col) wide_data.columns = new_columns wide_data.columns = wide_data.columns.str.lower() # rename descriptor row wide_data.rename(columns={'mean_age':'age','mean_gender':'gender','mean_height':'height', 'mean_icutype':'icutype','mean_recordid':'recordid'},inplace=True) # drop min/max descriptor rows wide_data.drop(['min_age','max_age','min_gender','max_gender','min_height','max_height', 'min_icutype','max_icutype','min_recordid','max_recordid'],axis=1,inplace=True) # set recordid as index wide_data.set_index(['recordid'],inplace = True) main = main.append(wide_data) # Open set a outcomes file as dataframe with open('training_outcomes_a.txt') as outcomesfile: label = list(csv.reader(outcomesfile)) # list of list label = pd.DataFrame(label[1:],columns=label[0]) # Convert list of list to DataFrame label = label.astype(float) # Change all values to float label.columns = 
label.columns.str.lower() # Change all column to lowercase label.set_index(['recordid'],inplace = True) # set recordid as index # merge main data and label data mortality = main.merge(label,how='outer',left_index=True,right_index=True) mortality.head(5)
_____no_output_____
CC-BY-3.0
__Project Files/.ipynb_checkpoints/Data Cleaning_merge all data together_backup-checkpoint.ipynb
joannasys/Predictions-of-ICU-Mortality